| Column | Type | Stats |
|---|---|---|
| problem_id | string | lengths 18–22 |
| source | string | 1 class |
| task_type | string | 1 class |
| in_source_id | string | lengths 13–58 |
| prompt | string | lengths 1.1k–25.4k |
| golden_diff | string | lengths 145–5.13k |
| verification_info | string | lengths 582–39.1k |
| num_tokens | int64 | 271–4.1k |
| num_tokens_diff | int64 | 47–1.02k |
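For readers who want to work with these records programmatically rather than through the viewer, here is a minimal sketch using the Hugging Face `datasets` library. The hub id `rasdani/github-patches` is inferred from the `source` column and the `train` split name is an assumption; adjust both if the actual dataset differs.

```python
# Minimal sketch: load the dataset and inspect one record.
# Assumptions: the hub id matches the `source` column value
# ("rasdani/github-patches") and a "train" split exists.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"])        # e.g. "gh_patches_debug_17585"
print(row["in_source_id"])      # upstream reference, e.g. "zulip__zulip-27515"
print(row["prompt"][:400])      # the repair prompt shown in the rows below
print(row["golden_diff"])       # reference patch in `git diff` format
print(row["num_tokens"], row["num_tokens_diff"])
```

The raw records follow below, flattened by the viewer so that each multi-line cell (prompt, golden diff, verification info) runs across many lines.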
gh_patches_debug_17585 | rasdani/github-patches | git_diff | zulip__zulip-27515 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Display mentions of cross realm bots as mention pills
<!-- Issue description -->
As reported on CZO, cross realm bots aren't being included in the data set available for rendering user mentions. As a result, their mentions don't appear as user pills. E.g., in a quote-and-reply:

We should fix this.
<!-- Link to a message in the chat.zulip.org discussion. Message links will still work even if the topic is renamed or resolved. Link back to this issue from the chat.zulip.org thread. -->
[CZO thread](https://chat.zulip.org/#narrow/stream/9-issues/topic/silent.20mentions.20of.20system.20bots.20in.20quote.20and.20reply/near/1608266)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/mention.py`
Content:
```
1 import functools
2 import re
3 from dataclasses import dataclass
4 from typing import Dict, List, Match, Optional, Set, Tuple
5
6 from django.db.models import Q
7
8 from zerver.models import UserGroup, UserProfile, get_linkable_streams
9
10 BEFORE_MENTION_ALLOWED_REGEX = r"(?<![^\s\'\"\(\{\[\/<])"
11
12 # Match multi-word string between @** ** or match any one-word
13 # sequences after @
14 MENTIONS_RE = re.compile(
15 rf"{BEFORE_MENTION_ALLOWED_REGEX}@(?P<silent>_?)(\*\*(?P<match>[^\*]+)\*\*)"
16 )
17 USER_GROUP_MENTIONS_RE = re.compile(
18 rf"{BEFORE_MENTION_ALLOWED_REGEX}@(?P<silent>_?)(\*(?P<match>[^\*]+)\*)"
19 )
20
21 topic_wildcards = frozenset(["topic"])
22 stream_wildcards = frozenset(["all", "everyone", "stream"])
23
24
25 @dataclass
26 class FullNameInfo:
27 id: int
28 full_name: str
29
30
31 @dataclass
32 class UserFilter:
33 id: Optional[int]
34 full_name: Optional[str]
35
36 def Q(self) -> Q:
37 if self.full_name is not None and self.id is not None:
38 return Q(full_name__iexact=self.full_name, id=self.id)
39 elif self.id is not None:
40 return Q(id=self.id)
41 elif self.full_name is not None:
42 return Q(full_name__iexact=self.full_name)
43 else:
44 raise AssertionError("totally empty filter makes no sense")
45
46
47 @dataclass
48 class MentionText:
49 text: Optional[str]
50 is_topic_wildcard: bool
51 is_stream_wildcard: bool
52
53
54 @dataclass
55 class PossibleMentions:
56 mention_texts: Set[str]
57 message_has_topic_wildcards: bool
58 message_has_stream_wildcards: bool
59
60
61 class MentionBackend:
62 def __init__(self, realm_id: int) -> None:
63 self.realm_id = realm_id
64 self.user_cache: Dict[Tuple[int, str], FullNameInfo] = {}
65 self.stream_cache: Dict[str, int] = {}
66
67 def get_full_name_info_list(self, user_filters: List[UserFilter]) -> List[FullNameInfo]:
68 result: List[FullNameInfo] = []
69 unseen_user_filters: List[UserFilter] = []
70
71 # Try to get messages from the user_cache first.
72 # This loop populates two lists:
73 # - results are the objects we pull from cache
74 # - unseen_user_filters are filters where need to hit the DB
75 for user_filter in user_filters:
76 # We expect callers who take advantage of our user_cache to supply both
77 # id and full_name in the user mentions in their messages.
78 if user_filter.id is not None and user_filter.full_name is not None:
79 user = self.user_cache.get((user_filter.id, user_filter.full_name), None)
80 if user is not None:
81 result.append(user)
82 continue
83
84 # BOO! We have to go the database.
85 unseen_user_filters.append(user_filter)
86
87 # Most of the time, we have to go to the database to get user info,
88 # unless our last loop found everything in the cache.
89 if unseen_user_filters:
90 q_list = [user_filter.Q() for user_filter in unseen_user_filters]
91
92 rows = (
93 UserProfile.objects.filter(
94 realm_id=self.realm_id,
95 is_active=True,
96 )
97 .filter(
98 functools.reduce(lambda a, b: a | b, q_list),
99 )
100 .only(
101 "id",
102 "full_name",
103 )
104 )
105
106 user_list = [FullNameInfo(id=row.id, full_name=row.full_name) for row in rows]
107
108 # We expect callers who take advantage of our cache to supply both
109 # id and full_name in the user mentions in their messages.
110 for user in user_list:
111 self.user_cache[(user.id, user.full_name)] = user
112
113 result += user_list
114
115 return result
116
117 def get_stream_name_map(self, stream_names: Set[str]) -> Dict[str, int]:
118 if not stream_names:
119 return {}
120
121 result: Dict[str, int] = {}
122 unseen_stream_names: List[str] = []
123
124 for stream_name in stream_names:
125 if stream_name in self.stream_cache:
126 result[stream_name] = self.stream_cache[stream_name]
127 else:
128 unseen_stream_names.append(stream_name)
129
130 if unseen_stream_names:
131 q_list = {Q(name=name) for name in unseen_stream_names}
132
133 rows = (
134 get_linkable_streams(
135 realm_id=self.realm_id,
136 )
137 .filter(
138 functools.reduce(lambda a, b: a | b, q_list),
139 )
140 .values(
141 "id",
142 "name",
143 )
144 )
145
146 for row in rows:
147 self.stream_cache[row["name"]] = row["id"]
148 result[row["name"]] = row["id"]
149
150 return result
151
152
153 def user_mention_matches_topic_wildcard(mention: str) -> bool:
154 return mention in topic_wildcards
155
156
157 def user_mention_matches_stream_wildcard(mention: str) -> bool:
158 return mention in stream_wildcards
159
160
161 def extract_mention_text(m: Match[str]) -> MentionText:
162 text = m.group("match")
163 if text in topic_wildcards:
164 return MentionText(text=None, is_topic_wildcard=True, is_stream_wildcard=False)
165 if text in stream_wildcards:
166 return MentionText(text=None, is_topic_wildcard=False, is_stream_wildcard=True)
167 return MentionText(text=text, is_topic_wildcard=False, is_stream_wildcard=False)
168
169
170 def possible_mentions(content: str) -> PossibleMentions:
171 # mention texts can either be names, or an extended name|id syntax.
172 texts = set()
173 message_has_topic_wildcards = False
174 message_has_stream_wildcards = False
175 for m in MENTIONS_RE.finditer(content):
176 mention_text = extract_mention_text(m)
177 text = mention_text.text
178 if text:
179 texts.add(text)
180 if mention_text.is_topic_wildcard:
181 message_has_topic_wildcards = True
182 if mention_text.is_stream_wildcard:
183 message_has_stream_wildcards = True
184 return PossibleMentions(
185 mention_texts=texts,
186 message_has_topic_wildcards=message_has_topic_wildcards,
187 message_has_stream_wildcards=message_has_stream_wildcards,
188 )
189
190
191 def possible_user_group_mentions(content: str) -> Set[str]:
192 return {m.group("match") for m in USER_GROUP_MENTIONS_RE.finditer(content)}
193
194
195 def get_possible_mentions_info(
196 mention_backend: MentionBackend, mention_texts: Set[str]
197 ) -> List[FullNameInfo]:
198 if not mention_texts:
199 return []
200
201 user_filters = list()
202
203 name_re = r"(?P<full_name>.+)?\|(?P<mention_id>\d+)$"
204 for mention_text in mention_texts:
205 name_syntax_match = re.match(name_re, mention_text)
206 if name_syntax_match:
207 full_name = name_syntax_match.group("full_name")
208 mention_id = name_syntax_match.group("mention_id")
209 if full_name:
210 # For **name|id** mentions as mention_id
211 # cannot be null inside this block.
212 user_filters.append(UserFilter(full_name=full_name, id=int(mention_id)))
213 else:
214 # For **|id** syntax.
215 user_filters.append(UserFilter(full_name=None, id=int(mention_id)))
216 else:
217 # For **name** syntax.
218 user_filters.append(UserFilter(full_name=mention_text, id=None))
219
220 return mention_backend.get_full_name_info_list(user_filters)
221
222
223 class MentionData:
224 def __init__(self, mention_backend: MentionBackend, content: str) -> None:
225 self.mention_backend = mention_backend
226 realm_id = mention_backend.realm_id
227 mentions = possible_mentions(content)
228 possible_mentions_info = get_possible_mentions_info(mention_backend, mentions.mention_texts)
229 self.full_name_info = {row.full_name.lower(): row for row in possible_mentions_info}
230 self.user_id_info = {row.id: row for row in possible_mentions_info}
231 self.init_user_group_data(realm_id=realm_id, content=content)
232 self.has_stream_wildcards = mentions.message_has_stream_wildcards
233 self.has_topic_wildcards = mentions.message_has_topic_wildcards
234
235 def message_has_stream_wildcards(self) -> bool:
236 return self.has_stream_wildcards
237
238 def message_has_topic_wildcards(self) -> bool:
239 return self.has_topic_wildcards
240
241 def init_user_group_data(self, realm_id: int, content: str) -> None:
242 self.user_group_name_info: Dict[str, UserGroup] = {}
243 self.user_group_members: Dict[int, List[int]] = {}
244 user_group_names = possible_user_group_mentions(content)
245 if user_group_names:
246 for group in UserGroup.objects.filter(
247 realm_id=realm_id, name__in=user_group_names, is_system_group=False
248 ).prefetch_related("direct_members"):
249 self.user_group_name_info[group.name.lower()] = group
250 self.user_group_members[group.id] = [m.id for m in group.direct_members.all()]
251
252 def get_user_by_name(self, name: str) -> Optional[FullNameInfo]:
253 # warning: get_user_by_name is not dependable if two
254 # users of the same full name are mentioned. Use
255 # get_user_by_id where possible.
256 return self.full_name_info.get(name.lower(), None)
257
258 def get_user_by_id(self, id: int) -> Optional[FullNameInfo]:
259 return self.user_id_info.get(id, None)
260
261 def get_user_ids(self) -> Set[int]:
262 """
263 Returns the user IDs that might have been mentioned by this
264 content. Note that because this data structure has not parsed
265 the message and does not know about escaping/code blocks, this
266 will overestimate the list of user ids.
267 """
268 return set(self.user_id_info.keys())
269
270 def get_user_group(self, name: str) -> Optional[UserGroup]:
271 return self.user_group_name_info.get(name.lower(), None)
272
273 def get_group_members(self, user_group_id: int) -> List[int]:
274 return self.user_group_members.get(user_group_id, [])
275
276 def get_stream_name_map(self, stream_names: Set[str]) -> Dict[str, int]:
277 return self.mention_backend.get_stream_name_map(stream_names)
278
279
280 def silent_mention_syntax_for_user(user_profile: UserProfile) -> str:
281 return f"@_**{user_profile.full_name}|{user_profile.id}**"
282
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zerver/lib/mention.py b/zerver/lib/mention.py
--- a/zerver/lib/mention.py
+++ b/zerver/lib/mention.py
@@ -3,6 +3,7 @@
from dataclasses import dataclass
from typing import Dict, List, Match, Optional, Set, Tuple
+from django.conf import settings
from django.db.models import Q
from zerver.models import UserGroup, UserProfile, get_linkable_streams
@@ -91,9 +92,9 @@
rows = (
UserProfile.objects.filter(
- realm_id=self.realm_id,
- is_active=True,
+ Q(realm_id=self.realm_id) | Q(email__in=settings.CROSS_REALM_BOT_EMAILS),
)
+ .filter(is_active=True)
.filter(
functools.reduce(lambda a, b: a | b, q_list),
)
| {"golden_diff": "diff --git a/zerver/lib/mention.py b/zerver/lib/mention.py\n--- a/zerver/lib/mention.py\n+++ b/zerver/lib/mention.py\n@@ -3,6 +3,7 @@\n from dataclasses import dataclass\n from typing import Dict, List, Match, Optional, Set, Tuple\n \n+from django.conf import settings\n from django.db.models import Q\n \n from zerver.models import UserGroup, UserProfile, get_linkable_streams\n@@ -91,9 +92,9 @@\n \n rows = (\n UserProfile.objects.filter(\n- realm_id=self.realm_id,\n- is_active=True,\n+ Q(realm_id=self.realm_id) | Q(email__in=settings.CROSS_REALM_BOT_EMAILS),\n )\n+ .filter(is_active=True)\n .filter(\n functools.reduce(lambda a, b: a | b, q_list),\n )\n", "issue": "Display mentions of cross realm bots as mention pills\n<!-- Issue description -->\r\n\r\nAs reported on CZO, cross realm bots aren't being included in the data set available for rendering user mentions. As a result, their mentions don't appear as user pills. E.g., in a quote-and-reply:\r\n\r\n\r\n\r\nWe should fix this.\r\n\r\n<!-- Link to a message in the chat.zulip.org discussion. Message links will still work even if the topic is renamed or resolved. Link back to this issue from the chat.zulip.org thread. -->\r\n\r\n[CZO thread](https://chat.zulip.org/#narrow/stream/9-issues/topic/silent.20mentions.20of.20system.20bots.20in.20quote.20and.20reply/near/1608266)\n", "before_files": [{"content": "import functools\nimport re\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Match, Optional, Set, Tuple\n\nfrom django.db.models import Q\n\nfrom zerver.models import UserGroup, UserProfile, get_linkable_streams\n\nBEFORE_MENTION_ALLOWED_REGEX = r\"(?<![^\\s\\'\\\"\\(\\{\\[\\/<])\"\n\n# Match multi-word string between @** ** or match any one-word\n# sequences after @\nMENTIONS_RE = re.compile(\n rf\"{BEFORE_MENTION_ALLOWED_REGEX}@(?P<silent>_?)(\\*\\*(?P<match>[^\\*]+)\\*\\*)\"\n)\nUSER_GROUP_MENTIONS_RE = re.compile(\n rf\"{BEFORE_MENTION_ALLOWED_REGEX}@(?P<silent>_?)(\\*(?P<match>[^\\*]+)\\*)\"\n)\n\ntopic_wildcards = frozenset([\"topic\"])\nstream_wildcards = frozenset([\"all\", \"everyone\", \"stream\"])\n\n\n@dataclass\nclass FullNameInfo:\n id: int\n full_name: str\n\n\n@dataclass\nclass UserFilter:\n id: Optional[int]\n full_name: Optional[str]\n\n def Q(self) -> Q:\n if self.full_name is not None and self.id is not None:\n return Q(full_name__iexact=self.full_name, id=self.id)\n elif self.id is not None:\n return Q(id=self.id)\n elif self.full_name is not None:\n return Q(full_name__iexact=self.full_name)\n else:\n raise AssertionError(\"totally empty filter makes no sense\")\n\n\n@dataclass\nclass MentionText:\n text: Optional[str]\n is_topic_wildcard: bool\n is_stream_wildcard: bool\n\n\n@dataclass\nclass PossibleMentions:\n mention_texts: Set[str]\n message_has_topic_wildcards: bool\n message_has_stream_wildcards: bool\n\n\nclass MentionBackend:\n def __init__(self, realm_id: int) -> None:\n self.realm_id = realm_id\n self.user_cache: Dict[Tuple[int, str], FullNameInfo] = {}\n self.stream_cache: Dict[str, int] = {}\n\n def get_full_name_info_list(self, user_filters: List[UserFilter]) -> List[FullNameInfo]:\n result: List[FullNameInfo] = []\n unseen_user_filters: List[UserFilter] = []\n\n # Try to get messages from the user_cache first.\n # This loop populates two lists:\n # - results are the objects we pull from cache\n # - unseen_user_filters are filters where need to hit the DB\n for user_filter in user_filters:\n # We expect callers who take advantage of our user_cache to supply both\n # 
id and full_name in the user mentions in their messages.\n if user_filter.id is not None and user_filter.full_name is not None:\n user = self.user_cache.get((user_filter.id, user_filter.full_name), None)\n if user is not None:\n result.append(user)\n continue\n\n # BOO! We have to go the database.\n unseen_user_filters.append(user_filter)\n\n # Most of the time, we have to go to the database to get user info,\n # unless our last loop found everything in the cache.\n if unseen_user_filters:\n q_list = [user_filter.Q() for user_filter in unseen_user_filters]\n\n rows = (\n UserProfile.objects.filter(\n realm_id=self.realm_id,\n is_active=True,\n )\n .filter(\n functools.reduce(lambda a, b: a | b, q_list),\n )\n .only(\n \"id\",\n \"full_name\",\n )\n )\n\n user_list = [FullNameInfo(id=row.id, full_name=row.full_name) for row in rows]\n\n # We expect callers who take advantage of our cache to supply both\n # id and full_name in the user mentions in their messages.\n for user in user_list:\n self.user_cache[(user.id, user.full_name)] = user\n\n result += user_list\n\n return result\n\n def get_stream_name_map(self, stream_names: Set[str]) -> Dict[str, int]:\n if not stream_names:\n return {}\n\n result: Dict[str, int] = {}\n unseen_stream_names: List[str] = []\n\n for stream_name in stream_names:\n if stream_name in self.stream_cache:\n result[stream_name] = self.stream_cache[stream_name]\n else:\n unseen_stream_names.append(stream_name)\n\n if unseen_stream_names:\n q_list = {Q(name=name) for name in unseen_stream_names}\n\n rows = (\n get_linkable_streams(\n realm_id=self.realm_id,\n )\n .filter(\n functools.reduce(lambda a, b: a | b, q_list),\n )\n .values(\n \"id\",\n \"name\",\n )\n )\n\n for row in rows:\n self.stream_cache[row[\"name\"]] = row[\"id\"]\n result[row[\"name\"]] = row[\"id\"]\n\n return result\n\n\ndef user_mention_matches_topic_wildcard(mention: str) -> bool:\n return mention in topic_wildcards\n\n\ndef user_mention_matches_stream_wildcard(mention: str) -> bool:\n return mention in stream_wildcards\n\n\ndef extract_mention_text(m: Match[str]) -> MentionText:\n text = m.group(\"match\")\n if text in topic_wildcards:\n return MentionText(text=None, is_topic_wildcard=True, is_stream_wildcard=False)\n if text in stream_wildcards:\n return MentionText(text=None, is_topic_wildcard=False, is_stream_wildcard=True)\n return MentionText(text=text, is_topic_wildcard=False, is_stream_wildcard=False)\n\n\ndef possible_mentions(content: str) -> PossibleMentions:\n # mention texts can either be names, or an extended name|id syntax.\n texts = set()\n message_has_topic_wildcards = False\n message_has_stream_wildcards = False\n for m in MENTIONS_RE.finditer(content):\n mention_text = extract_mention_text(m)\n text = mention_text.text\n if text:\n texts.add(text)\n if mention_text.is_topic_wildcard:\n message_has_topic_wildcards = True\n if mention_text.is_stream_wildcard:\n message_has_stream_wildcards = True\n return PossibleMentions(\n mention_texts=texts,\n message_has_topic_wildcards=message_has_topic_wildcards,\n message_has_stream_wildcards=message_has_stream_wildcards,\n )\n\n\ndef possible_user_group_mentions(content: str) -> Set[str]:\n return {m.group(\"match\") for m in USER_GROUP_MENTIONS_RE.finditer(content)}\n\n\ndef get_possible_mentions_info(\n mention_backend: MentionBackend, mention_texts: Set[str]\n) -> List[FullNameInfo]:\n if not mention_texts:\n return []\n\n user_filters = list()\n\n name_re = r\"(?P<full_name>.+)?\\|(?P<mention_id>\\d+)$\"\n for mention_text in 
mention_texts:\n name_syntax_match = re.match(name_re, mention_text)\n if name_syntax_match:\n full_name = name_syntax_match.group(\"full_name\")\n mention_id = name_syntax_match.group(\"mention_id\")\n if full_name:\n # For **name|id** mentions as mention_id\n # cannot be null inside this block.\n user_filters.append(UserFilter(full_name=full_name, id=int(mention_id)))\n else:\n # For **|id** syntax.\n user_filters.append(UserFilter(full_name=None, id=int(mention_id)))\n else:\n # For **name** syntax.\n user_filters.append(UserFilter(full_name=mention_text, id=None))\n\n return mention_backend.get_full_name_info_list(user_filters)\n\n\nclass MentionData:\n def __init__(self, mention_backend: MentionBackend, content: str) -> None:\n self.mention_backend = mention_backend\n realm_id = mention_backend.realm_id\n mentions = possible_mentions(content)\n possible_mentions_info = get_possible_mentions_info(mention_backend, mentions.mention_texts)\n self.full_name_info = {row.full_name.lower(): row for row in possible_mentions_info}\n self.user_id_info = {row.id: row for row in possible_mentions_info}\n self.init_user_group_data(realm_id=realm_id, content=content)\n self.has_stream_wildcards = mentions.message_has_stream_wildcards\n self.has_topic_wildcards = mentions.message_has_topic_wildcards\n\n def message_has_stream_wildcards(self) -> bool:\n return self.has_stream_wildcards\n\n def message_has_topic_wildcards(self) -> bool:\n return self.has_topic_wildcards\n\n def init_user_group_data(self, realm_id: int, content: str) -> None:\n self.user_group_name_info: Dict[str, UserGroup] = {}\n self.user_group_members: Dict[int, List[int]] = {}\n user_group_names = possible_user_group_mentions(content)\n if user_group_names:\n for group in UserGroup.objects.filter(\n realm_id=realm_id, name__in=user_group_names, is_system_group=False\n ).prefetch_related(\"direct_members\"):\n self.user_group_name_info[group.name.lower()] = group\n self.user_group_members[group.id] = [m.id for m in group.direct_members.all()]\n\n def get_user_by_name(self, name: str) -> Optional[FullNameInfo]:\n # warning: get_user_by_name is not dependable if two\n # users of the same full name are mentioned. Use\n # get_user_by_id where possible.\n return self.full_name_info.get(name.lower(), None)\n\n def get_user_by_id(self, id: int) -> Optional[FullNameInfo]:\n return self.user_id_info.get(id, None)\n\n def get_user_ids(self) -> Set[int]:\n \"\"\"\n Returns the user IDs that might have been mentioned by this\n content. 
Note that because this data structure has not parsed\n the message and does not know about escaping/code blocks, this\n will overestimate the list of user ids.\n \"\"\"\n return set(self.user_id_info.keys())\n\n def get_user_group(self, name: str) -> Optional[UserGroup]:\n return self.user_group_name_info.get(name.lower(), None)\n\n def get_group_members(self, user_group_id: int) -> List[int]:\n return self.user_group_members.get(user_group_id, [])\n\n def get_stream_name_map(self, stream_names: Set[str]) -> Dict[str, int]:\n return self.mention_backend.get_stream_name_map(stream_names)\n\n\ndef silent_mention_syntax_for_user(user_profile: UserProfile) -> str:\n return f\"@_**{user_profile.full_name}|{user_profile.id}**\"\n", "path": "zerver/lib/mention.py"}], "after_files": [{"content": "import functools\nimport re\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Match, Optional, Set, Tuple\n\nfrom django.conf import settings\nfrom django.db.models import Q\n\nfrom zerver.models import UserGroup, UserProfile, get_linkable_streams\n\nBEFORE_MENTION_ALLOWED_REGEX = r\"(?<![^\\s\\'\\\"\\(\\{\\[\\/<])\"\n\n# Match multi-word string between @** ** or match any one-word\n# sequences after @\nMENTIONS_RE = re.compile(\n rf\"{BEFORE_MENTION_ALLOWED_REGEX}@(?P<silent>_?)(\\*\\*(?P<match>[^\\*]+)\\*\\*)\"\n)\nUSER_GROUP_MENTIONS_RE = re.compile(\n rf\"{BEFORE_MENTION_ALLOWED_REGEX}@(?P<silent>_?)(\\*(?P<match>[^\\*]+)\\*)\"\n)\n\ntopic_wildcards = frozenset([\"topic\"])\nstream_wildcards = frozenset([\"all\", \"everyone\", \"stream\"])\n\n\n@dataclass\nclass FullNameInfo:\n id: int\n full_name: str\n\n\n@dataclass\nclass UserFilter:\n id: Optional[int]\n full_name: Optional[str]\n\n def Q(self) -> Q:\n if self.full_name is not None and self.id is not None:\n return Q(full_name__iexact=self.full_name, id=self.id)\n elif self.id is not None:\n return Q(id=self.id)\n elif self.full_name is not None:\n return Q(full_name__iexact=self.full_name)\n else:\n raise AssertionError(\"totally empty filter makes no sense\")\n\n\n@dataclass\nclass MentionText:\n text: Optional[str]\n is_topic_wildcard: bool\n is_stream_wildcard: bool\n\n\n@dataclass\nclass PossibleMentions:\n mention_texts: Set[str]\n message_has_topic_wildcards: bool\n message_has_stream_wildcards: bool\n\n\nclass MentionBackend:\n def __init__(self, realm_id: int) -> None:\n self.realm_id = realm_id\n self.user_cache: Dict[Tuple[int, str], FullNameInfo] = {}\n self.stream_cache: Dict[str, int] = {}\n\n def get_full_name_info_list(self, user_filters: List[UserFilter]) -> List[FullNameInfo]:\n result: List[FullNameInfo] = []\n unseen_user_filters: List[UserFilter] = []\n\n # Try to get messages from the user_cache first.\n # This loop populates two lists:\n # - results are the objects we pull from cache\n # - unseen_user_filters are filters where need to hit the DB\n for user_filter in user_filters:\n # We expect callers who take advantage of our user_cache to supply both\n # id and full_name in the user mentions in their messages.\n if user_filter.id is not None and user_filter.full_name is not None:\n user = self.user_cache.get((user_filter.id, user_filter.full_name), None)\n if user is not None:\n result.append(user)\n continue\n\n # BOO! 
We have to go the database.\n unseen_user_filters.append(user_filter)\n\n # Most of the time, we have to go to the database to get user info,\n # unless our last loop found everything in the cache.\n if unseen_user_filters:\n q_list = [user_filter.Q() for user_filter in unseen_user_filters]\n\n rows = (\n UserProfile.objects.filter(\n Q(realm_id=self.realm_id) | Q(email__in=settings.CROSS_REALM_BOT_EMAILS),\n )\n .filter(is_active=True)\n .filter(\n functools.reduce(lambda a, b: a | b, q_list),\n )\n .only(\n \"id\",\n \"full_name\",\n )\n )\n\n user_list = [FullNameInfo(id=row.id, full_name=row.full_name) for row in rows]\n\n # We expect callers who take advantage of our cache to supply both\n # id and full_name in the user mentions in their messages.\n for user in user_list:\n self.user_cache[(user.id, user.full_name)] = user\n\n result += user_list\n\n return result\n\n def get_stream_name_map(self, stream_names: Set[str]) -> Dict[str, int]:\n if not stream_names:\n return {}\n\n result: Dict[str, int] = {}\n unseen_stream_names: List[str] = []\n\n for stream_name in stream_names:\n if stream_name in self.stream_cache:\n result[stream_name] = self.stream_cache[stream_name]\n else:\n unseen_stream_names.append(stream_name)\n\n if unseen_stream_names:\n q_list = {Q(name=name) for name in unseen_stream_names}\n\n rows = (\n get_linkable_streams(\n realm_id=self.realm_id,\n )\n .filter(\n functools.reduce(lambda a, b: a | b, q_list),\n )\n .values(\n \"id\",\n \"name\",\n )\n )\n\n for row in rows:\n self.stream_cache[row[\"name\"]] = row[\"id\"]\n result[row[\"name\"]] = row[\"id\"]\n\n return result\n\n\ndef user_mention_matches_topic_wildcard(mention: str) -> bool:\n return mention in topic_wildcards\n\n\ndef user_mention_matches_stream_wildcard(mention: str) -> bool:\n return mention in stream_wildcards\n\n\ndef extract_mention_text(m: Match[str]) -> MentionText:\n text = m.group(\"match\")\n if text in topic_wildcards:\n return MentionText(text=None, is_topic_wildcard=True, is_stream_wildcard=False)\n if text in stream_wildcards:\n return MentionText(text=None, is_topic_wildcard=False, is_stream_wildcard=True)\n return MentionText(text=text, is_topic_wildcard=False, is_stream_wildcard=False)\n\n\ndef possible_mentions(content: str) -> PossibleMentions:\n # mention texts can either be names, or an extended name|id syntax.\n texts = set()\n message_has_topic_wildcards = False\n message_has_stream_wildcards = False\n for m in MENTIONS_RE.finditer(content):\n mention_text = extract_mention_text(m)\n text = mention_text.text\n if text:\n texts.add(text)\n if mention_text.is_topic_wildcard:\n message_has_topic_wildcards = True\n if mention_text.is_stream_wildcard:\n message_has_stream_wildcards = True\n return PossibleMentions(\n mention_texts=texts,\n message_has_topic_wildcards=message_has_topic_wildcards,\n message_has_stream_wildcards=message_has_stream_wildcards,\n )\n\n\ndef possible_user_group_mentions(content: str) -> Set[str]:\n return {m.group(\"match\") for m in USER_GROUP_MENTIONS_RE.finditer(content)}\n\n\ndef get_possible_mentions_info(\n mention_backend: MentionBackend, mention_texts: Set[str]\n) -> List[FullNameInfo]:\n if not mention_texts:\n return []\n\n user_filters = list()\n\n name_re = r\"(?P<full_name>.+)?\\|(?P<mention_id>\\d+)$\"\n for mention_text in mention_texts:\n name_syntax_match = re.match(name_re, mention_text)\n if name_syntax_match:\n full_name = name_syntax_match.group(\"full_name\")\n mention_id = name_syntax_match.group(\"mention_id\")\n if full_name:\n 
# For **name|id** mentions as mention_id\n # cannot be null inside this block.\n user_filters.append(UserFilter(full_name=full_name, id=int(mention_id)))\n else:\n # For **|id** syntax.\n user_filters.append(UserFilter(full_name=None, id=int(mention_id)))\n else:\n # For **name** syntax.\n user_filters.append(UserFilter(full_name=mention_text, id=None))\n\n return mention_backend.get_full_name_info_list(user_filters)\n\n\nclass MentionData:\n def __init__(self, mention_backend: MentionBackend, content: str) -> None:\n self.mention_backend = mention_backend\n realm_id = mention_backend.realm_id\n mentions = possible_mentions(content)\n possible_mentions_info = get_possible_mentions_info(mention_backend, mentions.mention_texts)\n self.full_name_info = {row.full_name.lower(): row for row in possible_mentions_info}\n self.user_id_info = {row.id: row for row in possible_mentions_info}\n self.init_user_group_data(realm_id=realm_id, content=content)\n self.has_stream_wildcards = mentions.message_has_stream_wildcards\n self.has_topic_wildcards = mentions.message_has_topic_wildcards\n\n def message_has_stream_wildcards(self) -> bool:\n return self.has_stream_wildcards\n\n def message_has_topic_wildcards(self) -> bool:\n return self.has_topic_wildcards\n\n def init_user_group_data(self, realm_id: int, content: str) -> None:\n self.user_group_name_info: Dict[str, UserGroup] = {}\n self.user_group_members: Dict[int, List[int]] = {}\n user_group_names = possible_user_group_mentions(content)\n if user_group_names:\n for group in UserGroup.objects.filter(\n realm_id=realm_id, name__in=user_group_names, is_system_group=False\n ).prefetch_related(\"direct_members\"):\n self.user_group_name_info[group.name.lower()] = group\n self.user_group_members[group.id] = [m.id for m in group.direct_members.all()]\n\n def get_user_by_name(self, name: str) -> Optional[FullNameInfo]:\n # warning: get_user_by_name is not dependable if two\n # users of the same full name are mentioned. Use\n # get_user_by_id where possible.\n return self.full_name_info.get(name.lower(), None)\n\n def get_user_by_id(self, id: int) -> Optional[FullNameInfo]:\n return self.user_id_info.get(id, None)\n\n def get_user_ids(self) -> Set[int]:\n \"\"\"\n Returns the user IDs that might have been mentioned by this\n content. Note that because this data structure has not parsed\n the message and does not know about escaping/code blocks, this\n will overestimate the list of user ids.\n \"\"\"\n return set(self.user_id_info.keys())\n\n def get_user_group(self, name: str) -> Optional[UserGroup]:\n return self.user_group_name_info.get(name.lower(), None)\n\n def get_group_members(self, user_group_id: int) -> List[int]:\n return self.user_group_members.get(user_group_id, [])\n\n def get_stream_name_map(self, stream_names: Set[str]) -> Dict[str, int]:\n return self.mention_backend.get_stream_name_map(stream_names)\n\n\ndef silent_mention_syntax_for_user(user_profile: UserProfile) -> str:\n return f\"@_**{user_profile.full_name}|{user_profile.id}**\"\n", "path": "zerver/lib/mention.py"}]} | 3,590 | 189 |
gh_patches_debug_59855 | rasdani/github-patches | git_diff | cupy__cupy-2938 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop support of older NumPy (<=1.14)?
According to [NEP 29](https://numpy.org/neps/nep-0029-deprecation_policy.html), an unusual NumPy Enhancement Proposal that declares a community-wide policy instead of merely proposing changes to NumPy itself, the support of NumPy <=1.14 will be dropped in early January, 2020, which is a few days later:
> Drop Schedule
> ...
> On Jan 07, 2020 drop support for Numpy 1.14 (initially released on Jan 06, 2018)
Would CuPy consider following NEP 29 so that some test codes can be simplified without worrying too much about backward compatibilities? I've seen this caused hard time for a few PRs.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4 from setuptools import setup
5 import sys
6
7 import cupy_setup_build
8
9
10 if sys.version_info[:3] == (3, 5, 0):
11 if not int(os.getenv('CUPY_PYTHON_350_FORCE', '0')):
12 msg = """
13 CuPy does not work with Python 3.5.0.
14
15 We strongly recommend to use another version of Python.
16 If you want to use CuPy with Python 3.5.0 at your own risk,
17 set 1 to CUPY_PYTHON_350_FORCE environment variable."""
18 print(msg)
19 sys.exit(1)
20
21
22 requirements = {
23 'setup': [
24 'fastrlock>=0.3',
25 ],
26 'install': [
27 'numpy>=1.9.0',
28 'fastrlock>=0.3',
29 ],
30 'stylecheck': [
31 'autopep8==1.3.5',
32 'flake8==3.5.0',
33 'pbr==4.0.4',
34 'pycodestyle==2.3.1',
35 ],
36 'test': [
37 'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI.
38 'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0
39 'mock',
40 ],
41 'doctest': [
42 'matplotlib',
43 'theano',
44 ],
45 'docs': [
46 'sphinx',
47 'sphinx_rtd_theme',
48 ],
49 'travis': [
50 '-r stylecheck',
51 '-r docs',
52 ],
53 'appveyor': [
54 '-r test',
55 ],
56 'jenkins': [
57 '-r test',
58 'pytest-timeout',
59 'pytest-cov',
60 'coveralls',
61 'codecov',
62 ],
63 }
64
65
66 def reduce_requirements(key):
67 # Resolve recursive requirements notation (-r)
68 reqs = requirements[key]
69 resolved_reqs = []
70 for req in reqs:
71 if req.startswith('-r'):
72 depend_key = req[2:].lstrip()
73 reduce_requirements(depend_key)
74 resolved_reqs += requirements[depend_key]
75 else:
76 resolved_reqs.append(req)
77 requirements[key] = resolved_reqs
78
79
80 for k in requirements.keys():
81 reduce_requirements(k)
82
83
84 extras_require = {k: v for k, v in requirements.items() if k != 'install'}
85
86
87 setup_requires = requirements['setup']
88 install_requires = requirements['install']
89 tests_require = requirements['test']
90
91
92 package_data = {
93 'cupy': [
94 'core/include/cupy/complex/arithmetic.h',
95 'core/include/cupy/complex/catrig.h',
96 'core/include/cupy/complex/catrigf.h',
97 'core/include/cupy/complex/ccosh.h',
98 'core/include/cupy/complex/ccoshf.h',
99 'core/include/cupy/complex/cexp.h',
100 'core/include/cupy/complex/cexpf.h',
101 'core/include/cupy/complex/clog.h',
102 'core/include/cupy/complex/clogf.h',
103 'core/include/cupy/complex/complex.h',
104 'core/include/cupy/complex/complex_inl.h',
105 'core/include/cupy/complex/cpow.h',
106 'core/include/cupy/complex/cproj.h',
107 'core/include/cupy/complex/csinh.h',
108 'core/include/cupy/complex/csinhf.h',
109 'core/include/cupy/complex/csqrt.h',
110 'core/include/cupy/complex/csqrtf.h',
111 'core/include/cupy/complex/ctanh.h',
112 'core/include/cupy/complex/ctanhf.h',
113 'core/include/cupy/complex/math_private.h',
114 'core/include/cupy/carray.cuh',
115 'core/include/cupy/complex.cuh',
116 'core/include/cupy/atomics.cuh',
117 'core/include/cupy/cuComplex_bridge.h',
118 'core/include/cupy/_cuda/cuda-*/*.h',
119 'core/include/cupy/_cuda/cuda-*/*.hpp',
120 'cuda/cupy_thrust.cu',
121 ],
122 }
123
124 package_data['cupy'] += cupy_setup_build.prepare_wheel_libs()
125
126 package_name = cupy_setup_build.get_package_name()
127 long_description = cupy_setup_build.get_long_description()
128 ext_modules = cupy_setup_build.get_ext_modules()
129 build_ext = cupy_setup_build.custom_build_ext
130 sdist = cupy_setup_build.sdist_with_cython
131
132 here = os.path.abspath(os.path.dirname(__file__))
133 # Get __version__ variable
134 exec(open(os.path.join(here, 'cupy', '_version.py')).read())
135
136 CLASSIFIERS = """\
137 Development Status :: 5 - Production/Stable
138 Intended Audience :: Science/Research
139 Intended Audience :: Developers
140 License :: OSI Approved :: MIT License
141 Programming Language :: Python
142 Programming Language :: Python :: 3
143 Programming Language :: Python :: 3.5
144 Programming Language :: Python :: 3.6
145 Programming Language :: Python :: 3.7
146 Programming Language :: Python :: 3 :: Only
147 Programming Language :: Cython
148 Topic :: Software Development
149 Topic :: Scientific/Engineering
150 Operating System :: Microsoft :: Windows
151 Operating System :: POSIX
152 Operating System :: MacOS
153 """
154
155
156 setup(
157 name=package_name,
158 version=__version__, # NOQA
159 description='CuPy: NumPy-like API accelerated with CUDA',
160 long_description=long_description,
161 author='Seiya Tokui',
162 author_email='[email protected]',
163 url='https://cupy.chainer.org/',
164 license='MIT License',
165 project_urls={
166 "Bug Tracker": "https://github.com/cupy/cupy/issues",
167 "Documentation": "https://docs-cupy.chainer.org/",
168 "Source Code": "https://github.com/cupy/cupy",
169 },
170 classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
171 packages=[
172 'cupy',
173 'cupy.binary',
174 'cupy.core',
175 'cupy.creation',
176 'cupy.cuda',
177 'cupy.cuda.memory_hooks',
178 'cupy.ext',
179 'cupy.fft',
180 'cupy.indexing',
181 'cupy.io',
182 'cupy.lib',
183 'cupy.linalg',
184 'cupy.logic',
185 'cupy.manipulation',
186 'cupy.math',
187 'cupy.misc',
188 'cupy.padding',
189 'cupy.prof',
190 'cupy.random',
191 'cupy._sorting',
192 'cupy.sparse',
193 'cupy.sparse.linalg',
194 'cupy.statistics',
195 'cupy.testing',
196 'cupyx',
197 'cupyx.fallback_mode',
198 'cupyx.scipy',
199 'cupyx.scipy.fft',
200 'cupyx.scipy.fftpack',
201 'cupyx.scipy.ndimage',
202 'cupyx.scipy.sparse',
203 'cupyx.scipy.sparse.linalg',
204 'cupyx.scipy.special',
205 'cupyx.scipy.linalg',
206 'cupyx.linalg',
207 'cupyx.linalg.sparse'
208 ],
209 package_data=package_data,
210 zip_safe=False,
211 python_requires='>=3.5.0',
212 setup_requires=setup_requires,
213 install_requires=install_requires,
214 tests_require=tests_require,
215 extras_require=extras_require,
216 ext_modules=ext_modules,
217 cmdclass={'build_ext': build_ext,
218 'sdist': sdist},
219 )
220
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@
'fastrlock>=0.3',
],
'install': [
- 'numpy>=1.9.0',
+ 'numpy>=1.15',
'fastrlock>=0.3',
],
'stylecheck': [
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -24,7 +24,7 @@\n 'fastrlock>=0.3',\n ],\n 'install': [\n- 'numpy>=1.9.0',\n+ 'numpy>=1.15',\n 'fastrlock>=0.3',\n ],\n 'stylecheck': [\n", "issue": "Drop support of older NumPy (<=1.14)?\nAccording to [NEP 29](https://numpy.org/neps/nep-0029-deprecation_policy.html), an unusual NumPy Enhancement Proposal that declares a community-wide policy instead of merely proposing changes to NumPy itself, the support of NumPy <=1.14 will be dropped in early January, 2020, which is a few days later:\r\n> Drop Schedule\r\n> ...\r\n> On Jan 07, 2020 drop support for Numpy 1.14 (initially released on Jan 06, 2018)\r\n\r\nWould CuPy consider following NEP 29 so that some test codes can be simplified without worrying too much about backward compatibilities? I've seen this caused hard time for a few PRs.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup\nimport sys\n\nimport cupy_setup_build\n\n\nif sys.version_info[:3] == (3, 5, 0):\n if not int(os.getenv('CUPY_PYTHON_350_FORCE', '0')):\n msg = \"\"\"\nCuPy does not work with Python 3.5.0.\n\nWe strongly recommend to use another version of Python.\nIf you want to use CuPy with Python 3.5.0 at your own risk,\nset 1 to CUPY_PYTHON_350_FORCE environment variable.\"\"\"\n print(msg)\n sys.exit(1)\n\n\nrequirements = {\n 'setup': [\n 'fastrlock>=0.3',\n ],\n 'install': [\n 'numpy>=1.9.0',\n 'fastrlock>=0.3',\n ],\n 'stylecheck': [\n 'autopep8==1.3.5',\n 'flake8==3.5.0',\n 'pbr==4.0.4',\n 'pycodestyle==2.3.1',\n ],\n 'test': [\n 'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI.\n 'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0\n 'mock',\n ],\n 'doctest': [\n 'matplotlib',\n 'theano',\n ],\n 'docs': [\n 'sphinx',\n 'sphinx_rtd_theme',\n ],\n 'travis': [\n '-r stylecheck',\n '-r docs',\n ],\n 'appveyor': [\n '-r test',\n ],\n 'jenkins': [\n '-r test',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\n\n\nsetup_requires = requirements['setup']\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n\npackage_data = {\n 'cupy': [\n 'core/include/cupy/complex/arithmetic.h',\n 'core/include/cupy/complex/catrig.h',\n 'core/include/cupy/complex/catrigf.h',\n 'core/include/cupy/complex/ccosh.h',\n 'core/include/cupy/complex/ccoshf.h',\n 'core/include/cupy/complex/cexp.h',\n 'core/include/cupy/complex/cexpf.h',\n 'core/include/cupy/complex/clog.h',\n 'core/include/cupy/complex/clogf.h',\n 'core/include/cupy/complex/complex.h',\n 'core/include/cupy/complex/complex_inl.h',\n 'core/include/cupy/complex/cpow.h',\n 'core/include/cupy/complex/cproj.h',\n 'core/include/cupy/complex/csinh.h',\n 'core/include/cupy/complex/csinhf.h',\n 'core/include/cupy/complex/csqrt.h',\n 'core/include/cupy/complex/csqrtf.h',\n 'core/include/cupy/complex/ctanh.h',\n 'core/include/cupy/complex/ctanhf.h',\n 'core/include/cupy/complex/math_private.h',\n 'core/include/cupy/carray.cuh',\n 
'core/include/cupy/complex.cuh',\n 'core/include/cupy/atomics.cuh',\n 'core/include/cupy/cuComplex_bridge.h',\n 'core/include/cupy/_cuda/cuda-*/*.h',\n 'core/include/cupy/_cuda/cuda-*/*.hpp',\n 'cuda/cupy_thrust.cu',\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs()\n\npackage_name = cupy_setup_build.get_package_name()\nlong_description = cupy_setup_build.get_long_description()\next_modules = cupy_setup_build.get_ext_modules()\nbuild_ext = cupy_setup_build.custom_build_ext\nsdist = cupy_setup_build.sdist_with_cython\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nexec(open(os.path.join(here, 'cupy', '_version.py')).read())\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.5\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: Microsoft :: Windows\nOperating System :: POSIX\nOperating System :: MacOS\n\"\"\"\n\n\nsetup(\n name=package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy-like API accelerated with CUDA',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://cupy.chainer.org/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs-cupy.chainer.org/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=[\n 'cupy',\n 'cupy.binary',\n 'cupy.core',\n 'cupy.creation',\n 'cupy.cuda',\n 'cupy.cuda.memory_hooks',\n 'cupy.ext',\n 'cupy.fft',\n 'cupy.indexing',\n 'cupy.io',\n 'cupy.lib',\n 'cupy.linalg',\n 'cupy.logic',\n 'cupy.manipulation',\n 'cupy.math',\n 'cupy.misc',\n 'cupy.padding',\n 'cupy.prof',\n 'cupy.random',\n 'cupy._sorting',\n 'cupy.sparse',\n 'cupy.sparse.linalg',\n 'cupy.statistics',\n 'cupy.testing',\n 'cupyx',\n 'cupyx.fallback_mode',\n 'cupyx.scipy',\n 'cupyx.scipy.fft',\n 'cupyx.scipy.fftpack',\n 'cupyx.scipy.ndimage',\n 'cupyx.scipy.sparse',\n 'cupyx.scipy.sparse.linalg',\n 'cupyx.scipy.special',\n 'cupyx.scipy.linalg',\n 'cupyx.linalg',\n 'cupyx.linalg.sparse'\n ],\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.5.0',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext,\n 'sdist': sdist},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup\nimport sys\n\nimport cupy_setup_build\n\n\nif sys.version_info[:3] == (3, 5, 0):\n if not int(os.getenv('CUPY_PYTHON_350_FORCE', '0')):\n msg = \"\"\"\nCuPy does not work with Python 3.5.0.\n\nWe strongly recommend to use another version of Python.\nIf you want to use CuPy with Python 3.5.0 at your own risk,\nset 1 to CUPY_PYTHON_350_FORCE environment variable.\"\"\"\n print(msg)\n sys.exit(1)\n\n\nrequirements = {\n 'setup': [\n 'fastrlock>=0.3',\n ],\n 'install': [\n 'numpy>=1.15',\n 'fastrlock>=0.3',\n ],\n 'stylecheck': [\n 'autopep8==1.3.5',\n 'flake8==3.5.0',\n 'pbr==4.0.4',\n 
'pycodestyle==2.3.1',\n ],\n 'test': [\n 'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI.\n 'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0\n 'mock',\n ],\n 'doctest': [\n 'matplotlib',\n 'theano',\n ],\n 'docs': [\n 'sphinx',\n 'sphinx_rtd_theme',\n ],\n 'travis': [\n '-r stylecheck',\n '-r docs',\n ],\n 'appveyor': [\n '-r test',\n ],\n 'jenkins': [\n '-r test',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\n\n\nsetup_requires = requirements['setup']\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n\npackage_data = {\n 'cupy': [\n 'core/include/cupy/complex/arithmetic.h',\n 'core/include/cupy/complex/catrig.h',\n 'core/include/cupy/complex/catrigf.h',\n 'core/include/cupy/complex/ccosh.h',\n 'core/include/cupy/complex/ccoshf.h',\n 'core/include/cupy/complex/cexp.h',\n 'core/include/cupy/complex/cexpf.h',\n 'core/include/cupy/complex/clog.h',\n 'core/include/cupy/complex/clogf.h',\n 'core/include/cupy/complex/complex.h',\n 'core/include/cupy/complex/complex_inl.h',\n 'core/include/cupy/complex/cpow.h',\n 'core/include/cupy/complex/cproj.h',\n 'core/include/cupy/complex/csinh.h',\n 'core/include/cupy/complex/csinhf.h',\n 'core/include/cupy/complex/csqrt.h',\n 'core/include/cupy/complex/csqrtf.h',\n 'core/include/cupy/complex/ctanh.h',\n 'core/include/cupy/complex/ctanhf.h',\n 'core/include/cupy/complex/math_private.h',\n 'core/include/cupy/carray.cuh',\n 'core/include/cupy/complex.cuh',\n 'core/include/cupy/atomics.cuh',\n 'core/include/cupy/cuComplex_bridge.h',\n 'core/include/cupy/_cuda/cuda-*/*.h',\n 'core/include/cupy/_cuda/cuda-*/*.hpp',\n 'cuda/cupy_thrust.cu',\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs()\n\npackage_name = cupy_setup_build.get_package_name()\nlong_description = cupy_setup_build.get_long_description()\next_modules = cupy_setup_build.get_ext_modules()\nbuild_ext = cupy_setup_build.custom_build_ext\nsdist = cupy_setup_build.sdist_with_cython\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nexec(open(os.path.join(here, 'cupy', '_version.py')).read())\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.5\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: Microsoft :: Windows\nOperating System :: POSIX\nOperating System :: MacOS\n\"\"\"\n\n\nsetup(\n name=package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy-like API accelerated with CUDA',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://cupy.chainer.org/',\n license='MIT 
License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs-cupy.chainer.org/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=[\n 'cupy',\n 'cupy.binary',\n 'cupy.core',\n 'cupy.creation',\n 'cupy.cuda',\n 'cupy.cuda.memory_hooks',\n 'cupy.ext',\n 'cupy.fft',\n 'cupy.indexing',\n 'cupy.io',\n 'cupy.lib',\n 'cupy.linalg',\n 'cupy.logic',\n 'cupy.manipulation',\n 'cupy.math',\n 'cupy.misc',\n 'cupy.padding',\n 'cupy.prof',\n 'cupy.random',\n 'cupy._sorting',\n 'cupy.sparse',\n 'cupy.sparse.linalg',\n 'cupy.statistics',\n 'cupy.testing',\n 'cupyx',\n 'cupyx.fallback_mode',\n 'cupyx.scipy',\n 'cupyx.scipy.fft',\n 'cupyx.scipy.fftpack',\n 'cupyx.scipy.ndimage',\n 'cupyx.scipy.sparse',\n 'cupyx.scipy.sparse.linalg',\n 'cupyx.scipy.special',\n 'cupyx.scipy.linalg',\n 'cupyx.linalg',\n 'cupyx.linalg.sparse'\n ],\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.5.0',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext,\n 'sdist': sdist},\n)\n", "path": "setup.py"}]} | 2,617 | 88 |
gh_patches_debug_41247 | rasdani/github-patches | git_diff | holoviz__panel-5710 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PanelCallbackHandler design principle
yesterday I changed the PanelCallBackHandler handler to also output the final response from the agent.

The change is marked below.

I can see that Streamlits Callback handler does not write the 'output'. The user/ developer have to manually do this.

I'm thinking that maybe it was a mistake by me to change this. What do you think @ahuang11 ?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/chat/langchain.py`
Content:
```
1 """The langchain module integrates Langchain support with Panel."""
2
3 from __future__ import annotations
4
5 from typing import Any, Dict, Union
6
7 try:
8 from langchain.callbacks.base import BaseCallbackHandler
9 from langchain.schema import AgentAction, AgentFinish, LLMResult
10 except ImportError:
11 BaseCallbackHandler = object
12 AgentAction = None
13 AgentFinish = None
14 LLMResult = None
15
16 from ..chat.feed import ChatFeed
17 from ..chat.interface import ChatInterface
18 from ..chat.message import DEFAULT_AVATARS
19 from ..layout import Accordion
20
21
22 class PanelCallbackHandler(BaseCallbackHandler):
23 """
24 The Langchain `PanelCallbackHandler` itself is not a widget or pane, but is useful for rendering
25 and streaming output from Langchain Tools, Agents, and Chains as `ChatMessage` objects.
26
27 Reference: https://panel.holoviz.org/reference/chat/PanelCallbackHandler.html
28
29 :Example:
30
31 >>> chat_interface = pn.widgets.ChatInterface(callback=callback, callback_user="Langchain")
32 >>> callback_handler = pn.widgets.langchain.PanelCallbackHandler(instance=chat_interface)
33 >>> llm = ChatOpenAI(streaming=True, callbacks=[callback_handler])
34 >>> chain = ConversationChain(llm=llm)
35
36 """
37
38 def __init__(
39 self,
40 instance: ChatFeed | ChatInterface,
41 user: str = "LangChain",
42 avatar: str = DEFAULT_AVATARS["langchain"],
43 ):
44 if BaseCallbackHandler is object:
45 raise ImportError(
46 "LangChainCallbackHandler requires `langchain` to be installed."
47 )
48 self.instance = instance
49 self._message = None
50 self._active_user = user
51 self._active_avatar = avatar
52 self._disabled_state = self.instance.disabled
53 self._is_streaming = None
54
55 self._input_user = user # original user
56 self._input_avatar = avatar
57
58 def _update_active(self, avatar: str, label: str):
59 """
60 Prevent duplicate labels from being appended to the same user.
61 """
62 # not a typo; Langchain passes a string :/
63 if label == "None":
64 return
65
66 self._active_avatar = avatar
67 if f"- {label}" not in self._active_user:
68 self._active_user = f"{self._active_user} - {label}"
69
70 def _stream(self, message: str):
71 return self.instance.stream(
72 message,
73 user=self._active_user,
74 avatar=self._active_avatar,
75 message=self._message,
76 )
77
78 def on_llm_start(self, serialized: Dict[str, Any], *args, **kwargs):
79 model = kwargs.get("invocation_params", {}).get("model_name", "")
80 self._is_streaming = serialized.get("kwargs", {}).get("streaming")
81 messages = self.instance.objects
82 if messages[-1].user != self._active_user:
83 self._message = None
84 if self._active_user and model not in self._active_user:
85 self._active_user = f"{self._active_user} ({model})"
86 return super().on_llm_start(serialized, *args, **kwargs)
87
88 def on_llm_new_token(self, token: str, **kwargs) -> None:
89 self._message = self._stream(token)
90 return super().on_llm_new_token(token, **kwargs)
91
92 def on_llm_end(self, response: LLMResult, *args, **kwargs):
93 if not self._is_streaming:
94 # on_llm_new_token does not get called if not streaming
95 self._message = self.instance.send(
96 response.generations[0][0].text,
97 user=self._active_user,
98 avatar=self._active_avatar,
99 respond=False,
100 )
101
102 self._active_user = self._input_user
103 self._active_avatar = self._input_avatar
104 self._message = None
105 return super().on_llm_end(response, *args, **kwargs)
106
107 def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], *args, **kwargs):
108 return super().on_llm_error(error, *args, **kwargs)
109
110 def on_agent_action(self, action: AgentAction, *args, **kwargs: Any) -> Any:
111 return super().on_agent_action(action, *args, **kwargs)
112
113 def on_agent_finish(self, finish: AgentFinish, *args, **kwargs: Any) -> Any:
114 return super().on_agent_finish(finish, *args, **kwargs)
115
116 def on_tool_start(
117 self, serialized: Dict[str, Any], input_str: str, *args, **kwargs
118 ):
119 self._update_active(DEFAULT_AVATARS["tool"], serialized["name"])
120 return super().on_tool_start(serialized, input_str, *args, **kwargs)
121
122 def on_tool_end(self, output: str, *args, **kwargs):
123 self._stream(output)
124 return super().on_tool_end(output, *args, **kwargs)
125
126 def on_tool_error(
127 self, error: Union[Exception, KeyboardInterrupt], *args, **kwargs
128 ):
129 return super().on_tool_error(error, *args, **kwargs)
130
131 def on_chain_start(
132 self, serialized: Dict[str, Any], inputs: Dict[str, Any], *args, **kwargs
133 ):
134 self._disabled_state = self.instance.disabled
135 self.instance.disabled = True
136 return super().on_chain_start(serialized, inputs, *args, **kwargs)
137
138 def on_chain_end(self, outputs: Dict[str, Any], *args, **kwargs):
139 if 'output' in outputs: # The chain is finished. Report the result
140 self.instance.disabled = self._disabled_state
141 self._stream(outputs['output'])
142 return super().on_chain_end(outputs, *args, **kwargs)
143
144 def on_retriever_error(
145 self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
146 ) -> Any:
147 """Run when Retriever errors."""
148 return super().on_retriever_error(error, **kwargs)
149
150 def on_retriever_end(self, documents, **kwargs: Any) -> Any:
151 """Run when Retriever ends running."""
152 objects = [(f"Document {index}", document.page_content) for index, document in enumerate(documents)]
153 message = Accordion(*objects, sizing_mode="stretch_width", margin=(10,13,10,5))
154 self.instance.send(
155 message,
156 user="LangChain (retriever)",
157 avatar=DEFAULT_AVATARS["retriever"],
158 respond=False,
159 )
160 return super().on_retriever_end(documents=documents, **kwargs)
161
162 def on_text(self, text: str, **kwargs: Any):
163 """Run when text is received."""
164 return super().on_text(text, **kwargs)
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/chat/langchain.py b/panel/chat/langchain.py
--- a/panel/chat/langchain.py
+++ b/panel/chat/langchain.py
@@ -22,7 +22,8 @@
class PanelCallbackHandler(BaseCallbackHandler):
"""
The Langchain `PanelCallbackHandler` itself is not a widget or pane, but is useful for rendering
- and streaming output from Langchain Tools, Agents, and Chains as `ChatMessage` objects.
+ and streaming the *chain of thought* from Langchain Tools, Agents, and Chains
+ as `ChatMessage` objects.
Reference: https://panel.holoviz.org/reference/chat/PanelCallbackHandler.html
@@ -67,13 +68,20 @@
if f"- {label}" not in self._active_user:
self._active_user = f"{self._active_user} - {label}"
+ def _reset_active(self):
+ self._active_user = self._input_user
+ self._active_avatar = self._input_avatar
+ self._message = None
+
def _stream(self, message: str):
- return self.instance.stream(
- message,
- user=self._active_user,
- avatar=self._active_avatar,
- message=self._message,
- )
+ if message.strip():
+ return self.instance.stream(
+ message,
+ user=self._active_user,
+ avatar=self._active_avatar,
+ message=self._message,
+ )
+ return self._message
def on_llm_start(self, serialized: Dict[str, Any], *args, **kwargs):
model = kwargs.get("invocation_params", {}).get("model_name", "")
@@ -99,9 +107,7 @@
respond=False,
)
- self._active_user = self._input_user
- self._active_avatar = self._input_avatar
- self._message = None
+ self._reset_active()
return super().on_llm_end(response, *args, **kwargs)
def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], *args, **kwargs):
@@ -117,10 +123,12 @@
self, serialized: Dict[str, Any], input_str: str, *args, **kwargs
):
self._update_active(DEFAULT_AVATARS["tool"], serialized["name"])
+ self._stream(f"Tool input: {input_str}")
return super().on_tool_start(serialized, input_str, *args, **kwargs)
def on_tool_end(self, output: str, *args, **kwargs):
self._stream(output)
+ self._reset_active()
return super().on_tool_end(output, *args, **kwargs)
def on_tool_error(
@@ -136,9 +144,7 @@
return super().on_chain_start(serialized, inputs, *args, **kwargs)
def on_chain_end(self, outputs: Dict[str, Any], *args, **kwargs):
- if 'output' in outputs: # The chain is finished. Report the result
- self.instance.disabled = self._disabled_state
- self._stream(outputs['output'])
+ self.instance.disabled = self._disabled_state
return super().on_chain_end(outputs, *args, **kwargs)
def on_retriever_error(
| {"golden_diff": "diff --git a/panel/chat/langchain.py b/panel/chat/langchain.py\n--- a/panel/chat/langchain.py\n+++ b/panel/chat/langchain.py\n@@ -22,7 +22,8 @@\n class PanelCallbackHandler(BaseCallbackHandler):\n \"\"\"\n The Langchain `PanelCallbackHandler` itself is not a widget or pane, but is useful for rendering\n- and streaming output from Langchain Tools, Agents, and Chains as `ChatMessage` objects.\n+ and streaming the *chain of thought* from Langchain Tools, Agents, and Chains\n+ as `ChatMessage` objects.\n \n Reference: https://panel.holoviz.org/reference/chat/PanelCallbackHandler.html\n \n@@ -67,13 +68,20 @@\n if f\"- {label}\" not in self._active_user:\n self._active_user = f\"{self._active_user} - {label}\"\n \n+ def _reset_active(self):\n+ self._active_user = self._input_user\n+ self._active_avatar = self._input_avatar\n+ self._message = None\n+\n def _stream(self, message: str):\n- return self.instance.stream(\n- message,\n- user=self._active_user,\n- avatar=self._active_avatar,\n- message=self._message,\n- )\n+ if message.strip():\n+ return self.instance.stream(\n+ message,\n+ user=self._active_user,\n+ avatar=self._active_avatar,\n+ message=self._message,\n+ )\n+ return self._message\n \n def on_llm_start(self, serialized: Dict[str, Any], *args, **kwargs):\n model = kwargs.get(\"invocation_params\", {}).get(\"model_name\", \"\")\n@@ -99,9 +107,7 @@\n respond=False,\n )\n \n- self._active_user = self._input_user\n- self._active_avatar = self._input_avatar\n- self._message = None\n+ self._reset_active()\n return super().on_llm_end(response, *args, **kwargs)\n \n def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], *args, **kwargs):\n@@ -117,10 +123,12 @@\n self, serialized: Dict[str, Any], input_str: str, *args, **kwargs\n ):\n self._update_active(DEFAULT_AVATARS[\"tool\"], serialized[\"name\"])\n+ self._stream(f\"Tool input: {input_str}\")\n return super().on_tool_start(serialized, input_str, *args, **kwargs)\n \n def on_tool_end(self, output: str, *args, **kwargs):\n self._stream(output)\n+ self._reset_active()\n return super().on_tool_end(output, *args, **kwargs)\n \n def on_tool_error(\n@@ -136,9 +144,7 @@\n return super().on_chain_start(serialized, inputs, *args, **kwargs)\n \n def on_chain_end(self, outputs: Dict[str, Any], *args, **kwargs):\n- if 'output' in outputs: # The chain is finished. Report the result\n- self.instance.disabled = self._disabled_state\n- self._stream(outputs['output'])\n+ self.instance.disabled = self._disabled_state\n return super().on_chain_end(outputs, *args, **kwargs)\n \n def on_retriever_error(\n", "issue": "PanelCallbackHandler design principle\nyesterday I changed the PanelCallBackHandler handler to also output the final response from the agent.\r\n\r\n\r\n\r\nThe change is marked below.\r\n\r\n\r\n\r\nI can see that Streamlits Callback handler does not write the 'output'. The user/ developer have to manually do this. \r\n\r\n\r\n\r\nI'm thinking that maybe it was a mistake by me to change this. 
What do you think @ahuang11 ?\n", "before_files": [{"content": "\"\"\"The langchain module integrates Langchain support with Panel.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Any, Dict, Union\n\ntry:\n from langchain.callbacks.base import BaseCallbackHandler\n from langchain.schema import AgentAction, AgentFinish, LLMResult\nexcept ImportError:\n BaseCallbackHandler = object\n AgentAction = None\n AgentFinish = None\n LLMResult = None\n\nfrom ..chat.feed import ChatFeed\nfrom ..chat.interface import ChatInterface\nfrom ..chat.message import DEFAULT_AVATARS\nfrom ..layout import Accordion\n\n\nclass PanelCallbackHandler(BaseCallbackHandler):\n \"\"\"\n The Langchain `PanelCallbackHandler` itself is not a widget or pane, but is useful for rendering\n and streaming output from Langchain Tools, Agents, and Chains as `ChatMessage` objects.\n\n Reference: https://panel.holoviz.org/reference/chat/PanelCallbackHandler.html\n\n :Example:\n\n >>> chat_interface = pn.widgets.ChatInterface(callback=callback, callback_user=\"Langchain\")\n >>> callback_handler = pn.widgets.langchain.PanelCallbackHandler(instance=chat_interface)\n >>> llm = ChatOpenAI(streaming=True, callbacks=[callback_handler])\n >>> chain = ConversationChain(llm=llm)\n\n \"\"\"\n\n def __init__(\n self,\n instance: ChatFeed | ChatInterface,\n user: str = \"LangChain\",\n avatar: str = DEFAULT_AVATARS[\"langchain\"],\n ):\n if BaseCallbackHandler is object:\n raise ImportError(\n \"LangChainCallbackHandler requires `langchain` to be installed.\"\n )\n self.instance = instance\n self._message = None\n self._active_user = user\n self._active_avatar = avatar\n self._disabled_state = self.instance.disabled\n self._is_streaming = None\n\n self._input_user = user # original user\n self._input_avatar = avatar\n\n def _update_active(self, avatar: str, label: str):\n \"\"\"\n Prevent duplicate labels from being appended to the same user.\n \"\"\"\n # not a typo; Langchain passes a string :/\n if label == \"None\":\n return\n\n self._active_avatar = avatar\n if f\"- {label}\" not in self._active_user:\n self._active_user = f\"{self._active_user} - {label}\"\n\n def _stream(self, message: str):\n return self.instance.stream(\n message,\n user=self._active_user,\n avatar=self._active_avatar,\n message=self._message,\n )\n\n def on_llm_start(self, serialized: Dict[str, Any], *args, **kwargs):\n model = kwargs.get(\"invocation_params\", {}).get(\"model_name\", \"\")\n self._is_streaming = serialized.get(\"kwargs\", {}).get(\"streaming\")\n messages = self.instance.objects\n if messages[-1].user != self._active_user:\n self._message = None\n if self._active_user and model not in self._active_user:\n self._active_user = f\"{self._active_user} ({model})\"\n return super().on_llm_start(serialized, *args, **kwargs)\n\n def on_llm_new_token(self, token: str, **kwargs) -> None:\n self._message = self._stream(token)\n return super().on_llm_new_token(token, **kwargs)\n\n def on_llm_end(self, response: LLMResult, *args, **kwargs):\n if not self._is_streaming:\n # on_llm_new_token does not get called if not streaming\n self._message = self.instance.send(\n response.generations[0][0].text,\n user=self._active_user,\n avatar=self._active_avatar,\n respond=False,\n )\n\n self._active_user = self._input_user\n self._active_avatar = self._input_avatar\n self._message = None\n return super().on_llm_end(response, *args, **kwargs)\n\n def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], *args, **kwargs):\n return 
super().on_llm_error(error, *args, **kwargs)\n\n def on_agent_action(self, action: AgentAction, *args, **kwargs: Any) -> Any:\n return super().on_agent_action(action, *args, **kwargs)\n\n def on_agent_finish(self, finish: AgentFinish, *args, **kwargs: Any) -> Any:\n return super().on_agent_finish(finish, *args, **kwargs)\n\n def on_tool_start(\n self, serialized: Dict[str, Any], input_str: str, *args, **kwargs\n ):\n self._update_active(DEFAULT_AVATARS[\"tool\"], serialized[\"name\"])\n return super().on_tool_start(serialized, input_str, *args, **kwargs)\n\n def on_tool_end(self, output: str, *args, **kwargs):\n self._stream(output)\n return super().on_tool_end(output, *args, **kwargs)\n\n def on_tool_error(\n self, error: Union[Exception, KeyboardInterrupt], *args, **kwargs\n ):\n return super().on_tool_error(error, *args, **kwargs)\n\n def on_chain_start(\n self, serialized: Dict[str, Any], inputs: Dict[str, Any], *args, **kwargs\n ):\n self._disabled_state = self.instance.disabled\n self.instance.disabled = True\n return super().on_chain_start(serialized, inputs, *args, **kwargs)\n\n def on_chain_end(self, outputs: Dict[str, Any], *args, **kwargs):\n if 'output' in outputs: # The chain is finished. Report the result\n self.instance.disabled = self._disabled_state\n self._stream(outputs['output'])\n return super().on_chain_end(outputs, *args, **kwargs)\n\n def on_retriever_error(\n self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n ) -> Any:\n \"\"\"Run when Retriever errors.\"\"\"\n return super().on_retriever_error(error, **kwargs)\n\n def on_retriever_end(self, documents, **kwargs: Any) -> Any:\n \"\"\"Run when Retriever ends running.\"\"\"\n objects = [(f\"Document {index}\", document.page_content) for index, document in enumerate(documents)]\n message = Accordion(*objects, sizing_mode=\"stretch_width\", margin=(10,13,10,5))\n self.instance.send(\n message,\n user=\"LangChain (retriever)\",\n avatar=DEFAULT_AVATARS[\"retriever\"],\n respond=False,\n )\n return super().on_retriever_end(documents=documents, **kwargs)\n\n def on_text(self, text: str, **kwargs: Any):\n \"\"\"Run when text is received.\"\"\"\n return super().on_text(text, **kwargs)\n", "path": "panel/chat/langchain.py"}], "after_files": [{"content": "\"\"\"The langchain module integrates Langchain support with Panel.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Any, Dict, Union\n\ntry:\n from langchain.callbacks.base import BaseCallbackHandler\n from langchain.schema import AgentAction, AgentFinish, LLMResult\nexcept ImportError:\n BaseCallbackHandler = object\n AgentAction = None\n AgentFinish = None\n LLMResult = None\n\nfrom ..chat.feed import ChatFeed\nfrom ..chat.interface import ChatInterface\nfrom ..chat.message import DEFAULT_AVATARS\nfrom ..layout import Accordion\n\n\nclass PanelCallbackHandler(BaseCallbackHandler):\n \"\"\"\n The Langchain `PanelCallbackHandler` itself is not a widget or pane, but is useful for rendering\n and streaming the *chain of thought* from Langchain Tools, Agents, and Chains\n as `ChatMessage` objects.\n\n Reference: https://panel.holoviz.org/reference/chat/PanelCallbackHandler.html\n\n :Example:\n\n >>> chat_interface = pn.widgets.ChatInterface(callback=callback, callback_user=\"Langchain\")\n >>> callback_handler = pn.widgets.langchain.PanelCallbackHandler(instance=chat_interface)\n >>> llm = ChatOpenAI(streaming=True, callbacks=[callback_handler])\n >>> chain = ConversationChain(llm=llm)\n\n \"\"\"\n\n def __init__(\n self,\n instance: ChatFeed | 
ChatInterface,\n user: str = \"LangChain\",\n avatar: str = DEFAULT_AVATARS[\"langchain\"],\n ):\n if BaseCallbackHandler is object:\n raise ImportError(\n \"LangChainCallbackHandler requires `langchain` to be installed.\"\n )\n self.instance = instance\n self._message = None\n self._active_user = user\n self._active_avatar = avatar\n self._disabled_state = self.instance.disabled\n self._is_streaming = None\n\n self._input_user = user # original user\n self._input_avatar = avatar\n\n def _update_active(self, avatar: str, label: str):\n \"\"\"\n Prevent duplicate labels from being appended to the same user.\n \"\"\"\n # not a typo; Langchain passes a string :/\n if label == \"None\":\n return\n\n self._active_avatar = avatar\n if f\"- {label}\" not in self._active_user:\n self._active_user = f\"{self._active_user} - {label}\"\n\n def _reset_active(self):\n self._active_user = self._input_user\n self._active_avatar = self._input_avatar\n self._message = None\n\n def _stream(self, message: str):\n if message.strip():\n return self.instance.stream(\n message,\n user=self._active_user,\n avatar=self._active_avatar,\n message=self._message,\n )\n return self._message\n\n def on_llm_start(self, serialized: Dict[str, Any], *args, **kwargs):\n model = kwargs.get(\"invocation_params\", {}).get(\"model_name\", \"\")\n self._is_streaming = serialized.get(\"kwargs\", {}).get(\"streaming\")\n messages = self.instance.objects\n if messages[-1].user != self._active_user:\n self._message = None\n if self._active_user and model not in self._active_user:\n self._active_user = f\"{self._active_user} ({model})\"\n return super().on_llm_start(serialized, *args, **kwargs)\n\n def on_llm_new_token(self, token: str, **kwargs) -> None:\n self._message = self._stream(token)\n return super().on_llm_new_token(token, **kwargs)\n\n def on_llm_end(self, response: LLMResult, *args, **kwargs):\n if not self._is_streaming:\n # on_llm_new_token does not get called if not streaming\n self._message = self.instance.send(\n response.generations[0][0].text,\n user=self._active_user,\n avatar=self._active_avatar,\n respond=False,\n )\n\n self._reset_active()\n return super().on_llm_end(response, *args, **kwargs)\n\n def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], *args, **kwargs):\n return super().on_llm_error(error, *args, **kwargs)\n\n def on_agent_action(self, action: AgentAction, *args, **kwargs: Any) -> Any:\n return super().on_agent_action(action, *args, **kwargs)\n\n def on_agent_finish(self, finish: AgentFinish, *args, **kwargs: Any) -> Any:\n return super().on_agent_finish(finish, *args, **kwargs)\n\n def on_tool_start(\n self, serialized: Dict[str, Any], input_str: str, *args, **kwargs\n ):\n self._update_active(DEFAULT_AVATARS[\"tool\"], serialized[\"name\"])\n self._stream(f\"Tool input: {input_str}\")\n return super().on_tool_start(serialized, input_str, *args, **kwargs)\n\n def on_tool_end(self, output: str, *args, **kwargs):\n self._stream(output)\n self._reset_active()\n return super().on_tool_end(output, *args, **kwargs)\n\n def on_tool_error(\n self, error: Union[Exception, KeyboardInterrupt], *args, **kwargs\n ):\n return super().on_tool_error(error, *args, **kwargs)\n\n def on_chain_start(\n self, serialized: Dict[str, Any], inputs: Dict[str, Any], *args, **kwargs\n ):\n self._disabled_state = self.instance.disabled\n self.instance.disabled = True\n return super().on_chain_start(serialized, inputs, *args, **kwargs)\n\n def on_chain_end(self, outputs: Dict[str, Any], *args, **kwargs):\n 
self.instance.disabled = self._disabled_state\n return super().on_chain_end(outputs, *args, **kwargs)\n\n def on_retriever_error(\n self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n ) -> Any:\n \"\"\"Run when Retriever errors.\"\"\"\n return super().on_retriever_error(error, **kwargs)\n\n def on_retriever_end(self, documents, **kwargs: Any) -> Any:\n \"\"\"Run when Retriever ends running.\"\"\"\n objects = [(f\"Document {index}\", document.page_content) for index, document in enumerate(documents)]\n message = Accordion(*objects, sizing_mode=\"stretch_width\", margin=(10,13,10,5))\n self.instance.send(\n message,\n user=\"LangChain (retriever)\",\n avatar=DEFAULT_AVATARS[\"retriever\"],\n respond=False,\n )\n return super().on_retriever_end(documents=documents, **kwargs)\n\n def on_text(self, text: str, **kwargs: Any):\n \"\"\"Run when text is received.\"\"\"\n return super().on_text(text, **kwargs)\n", "path": "panel/chat/langchain.py"}]} | 2,378 | 739 |
gh_patches_debug_11256 | rasdani/github-patches | git_diff | spyder-ide__spyder-4208 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IPython console fails to start (Ipython >=6.x)
## Description of your problem
**What steps will reproduce the problem?**
1. install latest IPython (>=6.x)
2. install spyder (git master)
3. run spyder3
**What is the expected output? What do you see instead?**
The IPython console should start, but instead it fails with an import error for

    IPython.core.usage.quick_guide

which is no longer available in the latest IPython.
**Please provide any additional information below**
Just commenting out the offending [line](https://github.com/spyder-ide/spyder/blob/3ea3f8fe6bcbfdec44db708c7895952c9e449205/spyder/widgets/ipythonconsole/shell.py#L91) gives a working IPython console.
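
For illustration, the fix can be as small as guarding that import; this is a sketch of the general approach, not necessarily the exact patch the project ships:

```python
# Sketch: fall back to an empty string when IPython no longer exposes quick_guide.
try:
    from IPython.core.usage import quick_guide
except ImportError:  # quick_guide was removed in IPython >= 6
    quick_guide = ''
```

With a fallback like this, `long_banner()` can still assemble its banner text even when the attribute is gone.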
## Versions and main components
* Spyder Version: git master
* Python Version: 3.6
* Qt Version: 5.8
* PyQt Version: 5.7
* Operating system: Linux
## Dependencies
Please go to the menu entry `Help > Optional Dependencies` (or
`Help > Dependencies`), press the button `Copy to clipboard`
and paste the contents below:
Computer used for testing is not available at the moment of writing; I'll edit the issue once it is available.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spyder/widgets/ipythonconsole/shell.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 """
8 Shell Widget for the IPython Console
9 """
10
11 import ast
12 import uuid
13
14 from qtpy.QtCore import Signal
15 from qtpy.QtWidgets import QMessageBox
16
17 from spyder.config.base import _
18 from spyder.config.gui import config_shortcut
19 from spyder.py3compat import to_text_string
20 from spyder.utils import programs
21 from spyder.widgets.ipythonconsole import (ControlWidget, DebuggingWidget,
22 HelpWidget, NamepaceBrowserWidget,
23 PageControlWidget)
24
25
26 class ShellWidget(NamepaceBrowserWidget, HelpWidget, DebuggingWidget):
27 """
28 Shell widget for the IPython Console
29
30 This is the widget in charge of executing code
31 """
32 # NOTE: Signals can't be assigned separately to each widget
33 # That's why we define all needed signals here.
34
35 # For NamepaceBrowserWidget
36 sig_namespace_view = Signal(object)
37 sig_var_properties = Signal(object)
38
39 # For DebuggingWidget
40 sig_input_reply = Signal()
41 sig_pdb_step = Signal(str, int)
42 sig_prompt_ready = Signal()
43 sig_dbg_kernel_restart = Signal()
44
45 # For ShellWidget
46 focus_changed = Signal()
47 new_client = Signal()
48 sig_got_reply = Signal()
49 sig_kernel_restarted = Signal(str)
50
51 def __init__(self, ipyclient, additional_options, interpreter_versions,
52 external_kernel, *args, **kw):
53 # To override the Qt widget used by RichJupyterWidget
54 self.custom_control = ControlWidget
55 self.custom_page_control = PageControlWidget
56 super(ShellWidget, self).__init__(*args, **kw)
57
58 self.ipyclient = ipyclient
59 self.additional_options = additional_options
60 self.interpreter_versions = interpreter_versions
61 self.external_kernel = external_kernel
62
63 self.set_background_color()
64
65 # Keyboard shortcuts
66 self.shortcuts = self.create_shortcuts()
67
68 # To save kernel replies in silent execution
69 self._kernel_reply = None
70
71 #---- Public API ----------------------------------------------------------
72 def set_exit_callback(self):
73 """Set exit callback for this shell."""
74 self.exit_requested.connect(self.ipyclient.exit_callback)
75
76 def is_running(self):
77 if self.kernel_client is not None and \
78 self.kernel_client.channels_running:
79 return True
80 else:
81 return False
82
83 def set_cwd(self, dirname):
84 """Set shell current working directory."""
85 return self.silent_execute(
86 u"get_ipython().kernel.set_cwd(r'{}')".format(dirname))
87
88 # --- To handle the banner
89 def long_banner(self):
90 """Banner for IPython widgets with pylab message"""
91 # Default banner
92 from IPython.core.usage import quick_guide
93 banner_parts = [
94 'Python %s\n' % self.interpreter_versions['python_version'],
95 'Type "copyright", "credits" or "license" for more information.\n\n',
96 'IPython %s -- An enhanced Interactive Python.\n' % \
97 self.interpreter_versions['ipython_version'],
98 quick_guide
99 ]
100 banner = ''.join(banner_parts)
101
102 # Pylab additions
103 pylab_o = self.additional_options['pylab']
104 autoload_pylab_o = self.additional_options['autoload_pylab']
105 mpl_installed = programs.is_module_installed('matplotlib')
106 if mpl_installed and (pylab_o and autoload_pylab_o):
107 pylab_message = ("\nPopulating the interactive namespace from "
108 "numpy and matplotlib\n")
109 banner = banner + pylab_message
110
111 # Sympy additions
112 sympy_o = self.additional_options['sympy']
113 if sympy_o:
114 lines = """
115 These commands were executed:
116 >>> from __future__ import division
117 >>> from sympy import *
118 >>> x, y, z, t = symbols('x y z t')
119 >>> k, m, n = symbols('k m n', integer=True)
120 >>> f, g, h = symbols('f g h', cls=Function)
121 """
122 banner = banner + lines
123 if (pylab_o and sympy_o):
124 lines = """
125 Warning: pylab (numpy and matplotlib) and symbolic math (sympy) are both
126 enabled at the same time. Some pylab functions are going to be overrided by
127 the sympy module (e.g. plot)
128 """
129 banner = banner + lines
130 return banner
131
132 def short_banner(self):
133 """Short banner with Python and QtConsole versions"""
134 banner = 'Python %s -- IPython %s' % (
135 self.interpreter_versions['python_version'],
136 self.interpreter_versions['ipython_version'])
137 return banner
138
139 # --- To define additional shortcuts
140 def clear_console(self):
141 self.execute("%clear")
142
143 def reset_namespace(self, force=False):
144 """Reset the namespace by removing all names defined by the user."""
145 reset_str = _("Reset IPython namespace")
146 warn_str = _("All user-defined variables will be removed."
147 "<br>Are you sure you want to reset the namespace?")
148 if not force:
149 reply = QMessageBox.question(self, reset_str,
150 warn_str,
151 QMessageBox.Yes | QMessageBox.No
152 )
153
154 if reply == QMessageBox.Yes:
155 self.execute("%reset -f")
156 else:
157 self.silent_execute("%reset -f")
158
159 def set_background_color(self):
160 light_color_o = self.additional_options['light_color']
161 if not light_color_o:
162 self.set_default_style(colors='linux')
163
164 def create_shortcuts(self):
165 inspect = config_shortcut(self._control.inspect_current_object,
166 context='Console', name='Inspect current object',
167 parent=self)
168 clear_console = config_shortcut(self.clear_console, context='Console',
169 name='Clear shell', parent=self)
170 restart_kernel = config_shortcut(self.ipyclient.restart_kernel,
171 context='ipython_console',
172 name='Restart kernel', parent=self)
173 new_tab = config_shortcut(lambda: self.new_client.emit(),
174 context='ipython_console', name='new tab', parent=self)
175 reset_namespace = config_shortcut(lambda: self.reset_namespace(),
176 context='ipython_console',
177 name='reset namespace', parent=self)
178 array_inline = config_shortcut(lambda: self.enter_array_inline(),
179 context='array_builder',
180 name='enter array inline', parent=self)
181 array_table = config_shortcut(lambda: self.enter_array_table(),
182 context='array_builder',
183 name='enter array table', parent=self)
184
185 return [inspect, clear_console, restart_kernel, new_tab,
186 reset_namespace, array_inline, array_table]
187
188 # --- To communicate with the kernel
189 def silent_execute(self, code):
190 """Execute code in the kernel without increasing the prompt"""
191 self.kernel_client.execute(to_text_string(code), silent=True)
192
193 def silent_exec_method(self, code):
194 """Silently execute a kernel method and save its reply
195
196 The methods passed here **don't** involve getting the value
197 of a variable but instead replies that can be handled by
198 ast.literal_eval.
199
200 To get a value see `get_value`
201
202 Parameters
203 ----------
204 code : string
205 Code that contains the kernel method as part of its
206 string
207
208 See Also
209 --------
210 handle_exec_method : Method that deals with the reply
211
212 Note
213 ----
214 This is based on the _silent_exec_callback method of
215 RichJupyterWidget. Therefore this is licensed BSD
216 """
217 # Generate uuid, which would be used as an indication of whether or
218 # not the unique request originated from here
219 local_uuid = to_text_string(uuid.uuid1())
220 code = to_text_string(code)
221 msg_id = self.kernel_client.execute('', silent=True,
222 user_expressions={ local_uuid:code })
223 self._kernel_methods[local_uuid] = code
224 self._request_info['execute'][msg_id] = self._ExecutionRequest(msg_id,
225 'silent_exec_method')
226
227 def handle_exec_method(self, msg):
228 """
229 Handle data returned by silent executions of kernel methods
230
231 This is based on the _handle_exec_callback of RichJupyterWidget.
232 Therefore this is licensed BSD.
233 """
234 user_exp = msg['content'].get('user_expressions')
235 if not user_exp:
236 return
237 for expression in user_exp:
238 if expression in self._kernel_methods:
239 # Process kernel reply
240 method = self._kernel_methods[expression]
241 reply = user_exp[expression]
242 data = reply.get('data')
243 if 'get_namespace_view' in method:
244 if data is not None and 'text/plain' in data:
245 view = ast.literal_eval(data['text/plain'])
246 else:
247 view = None
248 self.sig_namespace_view.emit(view)
249 elif 'get_var_properties' in method:
250 if data is not None and 'text/plain' in data:
251 properties = ast.literal_eval(data['text/plain'])
252 else:
253 properties = None
254 self.sig_var_properties.emit(properties)
255 else:
256 if data is not None and 'text/plain' in data:
257 self._kernel_reply = ast.literal_eval(data['text/plain'])
258 else:
259 self._kernel_reply = None
260 self.sig_got_reply.emit()
261
262 # Remove method after being processed
263 self._kernel_methods.pop(expression)
264
265 #---- Private methods (overrode by us) ---------------------------------
266 def _context_menu_make(self, pos):
267 """Reimplement the IPython context menu"""
268 menu = super(ShellWidget, self)._context_menu_make(pos)
269 return self.ipyclient.add_actions_to_context_menu(menu)
270
271 def _banner_default(self):
272 """
273 Reimplement banner creation to let the user decide if he wants a
274 banner or not
275 """
276 # Don't change banner for external kernels
277 if self.external_kernel:
278 return ''
279 show_banner_o = self.additional_options['show_banner']
280 if show_banner_o:
281 return self.long_banner()
282 else:
283 return self.short_banner()
284
285 def _kernel_restarted_message(self, died=True):
286 msg = _("Kernel died, restarting") if died else _("Kernel restarting")
287 self.sig_kernel_restarted.emit(msg)
288
289 #---- Qt methods ----------------------------------------------------------
290 def focusInEvent(self, event):
291 """Reimplement Qt method to send focus change notification"""
292 self.focus_changed.emit()
293 return super(ShellWidget, self).focusInEvent(event)
294
295 def focusOutEvent(self, event):
296 """Reimplement Qt method to send focus change notification"""
297 self.focus_changed.emit()
298 return super(ShellWidget, self).focusOutEvent(event)
299
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/spyder/widgets/ipythonconsole/shell.py b/spyder/widgets/ipythonconsole/shell.py
--- a/spyder/widgets/ipythonconsole/shell.py
+++ b/spyder/widgets/ipythonconsole/shell.py
@@ -89,7 +89,10 @@
def long_banner(self):
"""Banner for IPython widgets with pylab message"""
# Default banner
- from IPython.core.usage import quick_guide
+ try:
+ from IPython.core.usage import quick_guide
+ except Exception:
+ quick_guide = ''
banner_parts = [
'Python %s\n' % self.interpreter_versions['python_version'],
'Type "copyright", "credits" or "license" for more information.\n\n',
| {"golden_diff": "diff --git a/spyder/widgets/ipythonconsole/shell.py b/spyder/widgets/ipythonconsole/shell.py\n--- a/spyder/widgets/ipythonconsole/shell.py\n+++ b/spyder/widgets/ipythonconsole/shell.py\n@@ -89,7 +89,10 @@\n def long_banner(self):\n \"\"\"Banner for IPython widgets with pylab message\"\"\"\n # Default banner\n- from IPython.core.usage import quick_guide\n+ try:\n+ from IPython.core.usage import quick_guide\n+ except Exception:\n+ quick_guide = ''\n banner_parts = [\n 'Python %s\\n' % self.interpreter_versions['python_version'],\n 'Type \"copyright\", \"credits\" or \"license\" for more information.\\n\\n',\n", "issue": "IPython console fails to start (Ipython >=6.x)\n## Description of your problem\r\n\r\n**What steps will reproduce the problem?**\r\n\r\n1. install latest IPython (>=6.x)\r\n2. install spyder (git master)\r\n3. run spyder3\r\n\r\n**What is the expected output? What do you see instead?**\r\n\r\nThe Ipython console should start but instead it fails with an import error for a missing module:\r\n\r\n ipython.core.utils.quick_guide\r\n\r\nwhich is no more available in latest IPython.\r\n\r\n**Please provide any additional information below**\r\n\r\njust commenting the offending [line](https://github.com/spyder-ide/spyder/blob/3ea3f8fe6bcbfdec44db708c7895952c9e449205/spyder/widgets/ipythonconsole/shell.py#L91) gives a working ipython console.\r\n\r\n\r\n## Versions and main components\r\n\r\n* Spyder Version: git master\r\n* Python Version: 3.6\r\n* Qt Version: 5.8\r\n* PyQt Version: 5.7\r\n* Operating system: Linux\r\n\r\n\r\n## Dependencies\r\n\r\nPlease go to the menu entry `Help > Optional Dependencies` (or\r\n`Help > Dependencies`), press the button `Copy to clipboard`\r\nand paste the contents below:\r\n\r\nComputer Used for testing not available at the moment of writing, I'll edit the issue once avaialable \r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"\nShell Widget for the IPython Console\n\"\"\"\n\nimport ast\nimport uuid\n\nfrom qtpy.QtCore import Signal\nfrom qtpy.QtWidgets import QMessageBox\n\nfrom spyder.config.base import _\nfrom spyder.config.gui import config_shortcut\nfrom spyder.py3compat import to_text_string\nfrom spyder.utils import programs\nfrom spyder.widgets.ipythonconsole import (ControlWidget, DebuggingWidget,\n HelpWidget, NamepaceBrowserWidget,\n PageControlWidget)\n\n\nclass ShellWidget(NamepaceBrowserWidget, HelpWidget, DebuggingWidget):\n \"\"\"\n Shell widget for the IPython Console\n\n This is the widget in charge of executing code\n \"\"\"\n # NOTE: Signals can't be assigned separately to each widget\n # That's why we define all needed signals here.\n\n # For NamepaceBrowserWidget\n sig_namespace_view = Signal(object)\n sig_var_properties = Signal(object)\n\n # For DebuggingWidget\n sig_input_reply = Signal()\n sig_pdb_step = Signal(str, int)\n sig_prompt_ready = Signal()\n sig_dbg_kernel_restart = Signal()\n\n # For ShellWidget\n focus_changed = Signal()\n new_client = Signal()\n sig_got_reply = Signal()\n sig_kernel_restarted = Signal(str)\n\n def __init__(self, ipyclient, additional_options, interpreter_versions,\n external_kernel, *args, **kw):\n # To override the Qt widget used by RichJupyterWidget\n self.custom_control = ControlWidget\n self.custom_page_control = PageControlWidget\n super(ShellWidget, self).__init__(*args, **kw)\n\n self.ipyclient = ipyclient\n 
self.additional_options = additional_options\n self.interpreter_versions = interpreter_versions\n self.external_kernel = external_kernel\n\n self.set_background_color()\n\n # Keyboard shortcuts\n self.shortcuts = self.create_shortcuts()\n\n # To save kernel replies in silent execution\n self._kernel_reply = None\n\n #---- Public API ----------------------------------------------------------\n def set_exit_callback(self):\n \"\"\"Set exit callback for this shell.\"\"\"\n self.exit_requested.connect(self.ipyclient.exit_callback)\n\n def is_running(self):\n if self.kernel_client is not None and \\\n self.kernel_client.channels_running:\n return True\n else:\n return False\n\n def set_cwd(self, dirname):\n \"\"\"Set shell current working directory.\"\"\"\n return self.silent_execute(\n u\"get_ipython().kernel.set_cwd(r'{}')\".format(dirname))\n\n # --- To handle the banner\n def long_banner(self):\n \"\"\"Banner for IPython widgets with pylab message\"\"\"\n # Default banner\n from IPython.core.usage import quick_guide\n banner_parts = [\n 'Python %s\\n' % self.interpreter_versions['python_version'],\n 'Type \"copyright\", \"credits\" or \"license\" for more information.\\n\\n',\n 'IPython %s -- An enhanced Interactive Python.\\n' % \\\n self.interpreter_versions['ipython_version'],\n quick_guide\n ]\n banner = ''.join(banner_parts)\n\n # Pylab additions\n pylab_o = self.additional_options['pylab']\n autoload_pylab_o = self.additional_options['autoload_pylab']\n mpl_installed = programs.is_module_installed('matplotlib')\n if mpl_installed and (pylab_o and autoload_pylab_o):\n pylab_message = (\"\\nPopulating the interactive namespace from \"\n \"numpy and matplotlib\\n\")\n banner = banner + pylab_message\n\n # Sympy additions\n sympy_o = self.additional_options['sympy']\n if sympy_o:\n lines = \"\"\"\nThese commands were executed:\n>>> from __future__ import division\n>>> from sympy import *\n>>> x, y, z, t = symbols('x y z t')\n>>> k, m, n = symbols('k m n', integer=True)\n>>> f, g, h = symbols('f g h', cls=Function)\n\"\"\"\n banner = banner + lines\n if (pylab_o and sympy_o):\n lines = \"\"\"\nWarning: pylab (numpy and matplotlib) and symbolic math (sympy) are both \nenabled at the same time. Some pylab functions are going to be overrided by \nthe sympy module (e.g. 
plot)\n\"\"\"\n banner = banner + lines\n return banner\n\n def short_banner(self):\n \"\"\"Short banner with Python and QtConsole versions\"\"\"\n banner = 'Python %s -- IPython %s' % (\n self.interpreter_versions['python_version'],\n self.interpreter_versions['ipython_version'])\n return banner\n\n # --- To define additional shortcuts\n def clear_console(self):\n self.execute(\"%clear\")\n\n def reset_namespace(self, force=False):\n \"\"\"Reset the namespace by removing all names defined by the user.\"\"\"\n reset_str = _(\"Reset IPython namespace\")\n warn_str = _(\"All user-defined variables will be removed.\"\n \"<br>Are you sure you want to reset the namespace?\")\n if not force:\n reply = QMessageBox.question(self, reset_str,\n warn_str,\n QMessageBox.Yes | QMessageBox.No\n )\n\n if reply == QMessageBox.Yes:\n self.execute(\"%reset -f\")\n else:\n self.silent_execute(\"%reset -f\")\n\n def set_background_color(self):\n light_color_o = self.additional_options['light_color']\n if not light_color_o:\n self.set_default_style(colors='linux')\n\n def create_shortcuts(self):\n inspect = config_shortcut(self._control.inspect_current_object,\n context='Console', name='Inspect current object',\n parent=self)\n clear_console = config_shortcut(self.clear_console, context='Console',\n name='Clear shell', parent=self)\n restart_kernel = config_shortcut(self.ipyclient.restart_kernel,\n context='ipython_console',\n name='Restart kernel', parent=self)\n new_tab = config_shortcut(lambda: self.new_client.emit(),\n context='ipython_console', name='new tab', parent=self)\n reset_namespace = config_shortcut(lambda: self.reset_namespace(),\n context='ipython_console',\n name='reset namespace', parent=self)\n array_inline = config_shortcut(lambda: self.enter_array_inline(),\n context='array_builder',\n name='enter array inline', parent=self)\n array_table = config_shortcut(lambda: self.enter_array_table(),\n context='array_builder',\n name='enter array table', parent=self)\n\n return [inspect, clear_console, restart_kernel, new_tab,\n reset_namespace, array_inline, array_table]\n\n # --- To communicate with the kernel\n def silent_execute(self, code):\n \"\"\"Execute code in the kernel without increasing the prompt\"\"\"\n self.kernel_client.execute(to_text_string(code), silent=True)\n\n def silent_exec_method(self, code):\n \"\"\"Silently execute a kernel method and save its reply\n\n The methods passed here **don't** involve getting the value\n of a variable but instead replies that can be handled by\n ast.literal_eval.\n\n To get a value see `get_value`\n\n Parameters\n ----------\n code : string\n Code that contains the kernel method as part of its\n string\n\n See Also\n --------\n handle_exec_method : Method that deals with the reply\n\n Note\n ----\n This is based on the _silent_exec_callback method of\n RichJupyterWidget. 
Therefore this is licensed BSD\n \"\"\"\n # Generate uuid, which would be used as an indication of whether or\n # not the unique request originated from here\n local_uuid = to_text_string(uuid.uuid1())\n code = to_text_string(code)\n msg_id = self.kernel_client.execute('', silent=True,\n user_expressions={ local_uuid:code })\n self._kernel_methods[local_uuid] = code\n self._request_info['execute'][msg_id] = self._ExecutionRequest(msg_id,\n 'silent_exec_method')\n\n def handle_exec_method(self, msg):\n \"\"\"\n Handle data returned by silent executions of kernel methods\n\n This is based on the _handle_exec_callback of RichJupyterWidget.\n Therefore this is licensed BSD.\n \"\"\"\n user_exp = msg['content'].get('user_expressions')\n if not user_exp:\n return\n for expression in user_exp:\n if expression in self._kernel_methods:\n # Process kernel reply\n method = self._kernel_methods[expression]\n reply = user_exp[expression]\n data = reply.get('data')\n if 'get_namespace_view' in method:\n if data is not None and 'text/plain' in data:\n view = ast.literal_eval(data['text/plain'])\n else:\n view = None\n self.sig_namespace_view.emit(view)\n elif 'get_var_properties' in method:\n if data is not None and 'text/plain' in data:\n properties = ast.literal_eval(data['text/plain'])\n else:\n properties = None\n self.sig_var_properties.emit(properties)\n else:\n if data is not None and 'text/plain' in data:\n self._kernel_reply = ast.literal_eval(data['text/plain'])\n else:\n self._kernel_reply = None\n self.sig_got_reply.emit()\n\n # Remove method after being processed\n self._kernel_methods.pop(expression)\n\n #---- Private methods (overrode by us) ---------------------------------\n def _context_menu_make(self, pos):\n \"\"\"Reimplement the IPython context menu\"\"\"\n menu = super(ShellWidget, self)._context_menu_make(pos)\n return self.ipyclient.add_actions_to_context_menu(menu)\n\n def _banner_default(self):\n \"\"\"\n Reimplement banner creation to let the user decide if he wants a\n banner or not\n \"\"\"\n # Don't change banner for external kernels\n if self.external_kernel:\n return ''\n show_banner_o = self.additional_options['show_banner']\n if show_banner_o:\n return self.long_banner()\n else:\n return self.short_banner()\n\n def _kernel_restarted_message(self, died=True):\n msg = _(\"Kernel died, restarting\") if died else _(\"Kernel restarting\")\n self.sig_kernel_restarted.emit(msg)\n\n #---- Qt methods ----------------------------------------------------------\n def focusInEvent(self, event):\n \"\"\"Reimplement Qt method to send focus change notification\"\"\"\n self.focus_changed.emit()\n return super(ShellWidget, self).focusInEvent(event)\n\n def focusOutEvent(self, event):\n \"\"\"Reimplement Qt method to send focus change notification\"\"\"\n self.focus_changed.emit()\n return super(ShellWidget, self).focusOutEvent(event)\n", "path": "spyder/widgets/ipythonconsole/shell.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"\nShell Widget for the IPython Console\n\"\"\"\n\nimport ast\nimport uuid\n\nfrom qtpy.QtCore import Signal\nfrom qtpy.QtWidgets import QMessageBox\n\nfrom spyder.config.base import _\nfrom spyder.config.gui import config_shortcut\nfrom spyder.py3compat import to_text_string\nfrom spyder.utils import programs\nfrom spyder.widgets.ipythonconsole import (ControlWidget, DebuggingWidget,\n HelpWidget, 
NamepaceBrowserWidget,\n PageControlWidget)\n\n\nclass ShellWidget(NamepaceBrowserWidget, HelpWidget, DebuggingWidget):\n \"\"\"\n Shell widget for the IPython Console\n\n This is the widget in charge of executing code\n \"\"\"\n # NOTE: Signals can't be assigned separately to each widget\n # That's why we define all needed signals here.\n\n # For NamepaceBrowserWidget\n sig_namespace_view = Signal(object)\n sig_var_properties = Signal(object)\n\n # For DebuggingWidget\n sig_input_reply = Signal()\n sig_pdb_step = Signal(str, int)\n sig_prompt_ready = Signal()\n sig_dbg_kernel_restart = Signal()\n\n # For ShellWidget\n focus_changed = Signal()\n new_client = Signal()\n sig_got_reply = Signal()\n sig_kernel_restarted = Signal(str)\n\n def __init__(self, ipyclient, additional_options, interpreter_versions,\n external_kernel, *args, **kw):\n # To override the Qt widget used by RichJupyterWidget\n self.custom_control = ControlWidget\n self.custom_page_control = PageControlWidget\n super(ShellWidget, self).__init__(*args, **kw)\n\n self.ipyclient = ipyclient\n self.additional_options = additional_options\n self.interpreter_versions = interpreter_versions\n self.external_kernel = external_kernel\n\n self.set_background_color()\n\n # Keyboard shortcuts\n self.shortcuts = self.create_shortcuts()\n\n # To save kernel replies in silent execution\n self._kernel_reply = None\n\n #---- Public API ----------------------------------------------------------\n def set_exit_callback(self):\n \"\"\"Set exit callback for this shell.\"\"\"\n self.exit_requested.connect(self.ipyclient.exit_callback)\n\n def is_running(self):\n if self.kernel_client is not None and \\\n self.kernel_client.channels_running:\n return True\n else:\n return False\n\n def set_cwd(self, dirname):\n \"\"\"Set shell current working directory.\"\"\"\n return self.silent_execute(\n u\"get_ipython().kernel.set_cwd(r'{}')\".format(dirname))\n\n # --- To handle the banner\n def long_banner(self):\n \"\"\"Banner for IPython widgets with pylab message\"\"\"\n # Default banner\n try:\n from IPython.core.usage import quick_guide\n except Exception:\n quick_guide = ''\n banner_parts = [\n 'Python %s\\n' % self.interpreter_versions['python_version'],\n 'Type \"copyright\", \"credits\" or \"license\" for more information.\\n\\n',\n 'IPython %s -- An enhanced Interactive Python.\\n' % \\\n self.interpreter_versions['ipython_version'],\n quick_guide\n ]\n banner = ''.join(banner_parts)\n\n # Pylab additions\n pylab_o = self.additional_options['pylab']\n autoload_pylab_o = self.additional_options['autoload_pylab']\n mpl_installed = programs.is_module_installed('matplotlib')\n if mpl_installed and (pylab_o and autoload_pylab_o):\n pylab_message = (\"\\nPopulating the interactive namespace from \"\n \"numpy and matplotlib\\n\")\n banner = banner + pylab_message\n\n # Sympy additions\n sympy_o = self.additional_options['sympy']\n if sympy_o:\n lines = \"\"\"\nThese commands were executed:\n>>> from __future__ import division\n>>> from sympy import *\n>>> x, y, z, t = symbols('x y z t')\n>>> k, m, n = symbols('k m n', integer=True)\n>>> f, g, h = symbols('f g h', cls=Function)\n\"\"\"\n banner = banner + lines\n if (pylab_o and sympy_o):\n lines = \"\"\"\nWarning: pylab (numpy and matplotlib) and symbolic math (sympy) are both \nenabled at the same time. Some pylab functions are going to be overrided by \nthe sympy module (e.g. 
plot)\n\"\"\"\n banner = banner + lines\n return banner\n\n def short_banner(self):\n \"\"\"Short banner with Python and QtConsole versions\"\"\"\n banner = 'Python %s -- IPython %s' % (\n self.interpreter_versions['python_version'],\n self.interpreter_versions['ipython_version'])\n return banner\n\n # --- To define additional shortcuts\n def clear_console(self):\n self.execute(\"%clear\")\n\n def reset_namespace(self, force=False):\n \"\"\"Reset the namespace by removing all names defined by the user.\"\"\"\n reset_str = _(\"Reset IPython namespace\")\n warn_str = _(\"All user-defined variables will be removed.\"\n \"<br>Are you sure you want to reset the namespace?\")\n if not force:\n reply = QMessageBox.question(self, reset_str,\n warn_str,\n QMessageBox.Yes | QMessageBox.No\n )\n\n if reply == QMessageBox.Yes:\n self.execute(\"%reset -f\")\n else:\n self.silent_execute(\"%reset -f\")\n\n def set_background_color(self):\n light_color_o = self.additional_options['light_color']\n if not light_color_o:\n self.set_default_style(colors='linux')\n\n def create_shortcuts(self):\n inspect = config_shortcut(self._control.inspect_current_object,\n context='Console', name='Inspect current object',\n parent=self)\n clear_console = config_shortcut(self.clear_console, context='Console',\n name='Clear shell', parent=self)\n restart_kernel = config_shortcut(self.ipyclient.restart_kernel,\n context='ipython_console',\n name='Restart kernel', parent=self)\n new_tab = config_shortcut(lambda: self.new_client.emit(),\n context='ipython_console', name='new tab', parent=self)\n reset_namespace = config_shortcut(lambda: self.reset_namespace(),\n context='ipython_console',\n name='reset namespace', parent=self)\n array_inline = config_shortcut(lambda: self.enter_array_inline(),\n context='array_builder',\n name='enter array inline', parent=self)\n array_table = config_shortcut(lambda: self.enter_array_table(),\n context='array_builder',\n name='enter array table', parent=self)\n\n return [inspect, clear_console, restart_kernel, new_tab,\n reset_namespace, array_inline, array_table]\n\n # --- To communicate with the kernel\n def silent_execute(self, code):\n \"\"\"Execute code in the kernel without increasing the prompt\"\"\"\n self.kernel_client.execute(to_text_string(code), silent=True)\n\n def silent_exec_method(self, code):\n \"\"\"Silently execute a kernel method and save its reply\n\n The methods passed here **don't** involve getting the value\n of a variable but instead replies that can be handled by\n ast.literal_eval.\n\n To get a value see `get_value`\n\n Parameters\n ----------\n code : string\n Code that contains the kernel method as part of its\n string\n\n See Also\n --------\n handle_exec_method : Method that deals with the reply\n\n Note\n ----\n This is based on the _silent_exec_callback method of\n RichJupyterWidget. 
Therefore this is licensed BSD\n \"\"\"\n # Generate uuid, which would be used as an indication of whether or\n # not the unique request originated from here\n local_uuid = to_text_string(uuid.uuid1())\n code = to_text_string(code)\n msg_id = self.kernel_client.execute('', silent=True,\n user_expressions={ local_uuid:code })\n self._kernel_methods[local_uuid] = code\n self._request_info['execute'][msg_id] = self._ExecutionRequest(msg_id,\n 'silent_exec_method')\n\n def handle_exec_method(self, msg):\n \"\"\"\n Handle data returned by silent executions of kernel methods\n\n This is based on the _handle_exec_callback of RichJupyterWidget.\n Therefore this is licensed BSD.\n \"\"\"\n user_exp = msg['content'].get('user_expressions')\n if not user_exp:\n return\n for expression in user_exp:\n if expression in self._kernel_methods:\n # Process kernel reply\n method = self._kernel_methods[expression]\n reply = user_exp[expression]\n data = reply.get('data')\n if 'get_namespace_view' in method:\n if data is not None and 'text/plain' in data:\n view = ast.literal_eval(data['text/plain'])\n else:\n view = None\n self.sig_namespace_view.emit(view)\n elif 'get_var_properties' in method:\n if data is not None and 'text/plain' in data:\n properties = ast.literal_eval(data['text/plain'])\n else:\n properties = None\n self.sig_var_properties.emit(properties)\n else:\n if data is not None and 'text/plain' in data:\n self._kernel_reply = ast.literal_eval(data['text/plain'])\n else:\n self._kernel_reply = None\n self.sig_got_reply.emit()\n\n # Remove method after being processed\n self._kernel_methods.pop(expression)\n\n #---- Private methods (overrode by us) ---------------------------------\n def _context_menu_make(self, pos):\n \"\"\"Reimplement the IPython context menu\"\"\"\n menu = super(ShellWidget, self)._context_menu_make(pos)\n return self.ipyclient.add_actions_to_context_menu(menu)\n\n def _banner_default(self):\n \"\"\"\n Reimplement banner creation to let the user decide if he wants a\n banner or not\n \"\"\"\n # Don't change banner for external kernels\n if self.external_kernel:\n return ''\n show_banner_o = self.additional_options['show_banner']\n if show_banner_o:\n return self.long_banner()\n else:\n return self.short_banner()\n\n def _kernel_restarted_message(self, died=True):\n msg = _(\"Kernel died, restarting\") if died else _(\"Kernel restarting\")\n self.sig_kernel_restarted.emit(msg)\n\n #---- Qt methods ----------------------------------------------------------\n def focusInEvent(self, event):\n \"\"\"Reimplement Qt method to send focus change notification\"\"\"\n self.focus_changed.emit()\n return super(ShellWidget, self).focusInEvent(event)\n\n def focusOutEvent(self, event):\n \"\"\"Reimplement Qt method to send focus change notification\"\"\"\n self.focus_changed.emit()\n return super(ShellWidget, self).focusOutEvent(event)\n", "path": "spyder/widgets/ipythonconsole/shell.py"}]} | 3,657 | 169 |
gh_patches_debug_37127 | rasdani/github-patches | git_diff | pantsbuild__pants-11673 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
python_source_root not added to syspath
I have a project structure along the lines of this:
```
packages/
package_a/
BUILD
__init__.py
package_a.py
package_b/
BUILD
__init__.py
package_b.py
protos/
package_a/
BUILD
foobar.proto
service_a/
BUILD
bazqux.proto
services/
service_a/
BUILD
__init__.py
main.py
service_b/
BUILD
__init__.py
main.py
```
`/packages`, `/protos` and `/services` are all configured as source roots. In each BUILD file residing in `protos/*` I have either the line `protobuf_library(python_source_root='services')` or `protobuf_library(python_source_root='packages')`, which means that the generated code ends up in `packages/*` or `services/*` instead.
This works just fine as long as the running code is in the same source root as the generated protobuf code, but when code in `services/` depends on protos that have `python_source_root` set to `packages`, Python can't find the module unless an actual module from the same source root is also a dependency. I did some digging around, and it seems the issue is that the source root specified in `python_source_root` is never explicitly added to Python's `sys.path`, which is why imports fail if no "real" packages from the same source root are used. So, using the same example as earlier, I see `services` and `protos` on `sys.path`, but `packages`, where the generated code is placed, is missing.
I created a [proof-of-concept repository](https://github.com/jyggen/pants-issue-11666) in case my rambling makes little sense. The issue can be seen by running `./pants test services/::`.
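
To make the workaround concrete, here is a hypothetical BUILD file for `services/service_a` (the `tests` target name and the addresses are assumptions based on the tree above, not taken from the repository): the generated proto code only imports cleanly when a real target from the `packages` root is also listed, because only then does `packages` end up on `sys.path`.

```python
# services/service_a/BUILD -- illustrative sketch only
python_tests(
    name="tests",
    dependencies=[
        "protos/package_a",    # generated code is relocated under packages/ via python_source_root
        "packages/package_a",  # workaround: a real target in that root forces "packages" onto sys.path
    ],
)
```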
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/backend/python/util_rules/python_sources.py`
Content:
```
1 # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from dataclasses import dataclass
5 from typing import Iterable, List, Tuple, Type
6
7 from pants.backend.python.target_types import PythonSources
8 from pants.backend.python.util_rules import ancestor_files
9 from pants.backend.python.util_rules.ancestor_files import AncestorFiles, AncestorFilesRequest
10 from pants.core.target_types import FilesSources, ResourcesSources
11 from pants.core.util_rules import source_files, stripped_source_files
12 from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
13 from pants.core.util_rules.stripped_source_files import StrippedSourceFiles
14 from pants.engine.fs import MergeDigests, Snapshot
15 from pants.engine.rules import Get, MultiGet, collect_rules, rule
16 from pants.engine.target import Sources, Target
17 from pants.engine.unions import UnionMembership
18 from pants.source.source_root import SourceRoot, SourceRootRequest
19 from pants.util.logging import LogLevel
20 from pants.util.meta import frozen_after_init
21
22
23 @dataclass(frozen=True)
24 class PythonSourceFiles:
25 """Sources that can be introspected by Python, relative to a set of source roots.
26
27 Specifically, this will filter out to only have Python, and, optionally, resources() and
28 files() targets; and will add any missing `__init__.py` files to ensure that modules are
29 recognized correctly.
30
31 Use-cases that introspect Python source code (e.g., the `test, `lint`, `fmt` goals) can
32 request this type to get relevant sources that are still relative to their source roots.
33 That way the paths they report are the unstripped ones the user is familiar with.
34
35 The sources can also be imported and used by Python (e.g., for the `test` goal), but only
36 if sys.path is modified to include the source roots.
37 """
38
39 source_files: SourceFiles
40 source_roots: Tuple[str, ...] # Source roots for the specified source files.
41
42
43 @dataclass(frozen=True)
44 class StrippedPythonSourceFiles:
45 """A PythonSourceFiles that has had its source roots stripped."""
46
47 stripped_source_files: StrippedSourceFiles
48
49
50 @frozen_after_init
51 @dataclass(unsafe_hash=True)
52 class PythonSourceFilesRequest:
53 targets: Tuple[Target, ...]
54 include_resources: bool
55 include_files: bool
56
57 def __init__(
58 self,
59 targets: Iterable[Target],
60 *,
61 include_resources: bool = True,
62 include_files: bool = False
63 ) -> None:
64 self.targets = tuple(targets)
65 self.include_resources = include_resources
66 self.include_files = include_files
67
68 @property
69 def valid_sources_types(self) -> Tuple[Type[Sources], ...]:
70 types: List[Type[Sources]] = [PythonSources]
71 if self.include_resources:
72 types.append(ResourcesSources)
73 if self.include_files:
74 types.append(FilesSources)
75 return tuple(types)
76
77
78 @rule(level=LogLevel.DEBUG)
79 async def prepare_python_sources(
80 request: PythonSourceFilesRequest, union_membership: UnionMembership
81 ) -> PythonSourceFiles:
82 sources = await Get(
83 SourceFiles,
84 SourceFilesRequest(
85 (tgt.get(Sources) for tgt in request.targets),
86 for_sources_types=request.valid_sources_types,
87 enable_codegen=True,
88 ),
89 )
90
91 missing_init_files = await Get(
92 AncestorFiles,
93 AncestorFilesRequest("__init__.py", sources.snapshot),
94 )
95
96 init_injected = await Get(
97 Snapshot,
98 MergeDigests((sources.snapshot.digest, missing_init_files.snapshot.digest)),
99 )
100
101 source_root_objs = await MultiGet(
102 Get(SourceRoot, SourceRootRequest, SourceRootRequest.for_target(tgt))
103 for tgt in request.targets
104 if (
105 tgt.has_field(PythonSources)
106 or tgt.has_field(ResourcesSources)
107 or tgt.get(Sources).can_generate(PythonSources, union_membership)
108 or tgt.get(Sources).can_generate(ResourcesSources, union_membership)
109 )
110 )
111 source_root_paths = {source_root_obj.path for source_root_obj in source_root_objs}
112 return PythonSourceFiles(
113 SourceFiles(init_injected, sources.unrooted_files), tuple(sorted(source_root_paths))
114 )
115
116
117 @rule(level=LogLevel.DEBUG)
118 async def strip_python_sources(python_sources: PythonSourceFiles) -> StrippedPythonSourceFiles:
119 stripped = await Get(StrippedSourceFiles, SourceFiles, python_sources.source_files)
120 return StrippedPythonSourceFiles(stripped)
121
122
123 def rules():
124 return [
125 *collect_rules(),
126 *ancestor_files.rules(),
127 *source_files.rules(),
128 *stripped_source_files.rules(),
129 ]
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/python/pants/backend/python/util_rules/python_sources.py b/src/python/pants/backend/python/util_rules/python_sources.py
--- a/src/python/pants/backend/python/util_rules/python_sources.py
+++ b/src/python/pants/backend/python/util_rules/python_sources.py
@@ -13,7 +13,7 @@
from pants.core.util_rules.stripped_source_files import StrippedSourceFiles
from pants.engine.fs import MergeDigests, Snapshot
from pants.engine.rules import Get, MultiGet, collect_rules, rule
-from pants.engine.target import Sources, Target
+from pants.engine.target import HydratedSources, HydrateSourcesRequest, Sources, Target
from pants.engine.unions import UnionMembership
from pants.source.source_root import SourceRoot, SourceRootRequest
from pants.util.logging import LogLevel
@@ -98,15 +98,39 @@
MergeDigests((sources.snapshot.digest, missing_init_files.snapshot.digest)),
)
- source_root_objs = await MultiGet(
- Get(SourceRoot, SourceRootRequest, SourceRootRequest.for_target(tgt))
- for tgt in request.targets
- if (
- tgt.has_field(PythonSources)
- or tgt.has_field(ResourcesSources)
- or tgt.get(Sources).can_generate(PythonSources, union_membership)
- or tgt.get(Sources).can_generate(ResourcesSources, union_membership)
+ # Codegen is able to generate code in any arbitrary location, unlike sources normally being
+ # rooted under the target definition. To determine source roots for these generated files, we
+ # cannot use the normal `SourceRootRequest.for_target()` and we instead must determine
+ # a source root for every individual generated file. So, we re-resolve the codegen sources here.
+ python_and_resources_targets = []
+ codegen_targets = []
+ for tgt in request.targets:
+ if tgt.has_field(PythonSources) or tgt.has_field(ResourcesSources):
+ python_and_resources_targets.append(tgt)
+ elif tgt.get(Sources).can_generate(PythonSources, union_membership) or tgt.get(
+ Sources
+ ).can_generate(ResourcesSources, union_membership):
+ codegen_targets.append(tgt)
+ codegen_sources = await MultiGet(
+ Get(
+ HydratedSources,
+ HydrateSourcesRequest(
+ tgt.get(Sources), for_sources_types=request.valid_sources_types, enable_codegen=True
+ ),
)
+ for tgt in codegen_targets
+ )
+ source_root_requests = [
+ *(SourceRootRequest.for_target(tgt) for tgt in python_and_resources_targets),
+ *(
+ SourceRootRequest.for_file(f)
+ for sources in codegen_sources
+ for f in sources.snapshot.files
+ ),
+ ]
+
+ source_root_objs = await MultiGet(
+ Get(SourceRoot, SourceRootRequest, req) for req in source_root_requests
)
source_root_paths = {source_root_obj.path for source_root_obj in source_root_objs}
return PythonSourceFiles(
| {"golden_diff": "diff --git a/src/python/pants/backend/python/util_rules/python_sources.py b/src/python/pants/backend/python/util_rules/python_sources.py\n--- a/src/python/pants/backend/python/util_rules/python_sources.py\n+++ b/src/python/pants/backend/python/util_rules/python_sources.py\n@@ -13,7 +13,7 @@\n from pants.core.util_rules.stripped_source_files import StrippedSourceFiles\n from pants.engine.fs import MergeDigests, Snapshot\n from pants.engine.rules import Get, MultiGet, collect_rules, rule\n-from pants.engine.target import Sources, Target\n+from pants.engine.target import HydratedSources, HydrateSourcesRequest, Sources, Target\n from pants.engine.unions import UnionMembership\n from pants.source.source_root import SourceRoot, SourceRootRequest\n from pants.util.logging import LogLevel\n@@ -98,15 +98,39 @@\n MergeDigests((sources.snapshot.digest, missing_init_files.snapshot.digest)),\n )\n \n- source_root_objs = await MultiGet(\n- Get(SourceRoot, SourceRootRequest, SourceRootRequest.for_target(tgt))\n- for tgt in request.targets\n- if (\n- tgt.has_field(PythonSources)\n- or tgt.has_field(ResourcesSources)\n- or tgt.get(Sources).can_generate(PythonSources, union_membership)\n- or tgt.get(Sources).can_generate(ResourcesSources, union_membership)\n+ # Codegen is able to generate code in any arbitrary location, unlike sources normally being\n+ # rooted under the target definition. To determine source roots for these generated files, we\n+ # cannot use the normal `SourceRootRequest.for_target()` and we instead must determine\n+ # a source root for every individual generated file. So, we re-resolve the codegen sources here.\n+ python_and_resources_targets = []\n+ codegen_targets = []\n+ for tgt in request.targets:\n+ if tgt.has_field(PythonSources) or tgt.has_field(ResourcesSources):\n+ python_and_resources_targets.append(tgt)\n+ elif tgt.get(Sources).can_generate(PythonSources, union_membership) or tgt.get(\n+ Sources\n+ ).can_generate(ResourcesSources, union_membership):\n+ codegen_targets.append(tgt)\n+ codegen_sources = await MultiGet(\n+ Get(\n+ HydratedSources,\n+ HydrateSourcesRequest(\n+ tgt.get(Sources), for_sources_types=request.valid_sources_types, enable_codegen=True\n+ ),\n )\n+ for tgt in codegen_targets\n+ )\n+ source_root_requests = [\n+ *(SourceRootRequest.for_target(tgt) for tgt in python_and_resources_targets),\n+ *(\n+ SourceRootRequest.for_file(f)\n+ for sources in codegen_sources\n+ for f in sources.snapshot.files\n+ ),\n+ ]\n+\n+ source_root_objs = await MultiGet(\n+ Get(SourceRoot, SourceRootRequest, req) for req in source_root_requests\n )\n source_root_paths = {source_root_obj.path for source_root_obj in source_root_objs}\n return PythonSourceFiles(\n", "issue": "python_source_root not added to syspath\nI have a project structure along the lines of this:\r\n```\r\npackages/\r\n package_a/\r\n BUILD\r\n __init__.py\r\n package_a.py\r\n package_b/\r\n BUILD\r\n __init__.py\r\n package_b.py\r\nprotos/\r\n package_a/\r\n BUILD\r\n foobar.proto\r\n service_a/\r\n BUILD\r\n bazqux.proto\r\nservices/\r\n service_a/\r\n BUILD\r\n __init__.py\r\n main.py\r\n service_b/\r\n BUILD\r\n __init__.py\r\n main.py\r\n```\r\n\r\n`/packages`, `/protos` and `/services` are all configured as source roots. 
In each BUILD files residing in `protos/*` I have either the line `protobuf_library(python_source_root='services')` or `protobuf_library(python_source_root='packages')`, which means that the generated code ends up in `packages/*` or `services/*` instead.\r\n\r\nThis is working just fine as long as the running code is in the same source root as the generated protobuf code, but when code in `services/` is dependent on protos that has `python_source_root` set to `packages`, Python can't find the module unless an actual module from the same source root is also a dependency. I did some digging around, and it seems like the issue is that the source root specified in `python_source_root` is never explicitly added to Python's syspath, which is why imports fail if no \"real\" packages from the same source roots are used. So using the same example as earlier I see `services` and `protos`, but `packages`, where the generated code is placed, is missing.\r\n\r\nI created a [proof-of-concept repository](https://github.com/jyggen/pants-issue-11666) in case my rambling makes little sense. The issue can be seen by running `./pants test services/::`.\r\n\n", "before_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Tuple, Type\n\nfrom pants.backend.python.target_types import PythonSources\nfrom pants.backend.python.util_rules import ancestor_files\nfrom pants.backend.python.util_rules.ancestor_files import AncestorFiles, AncestorFilesRequest\nfrom pants.core.target_types import FilesSources, ResourcesSources\nfrom pants.core.util_rules import source_files, stripped_source_files\nfrom pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest\nfrom pants.core.util_rules.stripped_source_files import StrippedSourceFiles\nfrom pants.engine.fs import MergeDigests, Snapshot\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.engine.target import Sources, Target\nfrom pants.engine.unions import UnionMembership\nfrom pants.source.source_root import SourceRoot, SourceRootRequest\nfrom pants.util.logging import LogLevel\nfrom pants.util.meta import frozen_after_init\n\n\n@dataclass(frozen=True)\nclass PythonSourceFiles:\n \"\"\"Sources that can be introspected by Python, relative to a set of source roots.\n\n Specifically, this will filter out to only have Python, and, optionally, resources() and\n files() targets; and will add any missing `__init__.py` files to ensure that modules are\n recognized correctly.\n\n Use-cases that introspect Python source code (e.g., the `test, `lint`, `fmt` goals) can\n request this type to get relevant sources that are still relative to their source roots.\n That way the paths they report are the unstripped ones the user is familiar with.\n\n The sources can also be imported and used by Python (e.g., for the `test` goal), but only\n if sys.path is modified to include the source roots.\n \"\"\"\n\n source_files: SourceFiles\n source_roots: Tuple[str, ...] 
# Source roots for the specified source files.\n\n\n@dataclass(frozen=True)\nclass StrippedPythonSourceFiles:\n \"\"\"A PythonSourceFiles that has had its source roots stripped.\"\"\"\n\n stripped_source_files: StrippedSourceFiles\n\n\n@frozen_after_init\n@dataclass(unsafe_hash=True)\nclass PythonSourceFilesRequest:\n targets: Tuple[Target, ...]\n include_resources: bool\n include_files: bool\n\n def __init__(\n self,\n targets: Iterable[Target],\n *,\n include_resources: bool = True,\n include_files: bool = False\n ) -> None:\n self.targets = tuple(targets)\n self.include_resources = include_resources\n self.include_files = include_files\n\n @property\n def valid_sources_types(self) -> Tuple[Type[Sources], ...]:\n types: List[Type[Sources]] = [PythonSources]\n if self.include_resources:\n types.append(ResourcesSources)\n if self.include_files:\n types.append(FilesSources)\n return tuple(types)\n\n\n@rule(level=LogLevel.DEBUG)\nasync def prepare_python_sources(\n request: PythonSourceFilesRequest, union_membership: UnionMembership\n) -> PythonSourceFiles:\n sources = await Get(\n SourceFiles,\n SourceFilesRequest(\n (tgt.get(Sources) for tgt in request.targets),\n for_sources_types=request.valid_sources_types,\n enable_codegen=True,\n ),\n )\n\n missing_init_files = await Get(\n AncestorFiles,\n AncestorFilesRequest(\"__init__.py\", sources.snapshot),\n )\n\n init_injected = await Get(\n Snapshot,\n MergeDigests((sources.snapshot.digest, missing_init_files.snapshot.digest)),\n )\n\n source_root_objs = await MultiGet(\n Get(SourceRoot, SourceRootRequest, SourceRootRequest.for_target(tgt))\n for tgt in request.targets\n if (\n tgt.has_field(PythonSources)\n or tgt.has_field(ResourcesSources)\n or tgt.get(Sources).can_generate(PythonSources, union_membership)\n or tgt.get(Sources).can_generate(ResourcesSources, union_membership)\n )\n )\n source_root_paths = {source_root_obj.path for source_root_obj in source_root_objs}\n return PythonSourceFiles(\n SourceFiles(init_injected, sources.unrooted_files), tuple(sorted(source_root_paths))\n )\n\n\n@rule(level=LogLevel.DEBUG)\nasync def strip_python_sources(python_sources: PythonSourceFiles) -> StrippedPythonSourceFiles:\n stripped = await Get(StrippedSourceFiles, SourceFiles, python_sources.source_files)\n return StrippedPythonSourceFiles(stripped)\n\n\ndef rules():\n return [\n *collect_rules(),\n *ancestor_files.rules(),\n *source_files.rules(),\n *stripped_source_files.rules(),\n ]\n", "path": "src/python/pants/backend/python/util_rules/python_sources.py"}], "after_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Tuple, Type\n\nfrom pants.backend.python.target_types import PythonSources\nfrom pants.backend.python.util_rules import ancestor_files\nfrom pants.backend.python.util_rules.ancestor_files import AncestorFiles, AncestorFilesRequest\nfrom pants.core.target_types import FilesSources, ResourcesSources\nfrom pants.core.util_rules import source_files, stripped_source_files\nfrom pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest\nfrom pants.core.util_rules.stripped_source_files import StrippedSourceFiles\nfrom pants.engine.fs import MergeDigests, Snapshot\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.engine.target import HydratedSources, HydrateSourcesRequest, Sources, Target\nfrom pants.engine.unions import 
UnionMembership\nfrom pants.source.source_root import SourceRoot, SourceRootRequest\nfrom pants.util.logging import LogLevel\nfrom pants.util.meta import frozen_after_init\n\n\n@dataclass(frozen=True)\nclass PythonSourceFiles:\n \"\"\"Sources that can be introspected by Python, relative to a set of source roots.\n\n Specifically, this will filter out to only have Python, and, optionally, resources() and\n files() targets; and will add any missing `__init__.py` files to ensure that modules are\n recognized correctly.\n\n Use-cases that introspect Python source code (e.g., the `test, `lint`, `fmt` goals) can\n request this type to get relevant sources that are still relative to their source roots.\n That way the paths they report are the unstripped ones the user is familiar with.\n\n The sources can also be imported and used by Python (e.g., for the `test` goal), but only\n if sys.path is modified to include the source roots.\n \"\"\"\n\n source_files: SourceFiles\n source_roots: Tuple[str, ...] # Source roots for the specified source files.\n\n\n@dataclass(frozen=True)\nclass StrippedPythonSourceFiles:\n \"\"\"A PythonSourceFiles that has had its source roots stripped.\"\"\"\n\n stripped_source_files: StrippedSourceFiles\n\n\n@frozen_after_init\n@dataclass(unsafe_hash=True)\nclass PythonSourceFilesRequest:\n targets: Tuple[Target, ...]\n include_resources: bool\n include_files: bool\n\n def __init__(\n self,\n targets: Iterable[Target],\n *,\n include_resources: bool = True,\n include_files: bool = False\n ) -> None:\n self.targets = tuple(targets)\n self.include_resources = include_resources\n self.include_files = include_files\n\n @property\n def valid_sources_types(self) -> Tuple[Type[Sources], ...]:\n types: List[Type[Sources]] = [PythonSources]\n if self.include_resources:\n types.append(ResourcesSources)\n if self.include_files:\n types.append(FilesSources)\n return tuple(types)\n\n\n@rule(level=LogLevel.DEBUG)\nasync def prepare_python_sources(\n request: PythonSourceFilesRequest, union_membership: UnionMembership\n) -> PythonSourceFiles:\n sources = await Get(\n SourceFiles,\n SourceFilesRequest(\n (tgt.get(Sources) for tgt in request.targets),\n for_sources_types=request.valid_sources_types,\n enable_codegen=True,\n ),\n )\n\n missing_init_files = await Get(\n AncestorFiles,\n AncestorFilesRequest(\"__init__.py\", sources.snapshot),\n )\n\n init_injected = await Get(\n Snapshot,\n MergeDigests((sources.snapshot.digest, missing_init_files.snapshot.digest)),\n )\n\n # Codegen is able to generate code in any arbitrary location, unlike sources normally being\n # rooted under the target definition. To determine source roots for these generated files, we\n # cannot use the normal `SourceRootRequest.for_target()` and we instead must determine\n # a source root for every individual generated file. 
So, we re-resolve the codegen sources here.\n python_and_resources_targets = []\n codegen_targets = []\n for tgt in request.targets:\n if tgt.has_field(PythonSources) or tgt.has_field(ResourcesSources):\n python_and_resources_targets.append(tgt)\n elif tgt.get(Sources).can_generate(PythonSources, union_membership) or tgt.get(\n Sources\n ).can_generate(ResourcesSources, union_membership):\n codegen_targets.append(tgt)\n codegen_sources = await MultiGet(\n Get(\n HydratedSources,\n HydrateSourcesRequest(\n tgt.get(Sources), for_sources_types=request.valid_sources_types, enable_codegen=True\n ),\n )\n for tgt in codegen_targets\n )\n source_root_requests = [\n *(SourceRootRequest.for_target(tgt) for tgt in python_and_resources_targets),\n *(\n SourceRootRequest.for_file(f)\n for sources in codegen_sources\n for f in sources.snapshot.files\n ),\n ]\n\n source_root_objs = await MultiGet(\n Get(SourceRoot, SourceRootRequest, req) for req in source_root_requests\n )\n source_root_paths = {source_root_obj.path for source_root_obj in source_root_objs}\n return PythonSourceFiles(\n SourceFiles(init_injected, sources.unrooted_files), tuple(sorted(source_root_paths))\n )\n\n\n@rule(level=LogLevel.DEBUG)\nasync def strip_python_sources(python_sources: PythonSourceFiles) -> StrippedPythonSourceFiles:\n stripped = await Get(StrippedSourceFiles, SourceFiles, python_sources.source_files)\n return StrippedPythonSourceFiles(stripped)\n\n\ndef rules():\n return [\n *collect_rules(),\n *ancestor_files.rules(),\n *source_files.rules(),\n *stripped_source_files.rules(),\n ]\n", "path": "src/python/pants/backend/python/util_rules/python_sources.py"}]} | 1,966 | 656 |
gh_patches_debug_37970 | rasdani/github-patches | git_diff | elastic__apm-agent-python-596 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement support for chained exceptions
Python 3 supports chained exceptions. Their purpose is to handle cases like this:
try:
something_that_breaks()
except BreakingException:
do_something_else_that_breaks() # raises KaboomException
Without chained exceptions, only the `KaboomException` is recorded, and the exception information of the `BreakingException` is lost. Python 3 stores the original exception in the `__context__` attribute of the exception value.
Python 3 also introduced an explicit way to raise a chained exception:
try:
something_that_breaks()
except BreakingException as exc:
raise RetryException('Retry later') from exc
In this case, the `BreakingException` is stored as `__cause__`.
An implementation has been [proposed](https://github.com/elastic/apm/issues/40#issuecomment-508619451) by @axw.
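For orientation only (a minimal sketch, not the proposed agent implementation; the helper name and the depth cap are illustrative), the chain can be walked with the standard `__cause__`, `__context__` and `__suppress_context__` attributes, following the same rules Python uses when printing tracebacks:

```python
def iter_exception_chain(exc, max_depth=50):
    """Yield exc followed by the exceptions chained to it.

    Mirrors Python's reporting rules: an explicit cause (`raise ... from exc`)
    wins; otherwise the implicit context is used unless it was suppressed
    with `raise ... from None`.
    """
    depth = 0
    while exc is not None and depth <= max_depth:
        yield exc
        if exc.__cause__ is not None:
            exc = exc.__cause__
        elif exc.__context__ is not None and not exc.__suppress_context__:
            exc = exc.__context__
        else:
            break
        depth += 1


try:
    try:
        {}["missing"]
    except KeyError as exc:
        raise RuntimeError("Retry later") from exc
except RuntimeError as err:
    print([type(e).__name__ for e in iter_exception_chain(err)])
    # -> ['RuntimeError', 'KeyError']
```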
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/conf/constants.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 import decimal
32 import re
33
34 EVENTS_API_PATH = "intake/v2/events"
35 AGENT_CONFIG_PATH = "config/v1/agents"
36
37 TRACE_CONTEXT_VERSION = 0
38 TRACEPARENT_HEADER_NAME = "elastic-apm-traceparent"
39
40 TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
41
42 KEYWORD_MAX_LENGTH = 1024
43
44 HTTP_WITH_BODY = {"POST", "PUT", "PATCH", "DELETE"}
45
46 MASK = 8 * "*"
47
48 ERROR = "error"
49 TRANSACTION = "transaction"
50 SPAN = "span"
51 METRICSET = "metricset"
52
53 LABEL_RE = re.compile('[.*"]')
54
55 try:
56 # Python 2
57 LABEL_TYPES = (bool, int, long, float, decimal.Decimal)
58 except NameError:
59 # Python 3
60 LABEL_TYPES = (bool, int, float, decimal.Decimal)
61
```
Path: `elasticapm/events.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31
32 import logging
33 import random
34 import sys
35
36 from elasticapm.utils import varmap
37 from elasticapm.utils.encoding import keyword_field, shorten, to_unicode
38 from elasticapm.utils.stacks import get_culprit, get_stack_info, iter_traceback_frames
39
40 __all__ = ("BaseEvent", "Exception", "Message")
41
42 logger = logging.getLogger("elasticapm.events")
43
44
45 class BaseEvent(object):
46 @staticmethod
47 def to_string(client, data):
48 raise NotImplementedError
49
50 @staticmethod
51 def capture(client, **kwargs):
52 return {}
53
54
55 class Exception(BaseEvent):
56 """
57 Exceptions store the following metadata:
58
59 - value: 'My exception value'
60 - type: 'ClassName'
61 - module '__builtin__' (i.e. __builtin__.TypeError)
62 - frames: a list of serialized frames (see _get_traceback_frames)
63 """
64
65 @staticmethod
66 def to_string(client, data):
67 exc = data["exception"]
68 if exc["value"]:
69 return "%s: %s" % (exc["type"], exc["value"])
70 return exc["type"]
71
72 @staticmethod
73 def get_hash(data):
74 exc = data["exception"]
75 output = [exc["type"]]
76 for frame in data["stacktrace"]["frames"]:
77 output.append(frame["module"])
78 output.append(frame["function"])
79 return output
80
81 @staticmethod
82 def capture(client, exc_info=None, **kwargs):
83 culprit = exc_value = exc_type = exc_module = frames = exc_traceback = None
84 new_exc_info = False
85 if not exc_info or exc_info is True:
86 new_exc_info = True
87 exc_info = sys.exc_info()
88
89 if not exc_info:
90 raise ValueError("No exception found")
91
92 try:
93 exc_type, exc_value, exc_traceback = exc_info
94
95 frames = get_stack_info(
96 iter_traceback_frames(exc_traceback),
97 with_locals=client.config.collect_local_variables in ("errors", "all"),
98 library_frame_context_lines=client.config.source_lines_error_library_frames,
99 in_app_frame_context_lines=client.config.source_lines_error_app_frames,
100 include_paths_re=client.include_paths_re,
101 exclude_paths_re=client.exclude_paths_re,
102 locals_processor_func=lambda local_var: varmap(
103 lambda k, val: shorten(
104 val,
105 list_length=client.config.local_var_list_max_length,
106 string_length=client.config.local_var_max_length,
107 ),
108 local_var,
109 ),
110 )
111
112 culprit = get_culprit(frames, client.config.include_paths, client.config.exclude_paths)
113
114 if hasattr(exc_type, "__module__"):
115 exc_module = exc_type.__module__
116 exc_type = exc_type.__name__
117 else:
118 exc_module = None
119 exc_type = exc_type.__name__
120 finally:
121 if new_exc_info:
122 try:
123 del exc_info
124 del exc_traceback
125 except Exception as e:
126 logger.exception(e)
127 if "message" in kwargs:
128 message = kwargs["message"]
129 else:
130 message = "%s: %s" % (exc_type, to_unicode(exc_value)) if exc_value else str(exc_type)
131
132 return {
133 "id": "%032x" % random.getrandbits(128),
134 "culprit": keyword_field(culprit),
135 "exception": {
136 "message": message,
137 "type": keyword_field(str(exc_type)),
138 "module": keyword_field(str(exc_module)),
139 "stacktrace": frames,
140 },
141 }
142
143
144 class Message(BaseEvent):
145 """
146 Messages store the following metadata:
147
148 - message: 'My message from %s about %s'
149 - params: ('foo', 'bar')
150 """
151
152 @staticmethod
153 def to_string(client, data):
154 return data["log"]["message"]
155
156 @staticmethod
157 def get_hash(data):
158 msg = data["param_message"]
159 return [msg["message"]]
160
161 @staticmethod
162 def capture(client, param_message=None, message=None, level=None, logger_name=None, **kwargs):
163 if message:
164 param_message = {"message": message}
165 params = param_message.get("params")
166 message = param_message["message"] % params if params else param_message["message"]
167 data = kwargs.get("data", {})
168 message_data = {
169 "id": "%032x" % random.getrandbits(128),
170 "log": {
171 "level": keyword_field(level or "error"),
172 "logger_name": keyword_field(logger_name or "__root__"),
173 "message": message,
174 "param_message": keyword_field(param_message["message"]),
175 },
176 }
177 if isinstance(data.get("stacktrace"), dict):
178 message_data["log"]["stacktrace"] = data["stacktrace"]["frames"]
179 if kwargs.get("exception"):
180 message_data["culprit"] = kwargs["exception"]["culprit"]
181 message_data["exception"] = kwargs["exception"]["exception"]
182 return message_data
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/conf/constants.py b/elasticapm/conf/constants.py
--- a/elasticapm/conf/constants.py
+++ b/elasticapm/conf/constants.py
@@ -45,6 +45,8 @@
MASK = 8 * "*"
+EXCEPTION_CHAIN_MAX_DEPTH = 50
+
ERROR = "error"
TRANSACTION = "transaction"
SPAN = "span"
diff --git a/elasticapm/events.py b/elasticapm/events.py
--- a/elasticapm/events.py
+++ b/elasticapm/events.py
@@ -33,7 +33,8 @@
import random
import sys
-from elasticapm.utils import varmap
+from elasticapm.conf.constants import EXCEPTION_CHAIN_MAX_DEPTH
+from elasticapm.utils import compat, varmap
from elasticapm.utils.encoding import keyword_field, shorten, to_unicode
from elasticapm.utils.stacks import get_culprit, get_stack_info, iter_traceback_frames
@@ -109,7 +110,9 @@
),
)
- culprit = get_culprit(frames, client.config.include_paths, client.config.exclude_paths)
+ culprit = kwargs.get("culprit", None) or get_culprit(
+ frames, client.config.include_paths, client.config.exclude_paths
+ )
if hasattr(exc_type, "__module__"):
exc_module = exc_type.__module__
@@ -129,7 +132,7 @@
else:
message = "%s: %s" % (exc_type, to_unicode(exc_value)) if exc_value else str(exc_type)
- return {
+ data = {
"id": "%032x" % random.getrandbits(128),
"culprit": keyword_field(culprit),
"exception": {
@@ -139,6 +142,30 @@
"stacktrace": frames,
},
}
+ if compat.PY3:
+ depth = kwargs.get("_exc_chain_depth", 0)
+ if depth > EXCEPTION_CHAIN_MAX_DEPTH:
+ return
+ cause = exc_value.__cause__
+ chained_context = exc_value.__context__
+
+ # we follow the pattern of Python itself here and only capture the chained exception
+ # if cause is not None and __suppress_context__ is False
+ if chained_context and not (exc_value.__suppress_context__ and cause is None):
+ if cause:
+ chained_exc_type = type(cause)
+ chained_exc_value = cause
+ else:
+ chained_exc_type = type(chained_context)
+ chained_exc_value = chained_context
+ chained_exc_info = chained_exc_type, chained_exc_value, chained_context.__traceback__
+
+ chained_cause = Exception.capture(
+ client, exc_info=chained_exc_info, culprit="None", _exc_chain_depth=depth + 1
+ )
+ if chained_cause:
+ data["exception"]["cause"] = [chained_cause["exception"]]
+ return data
class Message(BaseEvent):
| {"golden_diff": "diff --git a/elasticapm/conf/constants.py b/elasticapm/conf/constants.py\n--- a/elasticapm/conf/constants.py\n+++ b/elasticapm/conf/constants.py\n@@ -45,6 +45,8 @@\n \n MASK = 8 * \"*\"\n \n+EXCEPTION_CHAIN_MAX_DEPTH = 50\n+\n ERROR = \"error\"\n TRANSACTION = \"transaction\"\n SPAN = \"span\"\ndiff --git a/elasticapm/events.py b/elasticapm/events.py\n--- a/elasticapm/events.py\n+++ b/elasticapm/events.py\n@@ -33,7 +33,8 @@\n import random\n import sys\n \n-from elasticapm.utils import varmap\n+from elasticapm.conf.constants import EXCEPTION_CHAIN_MAX_DEPTH\n+from elasticapm.utils import compat, varmap\n from elasticapm.utils.encoding import keyword_field, shorten, to_unicode\n from elasticapm.utils.stacks import get_culprit, get_stack_info, iter_traceback_frames\n \n@@ -109,7 +110,9 @@\n ),\n )\n \n- culprit = get_culprit(frames, client.config.include_paths, client.config.exclude_paths)\n+ culprit = kwargs.get(\"culprit\", None) or get_culprit(\n+ frames, client.config.include_paths, client.config.exclude_paths\n+ )\n \n if hasattr(exc_type, \"__module__\"):\n exc_module = exc_type.__module__\n@@ -129,7 +132,7 @@\n else:\n message = \"%s: %s\" % (exc_type, to_unicode(exc_value)) if exc_value else str(exc_type)\n \n- return {\n+ data = {\n \"id\": \"%032x\" % random.getrandbits(128),\n \"culprit\": keyword_field(culprit),\n \"exception\": {\n@@ -139,6 +142,30 @@\n \"stacktrace\": frames,\n },\n }\n+ if compat.PY3:\n+ depth = kwargs.get(\"_exc_chain_depth\", 0)\n+ if depth > EXCEPTION_CHAIN_MAX_DEPTH:\n+ return\n+ cause = exc_value.__cause__\n+ chained_context = exc_value.__context__\n+\n+ # we follow the pattern of Python itself here and only capture the chained exception\n+ # if cause is not None and __suppress_context__ is False\n+ if chained_context and not (exc_value.__suppress_context__ and cause is None):\n+ if cause:\n+ chained_exc_type = type(cause)\n+ chained_exc_value = cause\n+ else:\n+ chained_exc_type = type(chained_context)\n+ chained_exc_value = chained_context\n+ chained_exc_info = chained_exc_type, chained_exc_value, chained_context.__traceback__\n+\n+ chained_cause = Exception.capture(\n+ client, exc_info=chained_exc_info, culprit=\"None\", _exc_chain_depth=depth + 1\n+ )\n+ if chained_cause:\n+ data[\"exception\"][\"cause\"] = [chained_cause[\"exception\"]]\n+ return data\n \n \n class Message(BaseEvent):\n", "issue": "Implement support for chained exceptions\nPython 3 supports chained exceptions. Its purpose is to handle cases like this:\r\n\r\n try:\r\n something_that_breaks()\r\n except BreakingException:\r\n do_something_else_that_breaks() # raises KaboomException\r\n\r\nWithout chained exceptions, only the `KaboomException` is recorded, the exception information of the `BreakingException` is lost. Python 3 stores the original exception in the `__context__` attribute of the exception value.\r\n\r\nPython 3 also introduced an explicit way to raise a chained exception:\r\n\r\n try:\r\n something_that_breaks()\r\n except BreakingException as exc:\r\n raise RetryException('Retry later') from exc\r\n\r\nIn this case, the `BreakingException` is stored as `__cause__`.\r\n\r\nAn implementation has been [proposed](https://github.com/elastic/apm/issues/40#issuecomment-508619451) by @axw. 
\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport decimal\nimport re\n\nEVENTS_API_PATH = \"intake/v2/events\"\nAGENT_CONFIG_PATH = \"config/v1/agents\"\n\nTRACE_CONTEXT_VERSION = 0\nTRACEPARENT_HEADER_NAME = \"elastic-apm-traceparent\"\n\nTIMESTAMP_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nKEYWORD_MAX_LENGTH = 1024\n\nHTTP_WITH_BODY = {\"POST\", \"PUT\", \"PATCH\", \"DELETE\"}\n\nMASK = 8 * \"*\"\n\nERROR = \"error\"\nTRANSACTION = \"transaction\"\nSPAN = \"span\"\nMETRICSET = \"metricset\"\n\nLABEL_RE = re.compile('[.*\"]')\n\ntry:\n # Python 2\n LABEL_TYPES = (bool, int, long, float, decimal.Decimal)\nexcept NameError:\n # Python 3\n LABEL_TYPES = (bool, int, float, decimal.Decimal)\n", "path": "elasticapm/conf/constants.py"}, {"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nimport logging\nimport random\nimport sys\n\nfrom elasticapm.utils import varmap\nfrom elasticapm.utils.encoding import keyword_field, shorten, to_unicode\nfrom elasticapm.utils.stacks import get_culprit, get_stack_info, iter_traceback_frames\n\n__all__ = (\"BaseEvent\", \"Exception\", \"Message\")\n\nlogger = logging.getLogger(\"elasticapm.events\")\n\n\nclass BaseEvent(object):\n @staticmethod\n def to_string(client, data):\n raise NotImplementedError\n\n @staticmethod\n def capture(client, **kwargs):\n return {}\n\n\nclass Exception(BaseEvent):\n \"\"\"\n Exceptions store the following metadata:\n\n - value: 'My exception value'\n - type: 'ClassName'\n - module '__builtin__' (i.e. __builtin__.TypeError)\n - frames: a list of serialized frames (see _get_traceback_frames)\n \"\"\"\n\n @staticmethod\n def to_string(client, data):\n exc = data[\"exception\"]\n if exc[\"value\"]:\n return \"%s: %s\" % (exc[\"type\"], exc[\"value\"])\n return exc[\"type\"]\n\n @staticmethod\n def get_hash(data):\n exc = data[\"exception\"]\n output = [exc[\"type\"]]\n for frame in data[\"stacktrace\"][\"frames\"]:\n output.append(frame[\"module\"])\n output.append(frame[\"function\"])\n return output\n\n @staticmethod\n def capture(client, exc_info=None, **kwargs):\n culprit = exc_value = exc_type = exc_module = frames = exc_traceback = None\n new_exc_info = False\n if not exc_info or exc_info is True:\n new_exc_info = True\n exc_info = sys.exc_info()\n\n if not exc_info:\n raise ValueError(\"No exception found\")\n\n try:\n exc_type, exc_value, exc_traceback = exc_info\n\n frames = get_stack_info(\n iter_traceback_frames(exc_traceback),\n with_locals=client.config.collect_local_variables in (\"errors\", \"all\"),\n library_frame_context_lines=client.config.source_lines_error_library_frames,\n in_app_frame_context_lines=client.config.source_lines_error_app_frames,\n include_paths_re=client.include_paths_re,\n exclude_paths_re=client.exclude_paths_re,\n locals_processor_func=lambda local_var: varmap(\n lambda k, val: shorten(\n val,\n list_length=client.config.local_var_list_max_length,\n string_length=client.config.local_var_max_length,\n ),\n local_var,\n ),\n )\n\n culprit = get_culprit(frames, client.config.include_paths, client.config.exclude_paths)\n\n if hasattr(exc_type, \"__module__\"):\n exc_module = exc_type.__module__\n exc_type = exc_type.__name__\n else:\n exc_module = None\n exc_type = exc_type.__name__\n finally:\n if new_exc_info:\n try:\n del exc_info\n del exc_traceback\n except Exception as e:\n logger.exception(e)\n if \"message\" in kwargs:\n message = kwargs[\"message\"]\n else:\n message = \"%s: %s\" % (exc_type, to_unicode(exc_value)) if exc_value else str(exc_type)\n\n return {\n \"id\": \"%032x\" % random.getrandbits(128),\n \"culprit\": keyword_field(culprit),\n \"exception\": {\n \"message\": message,\n \"type\": keyword_field(str(exc_type)),\n \"module\": keyword_field(str(exc_module)),\n \"stacktrace\": frames,\n },\n }\n\n\nclass Message(BaseEvent):\n \"\"\"\n Messages store the following metadata:\n\n - message: 'My 
message from %s about %s'\n - params: ('foo', 'bar')\n \"\"\"\n\n @staticmethod\n def to_string(client, data):\n return data[\"log\"][\"message\"]\n\n @staticmethod\n def get_hash(data):\n msg = data[\"param_message\"]\n return [msg[\"message\"]]\n\n @staticmethod\n def capture(client, param_message=None, message=None, level=None, logger_name=None, **kwargs):\n if message:\n param_message = {\"message\": message}\n params = param_message.get(\"params\")\n message = param_message[\"message\"] % params if params else param_message[\"message\"]\n data = kwargs.get(\"data\", {})\n message_data = {\n \"id\": \"%032x\" % random.getrandbits(128),\n \"log\": {\n \"level\": keyword_field(level or \"error\"),\n \"logger_name\": keyword_field(logger_name or \"__root__\"),\n \"message\": message,\n \"param_message\": keyword_field(param_message[\"message\"]),\n },\n }\n if isinstance(data.get(\"stacktrace\"), dict):\n message_data[\"log\"][\"stacktrace\"] = data[\"stacktrace\"][\"frames\"]\n if kwargs.get(\"exception\"):\n message_data[\"culprit\"] = kwargs[\"exception\"][\"culprit\"]\n message_data[\"exception\"] = kwargs[\"exception\"][\"exception\"]\n return message_data\n", "path": "elasticapm/events.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport decimal\nimport re\n\nEVENTS_API_PATH = \"intake/v2/events\"\nAGENT_CONFIG_PATH = \"config/v1/agents\"\n\nTRACE_CONTEXT_VERSION = 0\nTRACEPARENT_HEADER_NAME = \"elastic-apm-traceparent\"\n\nTIMESTAMP_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nKEYWORD_MAX_LENGTH = 1024\n\nHTTP_WITH_BODY = {\"POST\", \"PUT\", \"PATCH\", \"DELETE\"}\n\nMASK = 8 * \"*\"\n\nEXCEPTION_CHAIN_MAX_DEPTH = 50\n\nERROR = \"error\"\nTRANSACTION = \"transaction\"\nSPAN = \"span\"\nMETRICSET = \"metricset\"\n\nLABEL_RE = re.compile('[.*\"]')\n\ntry:\n # Python 2\n LABEL_TYPES = (bool, int, long, float, decimal.Decimal)\nexcept NameError:\n # Python 3\n LABEL_TYPES = (bool, int, float, decimal.Decimal)\n", "path": "elasticapm/conf/constants.py"}, {"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nimport logging\nimport random\nimport sys\n\nfrom elasticapm.conf.constants import EXCEPTION_CHAIN_MAX_DEPTH\nfrom elasticapm.utils import compat, varmap\nfrom elasticapm.utils.encoding import keyword_field, shorten, to_unicode\nfrom elasticapm.utils.stacks import get_culprit, get_stack_info, iter_traceback_frames\n\n__all__ = (\"BaseEvent\", \"Exception\", \"Message\")\n\nlogger = logging.getLogger(\"elasticapm.events\")\n\n\nclass BaseEvent(object):\n @staticmethod\n def to_string(client, data):\n raise NotImplementedError\n\n @staticmethod\n def capture(client, **kwargs):\n return {}\n\n\nclass Exception(BaseEvent):\n \"\"\"\n Exceptions store the following metadata:\n\n - value: 'My exception value'\n - type: 'ClassName'\n - module '__builtin__' (i.e. __builtin__.TypeError)\n - frames: a list of serialized frames (see _get_traceback_frames)\n \"\"\"\n\n @staticmethod\n def to_string(client, data):\n exc = data[\"exception\"]\n if exc[\"value\"]:\n return \"%s: %s\" % (exc[\"type\"], exc[\"value\"])\n return exc[\"type\"]\n\n @staticmethod\n def get_hash(data):\n exc = data[\"exception\"]\n output = [exc[\"type\"]]\n for frame in data[\"stacktrace\"][\"frames\"]:\n output.append(frame[\"module\"])\n output.append(frame[\"function\"])\n return output\n\n @staticmethod\n def capture(client, exc_info=None, **kwargs):\n culprit = exc_value = exc_type = exc_module = frames = exc_traceback = None\n new_exc_info = False\n if not exc_info or exc_info is True:\n new_exc_info = True\n exc_info = sys.exc_info()\n\n if not exc_info:\n raise ValueError(\"No exception found\")\n\n try:\n exc_type, exc_value, exc_traceback = exc_info\n\n frames = get_stack_info(\n iter_traceback_frames(exc_traceback),\n with_locals=client.config.collect_local_variables in (\"errors\", \"all\"),\n library_frame_context_lines=client.config.source_lines_error_library_frames,\n in_app_frame_context_lines=client.config.source_lines_error_app_frames,\n include_paths_re=client.include_paths_re,\n exclude_paths_re=client.exclude_paths_re,\n locals_processor_func=lambda local_var: varmap(\n lambda k, val: shorten(\n val,\n list_length=client.config.local_var_list_max_length,\n string_length=client.config.local_var_max_length,\n ),\n local_var,\n ),\n )\n\n culprit = kwargs.get(\"culprit\", None) or get_culprit(\n frames, client.config.include_paths, client.config.exclude_paths\n )\n\n if hasattr(exc_type, \"__module__\"):\n exc_module = exc_type.__module__\n exc_type = exc_type.__name__\n else:\n exc_module = None\n exc_type = exc_type.__name__\n finally:\n if new_exc_info:\n try:\n del exc_info\n del exc_traceback\n except Exception as e:\n logger.exception(e)\n if \"message\" in kwargs:\n message = kwargs[\"message\"]\n else:\n message = \"%s: %s\" % (exc_type, to_unicode(exc_value)) if exc_value else str(exc_type)\n\n data = {\n \"id\": \"%032x\" % random.getrandbits(128),\n \"culprit\": keyword_field(culprit),\n \"exception\": {\n \"message\": message,\n \"type\": keyword_field(str(exc_type)),\n \"module\": keyword_field(str(exc_module)),\n \"stacktrace\": frames,\n 
},\n }\n if compat.PY3:\n depth = kwargs.get(\"_exc_chain_depth\", 0)\n if depth > EXCEPTION_CHAIN_MAX_DEPTH:\n return\n cause = exc_value.__cause__\n chained_context = exc_value.__context__\n\n # we follow the pattern of Python itself here and only capture the chained exception\n # if cause is not None and __suppress_context__ is False\n if chained_context and not (exc_value.__suppress_context__ and cause is None):\n if cause:\n chained_exc_type = type(cause)\n chained_exc_value = cause\n else:\n chained_exc_type = type(chained_context)\n chained_exc_value = chained_context\n chained_exc_info = chained_exc_type, chained_exc_value, chained_context.__traceback__\n\n chained_cause = Exception.capture(\n client, exc_info=chained_exc_info, culprit=\"None\", _exc_chain_depth=depth + 1\n )\n if chained_cause:\n data[\"exception\"][\"cause\"] = [chained_cause[\"exception\"]]\n return data\n\n\nclass Message(BaseEvent):\n \"\"\"\n Messages store the following metadata:\n\n - message: 'My message from %s about %s'\n - params: ('foo', 'bar')\n \"\"\"\n\n @staticmethod\n def to_string(client, data):\n return data[\"log\"][\"message\"]\n\n @staticmethod\n def get_hash(data):\n msg = data[\"param_message\"]\n return [msg[\"message\"]]\n\n @staticmethod\n def capture(client, param_message=None, message=None, level=None, logger_name=None, **kwargs):\n if message:\n param_message = {\"message\": message}\n params = param_message.get(\"params\")\n message = param_message[\"message\"] % params if params else param_message[\"message\"]\n data = kwargs.get(\"data\", {})\n message_data = {\n \"id\": \"%032x\" % random.getrandbits(128),\n \"log\": {\n \"level\": keyword_field(level or \"error\"),\n \"logger_name\": keyword_field(logger_name or \"__root__\"),\n \"message\": message,\n \"param_message\": keyword_field(param_message[\"message\"]),\n },\n }\n if isinstance(data.get(\"stacktrace\"), dict):\n message_data[\"log\"][\"stacktrace\"] = data[\"stacktrace\"][\"frames\"]\n if kwargs.get(\"exception\"):\n message_data[\"culprit\"] = kwargs[\"exception\"][\"culprit\"]\n message_data[\"exception\"] = kwargs[\"exception\"][\"exception\"]\n return message_data\n", "path": "elasticapm/events.py"}]} | 2,973 | 674 |
gh_patches_debug_15895 | rasdani/github-patches | git_diff | mdn__kuma-7814 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ProtectedError trying to close account through /api/v1/settings
https://sentry.prod.mozaws.net/operations/mdn-stage/issues/10922237/?referrer=github_plugin
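For background (an illustrative sketch, not a proposed fix): Django raises `ProtectedError` when a delete would remove a row that is still referenced through a `ForeignKey` declared with `on_delete=models.PROTECT`, which is what a `user.delete()` call can run into. One common way to surface it instead of a server error, shown with a hypothetical helper:

```python
from django.db.models import ProtectedError
from django.http import JsonResponse


def delete_account(user):
    # Hypothetical helper: report the objects that block the deletion
    # instead of letting ProtectedError bubble up as a 500.
    try:
        user.delete()
    except ProtectedError as exc:
        blockers = [repr(obj) for obj in exc.protected_objects]
        return JsonResponse({"deleted": False, "blocked_by": blockers}, status=400)
    return JsonResponse({"deleted": True})
```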
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/api/v1/views.py`
Content:
```
1 import json
2 import os
3 from datetime import datetime
4 from urllib.parse import urlparse
5
6 import stripe
7 from django.conf import settings
8 from django.contrib.auth import get_user_model
9 from django.http import (
10 HttpResponse,
11 HttpResponseBadRequest,
12 HttpResponseForbidden,
13 JsonResponse,
14 )
15 from django.utils import translation
16 from django.views.decorators.cache import never_cache
17 from django.views.decorators.csrf import csrf_exempt
18 from django.views.decorators.http import require_GET, require_POST
19 from raven.contrib.django.models import client as raven_client
20 from rest_framework import status
21 from rest_framework.decorators import api_view
22 from rest_framework.permissions import IsAuthenticated
23 from rest_framework.renderers import JSONRenderer
24 from rest_framework.response import Response
25 from rest_framework.views import APIView
26 from waffle import flag_is_active
27 from waffle.decorators import waffle_flag
28 from waffle.models import Flag, Switch
29
30 from kuma.api.v1.forms import AccountSettingsForm
31 from kuma.api.v1.serializers import UserDetailsSerializer
32 from kuma.core.email_utils import render_email
33 from kuma.core.ga_tracking import (
34 ACTION_SUBSCRIPTION_CANCELED,
35 ACTION_SUBSCRIPTION_CREATED,
36 ACTION_SUBSCRIPTION_FEEDBACK,
37 CATEGORY_MONTHLY_PAYMENTS,
38 track_event,
39 )
40 from kuma.core.urlresolvers import reverse
41 from kuma.core.utils import requests_retry_session, send_mail_retrying
42 from kuma.users.models import User, UserSubscription
43 from kuma.users.newsletter.utils import refresh_is_user_newsletter_subscribed
44 from kuma.users.signals import (
45 newsletter_subscribed,
46 newsletter_unsubscribed,
47 username_changed,
48 )
49 from kuma.users.stripe_utils import (
50 cancel_stripe_customer_subscriptions,
51 create_stripe_customer_and_subscription_for_user,
52 retrieve_and_synchronize_subscription_info,
53 )
54 from kuma.users.templatetags.jinja_helpers import get_avatar_url
55 from kuma.wiki.templatetags.jinja_helpers import absolutify
56
57
58 @never_cache
59 @require_GET
60 def whoami(request):
61 """
62 Return a JSON object representing the current user, either
63 authenticated or anonymous.
64 """
65 user = request.user
66 if user.is_authenticated:
67 data = {
68 "username": user.username,
69 "is_authenticated": True,
70 "avatar_url": get_avatar_url(user),
71 "email": user.email,
72 "subscriber_number": user.subscriber_number,
73 }
74 if UserSubscription.objects.filter(user=user, canceled__isnull=True).exists():
75 data["is_subscriber"] = True
76 if user.is_staff:
77 data["is_staff"] = True
78 if user.is_superuser:
79 data["is_superuser"] = True
80 if user.is_beta_tester:
81 data["is_beta_tester"] = True
82 else:
83 data = {}
84
85 data["waffle"] = {
86 "flags": {},
87 "switches": {s.name: True for s in Switch.get_all() if s.is_active()},
88 }
89 # Specifically and more smartly loop over the waffle Flag objects
90 # to avoid unnecessary `cache.get(...)` calls within the `flag.is_active(request)`.
91 for flag in Flag.get_all():
92 if not request.user.is_authenticated:
93 # Majority of users are anonymous, so let's focus on that.
94 # Let's see if there's a quick reason to bail the
95 # expensive `flag.is_active(request)` call.
96 if (
97 flag.authenticated or flag.staff or flag.superusers
98 ) and not flag.everyone:
99 continue
100 if not (flag.languages or flag.percent or flag.everyone):
101 continue
102 if flag.languages:
103 languages = [ln.strip() for ln in flag.languages.split(",")]
104 if (
105 not hasattr(request, "LANGUAGE_CODE")
106 or request.LANGUAGE_CODE not in languages
107 ):
108 continue
109
110 if flag.is_active(request):
111 data["waffle"]["flags"][flag.name] = True
112
113 return JsonResponse(data)
114
115
116 @never_cache
117 def account_settings(request):
118 user = request.user
119 if not user.is_authenticated:
120 return HttpResponseForbidden("not signed in")
121 if request.method == "DELETE":
122 # This should cease to be necessary once we get rid of the Wiki models.
123 anon, _ = User.objects.get_or_create(username="Anonymous")
124 user.revisionakismetsubmission_set.update(sender=anon)
125 user.created_revisions.update(creator=anon)
126 user.created_attachment_revisions.update(creator=anon)
127
128 user.delete()
129 return JsonResponse({"deleted": True})
130 elif request.method == "POST":
131 form = AccountSettingsForm(request.POST)
132 if not form.is_valid():
133 return JsonResponse({"errors": form.errors.get_json_data()}, status=400)
134
135 set_locale = None
136 if form.cleaned_data.get("locale"):
137 user.locale = set_locale = form.cleaned_data["locale"]
138 user.save()
139
140 response = JsonResponse({"ok": True})
141 if set_locale:
142 response.set_cookie(
143 key=settings.LANGUAGE_COOKIE_NAME,
144 value=set_locale,
145 max_age=settings.LANGUAGE_COOKIE_AGE,
146 path=settings.LANGUAGE_COOKIE_PATH,
147 domain=settings.LANGUAGE_COOKIE_DOMAIN,
148 secure=settings.LANGUAGE_COOKIE_SECURE,
149 )
150
151 return response
152
153 context = {
154 "csrfmiddlewaretoken": request.META.get("CSRF_COOKIE"),
155 "locale": user.locale,
156 }
157 return JsonResponse(context)
158
159
160 @waffle_flag("subscription")
161 @never_cache
162 @require_POST
163 def send_subscriptions_feedback(request):
164 """
165 Sends feedback to Google Analytics. This is done on the
166 backend to ensure that all feedback is collected, even
167 from users with DNT or where GA is disabled.
168 """
169 data = json.loads(request.body)
170 feedback = (data.get("feedback") or "").strip()
171
172 if not feedback:
173 return HttpResponseBadRequest("no feedback")
174
175 track_event(
176 CATEGORY_MONTHLY_PAYMENTS, ACTION_SUBSCRIPTION_FEEDBACK, data["feedback"]
177 )
178 return HttpResponse(status=204)
179
180
181 @api_view(["POST", "GET", "DELETE"])
182 @never_cache
183 def subscriptions(request):
184 if not request.user.is_authenticated or not flag_is_active(request, "subscription"):
185 return Response(None, status=status.HTTP_403_FORBIDDEN)
186
187 if request.method == "POST":
188 create_stripe_customer_and_subscription_for_user(
189 request.user, request.user.email, request.data["stripe_token"]
190 )
191 return Response(None, status=status.HTTP_201_CREATED)
192 elif request.method == "DELETE":
193 cancelled = cancel_stripe_customer_subscriptions(request.user)
194 if cancelled:
195 return Response(None, status=status.HTTP_204_NO_CONTENT)
196 else:
197 return Response("nothing to cancel", status=status.HTTP_410_GONE)
198
199 all_subscriptions = []
200 subscription_info = retrieve_and_synchronize_subscription_info(request.user)
201 if subscription_info:
202 all_subscriptions.append(subscription_info)
203
204 return Response({"subscriptions": all_subscriptions})
205
206
207 @csrf_exempt
208 @require_POST
209 @never_cache
210 def stripe_hooks(request):
211 try:
212 payload = json.loads(request.body)
213 except ValueError:
214 return HttpResponseBadRequest("Invalid JSON payload")
215
216 try:
217 event = stripe.Event.construct_from(payload, stripe.api_key)
218 except stripe.error.StripeError:
219 raven_client.captureException()
220 return HttpResponseBadRequest()
221
222 # Generally, for this list of if-statements, see the create_missing_stripe_webhook
223 # function.
224 # The list of events there ought to at least minimally match what we're prepared
225 # to deal with here.
226
227 if event.type == "invoice.payment_succeeded":
228 invoice = event.data.object
229 _send_payment_received_email(invoice, request.LANGUAGE_CODE)
230 track_event(
231 CATEGORY_MONTHLY_PAYMENTS,
232 ACTION_SUBSCRIPTION_CREATED,
233 f"{settings.CONTRIBUTION_AMOUNT_USD:.2f}",
234 )
235
236 elif event.type == "customer.subscription.deleted":
237 obj = event.data.object
238 for user in User.objects.filter(stripe_customer_id=obj.customer):
239 UserSubscription.set_canceled(user, obj.id)
240 track_event(CATEGORY_MONTHLY_PAYMENTS, ACTION_SUBSCRIPTION_CANCELED, "webhook")
241
242 else:
243 return HttpResponseBadRequest(
244 f"We did not expect a Stripe webhook of type {event.type!r}"
245 )
246
247 return HttpResponse()
248
249
250 def _send_payment_received_email(invoice, locale):
251 user = get_user_model().objects.get(stripe_customer_id=invoice.customer)
252 subscription_info = retrieve_and_synchronize_subscription_info(user)
253 locale = locale or settings.WIKI_DEFAULT_LANGUAGE
254 context = {
255 "payment_date": datetime.fromtimestamp(invoice.created),
256 "next_payment_date": subscription_info["next_payment_at"],
257 "invoice_number": invoice.number,
258 "cost": invoice.total / 100,
259 "credit_card_brand": subscription_info["brand"],
260 "manage_subscription_url": absolutify(reverse("payment_management")),
261 "faq_url": absolutify(reverse("payments_index")),
262 "contact_email": settings.CONTRIBUTION_SUPPORT_EMAIL,
263 }
264 with translation.override(locale):
265 subject = render_email("users/email/payment_received/subject.ltxt", context)
266 # Email subject *must not* contain newlines
267 subject = "".join(subject.splitlines())
268 plain = render_email("users/email/payment_received/plain.ltxt", context)
269
270 send_mail_retrying(
271 subject,
272 plain,
273 settings.DEFAULT_FROM_EMAIL,
274 [user.email],
275 attachment={
276 "name": os.path.basename(urlparse(invoice.invoice_pdf).path),
277 "bytes": _download_from_url(invoice.invoice_pdf),
278 "mime": "application/pdf",
279 },
280 )
281
282
283 def _download_from_url(url):
284 pdf_download = requests_retry_session().get(url)
285 pdf_download.raise_for_status()
286 return pdf_download.content
287
288
289 class APIUserDetailsView(APIView):
290 http_method_names = ["get", "put"]
291 serializer_class = UserDetailsSerializer
292 renderer_classes = [JSONRenderer]
293 permission_classes = [IsAuthenticated]
294
295 def get(self, request, format=None):
296 assert request.user.is_authenticated
297 serializer = UserDetailsSerializer(request.user, many=False)
298 return Response(serializer.data)
299
300 def put(self, request, format=None):
301 user = request.user
302 serializer = UserDetailsSerializer(instance=user, data=request.data)
303 if serializer.is_valid():
304 was_subscribed = user.is_newsletter_subscribed
305 old_username = user.username
306 serializer.save(user=user)
307
308 if not was_subscribed and user.is_newsletter_subscribed:
309 newsletter_subscribed.send(None, user=user)
310 if was_subscribed and not user.is_newsletter_subscribed:
311 newsletter_unsubscribed.send(None, user=user)
312
313 if old_username != user.username:
314 username_changed.send(None, user=user)
315
316 return Response(serializer.data, status=status.HTTP_200_OK)
317 return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
318
319
320 user_details = never_cache(APIUserDetailsView.as_view())
321
322
323 @csrf_exempt
324 @require_POST
325 @never_cache
326 def sendinblue_hooks(request):
327 # Sendinblue does not sign its webhook requests, hence the event handlers following
328 # are different from the Stripe ones, in that they treat the event as a notification
329 # of a _potential_ change, while still needing to contact sendinblue to verify that
330 # it actually happened.
331 try:
332 payload = json.loads(request.body)
333 event = payload["event"]
334 email = payload["email"]
335 except (json.decoder.JSONDecodeError, KeyError) as exception:
336 return HttpResponseBadRequest(
337 f"{exception.__class__.__name__} on {request.body}"
338 )
339
340 if event == "unsubscribe":
341 refresh_is_user_newsletter_subscribed(email)
342 return HttpResponse()
343 else:
344 return HttpResponseBadRequest(
345 f"We did not expect a Sendinblue webhook of type {event['event']!r}"
346 )
347
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/api/v1/views.py b/kuma/api/v1/views.py
--- a/kuma/api/v1/views.py
+++ b/kuma/api/v1/views.py
@@ -122,8 +122,13 @@
# This should cease to be necessary once we get rid of the Wiki models.
anon, _ = User.objects.get_or_create(username="Anonymous")
user.revisionakismetsubmission_set.update(sender=anon)
+ user.documentdeletionlog_set.update(user=anon)
+ user.documentspamattempt_set.update(user=anon)
+ user.documentspam_reviewed.update(reviewer=anon)
user.created_revisions.update(creator=anon)
user.created_attachment_revisions.update(creator=anon)
+ user.bans.update(user=anon)
+ user.bans_issued.update(by=anon)
user.delete()
return JsonResponse({"deleted": True})
| {"golden_diff": "diff --git a/kuma/api/v1/views.py b/kuma/api/v1/views.py\n--- a/kuma/api/v1/views.py\n+++ b/kuma/api/v1/views.py\n@@ -122,8 +122,13 @@\n # This should cease to be necessary once we get rid of the Wiki models.\n anon, _ = User.objects.get_or_create(username=\"Anonymous\")\n user.revisionakismetsubmission_set.update(sender=anon)\n+ user.documentdeletionlog_set.update(user=anon)\n+ user.documentspamattempt_set.update(user=anon)\n+ user.documentspam_reviewed.update(reviewer=anon)\n user.created_revisions.update(creator=anon)\n user.created_attachment_revisions.update(creator=anon)\n+ user.bans.update(user=anon)\n+ user.bans_issued.update(by=anon)\n \n user.delete()\n return JsonResponse({\"deleted\": True})\n", "issue": "ProtectedError trying to close account through /api/v1/settings\nhttps://sentry.prod.mozaws.net/operations/mdn-stage/issues/10922237/?referrer=github_plugin\n\n", "before_files": [{"content": "import json\nimport os\nfrom datetime import datetime\nfrom urllib.parse import urlparse\n\nimport stripe\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.http import (\n HttpResponse,\n HttpResponseBadRequest,\n HttpResponseForbidden,\n JsonResponse,\n)\nfrom django.utils import translation\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_GET, require_POST\nfrom raven.contrib.django.models import client as raven_client\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom waffle import flag_is_active\nfrom waffle.decorators import waffle_flag\nfrom waffle.models import Flag, Switch\n\nfrom kuma.api.v1.forms import AccountSettingsForm\nfrom kuma.api.v1.serializers import UserDetailsSerializer\nfrom kuma.core.email_utils import render_email\nfrom kuma.core.ga_tracking import (\n ACTION_SUBSCRIPTION_CANCELED,\n ACTION_SUBSCRIPTION_CREATED,\n ACTION_SUBSCRIPTION_FEEDBACK,\n CATEGORY_MONTHLY_PAYMENTS,\n track_event,\n)\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import requests_retry_session, send_mail_retrying\nfrom kuma.users.models import User, UserSubscription\nfrom kuma.users.newsletter.utils import refresh_is_user_newsletter_subscribed\nfrom kuma.users.signals import (\n newsletter_subscribed,\n newsletter_unsubscribed,\n username_changed,\n)\nfrom kuma.users.stripe_utils import (\n cancel_stripe_customer_subscriptions,\n create_stripe_customer_and_subscription_for_user,\n retrieve_and_synchronize_subscription_info,\n)\nfrom kuma.users.templatetags.jinja_helpers import get_avatar_url\nfrom kuma.wiki.templatetags.jinja_helpers import absolutify\n\n\n@never_cache\n@require_GET\ndef whoami(request):\n \"\"\"\n Return a JSON object representing the current user, either\n authenticated or anonymous.\n \"\"\"\n user = request.user\n if user.is_authenticated:\n data = {\n \"username\": user.username,\n \"is_authenticated\": True,\n \"avatar_url\": get_avatar_url(user),\n \"email\": user.email,\n \"subscriber_number\": user.subscriber_number,\n }\n if UserSubscription.objects.filter(user=user, canceled__isnull=True).exists():\n data[\"is_subscriber\"] = True\n if user.is_staff:\n data[\"is_staff\"] = True\n if user.is_superuser:\n data[\"is_superuser\"] = True\n if 
user.is_beta_tester:\n data[\"is_beta_tester\"] = True\n else:\n data = {}\n\n data[\"waffle\"] = {\n \"flags\": {},\n \"switches\": {s.name: True for s in Switch.get_all() if s.is_active()},\n }\n # Specifically and more smartly loop over the waffle Flag objects\n # to avoid unnecessary `cache.get(...)` calls within the `flag.is_active(request)`.\n for flag in Flag.get_all():\n if not request.user.is_authenticated:\n # Majority of users are anonymous, so let's focus on that.\n # Let's see if there's a quick reason to bail the\n # expensive `flag.is_active(request)` call.\n if (\n flag.authenticated or flag.staff or flag.superusers\n ) and not flag.everyone:\n continue\n if not (flag.languages or flag.percent or flag.everyone):\n continue\n if flag.languages:\n languages = [ln.strip() for ln in flag.languages.split(\",\")]\n if (\n not hasattr(request, \"LANGUAGE_CODE\")\n or request.LANGUAGE_CODE not in languages\n ):\n continue\n\n if flag.is_active(request):\n data[\"waffle\"][\"flags\"][flag.name] = True\n\n return JsonResponse(data)\n\n\n@never_cache\ndef account_settings(request):\n user = request.user\n if not user.is_authenticated:\n return HttpResponseForbidden(\"not signed in\")\n if request.method == \"DELETE\":\n # This should cease to be necessary once we get rid of the Wiki models.\n anon, _ = User.objects.get_or_create(username=\"Anonymous\")\n user.revisionakismetsubmission_set.update(sender=anon)\n user.created_revisions.update(creator=anon)\n user.created_attachment_revisions.update(creator=anon)\n\n user.delete()\n return JsonResponse({\"deleted\": True})\n elif request.method == \"POST\":\n form = AccountSettingsForm(request.POST)\n if not form.is_valid():\n return JsonResponse({\"errors\": form.errors.get_json_data()}, status=400)\n\n set_locale = None\n if form.cleaned_data.get(\"locale\"):\n user.locale = set_locale = form.cleaned_data[\"locale\"]\n user.save()\n\n response = JsonResponse({\"ok\": True})\n if set_locale:\n response.set_cookie(\n key=settings.LANGUAGE_COOKIE_NAME,\n value=set_locale,\n max_age=settings.LANGUAGE_COOKIE_AGE,\n path=settings.LANGUAGE_COOKIE_PATH,\n domain=settings.LANGUAGE_COOKIE_DOMAIN,\n secure=settings.LANGUAGE_COOKIE_SECURE,\n )\n\n return response\n\n context = {\n \"csrfmiddlewaretoken\": request.META.get(\"CSRF_COOKIE\"),\n \"locale\": user.locale,\n }\n return JsonResponse(context)\n\n\n@waffle_flag(\"subscription\")\n@never_cache\n@require_POST\ndef send_subscriptions_feedback(request):\n \"\"\"\n Sends feedback to Google Analytics. 
This is done on the\n backend to ensure that all feedback is collected, even\n from users with DNT or where GA is disabled.\n \"\"\"\n data = json.loads(request.body)\n feedback = (data.get(\"feedback\") or \"\").strip()\n\n if not feedback:\n return HttpResponseBadRequest(\"no feedback\")\n\n track_event(\n CATEGORY_MONTHLY_PAYMENTS, ACTION_SUBSCRIPTION_FEEDBACK, data[\"feedback\"]\n )\n return HttpResponse(status=204)\n\n\n@api_view([\"POST\", \"GET\", \"DELETE\"])\n@never_cache\ndef subscriptions(request):\n if not request.user.is_authenticated or not flag_is_active(request, \"subscription\"):\n return Response(None, status=status.HTTP_403_FORBIDDEN)\n\n if request.method == \"POST\":\n create_stripe_customer_and_subscription_for_user(\n request.user, request.user.email, request.data[\"stripe_token\"]\n )\n return Response(None, status=status.HTTP_201_CREATED)\n elif request.method == \"DELETE\":\n cancelled = cancel_stripe_customer_subscriptions(request.user)\n if cancelled:\n return Response(None, status=status.HTTP_204_NO_CONTENT)\n else:\n return Response(\"nothing to cancel\", status=status.HTTP_410_GONE)\n\n all_subscriptions = []\n subscription_info = retrieve_and_synchronize_subscription_info(request.user)\n if subscription_info:\n all_subscriptions.append(subscription_info)\n\n return Response({\"subscriptions\": all_subscriptions})\n\n\n@csrf_exempt\n@require_POST\n@never_cache\ndef stripe_hooks(request):\n try:\n payload = json.loads(request.body)\n except ValueError:\n return HttpResponseBadRequest(\"Invalid JSON payload\")\n\n try:\n event = stripe.Event.construct_from(payload, stripe.api_key)\n except stripe.error.StripeError:\n raven_client.captureException()\n return HttpResponseBadRequest()\n\n # Generally, for this list of if-statements, see the create_missing_stripe_webhook\n # function.\n # The list of events there ought to at least minimally match what we're prepared\n # to deal with here.\n\n if event.type == \"invoice.payment_succeeded\":\n invoice = event.data.object\n _send_payment_received_email(invoice, request.LANGUAGE_CODE)\n track_event(\n CATEGORY_MONTHLY_PAYMENTS,\n ACTION_SUBSCRIPTION_CREATED,\n f\"{settings.CONTRIBUTION_AMOUNT_USD:.2f}\",\n )\n\n elif event.type == \"customer.subscription.deleted\":\n obj = event.data.object\n for user in User.objects.filter(stripe_customer_id=obj.customer):\n UserSubscription.set_canceled(user, obj.id)\n track_event(CATEGORY_MONTHLY_PAYMENTS, ACTION_SUBSCRIPTION_CANCELED, \"webhook\")\n\n else:\n return HttpResponseBadRequest(\n f\"We did not expect a Stripe webhook of type {event.type!r}\"\n )\n\n return HttpResponse()\n\n\ndef _send_payment_received_email(invoice, locale):\n user = get_user_model().objects.get(stripe_customer_id=invoice.customer)\n subscription_info = retrieve_and_synchronize_subscription_info(user)\n locale = locale or settings.WIKI_DEFAULT_LANGUAGE\n context = {\n \"payment_date\": datetime.fromtimestamp(invoice.created),\n \"next_payment_date\": subscription_info[\"next_payment_at\"],\n \"invoice_number\": invoice.number,\n \"cost\": invoice.total / 100,\n \"credit_card_brand\": subscription_info[\"brand\"],\n \"manage_subscription_url\": absolutify(reverse(\"payment_management\")),\n \"faq_url\": absolutify(reverse(\"payments_index\")),\n \"contact_email\": settings.CONTRIBUTION_SUPPORT_EMAIL,\n }\n with translation.override(locale):\n subject = render_email(\"users/email/payment_received/subject.ltxt\", context)\n # Email subject *must not* contain newlines\n subject = 
\"\".join(subject.splitlines())\n plain = render_email(\"users/email/payment_received/plain.ltxt\", context)\n\n send_mail_retrying(\n subject,\n plain,\n settings.DEFAULT_FROM_EMAIL,\n [user.email],\n attachment={\n \"name\": os.path.basename(urlparse(invoice.invoice_pdf).path),\n \"bytes\": _download_from_url(invoice.invoice_pdf),\n \"mime\": \"application/pdf\",\n },\n )\n\n\ndef _download_from_url(url):\n pdf_download = requests_retry_session().get(url)\n pdf_download.raise_for_status()\n return pdf_download.content\n\n\nclass APIUserDetailsView(APIView):\n http_method_names = [\"get\", \"put\"]\n serializer_class = UserDetailsSerializer\n renderer_classes = [JSONRenderer]\n permission_classes = [IsAuthenticated]\n\n def get(self, request, format=None):\n assert request.user.is_authenticated\n serializer = UserDetailsSerializer(request.user, many=False)\n return Response(serializer.data)\n\n def put(self, request, format=None):\n user = request.user\n serializer = UserDetailsSerializer(instance=user, data=request.data)\n if serializer.is_valid():\n was_subscribed = user.is_newsletter_subscribed\n old_username = user.username\n serializer.save(user=user)\n\n if not was_subscribed and user.is_newsletter_subscribed:\n newsletter_subscribed.send(None, user=user)\n if was_subscribed and not user.is_newsletter_subscribed:\n newsletter_unsubscribed.send(None, user=user)\n\n if old_username != user.username:\n username_changed.send(None, user=user)\n\n return Response(serializer.data, status=status.HTTP_200_OK)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nuser_details = never_cache(APIUserDetailsView.as_view())\n\n\n@csrf_exempt\n@require_POST\n@never_cache\ndef sendinblue_hooks(request):\n # Sendinblue does not sign its webhook requests, hence the event handlers following\n # are different from the Stripe ones, in that they treat the event as a notification\n # of a _potential_ change, while still needing to contact sendinblue to verify that\n # it actually happened.\n try:\n payload = json.loads(request.body)\n event = payload[\"event\"]\n email = payload[\"email\"]\n except (json.decoder.JSONDecodeError, KeyError) as exception:\n return HttpResponseBadRequest(\n f\"{exception.__class__.__name__} on {request.body}\"\n )\n\n if event == \"unsubscribe\":\n refresh_is_user_newsletter_subscribed(email)\n return HttpResponse()\n else:\n return HttpResponseBadRequest(\n f\"We did not expect a Sendinblue webhook of type {event['event']!r}\"\n )\n", "path": "kuma/api/v1/views.py"}], "after_files": [{"content": "import json\nimport os\nfrom datetime import datetime\nfrom urllib.parse import urlparse\n\nimport stripe\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.http import (\n HttpResponse,\n HttpResponseBadRequest,\n HttpResponseForbidden,\n JsonResponse,\n)\nfrom django.utils import translation\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_GET, require_POST\nfrom raven.contrib.django.models import client as raven_client\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom waffle import flag_is_active\nfrom waffle.decorators import waffle_flag\nfrom waffle.models import Flag, 
Switch\n\nfrom kuma.api.v1.forms import AccountSettingsForm\nfrom kuma.api.v1.serializers import UserDetailsSerializer\nfrom kuma.core.email_utils import render_email\nfrom kuma.core.ga_tracking import (\n ACTION_SUBSCRIPTION_CANCELED,\n ACTION_SUBSCRIPTION_CREATED,\n ACTION_SUBSCRIPTION_FEEDBACK,\n CATEGORY_MONTHLY_PAYMENTS,\n track_event,\n)\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import requests_retry_session, send_mail_retrying\nfrom kuma.users.models import User, UserSubscription\nfrom kuma.users.newsletter.utils import refresh_is_user_newsletter_subscribed\nfrom kuma.users.signals import (\n newsletter_subscribed,\n newsletter_unsubscribed,\n username_changed,\n)\nfrom kuma.users.stripe_utils import (\n cancel_stripe_customer_subscriptions,\n create_stripe_customer_and_subscription_for_user,\n retrieve_and_synchronize_subscription_info,\n)\nfrom kuma.users.templatetags.jinja_helpers import get_avatar_url\nfrom kuma.wiki.templatetags.jinja_helpers import absolutify\n\n\n@never_cache\n@require_GET\ndef whoami(request):\n \"\"\"\n Return a JSON object representing the current user, either\n authenticated or anonymous.\n \"\"\"\n user = request.user\n if user.is_authenticated:\n data = {\n \"username\": user.username,\n \"is_authenticated\": True,\n \"avatar_url\": get_avatar_url(user),\n \"email\": user.email,\n \"subscriber_number\": user.subscriber_number,\n }\n if UserSubscription.objects.filter(user=user, canceled__isnull=True).exists():\n data[\"is_subscriber\"] = True\n if user.is_staff:\n data[\"is_staff\"] = True\n if user.is_superuser:\n data[\"is_superuser\"] = True\n if user.is_beta_tester:\n data[\"is_beta_tester\"] = True\n else:\n data = {}\n\n data[\"waffle\"] = {\n \"flags\": {},\n \"switches\": {s.name: True for s in Switch.get_all() if s.is_active()},\n }\n # Specifically and more smartly loop over the waffle Flag objects\n # to avoid unnecessary `cache.get(...)` calls within the `flag.is_active(request)`.\n for flag in Flag.get_all():\n if not request.user.is_authenticated:\n # Majority of users are anonymous, so let's focus on that.\n # Let's see if there's a quick reason to bail the\n # expensive `flag.is_active(request)` call.\n if (\n flag.authenticated or flag.staff or flag.superusers\n ) and not flag.everyone:\n continue\n if not (flag.languages or flag.percent or flag.everyone):\n continue\n if flag.languages:\n languages = [ln.strip() for ln in flag.languages.split(\",\")]\n if (\n not hasattr(request, \"LANGUAGE_CODE\")\n or request.LANGUAGE_CODE not in languages\n ):\n continue\n\n if flag.is_active(request):\n data[\"waffle\"][\"flags\"][flag.name] = True\n\n return JsonResponse(data)\n\n\n@never_cache\ndef account_settings(request):\n user = request.user\n if not user.is_authenticated:\n return HttpResponseForbidden(\"not signed in\")\n if request.method == \"DELETE\":\n # This should cease to be necessary once we get rid of the Wiki models.\n anon, _ = User.objects.get_or_create(username=\"Anonymous\")\n user.revisionakismetsubmission_set.update(sender=anon)\n user.documentdeletionlog_set.update(user=anon)\n user.documentspamattempt_set.update(user=anon)\n user.documentspam_reviewed.update(reviewer=anon)\n user.created_revisions.update(creator=anon)\n user.created_attachment_revisions.update(creator=anon)\n user.bans.update(user=anon)\n user.bans_issued.update(by=anon)\n\n user.delete()\n return JsonResponse({\"deleted\": True})\n elif request.method == \"POST\":\n form = AccountSettingsForm(request.POST)\n if not 
form.is_valid():\n return JsonResponse({\"errors\": form.errors.get_json_data()}, status=400)\n\n set_locale = None\n if form.cleaned_data.get(\"locale\"):\n user.locale = set_locale = form.cleaned_data[\"locale\"]\n user.save()\n\n response = JsonResponse({\"ok\": True})\n if set_locale:\n response.set_cookie(\n key=settings.LANGUAGE_COOKIE_NAME,\n value=set_locale,\n max_age=settings.LANGUAGE_COOKIE_AGE,\n path=settings.LANGUAGE_COOKIE_PATH,\n domain=settings.LANGUAGE_COOKIE_DOMAIN,\n secure=settings.LANGUAGE_COOKIE_SECURE,\n )\n\n return response\n\n context = {\n \"csrfmiddlewaretoken\": request.META.get(\"CSRF_COOKIE\"),\n \"locale\": user.locale,\n }\n return JsonResponse(context)\n\n\n@waffle_flag(\"subscription\")\n@never_cache\n@require_POST\ndef send_subscriptions_feedback(request):\n \"\"\"\n Sends feedback to Google Analytics. This is done on the\n backend to ensure that all feedback is collected, even\n from users with DNT or where GA is disabled.\n \"\"\"\n data = json.loads(request.body)\n feedback = (data.get(\"feedback\") or \"\").strip()\n\n if not feedback:\n return HttpResponseBadRequest(\"no feedback\")\n\n track_event(\n CATEGORY_MONTHLY_PAYMENTS, ACTION_SUBSCRIPTION_FEEDBACK, data[\"feedback\"]\n )\n return HttpResponse(status=204)\n\n\n@api_view([\"POST\", \"GET\", \"DELETE\"])\n@never_cache\ndef subscriptions(request):\n if not request.user.is_authenticated or not flag_is_active(request, \"subscription\"):\n return Response(None, status=status.HTTP_403_FORBIDDEN)\n\n if request.method == \"POST\":\n create_stripe_customer_and_subscription_for_user(\n request.user, request.user.email, request.data[\"stripe_token\"]\n )\n return Response(None, status=status.HTTP_201_CREATED)\n elif request.method == \"DELETE\":\n cancelled = cancel_stripe_customer_subscriptions(request.user)\n if cancelled:\n return Response(None, status=status.HTTP_204_NO_CONTENT)\n else:\n return Response(\"nothing to cancel\", status=status.HTTP_410_GONE)\n\n all_subscriptions = []\n subscription_info = retrieve_and_synchronize_subscription_info(request.user)\n if subscription_info:\n all_subscriptions.append(subscription_info)\n\n return Response({\"subscriptions\": all_subscriptions})\n\n\n@csrf_exempt\n@require_POST\n@never_cache\ndef stripe_hooks(request):\n try:\n payload = json.loads(request.body)\n except ValueError:\n return HttpResponseBadRequest(\"Invalid JSON payload\")\n\n try:\n event = stripe.Event.construct_from(payload, stripe.api_key)\n except stripe.error.StripeError:\n raven_client.captureException()\n return HttpResponseBadRequest()\n\n # Generally, for this list of if-statements, see the create_missing_stripe_webhook\n # function.\n # The list of events there ought to at least minimally match what we're prepared\n # to deal with here.\n\n if event.type == \"invoice.payment_succeeded\":\n invoice = event.data.object\n _send_payment_received_email(invoice, request.LANGUAGE_CODE)\n track_event(\n CATEGORY_MONTHLY_PAYMENTS,\n ACTION_SUBSCRIPTION_CREATED,\n f\"{settings.CONTRIBUTION_AMOUNT_USD:.2f}\",\n )\n\n elif event.type == \"customer.subscription.deleted\":\n obj = event.data.object\n for user in User.objects.filter(stripe_customer_id=obj.customer):\n UserSubscription.set_canceled(user, obj.id)\n track_event(CATEGORY_MONTHLY_PAYMENTS, ACTION_SUBSCRIPTION_CANCELED, \"webhook\")\n\n else:\n return HttpResponseBadRequest(\n f\"We did not expect a Stripe webhook of type {event.type!r}\"\n )\n\n return HttpResponse()\n\n\ndef _send_payment_received_email(invoice, locale):\n user = 
get_user_model().objects.get(stripe_customer_id=invoice.customer)\n subscription_info = retrieve_and_synchronize_subscription_info(user)\n locale = locale or settings.WIKI_DEFAULT_LANGUAGE\n context = {\n \"payment_date\": datetime.fromtimestamp(invoice.created),\n \"next_payment_date\": subscription_info[\"next_payment_at\"],\n \"invoice_number\": invoice.number,\n \"cost\": invoice.total / 100,\n \"credit_card_brand\": subscription_info[\"brand\"],\n \"manage_subscription_url\": absolutify(reverse(\"payment_management\")),\n \"faq_url\": absolutify(reverse(\"payments_index\")),\n \"contact_email\": settings.CONTRIBUTION_SUPPORT_EMAIL,\n }\n with translation.override(locale):\n subject = render_email(\"users/email/payment_received/subject.ltxt\", context)\n # Email subject *must not* contain newlines\n subject = \"\".join(subject.splitlines())\n plain = render_email(\"users/email/payment_received/plain.ltxt\", context)\n\n send_mail_retrying(\n subject,\n plain,\n settings.DEFAULT_FROM_EMAIL,\n [user.email],\n attachment={\n \"name\": os.path.basename(urlparse(invoice.invoice_pdf).path),\n \"bytes\": _download_from_url(invoice.invoice_pdf),\n \"mime\": \"application/pdf\",\n },\n )\n\n\ndef _download_from_url(url):\n pdf_download = requests_retry_session().get(url)\n pdf_download.raise_for_status()\n return pdf_download.content\n\n\nclass APIUserDetailsView(APIView):\n http_method_names = [\"get\", \"put\"]\n serializer_class = UserDetailsSerializer\n renderer_classes = [JSONRenderer]\n permission_classes = [IsAuthenticated]\n\n def get(self, request, format=None):\n assert request.user.is_authenticated\n serializer = UserDetailsSerializer(request.user, many=False)\n return Response(serializer.data)\n\n def put(self, request, format=None):\n user = request.user\n serializer = UserDetailsSerializer(instance=user, data=request.data)\n if serializer.is_valid():\n was_subscribed = user.is_newsletter_subscribed\n old_username = user.username\n serializer.save(user=user)\n\n if not was_subscribed and user.is_newsletter_subscribed:\n newsletter_subscribed.send(None, user=user)\n if was_subscribed and not user.is_newsletter_subscribed:\n newsletter_unsubscribed.send(None, user=user)\n\n if old_username != user.username:\n username_changed.send(None, user=user)\n\n return Response(serializer.data, status=status.HTTP_200_OK)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nuser_details = never_cache(APIUserDetailsView.as_view())\n\n\n@csrf_exempt\n@require_POST\n@never_cache\ndef sendinblue_hooks(request):\n # Sendinblue does not sign its webhook requests, hence the event handlers following\n # are different from the Stripe ones, in that they treat the event as a notification\n # of a _potential_ change, while still needing to contact sendinblue to verify that\n # it actually happened.\n try:\n payload = json.loads(request.body)\n event = payload[\"event\"]\n email = payload[\"email\"]\n except (json.decoder.JSONDecodeError, KeyError) as exception:\n return HttpResponseBadRequest(\n f\"{exception.__class__.__name__} on {request.body}\"\n )\n\n if event == \"unsubscribe\":\n refresh_is_user_newsletter_subscribed(email)\n return HttpResponse()\n else:\n return HttpResponseBadRequest(\n f\"We did not expect a Sendinblue webhook of type {event['event']!r}\"\n )\n", "path": "kuma/api/v1/views.py"}]} | 3,771 | 199 |
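A note on the kuma patch above: Django raises `ProtectedError` when the object being deleted is still referenced through a foreign key declared with `on_delete=PROTECT`, which is why `user.delete()` fails while deletion logs, spam attempts, spam reviews and bans still point at the account. The golden diff avoids this by reassigning those relations to the `Anonymous` placeholder user before deleting. Below is a minimal sketch of the same reassign-then-delete idea, assuming a configured Django environment; the manager and field names are taken from the diff and are not claimed to be a complete list of kuma's protected relations.

```python
# Sketch of the reassign-then-delete pattern used in the patch above.
# Assumes a configured Django project; manager/field names follow the diff.
def close_account(user, user_model):
    anon, _ = user_model.objects.get_or_create(username="Anonymous")

    # Each .update() is a bulk UPDATE that keeps the rows but detaches them
    # from `user`, so PROTECT foreign keys no longer block the delete.
    user.documentdeletionlog_set.update(user=anon)
    user.documentspamattempt_set.update(user=anon)
    user.documentspam_reviewed.update(reviewer=anon)
    user.bans.update(user=anon)
    user.bans_issued.update(by=anon)

    user.delete()  # no longer raises ProtectedError for these relations
```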
gh_patches_debug_31278 | rasdani/github-patches | git_diff | oobabooga__text-generation-webui-3120 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
With multiple LoRAs loaded, only the first one in the list affects the generated text output
### Describe the bug
We're experimenting with training "stacked" LoRAs, as in LoRAs trained on top of other LoRAs. According to the warning message triggered by this, such an arrangement is supposed to work, but "may have unexpected effects". Training succeeds without any issue, but when I load the finished LoRAs, only the first one seems to have any effect.
So far, we have encountered and successfully worked around an issue where the 2nd LoRA's weights would never get loaded due to a PEFT bug/weirdness (the 2nd LoRA's weights get serialized with module names starting with "base_model.model.base_model.model.model", but on deserialization, PEFT expects "base_model.model.model"). Patching this made the 2nd LoRA load and affect the generated output successfully when it's the only LoRA being loaded. However, when we load it on top of the 1st LoRA, it still has no effect. With the loading order reversed compared to the training order, only the 2nd LoRA has any effect on the output - as if the 2nd and subsequent LoRAs were basically ignored.
To test this, we tried loading two known good, independently trained LoRAs (tloen/alpaca-lora-7b and 22h/cabrita-lora-v0-1) on a common base model (yahma/llama-7b-hf). Each LoRA works fine when loaded on its own, and is seen to affect the output.
When the "LoRA(s)" box is set to list alpaca first, then cabrita, both are shown as successfully loaded, but the generated text is identical to what I get with only alpaca loaded. If I put cabrita first, then alpaca, the output is indistinguishable from only cabrita being selected. This seems to confirm that only the first LoRA listed in the box is properly loaded into the model, the rest are silently ignored.
BnB quantization doesn't appear to affect this behavior - it's the same in 4-bit, 8-bit and native mode.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Download yahma/llama-7b-hf, tloen/alpaca-lora-7b and 22h/cabrita-lora-v0-1 from Hugging Face.
Start the UI in chat mode.
On the Parameters tab, set the temperature to 0.3.
On the Chat settings tab, select the "Alpaca" instruction template.
On the Model tab, load the yahma/llama-7b-hf base model.
Without loading any LoRA, switch to the Text generation tab, and run the following 3 queries (clear history after each one):
- "Tell me about alpacas."
- "Invente uma desculpa criativa pra dizer que não preciso ir à festa."
- "How many helicopters can a human eat in one sitting?"
Note the responses. (The Portuguese query will prompt a response in English, while the helicopter one will be nonsense.)
On the Model tab, reload the model, then load the alpaca LoRA. Run the above 3 questions again. Note how the answers change. (The Portuguese prompt is now answered in Portuguese, but not very creatively, and the helicopter answer will be correct, i.e. none.)
Unload the alpaca LoRA, reload the model, and load the cabrita LoRA. Run the 3 prompts, and note the new answers. (Portuguese answer is different, more creative, while the helicopter one is back to nonsense.)
Unload the LoRA, reload the model. In the LoRA(s) box, select cabrita first, then alpaca. Run the 3 prompts. (Answers are as if only cabrita were loaded!)
Unload the LoRAs, reload the model. In the LoRA(s) box, select alpaca first, then cabrita. Run the 3 prompts. (Answers indistinguishable from when only alpaca was loaded!)
### Screenshot
_No response_
### Logs
```shell
no exception generated
```
### System Info
```shell
Ryzen 7950X with a Zotac GeForce RTX 4080 GPU, running Ubuntu 22.04 LTS with the open-source NVIDIA kernel module (with nvidia.NVreg_OpenRmEnableUnsupportedGpus=1 specified on the kernel command line to skip the "datacenter GPUs only" check). The issue is reproducible in both CUDA and CPU mode.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modules/LoRA.py`
Content:
```
1 from pathlib import Path
2
3 import torch
4 from peft import PeftModel
5
6 import modules.shared as shared
7 from modules.logging_colors import logger
8 from modules.models import reload_model
9
10
11 def add_lora_to_model(lora_names):
12 if 'GPTQForCausalLM' in shared.model.__class__.__name__ or shared.args.loader == 'AutoGPTQ':
13 add_lora_autogptq(lora_names)
14 elif shared.model.__class__.__name__ in ['ExllamaModel', 'ExllamaHF'] or shared.args.loader == 'ExLlama':
15 add_lora_exllama(lora_names)
16 elif shared.model.__class__.__name__ in ['Exllamav2Model', 'Exllamav2HF'] or shared.args.loader == ['ExLlamav2', 'ExLlamav2_HF']:
17 add_lora_exllamav2(lora_names)
18 else:
19 add_lora_transformers(lora_names)
20
21
22 def get_lora_path(lora_name):
23 p = Path(lora_name)
24 if p.exists():
25 lora_name = p.parts[-1]
26
27 return Path(f"{shared.args.lora_dir}/{lora_name}")
28
29
30 def add_lora_exllama(lora_names):
31
32 try:
33 from exllama.lora import ExLlamaLora
34 except:
35 try:
36 from repositories.exllama.lora import ExLlamaLora
37 except:
38 logger.error("Could not find the file repositories/exllama/lora.py. Make sure that exllama is cloned inside repositories/ and is up to date.")
39 return
40
41 if len(lora_names) == 0:
42 if shared.model.__class__.__name__ == 'ExllamaModel':
43 shared.model.generator.lora = None
44 else:
45 shared.model.lora = None
46
47 shared.lora_names = []
48 return
49 else:
50 if len(lora_names) > 1:
51 logger.warning('ExLlama can only work with 1 LoRA at the moment. Only the first one in the list will be loaded.')
52
53 lora_path = get_lora_path(lora_names[0])
54 lora_config_path = lora_path / "adapter_config.json"
55 lora_adapter_path = lora_path / "adapter_model.bin"
56
57 logger.info("Applying the following LoRAs to {}: {}".format(shared.model_name, ', '.join([lora_names[0]])))
58 if shared.model.__class__.__name__ == 'ExllamaModel':
59 lora = ExLlamaLora(shared.model.model, str(lora_config_path), str(lora_adapter_path))
60 shared.model.generator.lora = lora
61 else:
62 lora = ExLlamaLora(shared.model.ex_model, str(lora_config_path), str(lora_adapter_path))
63 shared.model.lora = lora
64
65 shared.lora_names = [lora_names[0]]
66 return
67
68
69 def add_lora_exllamav2(lora_names):
70
71 from exllamav2 import ExLlamaV2Lora
72
73 if isinstance(shared.model.loras, list):
74 for lora in shared.model.loras:
75 lora.unload()
76
77 if len(lora_names) > 0:
78 logger.info("Applying the following LoRAs to {}: {}".format(shared.model_name, ', '.join(lora_names)))
79 shared.model.loras = []
80 for lora_name in lora_names:
81 lora_path = get_lora_path(lora_name)
82 if shared.model.__class__.__name__ == 'Exllamav2Model':
83 lora = ExLlamaV2Lora.from_directory(shared.model.model, str(lora_path))
84 else:
85 lora = ExLlamaV2Lora.from_directory(shared.model.ex_model, str(lora_path))
86
87 shared.model.loras.append(lora)
88
89 shared.lora_names = lora_names
90 else:
91 shared.lora_names = []
92 shared.model.loras = None
93
94
95 def add_lora_autogptq(lora_names):
96 '''
97 Adapted from https://github.com/Ph0rk0z/text-generation-webui-testing
98 '''
99
100 try:
101 from auto_gptq import get_gptq_peft_model
102 from auto_gptq.utils.peft_utils import GPTQLoraConfig
103 except:
104 logger.error("This version of AutoGPTQ does not support LoRA. You need to install from source or wait for a new release.")
105 return
106
107 if len(lora_names) == 0:
108 reload_model()
109
110 shared.lora_names = []
111 return
112 else:
113 if len(lora_names) > 1:
114 logger.warning('AutoGPTQ can only work with 1 LoRA at the moment. Only the first one in the list will be loaded.')
115 if not shared.args.no_inject_fused_attention:
116 logger.warning('Fused Atttention + AutoGPTQ may break Lora loading. Disable it.')
117
118 peft_config = GPTQLoraConfig(
119 inference_mode=True,
120 )
121
122 lora_path = get_lora_path(lora_names[0])
123 logger.info("Applying the following LoRAs to {}: {}".format(shared.model_name, ', '.join([lora_names[0]])))
124 shared.model = get_gptq_peft_model(shared.model, peft_config, lora_path)
125 shared.lora_names = [lora_names[0]]
126 return
127
128
129 def add_lora_transformers(lora_names):
130 prior_set = set(shared.lora_names)
131 added_set = set(lora_names) - prior_set
132 removed_set = prior_set - set(lora_names)
133
134 # If no LoRA needs to be added or removed, exit
135 if len(added_set) == 0 and len(removed_set) == 0:
136 return
137
138 # Add a LoRA when another LoRA is already present
139 if len(removed_set) == 0 and len(prior_set) > 0:
140 logger.info(f"Adding the LoRA(s) named {added_set} to the model...")
141 for lora in added_set:
142 shared.model.load_adapter(get_lora_path(lora), lora)
143
144 return
145
146 # If any LoRA needs to be removed, start over
147 if len(removed_set) > 0:
148 # shared.model may no longer be PeftModel
149 if hasattr(shared.model, 'disable_adapter'):
150 shared.model.disable_adapter()
151 shared.model = shared.model.base_model.model
152
153 if len(lora_names) > 0:
154 params = {}
155 if not shared.args.cpu:
156 if shared.args.load_in_4bit or shared.args.load_in_8bit:
157 params['peft_type'] = shared.model.dtype
158 else:
159 params['dtype'] = shared.model.dtype
160 if hasattr(shared.model, "hf_device_map"):
161 params['device_map'] = {"base_model.model." + k: v for k, v in shared.model.hf_device_map.items()}
162
163 logger.info("Applying the following LoRAs to {}: {}".format(shared.model_name, ', '.join(lora_names)))
164 shared.model = PeftModel.from_pretrained(shared.model, get_lora_path(lora_names[0]), adapter_name=lora_names[0], **params)
165 for lora in lora_names[1:]:
166 shared.model.load_adapter(get_lora_path(lora), lora)
167
168 shared.lora_names = lora_names
169
170 if not shared.args.load_in_8bit and not shared.args.cpu:
171 shared.model.half()
172 if not hasattr(shared.model, "hf_device_map"):
173 if torch.backends.mps.is_available():
174 device = torch.device('mps')
175 shared.model = shared.model.to(device)
176 else:
177 shared.model = shared.model.cuda()
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modules/LoRA.py b/modules/LoRA.py
--- a/modules/LoRA.py
+++ b/modules/LoRA.py
@@ -8,6 +8,14 @@
from modules.models import reload_model
+def merge_loras():
+ if len(list({shared.model.peft_config[adapter].r for adapter in shared.model.peft_config.keys()})) > 1:
+ logger.warning("The loaded LoRAs cannot be merged, as they have dissimilar ranks. Only the first one will be active.")
+ return
+
+ shared.model.add_weighted_adapter(shared.lora_names, [1] * len(shared.lora_names), "__merged")
+ shared.model.set_adapter("__merged")
+
def add_lora_to_model(lora_names):
if 'GPTQForCausalLM' in shared.model.__class__.__name__ or shared.args.loader == 'AutoGPTQ':
add_lora_autogptq(lora_names)
@@ -136,11 +144,14 @@
return
# Add a LoRA when another LoRA is already present
- if len(removed_set) == 0 and len(prior_set) > 0:
+ if len(removed_set) == 0 and len(prior_set) > 0 and "__merged" not in shared.model.peft_config.keys():
logger.info(f"Adding the LoRA(s) named {added_set} to the model...")
for lora in added_set:
shared.model.load_adapter(get_lora_path(lora), lora)
+ if len(lora_names) > 1:
+ merge_loras()
+
return
# If any LoRA needs to be removed, start over
@@ -165,6 +176,9 @@
for lora in lora_names[1:]:
shared.model.load_adapter(get_lora_path(lora), lora)
+ if len(lora_names) > 1:
+ merge_loras()
+
shared.lora_names = lora_names
if not shared.args.load_in_8bit and not shared.args.cpu:
| {"golden_diff": "diff --git a/modules/LoRA.py b/modules/LoRA.py\n--- a/modules/LoRA.py\n+++ b/modules/LoRA.py\n@@ -8,6 +8,14 @@\n from modules.models import reload_model\n \n \n+def merge_loras():\n+ if len(list({shared.model.peft_config[adapter].r for adapter in shared.model.peft_config.keys()})) > 1:\n+ logger.warning(\"The loaded LoRAs cannot be merged, as they have dissimilar ranks. Only the first one will be active.\")\n+ return\n+\n+ shared.model.add_weighted_adapter(shared.lora_names, [1] * len(shared.lora_names), \"__merged\")\n+ shared.model.set_adapter(\"__merged\")\n+\n def add_lora_to_model(lora_names):\n if 'GPTQForCausalLM' in shared.model.__class__.__name__ or shared.args.loader == 'AutoGPTQ':\n add_lora_autogptq(lora_names)\n@@ -136,11 +144,14 @@\n return\n \n # Add a LoRA when another LoRA is already present\n- if len(removed_set) == 0 and len(prior_set) > 0:\n+ if len(removed_set) == 0 and len(prior_set) > 0 and \"__merged\" not in shared.model.peft_config.keys():\n logger.info(f\"Adding the LoRA(s) named {added_set} to the model...\")\n for lora in added_set:\n shared.model.load_adapter(get_lora_path(lora), lora)\n \n+ if len(lora_names) > 1:\n+ merge_loras()\n+\n return\n \n # If any LoRA needs to be removed, start over\n@@ -165,6 +176,9 @@\n for lora in lora_names[1:]:\n shared.model.load_adapter(get_lora_path(lora), lora)\n \n+ if len(lora_names) > 1:\n+ merge_loras()\n+\n shared.lora_names = lora_names\n \n if not shared.args.load_in_8bit and not shared.args.cpu:\n", "issue": "With multiple LoRAs loaded, only the first one in the list affects the generated text output\n### Describe the bug\n\nWe're experimenting with training \"stacked\" LoRAs, as in LoRAs trained on top of other LoRAs. According to the warning message triggered by this, such an arrangement is supposed to work, but \"may have unexpected effects\". Training succeeds without any issue, but when I load the finished LoRAs, only the first one seems to have any effect.\r\n\r\nSo far, we have encountered and successfully worked around an issue where the 2nd LoRA's weights would never get loaded due to a PEFT bug/weirdness (the 2nd LoRA's weights get serialized with module names starting with \"base_model.model.base_model.model.model\", but on deserialization, PEFT expects \"base_model.model.model\"). Patching this made the 2nd LoRA load and affect the generated output successfully when it's the only LoRA being loaded. However, when we load it on top of the 1st LoRA, it still has no effect. With the loading order reversed compared to the training order, only the 2nd LoRA has any effect on the output - as if the 2nd and subsequent LoRAs were basically ignored.\r\n\r\nTo test this, we tried loading two known good, independently trained LoRAs (tloan/alpaca-lora-7b and 22h/cabrita-lora-v0-1) on a common base model (yahma/llama-7b-hf). Each LoRA works fine when loaded on its own, and is seen to affect the output.\r\n\r\nWhen the \"LoRA(s)\" box is set to list alpaca first, then cabrita, both are shown as successfully loaded, but the generated text is identical to what I get with only alpaca loaded. If I put cabrita first, then alpaca, the output is indistinguishable from only cabrita being selected. 
This seems to confirm that only the first LoRA listed in the box is properly loaded into the model, the rest are silently ignored.\r\n\r\nBnB quantization doesn't appear to affect this behavior - it's the same in 4-bit, 8-bit and native mode.\n\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Reproduction\n\nDownload yahma/llama-7b-hf, tloen/alpaca-lora-7b and 22h/cabrita-lora-v0-1 from Hugging Face.\r\n\r\nStart the UI in chat mode.\r\nOn the Parameters tab, set the temperature to 0.3.\r\nOn the Chat settings tab, select the \"Alpaca\" instruction template.\r\nOn the Model tab, load the yahma/llama-7b-hf base model.\r\n\r\nWithout loading any LoRA, switch to the Text generation tab, and run the following 3 queries (clear history after each one):\r\n- \"Tell me about alpacas.\"\r\n- \"Invente uma desculpa criativa pra dizer que n\u00e3o preciso ir \u00e0 festa.\"\r\n- \"How many helicopters can a human eat in one sitting?\"\r\n\r\nNote the responses. (The Portuguese query will prompt a response in English, while the helicopter one will be nonsense.)\r\n\r\nOn the Model tab, reload the model, then load the alpaca LoRA. Run the above 3 questions again. Note how the answers change. (The Portuguese prompt is now answered in Portuguese, but not very creatively, and the helicopter answer will be correct, i.e. none.)\r\n\r\nUnload the alpaca LoRA, reload the model, and load the cabrita LoRA. Run the 3 prompts, and note the new answers. (Portuguese answer is different, more creative, while the helicopter one is back to nonsense.)\r\n\r\nUnload the LoRA, reload the model. In the LoRA(s) box, select cabrita first, then alpaca. Run the 3 prompts. (Answers are as if only cabrita were loaded!)\r\n\r\nUnload the LoRAs, reload the model. In the LoRA(s) box, select alpaca first, then cabrita. Run the 3 prompts. (Answers indistinguishable from when only alpaca was loaded!)\n\n### Screenshot\n\n_No response_\n\n### Logs\n\n```shell\nno exception generated\n```\n\n\n### System Info\n\n```shell\nRyzen 7950X with a Zotac GeForce RTX 4080 GPU, running Ubuntu 22.04 LTS with the open-source NVIDIA kernel module (with nvidia.NVreg_OpenRmEnableUnsupportedGpus=1 specified on the kernel command line to skip the \"datacenter GPUs only\" check). The issue is reproducible in both CUDA and CPU mode.\n```\n\n", "before_files": [{"content": "from pathlib import Path\n\nimport torch\nfrom peft import PeftModel\n\nimport modules.shared as shared\nfrom modules.logging_colors import logger\nfrom modules.models import reload_model\n\n\ndef add_lora_to_model(lora_names):\n if 'GPTQForCausalLM' in shared.model.__class__.__name__ or shared.args.loader == 'AutoGPTQ':\n add_lora_autogptq(lora_names)\n elif shared.model.__class__.__name__ in ['ExllamaModel', 'ExllamaHF'] or shared.args.loader == 'ExLlama':\n add_lora_exllama(lora_names)\n elif shared.model.__class__.__name__ in ['Exllamav2Model', 'Exllamav2HF'] or shared.args.loader == ['ExLlamav2', 'ExLlamav2_HF']:\n add_lora_exllamav2(lora_names)\n else:\n add_lora_transformers(lora_names)\n\n\ndef get_lora_path(lora_name):\n p = Path(lora_name)\n if p.exists():\n lora_name = p.parts[-1]\n\n return Path(f\"{shared.args.lora_dir}/{lora_name}\")\n\n\ndef add_lora_exllama(lora_names):\n\n try:\n from exllama.lora import ExLlamaLora\n except:\n try:\n from repositories.exllama.lora import ExLlamaLora\n except:\n logger.error(\"Could not find the file repositories/exllama/lora.py. 
Make sure that exllama is cloned inside repositories/ and is up to date.\")\n return\n\n if len(lora_names) == 0:\n if shared.model.__class__.__name__ == 'ExllamaModel':\n shared.model.generator.lora = None\n else:\n shared.model.lora = None\n\n shared.lora_names = []\n return\n else:\n if len(lora_names) > 1:\n logger.warning('ExLlama can only work with 1 LoRA at the moment. Only the first one in the list will be loaded.')\n\n lora_path = get_lora_path(lora_names[0])\n lora_config_path = lora_path / \"adapter_config.json\"\n lora_adapter_path = lora_path / \"adapter_model.bin\"\n\n logger.info(\"Applying the following LoRAs to {}: {}\".format(shared.model_name, ', '.join([lora_names[0]])))\n if shared.model.__class__.__name__ == 'ExllamaModel':\n lora = ExLlamaLora(shared.model.model, str(lora_config_path), str(lora_adapter_path))\n shared.model.generator.lora = lora\n else:\n lora = ExLlamaLora(shared.model.ex_model, str(lora_config_path), str(lora_adapter_path))\n shared.model.lora = lora\n\n shared.lora_names = [lora_names[0]]\n return\n\n\ndef add_lora_exllamav2(lora_names):\n\n from exllamav2 import ExLlamaV2Lora\n\n if isinstance(shared.model.loras, list):\n for lora in shared.model.loras:\n lora.unload()\n\n if len(lora_names) > 0:\n logger.info(\"Applying the following LoRAs to {}: {}\".format(shared.model_name, ', '.join(lora_names)))\n shared.model.loras = []\n for lora_name in lora_names:\n lora_path = get_lora_path(lora_name)\n if shared.model.__class__.__name__ == 'Exllamav2Model':\n lora = ExLlamaV2Lora.from_directory(shared.model.model, str(lora_path))\n else:\n lora = ExLlamaV2Lora.from_directory(shared.model.ex_model, str(lora_path))\n\n shared.model.loras.append(lora)\n\n shared.lora_names = lora_names\n else:\n shared.lora_names = []\n shared.model.loras = None\n\n\ndef add_lora_autogptq(lora_names):\n '''\n Adapted from https://github.com/Ph0rk0z/text-generation-webui-testing\n '''\n\n try:\n from auto_gptq import get_gptq_peft_model\n from auto_gptq.utils.peft_utils import GPTQLoraConfig\n except:\n logger.error(\"This version of AutoGPTQ does not support LoRA. You need to install from source or wait for a new release.\")\n return\n\n if len(lora_names) == 0:\n reload_model()\n\n shared.lora_names = []\n return\n else:\n if len(lora_names) > 1:\n logger.warning('AutoGPTQ can only work with 1 LoRA at the moment. Only the first one in the list will be loaded.')\n if not shared.args.no_inject_fused_attention:\n logger.warning('Fused Atttention + AutoGPTQ may break Lora loading. 
Disable it.')\n\n peft_config = GPTQLoraConfig(\n inference_mode=True,\n )\n\n lora_path = get_lora_path(lora_names[0])\n logger.info(\"Applying the following LoRAs to {}: {}\".format(shared.model_name, ', '.join([lora_names[0]])))\n shared.model = get_gptq_peft_model(shared.model, peft_config, lora_path)\n shared.lora_names = [lora_names[0]]\n return\n\n\ndef add_lora_transformers(lora_names):\n prior_set = set(shared.lora_names)\n added_set = set(lora_names) - prior_set\n removed_set = prior_set - set(lora_names)\n\n # If no LoRA needs to be added or removed, exit\n if len(added_set) == 0 and len(removed_set) == 0:\n return\n\n # Add a LoRA when another LoRA is already present\n if len(removed_set) == 0 and len(prior_set) > 0:\n logger.info(f\"Adding the LoRA(s) named {added_set} to the model...\")\n for lora in added_set:\n shared.model.load_adapter(get_lora_path(lora), lora)\n\n return\n\n # If any LoRA needs to be removed, start over\n if len(removed_set) > 0:\n # shared.model may no longer be PeftModel\n if hasattr(shared.model, 'disable_adapter'):\n shared.model.disable_adapter()\n shared.model = shared.model.base_model.model\n\n if len(lora_names) > 0:\n params = {}\n if not shared.args.cpu:\n if shared.args.load_in_4bit or shared.args.load_in_8bit:\n params['peft_type'] = shared.model.dtype\n else:\n params['dtype'] = shared.model.dtype\n if hasattr(shared.model, \"hf_device_map\"):\n params['device_map'] = {\"base_model.model.\" + k: v for k, v in shared.model.hf_device_map.items()}\n\n logger.info(\"Applying the following LoRAs to {}: {}\".format(shared.model_name, ', '.join(lora_names)))\n shared.model = PeftModel.from_pretrained(shared.model, get_lora_path(lora_names[0]), adapter_name=lora_names[0], **params)\n for lora in lora_names[1:]:\n shared.model.load_adapter(get_lora_path(lora), lora)\n\n shared.lora_names = lora_names\n\n if not shared.args.load_in_8bit and not shared.args.cpu:\n shared.model.half()\n if not hasattr(shared.model, \"hf_device_map\"):\n if torch.backends.mps.is_available():\n device = torch.device('mps')\n shared.model = shared.model.to(device)\n else:\n shared.model = shared.model.cuda()\n", "path": "modules/LoRA.py"}], "after_files": [{"content": "from pathlib import Path\n\nimport torch\nfrom peft import PeftModel\n\nimport modules.shared as shared\nfrom modules.logging_colors import logger\nfrom modules.models import reload_model\n\n\ndef merge_loras():\n if len(list({shared.model.peft_config[adapter].r for adapter in shared.model.peft_config.keys()})) > 1:\n logger.warning(\"The loaded LoRAs cannot be merged, as they have dissimilar ranks. 
Only the first one will be active.\")\n return\n\n shared.model.add_weighted_adapter(shared.lora_names, [1] * len(shared.lora_names), \"__merged\")\n shared.model.set_adapter(\"__merged\")\n\ndef add_lora_to_model(lora_names):\n if 'GPTQForCausalLM' in shared.model.__class__.__name__ or shared.args.loader == 'AutoGPTQ':\n add_lora_autogptq(lora_names)\n elif shared.model.__class__.__name__ in ['ExllamaModel', 'ExllamaHF'] or shared.args.loader == 'ExLlama':\n add_lora_exllama(lora_names)\n elif shared.model.__class__.__name__ in ['Exllamav2Model', 'Exllamav2HF'] or shared.args.loader == ['ExLlamav2', 'ExLlamav2_HF']:\n add_lora_exllamav2(lora_names)\n else:\n add_lora_transformers(lora_names)\n\n\ndef get_lora_path(lora_name):\n p = Path(lora_name)\n if p.exists():\n lora_name = p.parts[-1]\n\n return Path(f\"{shared.args.lora_dir}/{lora_name}\")\n\n\ndef add_lora_exllama(lora_names):\n\n try:\n from exllama.lora import ExLlamaLora\n except:\n try:\n from repositories.exllama.lora import ExLlamaLora\n except:\n logger.error(\"Could not find the file repositories/exllama/lora.py. Make sure that exllama is cloned inside repositories/ and is up to date.\")\n return\n\n if len(lora_names) == 0:\n if shared.model.__class__.__name__ == 'ExllamaModel':\n shared.model.generator.lora = None\n else:\n shared.model.lora = None\n\n shared.lora_names = []\n return\n else:\n if len(lora_names) > 1:\n logger.warning('ExLlama can only work with 1 LoRA at the moment. Only the first one in the list will be loaded.')\n\n lora_path = get_lora_path(lora_names[0])\n lora_config_path = lora_path / \"adapter_config.json\"\n lora_adapter_path = lora_path / \"adapter_model.bin\"\n\n logger.info(\"Applying the following LoRAs to {}: {}\".format(shared.model_name, ', '.join([lora_names[0]])))\n if shared.model.__class__.__name__ == 'ExllamaModel':\n lora = ExLlamaLora(shared.model.model, str(lora_config_path), str(lora_adapter_path))\n shared.model.generator.lora = lora\n else:\n lora = ExLlamaLora(shared.model.ex_model, str(lora_config_path), str(lora_adapter_path))\n shared.model.lora = lora\n\n shared.lora_names = [lora_names[0]]\n return\n\n\ndef add_lora_exllamav2(lora_names):\n\n from exllamav2 import ExLlamaV2Lora\n\n if isinstance(shared.model.loras, list):\n for lora in shared.model.loras:\n lora.unload()\n\n if len(lora_names) > 0:\n logger.info(\"Applying the following LoRAs to {}: {}\".format(shared.model_name, ', '.join(lora_names)))\n shared.model.loras = []\n for lora_name in lora_names:\n lora_path = get_lora_path(lora_name)\n if shared.model.__class__.__name__ == 'Exllamav2Model':\n lora = ExLlamaV2Lora.from_directory(shared.model.model, str(lora_path))\n else:\n lora = ExLlamaV2Lora.from_directory(shared.model.ex_model, str(lora_path))\n\n shared.model.loras.append(lora)\n\n shared.lora_names = lora_names\n else:\n shared.lora_names = []\n shared.model.loras = None\n\n\ndef add_lora_autogptq(lora_names):\n '''\n Adapted from https://github.com/Ph0rk0z/text-generation-webui-testing\n '''\n\n try:\n from auto_gptq import get_gptq_peft_model\n from auto_gptq.utils.peft_utils import GPTQLoraConfig\n except:\n logger.error(\"This version of AutoGPTQ does not support LoRA. You need to install from source or wait for a new release.\")\n return\n\n if len(lora_names) == 0:\n reload_model()\n\n shared.lora_names = []\n return\n else:\n if len(lora_names) > 1:\n logger.warning('AutoGPTQ can only work with 1 LoRA at the moment. 
Only the first one in the list will be loaded.')\n if not shared.args.no_inject_fused_attention:\n logger.warning('Fused Atttention + AutoGPTQ may break Lora loading. Disable it.')\n\n peft_config = GPTQLoraConfig(\n inference_mode=True,\n )\n\n lora_path = get_lora_path(lora_names[0])\n logger.info(\"Applying the following LoRAs to {}: {}\".format(shared.model_name, ', '.join([lora_names[0]])))\n shared.model = get_gptq_peft_model(shared.model, peft_config, lora_path)\n shared.lora_names = [lora_names[0]]\n return\n\n\ndef add_lora_transformers(lora_names):\n prior_set = set(shared.lora_names)\n added_set = set(lora_names) - prior_set\n removed_set = prior_set - set(lora_names)\n\n # If no LoRA needs to be added or removed, exit\n if len(added_set) == 0 and len(removed_set) == 0:\n return\n\n # Add a LoRA when another LoRA is already present\n if len(removed_set) == 0 and len(prior_set) > 0 and \"__merged\" not in shared.model.peft_config.keys():\n logger.info(f\"Adding the LoRA(s) named {added_set} to the model...\")\n for lora in added_set:\n shared.model.load_adapter(get_lora_path(lora), lora)\n\n if len(lora_names) > 1:\n merge_loras()\n\n return\n\n # If any LoRA needs to be removed, start over\n if len(removed_set) > 0:\n # shared.model may no longer be PeftModel\n if hasattr(shared.model, 'disable_adapter'):\n shared.model.disable_adapter()\n shared.model = shared.model.base_model.model\n\n if len(lora_names) > 0:\n params = {}\n if not shared.args.cpu:\n if shared.args.load_in_4bit or shared.args.load_in_8bit:\n params['peft_type'] = shared.model.dtype\n else:\n params['dtype'] = shared.model.dtype\n if hasattr(shared.model, \"hf_device_map\"):\n params['device_map'] = {\"base_model.model.\" + k: v for k, v in shared.model.hf_device_map.items()}\n\n logger.info(\"Applying the following LoRAs to {}: {}\".format(shared.model_name, ', '.join(lora_names)))\n shared.model = PeftModel.from_pretrained(shared.model, get_lora_path(lora_names[0]), adapter_name=lora_names[0], **params)\n for lora in lora_names[1:]:\n shared.model.load_adapter(get_lora_path(lora), lora)\n\n if len(lora_names) > 1:\n merge_loras()\n\n shared.lora_names = lora_names\n\n if not shared.args.load_in_8bit and not shared.args.cpu:\n shared.model.half()\n if not hasattr(shared.model, \"hf_device_map\"):\n if torch.backends.mps.is_available():\n device = torch.device('mps')\n shared.model = shared.model.to(device)\n else:\n shared.model = shared.model.cuda()\n", "path": "modules/LoRA.py"}]} | 3,390 | 462 |
gh_patches_debug_40610 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1298 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NH Bill text points to wrong document.
State: NH
Many or all bill texts for NH's 2017 session are wrong. Use the openstates.org website to look at a few.
In case the problem isn't as obvious as I think, here's one:
NH's `HB 544` (OpenStates `NHB00006021`) is a bill titled:
> relative to earned time credits for prisoners participating in rehabilitative or educational programming.
A single version is listed in the bill detail API response, with a URL of
http://www.gencourt.state.nh.us/bill_status/billText.aspx?sy=2017&id=0761&txtFormat=html
The document at that URL is a fiscal note on an unrelated bill, `HB 407`.
I have not tried to determine if this affects earlier sessions.
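For what it's worth, the symptom is consistent with the scraper passing the LSR number straight into the `id=` parameter, while `billText.aspx` appears to key that parameter on a separate text-document id, so the link resolves to whatever unrelated document happens to carry that number. A minimal illustration (the value comes from the URL above; the interpretation of `id=` is a guess):

```python
lsr = "0761"  # HB 544's LSR number, currently reused as the text id
url = ("http://www.gencourt.state.nh.us/bill_status/"
       "billText.aspx?sy=2017&id={}&txtFormat=html".format(lsr))
# Fetching this URL returns a fiscal note for HB 407 instead of HB 544.
print(url)
```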
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/nh/bills.py`
Content:
```
1 import os
2 import re
3 import zipfile
4 import datetime as dt
5
6 from billy.scrape.bills import Bill, BillScraper
7 from billy.scrape.votes import Vote
8
9
10 body_code = {'lower': 'H', 'upper': 'S'}
11 bill_type_map = {'B': 'bill',
12 'R': 'resolution',
13 'CR': 'concurrent resolution',
14 'JR': 'joint resolution',
15 'CO': 'concurrent order',
16 'A': "address"
17 }
18 action_classifiers = [
19 ('Ought to Pass', ['bill:passed']),
20 ('Passed by Third Reading', ['bill:reading:3', 'bill:passed']),
21 ('.*Ought to Pass', ['committee:passed:favorable']),
22 ('.*Introduced(.*) and (R|r)eferred', ['bill:introduced', 'committee:referred']),
23 ('.*Inexpedient to Legislate', ['committee:passed:unfavorable']),
24 ('Proposed(.*) Amendment', 'amendment:introduced'),
25 ('Amendment .* Adopted', 'amendment:passed'),
26 ('Amendment .* Failed', 'amendment:failed'),
27 ('Signed', 'governor:signed'),
28 ('Vetoed', 'governor:vetoed'),
29 ]
30 VERSION_URL = 'http://www.gencourt.state.nh.us/legislation/%s/%s.html'
31 AMENDMENT_URL = 'http://www.gencourt.state.nh.us/legislation/amendments/%s.html'
32
33
34 def classify_action(action):
35 for regex, classification in action_classifiers:
36 if re.match(regex, action):
37 return classification
38 return 'other'
39
40
41 def extract_amendment_id(action):
42 piece = re.findall('Amendment #(\d{4}-\d+[hs])', action)
43 if piece:
44 return piece[0]
45
46
47 class NHBillScraper(BillScraper):
48 jurisdiction = 'nh'
49
50 def scrape(self, chamber, session):
51 if int(session) < 2017:
52 legacy = NHLegacyBillScraper(self.metadata, self.output_dir, self.strict_validation)
53 legacy.scrape(chamber, session)
54 # This throws an error because object_count isn't being properly incremented,
55 # even though it saves fine. So fake the output_names
56 self.output_names = ['1']
57 return
58
59 # bill basics
60 self.bills = {} # LSR->Bill
61 self.bills_by_id = {} # need a second table to attach votes
62 last_line = []
63 for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/LSRs.txt').content.split("\n"):
64 line = line.split('|')
65 if len(line) < 1:
66 continue
67
68 if len(line) < 36:
69 if len(last_line + line[1:]) == 36:
70 # combine two lines for processing
71 # (skip an empty entry at beginning of second line)
72 line = last_line + line
73 self.warning('used bad line')
74 else:
75 # skip this line, maybe we'll use it later
76 self.warning('bad line: %s' % '|'.join(line))
77 last_line = line
78 continue
79 session_yr = line[0]
80 lsr = line[1]
81 title = line[2]
82 body = line[3]
83 type_num = line[4]
84 expanded_bill_id = line[9]
85 bill_id = line[10]
86
87 if body == body_code[chamber] and session_yr == session:
88 if expanded_bill_id.startswith('CACR'):
89 bill_type = 'constitutional amendment'
90 elif expanded_bill_id.startswith('PET'):
91 bill_type = 'petition'
92 elif expanded_bill_id.startswith('AR') and bill_id.startswith('CACR'):
93 bill_type = 'constitutional amendment'
94 else:
95 bill_type = bill_type_map[expanded_bill_id.split(' ')[0][1:]]
96
97 if title.startswith('('):
98 title = title.split(')', 1)[1].strip()
99
100 self.bills[lsr] = Bill(session, chamber, bill_id, title,
101 type=bill_type)
102
103 # http://www.gencourt.state.nh.us/bill_status/billText.aspx?sy=2017&id=95&txtFormat=html
104 version_url = 'http://www.gencourt.state.nh.us/bill_status/billText.aspx?sy={}&id={}&txtFormat=html'.format(session, lsr)
105
106 self.bills[lsr].add_version('latest version', version_url,
107 mimetype='text/html')
108 self.bills_by_id[bill_id] = self.bills[lsr]
109
110 # load legislators
111 self.legislators = {}
112 for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/legislators.txt').content.split("\n"):
113 if len(line) < 1:
114 continue
115
116 line = line.split('|')
117 employee_num = line[0]
118
119 # first, last, middle
120 if line[3]:
121 name = '%s %s %s' % (line[2], line[3], line[1])
122 else:
123 name = '%s %s' % (line[2], line[1])
124
125 self.legislators[employee_num] = {'name': name,
126 'seat': line[5]}
127 #body = line[4]
128
129 # sponsors
130 for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/LsrSponsors.txt').content.split("\n"):
131 if len(line) < 1:
132 continue
133
134 session_yr, lsr, seq, employee, primary = line.strip().split('|')
135
136 if session_yr == session and lsr in self.bills:
137 sp_type = 'primary' if primary == '1' else 'cosponsor'
138 try:
139 self.bills[lsr].add_sponsor(sp_type,
140 self.legislators[employee]['name'],
141 _code=self.legislators[employee]['seat'])
142 except KeyError:
143 self.warning("Error, can't find person %s" % employee)
144
145
146 # actions
147 for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/Docket.txt').content.split("\n"):
148 if len(line) < 1:
149 continue
150 # a few blank/irregular lines, irritating
151 if '|' not in line:
152 continue
153
154 (session_yr, lsr, timestamp, bill_id, body,
155 action, _) = line.split('|')
156
157 if session_yr == session and lsr in self.bills:
158 actor = 'lower' if body == 'H' else 'upper'
159 time = dt.datetime.strptime(timestamp,
160 '%m/%d/%Y %H:%M:%S %p')
161 action = action.strip()
162 atype = classify_action(action)
163 self.bills[lsr].add_action(actor, action, time, type=atype)
164 amendment_id = extract_amendment_id(action)
165 if amendment_id:
166 self.bills[lsr].add_document('amendment %s' % amendment_id,
167 AMENDMENT_URL % amendment_id)
168
169 self.scrape_votes(session)
170
171 # save all bills
172 for bill in self.bills:
173 #bill.add_source(zip_url)
174 self.add_source(self.bills[bill], bill, session)
175 self.save_bill(self.bills[bill])
176
177 def add_source(self, bill, lsr, session):
178 bill_url = 'http://www.gencourt.state.nh.us/bill_Status/bill_status.aspx?lsr={}&sy={}&sortoption=&txtsessionyear={}'.format(lsr, session, session)
179 bill.add_source(bill_url)
180
181 def scrape_votes(self, session):
182 votes = {}
183 last_line = []
184
185 for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/RollCallSummary.txt').content:
186 if len(line) < 2:
187 continue
188
189 if line.strip() == "":
190 continue
191
192 line = line.split('|')
193 if len(line) < 14:
194 if len(last_line + line[1:]) == 14:
195 line = last_line
196 self.warning('used bad vote line')
197 else:
198 last_line = line
199 self.warning('bad vote line %s' % '|'.join(line))
200 session_yr = line[0]
201 body = line[1]
202 vote_num = line[2]
203 timestamp = line[3]
204 bill_id = line[4].strip()
205 yeas = int(line[5])
206 nays = int(line[6])
207 present = int(line[7])
208 absent = int(line[8])
209 motion = line[11].strip() or '[not available]'
210
211 if session_yr == session and bill_id in self.bills_by_id:
212 actor = 'lower' if body == 'H' else 'upper'
213 time = dt.datetime.strptime(timestamp,
214 '%m/%d/%Y %I:%M:%S %p')
215 # TODO: stop faking passed somehow
216 passed = yeas > nays
217 vote = Vote(actor, time, motion, passed, yeas, nays,
218 other_count=0)
219 votes[body+vote_num] = vote
220 self.bills_by_id[bill_id].add_vote(vote)
221
222 for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/RollCallHistory.txt').content:
223 if len(line) < 2:
224 continue
225
226 # 2016|H|2|330795||Yea|
227 # 2012 | H | 2 | 330795 | HB309 | Yea |1/4/2012 8:27:03 PM
228 session_yr, body, v_num, employee, bill_id, vote \
229 = line.split('|')
230
231 if not bill_id:
232 continue
233
234 if session_yr == session and bill_id.strip() in self.bills_by_id:
235 try:
236 leg = self.legislators[employee]['name']
237 except KeyError:
238 self.warning("Error, can't find person %s" % employee)
239 continue
240
241 vote = vote.strip()
242 if not body+v_num in votes:
243 self.warning("Skipping processing this vote:")
244 self.warning("Bad ID: %s" % ( body+v_num ) )
245 continue
246
247 #code = self.legislators[employee]['seat']
248 if vote == 'Yea':
249 votes[body+v_num].yes(leg)
250 elif vote == 'Nay':
251 votes[body+v_num].no(leg)
252 else:
253 votes[body+v_num].other(leg)
254 votes[body+v_num]['other_count'] += 1
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openstates/nh/bills.py b/openstates/nh/bills.py
--- a/openstates/nh/bills.py
+++ b/openstates/nh/bills.py
@@ -59,6 +59,11 @@
# bill basics
self.bills = {} # LSR->Bill
self.bills_by_id = {} # need a second table to attach votes
+ self.versions_by_lsr = {} # mapping of bill ID to lsr
+
+ # pre load the mapping table of LSR -> version id
+ self.scrape_version_ids()
+
last_line = []
for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/LSRs.txt').content.split("\n"):
line = line.split('|')
@@ -90,7 +95,7 @@
elif expanded_bill_id.startswith('PET'):
bill_type = 'petition'
elif expanded_bill_id.startswith('AR') and bill_id.startswith('CACR'):
- bill_type = 'constitutional amendment'
+ bill_type = 'constitutional amendment'
else:
bill_type = bill_type_map[expanded_bill_id.split(' ')[0][1:]]
@@ -100,11 +105,16 @@
self.bills[lsr] = Bill(session, chamber, bill_id, title,
type=bill_type)
- # http://www.gencourt.state.nh.us/bill_status/billText.aspx?sy=2017&id=95&txtFormat=html
- version_url = 'http://www.gencourt.state.nh.us/bill_status/billText.aspx?sy={}&id={}&txtFormat=html'.format(session, lsr)
+ # http://www.gencourt.state.nh.us/bill_status/billText.aspx?sy=2017&id=95&txtFormat=html
+ if lsr in self.versions_by_lsr:
+ version_id = self.versions_by_lsr[lsr]
+ version_url = 'http://www.gencourt.state.nh.us/bill_status/' \
+ 'billText.aspx?sy={}&id={}&txtFormat=html' \
+ .format(session, version_id)
+
+ self.bills[lsr].add_version('latest version', version_url,
+ mimetype='text/html', on_duplicate='use_new')
- self.bills[lsr].add_version('latest version', version_url,
- mimetype='text/html')
self.bills_by_id[bill_id] = self.bills[lsr]
# load legislators
@@ -178,6 +188,21 @@
bill_url = 'http://www.gencourt.state.nh.us/bill_Status/bill_status.aspx?lsr={}&sy={}&sortoption=&txtsessionyear={}'.format(lsr, session, session)
bill.add_source(bill_url)
+ def scrape_version_ids(self):
+ for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/LsrsOnly.txt').content.split("\n"):
+ if len(line) < 1:
+ continue
+ # a few blank/irregular lines, irritating
+ if '|' not in line:
+ continue
+
+ line = line.split('|')
+ file_id = line[2]
+ lsr = line[0].split('-')
+ lsr = lsr[1]
+ self.versions_by_lsr[lsr] = file_id
+
+
def scrape_votes(self, session):
votes = {}
last_line = []
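For readers following the patch: the essential change is a lookup table built from `LsrsOnly.txt` that maps an LSR number to the document id `billText.aspx` actually expects, and the version URL is then built from the mapped id instead of the raw LSR. A standalone sketch of that step (the sample line is invented; only the column positions assumed by the patch are relied on):

```python
# Hypothetical LsrsOnly.txt line: "<session>-<lsr>|<...>|<file id>|..."
sample_lines = ["2017-0761|HB544|95|"]

versions_by_lsr = {}
for line in sample_lines:
    if "|" not in line:
        continue
    fields = line.split("|")
    file_id = fields[2]             # id understood by billText.aspx
    lsr = fields[0].split("-")[1]   # "2017-0761" -> "0761"
    versions_by_lsr[lsr] = file_id

# The corrected link uses the mapped id rather than the LSR itself:
url = ("http://www.gencourt.state.nh.us/bill_status/"
       "billText.aspx?sy=2017&id={}&txtFormat=html".format(versions_by_lsr["0761"]))
print(url)
```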
| {"golden_diff": "diff --git a/openstates/nh/bills.py b/openstates/nh/bills.py\n--- a/openstates/nh/bills.py\n+++ b/openstates/nh/bills.py\n@@ -59,6 +59,11 @@\n # bill basics\n self.bills = {} # LSR->Bill\n self.bills_by_id = {} # need a second table to attach votes\n+ self.versions_by_lsr = {} # mapping of bill ID to lsr\n+\n+ # pre load the mapping table of LSR -> version id\n+ self.scrape_version_ids()\n+\n last_line = []\n for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/LSRs.txt').content.split(\"\\n\"):\n line = line.split('|')\n@@ -90,7 +95,7 @@\n elif expanded_bill_id.startswith('PET'):\n bill_type = 'petition'\n elif expanded_bill_id.startswith('AR') and bill_id.startswith('CACR'):\n- bill_type = 'constitutional amendment' \n+ bill_type = 'constitutional amendment'\n else:\n bill_type = bill_type_map[expanded_bill_id.split(' ')[0][1:]]\n \n@@ -100,11 +105,16 @@\n self.bills[lsr] = Bill(session, chamber, bill_id, title,\n type=bill_type)\n \n- # http://www.gencourt.state.nh.us/bill_status/billText.aspx?sy=2017&id=95&txtFormat=html \n- version_url = 'http://www.gencourt.state.nh.us/bill_status/billText.aspx?sy={}&id={}&txtFormat=html'.format(session, lsr)\n+ # http://www.gencourt.state.nh.us/bill_status/billText.aspx?sy=2017&id=95&txtFormat=html\n+ if lsr in self.versions_by_lsr:\n+ version_id = self.versions_by_lsr[lsr]\n+ version_url = 'http://www.gencourt.state.nh.us/bill_status/' \\\n+ 'billText.aspx?sy={}&id={}&txtFormat=html' \\\n+ .format(session, version_id)\n+\n+ self.bills[lsr].add_version('latest version', version_url,\n+ mimetype='text/html', on_duplicate='use_new')\n \n- self.bills[lsr].add_version('latest version', version_url,\n- mimetype='text/html')\n self.bills_by_id[bill_id] = self.bills[lsr]\n \n # load legislators\n@@ -178,6 +188,21 @@\n bill_url = 'http://www.gencourt.state.nh.us/bill_Status/bill_status.aspx?lsr={}&sy={}&sortoption=&txtsessionyear={}'.format(lsr, session, session)\n bill.add_source(bill_url)\n \n+ def scrape_version_ids(self):\n+ for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/LsrsOnly.txt').content.split(\"\\n\"):\n+ if len(line) < 1:\n+ continue\n+ # a few blank/irregular lines, irritating\n+ if '|' not in line:\n+ continue\n+\n+ line = line.split('|')\n+ file_id = line[2]\n+ lsr = line[0].split('-')\n+ lsr = lsr[1]\n+ self.versions_by_lsr[lsr] = file_id\n+\n+\n def scrape_votes(self, session):\n votes = {}\n last_line = []\n", "issue": "NH Bill text points to wrong document.\nState: NH\r\n\r\nMany or all bill texts for NH's 2017 session are wrong. 
Use the openstates.org website to look at a few.\r\nIn case the problem isn't as obvious as I think, here's one:\r\n\r\nNH's `HB 544` (OpenStates `NHB00006021`) is a bill titled: \r\n\r\n> relative to earned time credits for prisoners participating in rehabilitative or educational programming.\r\n\r\nA single version is listed in the bill detail API response, with a URL of \r\nhttp://www.gencourt.state.nh.us/bill_status/billText.aspx?sy=2017&id=0761&txtFormat=html\r\n\r\nThe document at that URL is a fiscal note on an unrelated bill, `HB 407`.\r\n\r\nI have not tried to determine if this affects earlier sessions.\n", "before_files": [{"content": "import os\nimport re\nimport zipfile\nimport datetime as dt\n\nfrom billy.scrape.bills import Bill, BillScraper\nfrom billy.scrape.votes import Vote\n\n\nbody_code = {'lower': 'H', 'upper': 'S'}\nbill_type_map = {'B': 'bill',\n 'R': 'resolution',\n 'CR': 'concurrent resolution',\n 'JR': 'joint resolution',\n 'CO': 'concurrent order',\n 'A': \"address\"\n }\naction_classifiers = [\n ('Ought to Pass', ['bill:passed']),\n ('Passed by Third Reading', ['bill:reading:3', 'bill:passed']),\n ('.*Ought to Pass', ['committee:passed:favorable']),\n ('.*Introduced(.*) and (R|r)eferred', ['bill:introduced', 'committee:referred']),\n ('.*Inexpedient to Legislate', ['committee:passed:unfavorable']),\n ('Proposed(.*) Amendment', 'amendment:introduced'),\n ('Amendment .* Adopted', 'amendment:passed'),\n ('Amendment .* Failed', 'amendment:failed'),\n ('Signed', 'governor:signed'),\n ('Vetoed', 'governor:vetoed'),\n]\nVERSION_URL = 'http://www.gencourt.state.nh.us/legislation/%s/%s.html'\nAMENDMENT_URL = 'http://www.gencourt.state.nh.us/legislation/amendments/%s.html'\n\n\ndef classify_action(action):\n for regex, classification in action_classifiers:\n if re.match(regex, action):\n return classification\n return 'other'\n\n\ndef extract_amendment_id(action):\n piece = re.findall('Amendment #(\\d{4}-\\d+[hs])', action)\n if piece:\n return piece[0]\n\n\nclass NHBillScraper(BillScraper):\n jurisdiction = 'nh'\n\n def scrape(self, chamber, session):\n if int(session) < 2017:\n legacy = NHLegacyBillScraper(self.metadata, self.output_dir, self.strict_validation)\n legacy.scrape(chamber, session)\n # This throws an error because object_count isn't being properly incremented, \n # even though it saves fine. 
So fake the output_names\n self.output_names = ['1']\n return\n\n # bill basics\n self.bills = {} # LSR->Bill\n self.bills_by_id = {} # need a second table to attach votes\n last_line = []\n for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/LSRs.txt').content.split(\"\\n\"):\n line = line.split('|')\n if len(line) < 1:\n continue\n\n if len(line) < 36:\n if len(last_line + line[1:]) == 36:\n # combine two lines for processing\n # (skip an empty entry at beginning of second line)\n line = last_line + line\n self.warning('used bad line')\n else:\n # skip this line, maybe we'll use it later\n self.warning('bad line: %s' % '|'.join(line))\n last_line = line\n continue\n session_yr = line[0]\n lsr = line[1]\n title = line[2]\n body = line[3]\n type_num = line[4]\n expanded_bill_id = line[9]\n bill_id = line[10]\n\n if body == body_code[chamber] and session_yr == session:\n if expanded_bill_id.startswith('CACR'):\n bill_type = 'constitutional amendment'\n elif expanded_bill_id.startswith('PET'):\n bill_type = 'petition'\n elif expanded_bill_id.startswith('AR') and bill_id.startswith('CACR'):\n bill_type = 'constitutional amendment' \n else:\n bill_type = bill_type_map[expanded_bill_id.split(' ')[0][1:]]\n\n if title.startswith('('):\n title = title.split(')', 1)[1].strip()\n\n self.bills[lsr] = Bill(session, chamber, bill_id, title,\n type=bill_type)\n\n # http://www.gencourt.state.nh.us/bill_status/billText.aspx?sy=2017&id=95&txtFormat=html \n version_url = 'http://www.gencourt.state.nh.us/bill_status/billText.aspx?sy={}&id={}&txtFormat=html'.format(session, lsr)\n\n self.bills[lsr].add_version('latest version', version_url,\n mimetype='text/html')\n self.bills_by_id[bill_id] = self.bills[lsr]\n\n # load legislators\n self.legislators = {}\n for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/legislators.txt').content.split(\"\\n\"):\n if len(line) < 1:\n continue \n\n line = line.split('|')\n employee_num = line[0]\n\n # first, last, middle\n if line[3]:\n name = '%s %s %s' % (line[2], line[3], line[1])\n else:\n name = '%s %s' % (line[2], line[1])\n\n self.legislators[employee_num] = {'name': name,\n 'seat': line[5]}\n #body = line[4]\n\n # sponsors\n for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/LsrSponsors.txt').content.split(\"\\n\"):\n if len(line) < 1:\n continue \n\n session_yr, lsr, seq, employee, primary = line.strip().split('|')\n\n if session_yr == session and lsr in self.bills:\n sp_type = 'primary' if primary == '1' else 'cosponsor'\n try:\n self.bills[lsr].add_sponsor(sp_type,\n self.legislators[employee]['name'],\n _code=self.legislators[employee]['seat'])\n except KeyError:\n self.warning(\"Error, can't find person %s\" % employee)\n\n\n # actions\n for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/Docket.txt').content.split(\"\\n\"):\n if len(line) < 1:\n continue \n # a few blank/irregular lines, irritating\n if '|' not in line:\n continue\n\n (session_yr, lsr, timestamp, bill_id, body,\n action, _) = line.split('|')\n\n if session_yr == session and lsr in self.bills:\n actor = 'lower' if body == 'H' else 'upper'\n time = dt.datetime.strptime(timestamp,\n '%m/%d/%Y %H:%M:%S %p')\n action = action.strip()\n atype = classify_action(action)\n self.bills[lsr].add_action(actor, action, time, type=atype)\n amendment_id = extract_amendment_id(action)\n if amendment_id:\n self.bills[lsr].add_document('amendment %s' % amendment_id,\n AMENDMENT_URL % amendment_id)\n\n self.scrape_votes(session)\n\n # save all bills\n 
for bill in self.bills:\n #bill.add_source(zip_url)\n self.add_source(self.bills[bill], bill, session)\n self.save_bill(self.bills[bill])\n\n def add_source(self, bill, lsr, session):\n bill_url = 'http://www.gencourt.state.nh.us/bill_Status/bill_status.aspx?lsr={}&sy={}&sortoption=&txtsessionyear={}'.format(lsr, session, session)\n bill.add_source(bill_url)\n\n def scrape_votes(self, session):\n votes = {}\n last_line = []\n\n for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/RollCallSummary.txt').content:\n if len(line) < 2:\n continue \n\n if line.strip() == \"\":\n continue\n\n line = line.split('|')\n if len(line) < 14:\n if len(last_line + line[1:]) == 14:\n line = last_line\n self.warning('used bad vote line')\n else:\n last_line = line\n self.warning('bad vote line %s' % '|'.join(line))\n session_yr = line[0]\n body = line[1]\n vote_num = line[2]\n timestamp = line[3]\n bill_id = line[4].strip()\n yeas = int(line[5])\n nays = int(line[6])\n present = int(line[7])\n absent = int(line[8])\n motion = line[11].strip() or '[not available]'\n\n if session_yr == session and bill_id in self.bills_by_id:\n actor = 'lower' if body == 'H' else 'upper'\n time = dt.datetime.strptime(timestamp,\n '%m/%d/%Y %I:%M:%S %p')\n # TODO: stop faking passed somehow\n passed = yeas > nays\n vote = Vote(actor, time, motion, passed, yeas, nays,\n other_count=0)\n votes[body+vote_num] = vote\n self.bills_by_id[bill_id].add_vote(vote)\n\n for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/RollCallHistory.txt').content:\n if len(line) < 2:\n continue\n \n # 2016|H|2|330795||Yea|\n # 2012 | H | 2 | 330795 | HB309 | Yea |1/4/2012 8:27:03 PM\n session_yr, body, v_num, employee, bill_id, vote \\\n = line.split('|')\n\n if not bill_id:\n continue\n\n if session_yr == session and bill_id.strip() in self.bills_by_id:\n try:\n leg = self.legislators[employee]['name']\n except KeyError:\n self.warning(\"Error, can't find person %s\" % employee)\n continue\n\n vote = vote.strip()\n if not body+v_num in votes:\n self.warning(\"Skipping processing this vote:\")\n self.warning(\"Bad ID: %s\" % ( body+v_num ) )\n continue\n\n #code = self.legislators[employee]['seat']\n if vote == 'Yea':\n votes[body+v_num].yes(leg)\n elif vote == 'Nay':\n votes[body+v_num].no(leg)\n else:\n votes[body+v_num].other(leg)\n votes[body+v_num]['other_count'] += 1", "path": "openstates/nh/bills.py"}], "after_files": [{"content": "import os\nimport re\nimport zipfile\nimport datetime as dt\n\nfrom billy.scrape.bills import Bill, BillScraper\nfrom billy.scrape.votes import Vote\n\n\nbody_code = {'lower': 'H', 'upper': 'S'}\nbill_type_map = {'B': 'bill',\n 'R': 'resolution',\n 'CR': 'concurrent resolution',\n 'JR': 'joint resolution',\n 'CO': 'concurrent order',\n 'A': \"address\"\n }\naction_classifiers = [\n ('Ought to Pass', ['bill:passed']),\n ('Passed by Third Reading', ['bill:reading:3', 'bill:passed']),\n ('.*Ought to Pass', ['committee:passed:favorable']),\n ('.*Introduced(.*) and (R|r)eferred', ['bill:introduced', 'committee:referred']),\n ('.*Inexpedient to Legislate', ['committee:passed:unfavorable']),\n ('Proposed(.*) Amendment', 'amendment:introduced'),\n ('Amendment .* Adopted', 'amendment:passed'),\n ('Amendment .* Failed', 'amendment:failed'),\n ('Signed', 'governor:signed'),\n ('Vetoed', 'governor:vetoed'),\n]\nVERSION_URL = 'http://www.gencourt.state.nh.us/legislation/%s/%s.html'\nAMENDMENT_URL = 'http://www.gencourt.state.nh.us/legislation/amendments/%s.html'\n\n\ndef classify_action(action):\n 
for regex, classification in action_classifiers:\n if re.match(regex, action):\n return classification\n return 'other'\n\n\ndef extract_amendment_id(action):\n piece = re.findall('Amendment #(\\d{4}-\\d+[hs])', action)\n if piece:\n return piece[0]\n\n\nclass NHBillScraper(BillScraper):\n jurisdiction = 'nh'\n\n def scrape(self, chamber, session):\n if int(session) < 2017:\n legacy = NHLegacyBillScraper(self.metadata, self.output_dir, self.strict_validation)\n legacy.scrape(chamber, session)\n # This throws an error because object_count isn't being properly incremented, \n # even though it saves fine. So fake the output_names\n self.output_names = ['1']\n return\n\n # bill basics\n self.bills = {} # LSR->Bill\n self.bills_by_id = {} # need a second table to attach votes\n self.versions_by_lsr = {} # mapping of bill ID to lsr\n\n # pre load the mapping table of LSR -> version id\n self.scrape_version_ids()\n\n last_line = []\n for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/LSRs.txt').content.split(\"\\n\"):\n line = line.split('|')\n if len(line) < 1:\n continue\n\n if len(line) < 36:\n if len(last_line + line[1:]) == 36:\n # combine two lines for processing\n # (skip an empty entry at beginning of second line)\n line = last_line + line\n self.warning('used bad line')\n else:\n # skip this line, maybe we'll use it later\n self.warning('bad line: %s' % '|'.join(line))\n last_line = line\n continue\n session_yr = line[0]\n lsr = line[1]\n title = line[2]\n body = line[3]\n type_num = line[4]\n expanded_bill_id = line[9]\n bill_id = line[10]\n\n if body == body_code[chamber] and session_yr == session:\n if expanded_bill_id.startswith('CACR'):\n bill_type = 'constitutional amendment'\n elif expanded_bill_id.startswith('PET'):\n bill_type = 'petition'\n elif expanded_bill_id.startswith('AR') and bill_id.startswith('CACR'):\n bill_type = 'constitutional amendment'\n else:\n bill_type = bill_type_map[expanded_bill_id.split(' ')[0][1:]]\n\n if title.startswith('('):\n title = title.split(')', 1)[1].strip()\n\n self.bills[lsr] = Bill(session, chamber, bill_id, title,\n type=bill_type)\n\n # http://www.gencourt.state.nh.us/bill_status/billText.aspx?sy=2017&id=95&txtFormat=html\n if lsr in self.versions_by_lsr:\n version_id = self.versions_by_lsr[lsr]\n version_url = 'http://www.gencourt.state.nh.us/bill_status/' \\\n 'billText.aspx?sy={}&id={}&txtFormat=html' \\\n .format(session, version_id)\n\n self.bills[lsr].add_version('latest version', version_url,\n mimetype='text/html', on_duplicate='use_new')\n\n self.bills_by_id[bill_id] = self.bills[lsr]\n\n # load legislators\n self.legislators = {}\n for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/legislators.txt').content.split(\"\\n\"):\n if len(line) < 1:\n continue \n\n line = line.split('|')\n employee_num = line[0]\n\n # first, last, middle\n if line[3]:\n name = '%s %s %s' % (line[2], line[3], line[1])\n else:\n name = '%s %s' % (line[2], line[1])\n\n self.legislators[employee_num] = {'name': name,\n 'seat': line[5]}\n #body = line[4]\n\n # sponsors\n for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/LsrSponsors.txt').content.split(\"\\n\"):\n if len(line) < 1:\n continue \n\n session_yr, lsr, seq, employee, primary = line.strip().split('|')\n\n if session_yr == session and lsr in self.bills:\n sp_type = 'primary' if primary == '1' else 'cosponsor'\n try:\n self.bills[lsr].add_sponsor(sp_type,\n self.legislators[employee]['name'],\n _code=self.legislators[employee]['seat'])\n except 
KeyError:\n self.warning(\"Error, can't find person %s\" % employee)\n\n\n # actions\n for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/Docket.txt').content.split(\"\\n\"):\n if len(line) < 1:\n continue \n # a few blank/irregular lines, irritating\n if '|' not in line:\n continue\n\n (session_yr, lsr, timestamp, bill_id, body,\n action, _) = line.split('|')\n\n if session_yr == session and lsr in self.bills:\n actor = 'lower' if body == 'H' else 'upper'\n time = dt.datetime.strptime(timestamp,\n '%m/%d/%Y %H:%M:%S %p')\n action = action.strip()\n atype = classify_action(action)\n self.bills[lsr].add_action(actor, action, time, type=atype)\n amendment_id = extract_amendment_id(action)\n if amendment_id:\n self.bills[lsr].add_document('amendment %s' % amendment_id,\n AMENDMENT_URL % amendment_id)\n\n self.scrape_votes(session)\n\n # save all bills\n for bill in self.bills:\n #bill.add_source(zip_url)\n self.add_source(self.bills[bill], bill, session)\n self.save_bill(self.bills[bill])\n\n def add_source(self, bill, lsr, session):\n bill_url = 'http://www.gencourt.state.nh.us/bill_Status/bill_status.aspx?lsr={}&sy={}&sortoption=&txtsessionyear={}'.format(lsr, session, session)\n bill.add_source(bill_url)\n\n def scrape_version_ids(self):\n for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/LsrsOnly.txt').content.split(\"\\n\"):\n if len(line) < 1:\n continue\n # a few blank/irregular lines, irritating\n if '|' not in line:\n continue\n\n line = line.split('|')\n file_id = line[2]\n lsr = line[0].split('-')\n lsr = lsr[1]\n self.versions_by_lsr[lsr] = file_id\n\n\n def scrape_votes(self, session):\n votes = {}\n last_line = []\n\n for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/RollCallSummary.txt').content:\n if len(line) < 2:\n continue \n\n if line.strip() == \"\":\n continue\n\n line = line.split('|')\n if len(line) < 14:\n if len(last_line + line[1:]) == 14:\n line = last_line\n self.warning('used bad vote line')\n else:\n last_line = line\n self.warning('bad vote line %s' % '|'.join(line))\n session_yr = line[0]\n body = line[1]\n vote_num = line[2]\n timestamp = line[3]\n bill_id = line[4].strip()\n yeas = int(line[5])\n nays = int(line[6])\n present = int(line[7])\n absent = int(line[8])\n motion = line[11].strip() or '[not available]'\n\n if session_yr == session and bill_id in self.bills_by_id:\n actor = 'lower' if body == 'H' else 'upper'\n time = dt.datetime.strptime(timestamp,\n '%m/%d/%Y %I:%M:%S %p')\n # TODO: stop faking passed somehow\n passed = yeas > nays\n vote = Vote(actor, time, motion, passed, yeas, nays,\n other_count=0)\n votes[body+vote_num] = vote\n self.bills_by_id[bill_id].add_vote(vote)\n\n for line in self.get('http://gencourt.state.nh.us/dynamicdatafiles/RollCallHistory.txt').content:\n if len(line) < 2:\n continue\n \n # 2016|H|2|330795||Yea|\n # 2012 | H | 2 | 330795 | HB309 | Yea |1/4/2012 8:27:03 PM\n session_yr, body, v_num, employee, bill_id, vote \\\n = line.split('|')\n\n if not bill_id:\n continue\n\n if session_yr == session and bill_id.strip() in self.bills_by_id:\n try:\n leg = self.legislators[employee]['name']\n except KeyError:\n self.warning(\"Error, can't find person %s\" % employee)\n continue\n\n vote = vote.strip()\n if not body+v_num in votes:\n self.warning(\"Skipping processing this vote:\")\n self.warning(\"Bad ID: %s\" % ( body+v_num ) )\n continue\n\n #code = self.legislators[employee]['seat']\n if vote == 'Yea':\n votes[body+v_num].yes(leg)\n elif vote == 'Nay':\n 
votes[body+v_num].no(leg)\n else:\n votes[body+v_num].other(leg)\n votes[body+v_num]['other_count'] += 1", "path": "openstates/nh/bills.py"}]} | 3,505 | 802 |
gh_patches_debug_19568 | rasdani/github-patches | git_diff | sanic-org__sanic-2238 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exception handler decorator in blueprint groups
**Is your feature request related to a problem? Please describe.**
Currently exception handlers can be attached to app and blueprint instances, but it is not possible to do the same with blueprint groups.
**Describe the solution you'd like**
Ideally it would be possible to attach an exception handler to a blueprint group by means of a decorator. If it is not possible to attach a handler to a blueprint group directly, its decorator could simply iterate through each of its blueprints, attaching the handler to each of them.
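A usage sketch of what the requested decorator could look like (the `exception` name on the group simply mirrors the existing `Blueprint.exception` API; nothing here is implemented yet):

```python
from sanic import Blueprint
from sanic.exceptions import NotFound
from sanic.response import text

bp1 = Blueprint("bp1", url_prefix="/bp1")
bp2 = Blueprint("bp2", url_prefix="/bp2")
group = Blueprint.group(bp1, bp2)

@group.exception(NotFound)
async def handle_not_found(request, exception):
    # Would apply to every blueprint in the group, like app/bp handlers do.
    return text("not found in this group", status=404)
```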
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sanic/blueprint_group.py`
Content:
```
1 from __future__ import annotations
2
3 from collections.abc import MutableSequence
4 from functools import partial
5 from typing import TYPE_CHECKING, List, Optional, Union
6
7
8 if TYPE_CHECKING:
9 from sanic.blueprints import Blueprint
10
11
12 class BlueprintGroup(MutableSequence):
13 """
14 This class provides a mechanism to implement a Blueprint Group
15 using the :meth:`~sanic.blueprints.Blueprint.group` method in
16 :class:`~sanic.blueprints.Blueprint`. To avoid having to re-write
17 some of the existing implementation, this class provides a custom
18 iterator implementation that will let you use the object of this
19 class as a list/tuple inside the existing implementation.
20
21 .. code-block:: python
22
23 bp1 = Blueprint('bp1', url_prefix='/bp1')
24 bp2 = Blueprint('bp2', url_prefix='/bp2')
25
26 bp3 = Blueprint('bp3', url_prefix='/bp4')
27 bp3 = Blueprint('bp3', url_prefix='/bp4')
28
29 bpg = BlueprintGroup(bp3, bp4, url_prefix="/api", version="v1")
30
31 @bp1.middleware('request')
32 async def bp1_only_middleware(request):
33 print('applied on Blueprint : bp1 Only')
34
35 @bp1.route('/')
36 async def bp1_route(request):
37 return text('bp1')
38
39 @bp2.route('/<param>')
40 async def bp2_route(request, param):
41 return text(param)
42
43 @bp3.route('/')
44 async def bp1_route(request):
45 return text('bp1')
46
47 @bp4.route('/<param>')
48 async def bp2_route(request, param):
49 return text(param)
50
51 group = Blueprint.group(bp1, bp2)
52
53 @group.middleware('request')
54 async def group_middleware(request):
55 print('common middleware applied for both bp1 and bp2')
56
57 # Register Blueprint group under the app
58 app.blueprint(group)
59 app.blueprint(bpg)
60 """
61
62 __slots__ = (
63 "_blueprints",
64 "_url_prefix",
65 "_version",
66 "_strict_slashes",
67 "_version_prefix",
68 )
69
70 def __init__(
71 self,
72 url_prefix: Optional[str] = None,
73 version: Optional[Union[int, str, float]] = None,
74 strict_slashes: Optional[bool] = None,
75 version_prefix: str = "/v",
76 ):
77 """
78 Create a new Blueprint Group
79
80 :param url_prefix: URL: to be prefixed before all the Blueprint Prefix
81 :param version: API Version for the blueprint group. This will be
82 inherited by each of the Blueprint
83 :param strict_slashes: URL Strict slash behavior indicator
84 """
85 self._blueprints: List[Blueprint] = []
86 self._url_prefix = url_prefix
87 self._version = version
88 self._version_prefix = version_prefix
89 self._strict_slashes = strict_slashes
90
91 @property
92 def url_prefix(self) -> Optional[Union[int, str, float]]:
93 """
94 Retrieve the URL prefix being used for the Current Blueprint Group
95
96 :return: string with url prefix
97 """
98 return self._url_prefix
99
100 @property
101 def blueprints(self) -> List[Blueprint]:
102 """
103 Retrieve a list of all the available blueprints under this group.
104
105 :return: List of Blueprint instance
106 """
107 return self._blueprints
108
109 @property
110 def version(self) -> Optional[Union[str, int, float]]:
111 """
112 API Version for the Blueprint Group. This will be applied only in case
113 if the Blueprint doesn't already have a version specified
114
115 :return: Version information
116 """
117 return self._version
118
119 @property
120 def strict_slashes(self) -> Optional[bool]:
121 """
122 URL Slash termination behavior configuration
123
124 :return: bool
125 """
126 return self._strict_slashes
127
128 @property
129 def version_prefix(self) -> str:
130 """
131 Version prefix; defaults to ``/v``
132
133 :return: str
134 """
135 return self._version_prefix
136
137 def __iter__(self):
138 """
139 Tun the class Blueprint Group into an Iterable item
140 """
141 return iter(self._blueprints)
142
143 def __getitem__(self, item):
144 """
145 This method returns a blueprint inside the group specified by
146 an index value. This will enable indexing, splice and slicing
147 of the blueprint group like we can do with regular list/tuple.
148
149 This method is provided to ensure backward compatibility with
150 any of the pre-existing usage that might break.
151
152 :param item: Index of the Blueprint item in the group
153 :return: Blueprint object
154 """
155 return self._blueprints[item]
156
157 def __setitem__(self, index, item) -> None:
158 """
159 Abstract method implemented to turn the `BlueprintGroup` class
160 into a list like object to support all the existing behavior.
161
162 This method is used to perform the list's indexed setter operation.
163
164 :param index: Index to use for inserting a new Blueprint item
165 :param item: New `Blueprint` object.
166 :return: None
167 """
168 self._blueprints[index] = item
169
170 def __delitem__(self, index) -> None:
171 """
172 Abstract method implemented to turn the `BlueprintGroup` class
173 into a list like object to support all the existing behavior.
174
175 This method is used to delete an item from the list of blueprint
176 groups like it can be done on a regular list with index.
177
178 :param index: Index to use for removing a new Blueprint item
179 :return: None
180 """
181 del self._blueprints[index]
182
183 def __len__(self) -> int:
184 """
185 Get the Length of the blueprint group object.
186
187 :return: Length of Blueprint group object
188 """
189 return len(self._blueprints)
190
191 def append(self, value: Blueprint) -> None:
192 """
193 The Abstract class `MutableSequence` leverages this append method to
194 perform the `BlueprintGroup.append` operation.
195 :param value: New `Blueprint` object.
196 :return: None
197 """
198 self._blueprints.append(value)
199
200 def insert(self, index: int, item: Blueprint) -> None:
201 """
202 The Abstract class `MutableSequence` leverages this insert method to
203 perform the `BlueprintGroup.append` operation.
204
205 :param index: Index to use for removing a new Blueprint item
206 :param item: New `Blueprint` object.
207 :return: None
208 """
209 self._blueprints.insert(index, item)
210
211 def middleware(self, *args, **kwargs):
212 """
213 A decorator that can be used to implement a Middleware plugin to
214 all of the Blueprints that belongs to this specific Blueprint Group.
215
216 In case of nested Blueprint Groups, the same middleware is applied
217 across each of the Blueprints recursively.
218
219 :param args: Optional positional Parameters to be use middleware
220 :param kwargs: Optional Keyword arg to use with Middleware
221 :return: Partial function to apply the middleware
222 """
223
224 def register_middleware_for_blueprints(fn):
225 for blueprint in self.blueprints:
226 blueprint.middleware(fn, *args, **kwargs)
227
228 if args and callable(args[0]):
229 fn = args[0]
230 args = list(args)[1:]
231 return register_middleware_for_blueprints(fn)
232 return register_middleware_for_blueprints
233
234 def on_request(self, middleware=None):
235 if callable(middleware):
236 return self.middleware(middleware, "request")
237 else:
238 return partial(self.middleware, attach_to="request")
239
240 def on_response(self, middleware=None):
241 if callable(middleware):
242 return self.middleware(middleware, "response")
243 else:
244 return partial(self.middleware, attach_to="response")
245
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sanic/blueprint_group.py b/sanic/blueprint_group.py
--- a/sanic/blueprint_group.py
+++ b/sanic/blueprint_group.py
@@ -197,6 +197,27 @@
"""
self._blueprints.append(value)
+ def exception(self, *exceptions, **kwargs):
+ """
+ A decorator that can be used to implement a global exception handler
+ for all the Blueprints that belong to this Blueprint Group.
+
+ In case of nested Blueprint Groups, the same handler is applied
+ across each of the Blueprints recursively.
+
+ :param args: List of Python exceptions to be caught by the handler
+ :param kwargs: Additional optional arguments to be passed to the
+ exception handler
+ :return a decorated method to handle global exceptions for any
+ blueprint registered under this group.
+ """
+
+ def register_exception_handler_for_blueprints(fn):
+ for blueprint in self.blueprints:
+ blueprint.exception(*exceptions, **kwargs)(fn)
+
+ return register_exception_handler_for_blueprints
+
def insert(self, index: int, item: Blueprint) -> None:
"""
The Abstract class `MutableSequence` leverages this insert method to
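In rough terms the new method fans the handler out to every member blueprint, so it behaves like this standalone helper (a sketch only; the real implementation lives on the group class as shown in the diff):

```python
def group_exception(group, *exceptions, **kwargs):
    def register(fn):
        # Reuse the existing per-blueprint registration for each member.
        for bp in group.blueprints:
            bp.exception(*exceptions, **kwargs)(fn)
    return register
```

Per the docstring added in the patch, nested blueprint groups receive the same handler recursively, mirroring how the existing `middleware` helper already behaves.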
| {"golden_diff": "diff --git a/sanic/blueprint_group.py b/sanic/blueprint_group.py\n--- a/sanic/blueprint_group.py\n+++ b/sanic/blueprint_group.py\n@@ -197,6 +197,27 @@\n \"\"\"\n self._blueprints.append(value)\n \n+ def exception(self, *exceptions, **kwargs):\n+ \"\"\"\n+ A decorator that can be used to implement a global exception handler\n+ for all the Blueprints that belong to this Blueprint Group.\n+\n+ In case of nested Blueprint Groups, the same handler is applied\n+ across each of the Blueprints recursively.\n+\n+ :param args: List of Python exceptions to be caught by the handler\n+ :param kwargs: Additional optional arguments to be passed to the\n+ exception handler\n+ :return a decorated method to handle global exceptions for any\n+ blueprint registered under this group.\n+ \"\"\"\n+\n+ def register_exception_handler_for_blueprints(fn):\n+ for blueprint in self.blueprints:\n+ blueprint.exception(*exceptions, **kwargs)(fn)\n+\n+ return register_exception_handler_for_blueprints\n+\n def insert(self, index: int, item: Blueprint) -> None:\n \"\"\"\n The Abstract class `MutableSequence` leverages this insert method to\n", "issue": "Exception handler decorator in blueprint groups\n**Is your feature request related to a problem? Please describe.**\r\nCurrently exception handlers can be attached to app and blueprint instances, but it is not possible to do the same with blueprint groups.\r\n\r\n\r\n**Describe the solution you'd like**\r\nIdeally it would be possible to attach an exception handler to a blueprint group via means of a decorator. If it is not possible to attach a handler to a blueprint group directly, its decorator could simply iterate through each of its blueprint attaching the handler to each of them.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom collections.abc import MutableSequence\nfrom functools import partial\nfrom typing import TYPE_CHECKING, List, Optional, Union\n\n\nif TYPE_CHECKING:\n from sanic.blueprints import Blueprint\n\n\nclass BlueprintGroup(MutableSequence):\n \"\"\"\n This class provides a mechanism to implement a Blueprint Group\n using the :meth:`~sanic.blueprints.Blueprint.group` method in\n :class:`~sanic.blueprints.Blueprint`. To avoid having to re-write\n some of the existing implementation, this class provides a custom\n iterator implementation that will let you use the object of this\n class as a list/tuple inside the existing implementation.\n\n .. 
code-block:: python\n\n bp1 = Blueprint('bp1', url_prefix='/bp1')\n bp2 = Blueprint('bp2', url_prefix='/bp2')\n\n bp3 = Blueprint('bp3', url_prefix='/bp4')\n bp3 = Blueprint('bp3', url_prefix='/bp4')\n\n bpg = BlueprintGroup(bp3, bp4, url_prefix=\"/api\", version=\"v1\")\n\n @bp1.middleware('request')\n async def bp1_only_middleware(request):\n print('applied on Blueprint : bp1 Only')\n\n @bp1.route('/')\n async def bp1_route(request):\n return text('bp1')\n\n @bp2.route('/<param>')\n async def bp2_route(request, param):\n return text(param)\n\n @bp3.route('/')\n async def bp1_route(request):\n return text('bp1')\n\n @bp4.route('/<param>')\n async def bp2_route(request, param):\n return text(param)\n\n group = Blueprint.group(bp1, bp2)\n\n @group.middleware('request')\n async def group_middleware(request):\n print('common middleware applied for both bp1 and bp2')\n\n # Register Blueprint group under the app\n app.blueprint(group)\n app.blueprint(bpg)\n \"\"\"\n\n __slots__ = (\n \"_blueprints\",\n \"_url_prefix\",\n \"_version\",\n \"_strict_slashes\",\n \"_version_prefix\",\n )\n\n def __init__(\n self,\n url_prefix: Optional[str] = None,\n version: Optional[Union[int, str, float]] = None,\n strict_slashes: Optional[bool] = None,\n version_prefix: str = \"/v\",\n ):\n \"\"\"\n Create a new Blueprint Group\n\n :param url_prefix: URL: to be prefixed before all the Blueprint Prefix\n :param version: API Version for the blueprint group. This will be\n inherited by each of the Blueprint\n :param strict_slashes: URL Strict slash behavior indicator\n \"\"\"\n self._blueprints: List[Blueprint] = []\n self._url_prefix = url_prefix\n self._version = version\n self._version_prefix = version_prefix\n self._strict_slashes = strict_slashes\n\n @property\n def url_prefix(self) -> Optional[Union[int, str, float]]:\n \"\"\"\n Retrieve the URL prefix being used for the Current Blueprint Group\n\n :return: string with url prefix\n \"\"\"\n return self._url_prefix\n\n @property\n def blueprints(self) -> List[Blueprint]:\n \"\"\"\n Retrieve a list of all the available blueprints under this group.\n\n :return: List of Blueprint instance\n \"\"\"\n return self._blueprints\n\n @property\n def version(self) -> Optional[Union[str, int, float]]:\n \"\"\"\n API Version for the Blueprint Group. This will be applied only in case\n if the Blueprint doesn't already have a version specified\n\n :return: Version information\n \"\"\"\n return self._version\n\n @property\n def strict_slashes(self) -> Optional[bool]:\n \"\"\"\n URL Slash termination behavior configuration\n\n :return: bool\n \"\"\"\n return self._strict_slashes\n\n @property\n def version_prefix(self) -> str:\n \"\"\"\n Version prefix; defaults to ``/v``\n\n :return: str\n \"\"\"\n return self._version_prefix\n\n def __iter__(self):\n \"\"\"\n Tun the class Blueprint Group into an Iterable item\n \"\"\"\n return iter(self._blueprints)\n\n def __getitem__(self, item):\n \"\"\"\n This method returns a blueprint inside the group specified by\n an index value. 
This will enable indexing, splice and slicing\n of the blueprint group like we can do with regular list/tuple.\n\n This method is provided to ensure backward compatibility with\n any of the pre-existing usage that might break.\n\n :param item: Index of the Blueprint item in the group\n :return: Blueprint object\n \"\"\"\n return self._blueprints[item]\n\n def __setitem__(self, index, item) -> None:\n \"\"\"\n Abstract method implemented to turn the `BlueprintGroup` class\n into a list like object to support all the existing behavior.\n\n This method is used to perform the list's indexed setter operation.\n\n :param index: Index to use for inserting a new Blueprint item\n :param item: New `Blueprint` object.\n :return: None\n \"\"\"\n self._blueprints[index] = item\n\n def __delitem__(self, index) -> None:\n \"\"\"\n Abstract method implemented to turn the `BlueprintGroup` class\n into a list like object to support all the existing behavior.\n\n This method is used to delete an item from the list of blueprint\n groups like it can be done on a regular list with index.\n\n :param index: Index to use for removing a new Blueprint item\n :return: None\n \"\"\"\n del self._blueprints[index]\n\n def __len__(self) -> int:\n \"\"\"\n Get the Length of the blueprint group object.\n\n :return: Length of Blueprint group object\n \"\"\"\n return len(self._blueprints)\n\n def append(self, value: Blueprint) -> None:\n \"\"\"\n The Abstract class `MutableSequence` leverages this append method to\n perform the `BlueprintGroup.append` operation.\n :param value: New `Blueprint` object.\n :return: None\n \"\"\"\n self._blueprints.append(value)\n\n def insert(self, index: int, item: Blueprint) -> None:\n \"\"\"\n The Abstract class `MutableSequence` leverages this insert method to\n perform the `BlueprintGroup.append` operation.\n\n :param index: Index to use for removing a new Blueprint item\n :param item: New `Blueprint` object.\n :return: None\n \"\"\"\n self._blueprints.insert(index, item)\n\n def middleware(self, *args, **kwargs):\n \"\"\"\n A decorator that can be used to implement a Middleware plugin to\n all of the Blueprints that belongs to this specific Blueprint Group.\n\n In case of nested Blueprint Groups, the same middleware is applied\n across each of the Blueprints recursively.\n\n :param args: Optional positional Parameters to be use middleware\n :param kwargs: Optional Keyword arg to use with Middleware\n :return: Partial function to apply the middleware\n \"\"\"\n\n def register_middleware_for_blueprints(fn):\n for blueprint in self.blueprints:\n blueprint.middleware(fn, *args, **kwargs)\n\n if args and callable(args[0]):\n fn = args[0]\n args = list(args)[1:]\n return register_middleware_for_blueprints(fn)\n return register_middleware_for_blueprints\n\n def on_request(self, middleware=None):\n if callable(middleware):\n return self.middleware(middleware, \"request\")\n else:\n return partial(self.middleware, attach_to=\"request\")\n\n def on_response(self, middleware=None):\n if callable(middleware):\n return self.middleware(middleware, \"response\")\n else:\n return partial(self.middleware, attach_to=\"response\")\n", "path": "sanic/blueprint_group.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom collections.abc import MutableSequence\nfrom functools import partial\nfrom typing import TYPE_CHECKING, List, Optional, Union\n\n\nif TYPE_CHECKING:\n from sanic.blueprints import Blueprint\n\n\nclass BlueprintGroup(MutableSequence):\n \"\"\"\n This class provides a 
mechanism to implement a Blueprint Group\n using the :meth:`~sanic.blueprints.Blueprint.group` method in\n :class:`~sanic.blueprints.Blueprint`. To avoid having to re-write\n some of the existing implementation, this class provides a custom\n iterator implementation that will let you use the object of this\n class as a list/tuple inside the existing implementation.\n\n .. code-block:: python\n\n bp1 = Blueprint('bp1', url_prefix='/bp1')\n bp2 = Blueprint('bp2', url_prefix='/bp2')\n\n bp3 = Blueprint('bp3', url_prefix='/bp4')\n bp3 = Blueprint('bp3', url_prefix='/bp4')\n\n bpg = BlueprintGroup(bp3, bp4, url_prefix=\"/api\", version=\"v1\")\n\n @bp1.middleware('request')\n async def bp1_only_middleware(request):\n print('applied on Blueprint : bp1 Only')\n\n @bp1.route('/')\n async def bp1_route(request):\n return text('bp1')\n\n @bp2.route('/<param>')\n async def bp2_route(request, param):\n return text(param)\n\n @bp3.route('/')\n async def bp1_route(request):\n return text('bp1')\n\n @bp4.route('/<param>')\n async def bp2_route(request, param):\n return text(param)\n\n group = Blueprint.group(bp1, bp2)\n\n @group.middleware('request')\n async def group_middleware(request):\n print('common middleware applied for both bp1 and bp2')\n\n # Register Blueprint group under the app\n app.blueprint(group)\n app.blueprint(bpg)\n \"\"\"\n\n __slots__ = (\n \"_blueprints\",\n \"_url_prefix\",\n \"_version\",\n \"_strict_slashes\",\n \"_version_prefix\",\n )\n\n def __init__(\n self,\n url_prefix: Optional[str] = None,\n version: Optional[Union[int, str, float]] = None,\n strict_slashes: Optional[bool] = None,\n version_prefix: str = \"/v\",\n ):\n \"\"\"\n Create a new Blueprint Group\n\n :param url_prefix: URL: to be prefixed before all the Blueprint Prefix\n :param version: API Version for the blueprint group. This will be\n inherited by each of the Blueprint\n :param strict_slashes: URL Strict slash behavior indicator\n \"\"\"\n self._blueprints: List[Blueprint] = []\n self._url_prefix = url_prefix\n self._version = version\n self._version_prefix = version_prefix\n self._strict_slashes = strict_slashes\n\n @property\n def url_prefix(self) -> Optional[Union[int, str, float]]:\n \"\"\"\n Retrieve the URL prefix being used for the Current Blueprint Group\n\n :return: string with url prefix\n \"\"\"\n return self._url_prefix\n\n @property\n def blueprints(self) -> List[Blueprint]:\n \"\"\"\n Retrieve a list of all the available blueprints under this group.\n\n :return: List of Blueprint instance\n \"\"\"\n return self._blueprints\n\n @property\n def version(self) -> Optional[Union[str, int, float]]:\n \"\"\"\n API Version for the Blueprint Group. This will be applied only in case\n if the Blueprint doesn't already have a version specified\n\n :return: Version information\n \"\"\"\n return self._version\n\n @property\n def strict_slashes(self) -> Optional[bool]:\n \"\"\"\n URL Slash termination behavior configuration\n\n :return: bool\n \"\"\"\n return self._strict_slashes\n\n @property\n def version_prefix(self) -> str:\n \"\"\"\n Version prefix; defaults to ``/v``\n\n :return: str\n \"\"\"\n return self._version_prefix\n\n def __iter__(self):\n \"\"\"\n Tun the class Blueprint Group into an Iterable item\n \"\"\"\n return iter(self._blueprints)\n\n def __getitem__(self, item):\n \"\"\"\n This method returns a blueprint inside the group specified by\n an index value. 
This will enable indexing, splice and slicing\n of the blueprint group like we can do with regular list/tuple.\n\n This method is provided to ensure backward compatibility with\n any of the pre-existing usage that might break.\n\n :param item: Index of the Blueprint item in the group\n :return: Blueprint object\n \"\"\"\n return self._blueprints[item]\n\n def __setitem__(self, index, item) -> None:\n \"\"\"\n Abstract method implemented to turn the `BlueprintGroup` class\n into a list like object to support all the existing behavior.\n\n This method is used to perform the list's indexed setter operation.\n\n :param index: Index to use for inserting a new Blueprint item\n :param item: New `Blueprint` object.\n :return: None\n \"\"\"\n self._blueprints[index] = item\n\n def __delitem__(self, index) -> None:\n \"\"\"\n Abstract method implemented to turn the `BlueprintGroup` class\n into a list like object to support all the existing behavior.\n\n This method is used to delete an item from the list of blueprint\n groups like it can be done on a regular list with index.\n\n :param index: Index to use for removing a new Blueprint item\n :return: None\n \"\"\"\n del self._blueprints[index]\n\n def __len__(self) -> int:\n \"\"\"\n Get the Length of the blueprint group object.\n\n :return: Length of Blueprint group object\n \"\"\"\n return len(self._blueprints)\n\n def append(self, value: Blueprint) -> None:\n \"\"\"\n The Abstract class `MutableSequence` leverages this append method to\n perform the `BlueprintGroup.append` operation.\n :param value: New `Blueprint` object.\n :return: None\n \"\"\"\n self._blueprints.append(value)\n\n def exception(self, *exceptions, **kwargs):\n \"\"\"\n A decorator that can be used to implement a global exception handler\n for all the Blueprints that belong to this Blueprint Group.\n\n In case of nested Blueprint Groups, the same handler is applied\n across each of the Blueprints recursively.\n\n :param args: List of Python exceptions to be caught by the handler\n :param kwargs: Additional optional arguments to be passed to the\n exception handler\n :return a decorated method to handle global exceptions for any\n blueprint registered under this group.\n \"\"\"\n\n def register_exception_handler_for_blueprints(fn):\n for blueprint in self.blueprints:\n blueprint.exception(*exceptions, **kwargs)(fn)\n\n return register_exception_handler_for_blueprints\n\n def insert(self, index: int, item: Blueprint) -> None:\n \"\"\"\n The Abstract class `MutableSequence` leverages this insert method to\n perform the `BlueprintGroup.append` operation.\n\n :param index: Index to use for removing a new Blueprint item\n :param item: New `Blueprint` object.\n :return: None\n \"\"\"\n self._blueprints.insert(index, item)\n\n def middleware(self, *args, **kwargs):\n \"\"\"\n A decorator that can be used to implement a Middleware plugin to\n all of the Blueprints that belongs to this specific Blueprint Group.\n\n In case of nested Blueprint Groups, the same middleware is applied\n across each of the Blueprints recursively.\n\n :param args: Optional positional Parameters to be use middleware\n :param kwargs: Optional Keyword arg to use with Middleware\n :return: Partial function to apply the middleware\n \"\"\"\n\n def register_middleware_for_blueprints(fn):\n for blueprint in self.blueprints:\n blueprint.middleware(fn, *args, **kwargs)\n\n if args and callable(args[0]):\n fn = args[0]\n args = list(args)[1:]\n return register_middleware_for_blueprints(fn)\n return 
register_middleware_for_blueprints\n\n def on_request(self, middleware=None):\n if callable(middleware):\n return self.middleware(middleware, \"request\")\n else:\n return partial(self.middleware, attach_to=\"request\")\n\n def on_response(self, middleware=None):\n if callable(middleware):\n return self.middleware(middleware, \"response\")\n else:\n return partial(self.middleware, attach_to=\"response\")\n", "path": "sanic/blueprint_group.py"}]} | 2,696 | 273 |
gh_patches_debug_10677 | rasdani/github-patches | git_diff | python-discord__bot-1521 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logic in Source Command's Help Command Check
## Description
<!-- Describe the bug you've found -->
It's impossible to get the source for the HelpChannel cog.
## Steps to Reproduce
<!-- Detail the exact list of steps to be able to encounter the bug -->
`!src HelpChannel`
Due to the logic of the command, all that needs to be said is `!src help` followed by anything.
## Expected Behaviour
<!-- Outline what you expect to occur, include screenshots if relevant -->
Source link to the HelpChannel cog is shown.
## Actual Behaviour
<!-- Outline what occurs instead of the expected behavior, include screenshots if relevant -->
Help command source link is shown.
## Known Impacted Platforms
<!-- Replace [ ] with [x] to mark items -->
- [x] Web
- [x] Desktop
- [x] Android App
- [x] iOS App
## Possible Solutions
<!-- Detail any solutions you might have in mind to be able to resolve the bug -->
These lines are the probable cause.
https://github.com/python-discord/bot/blob/51af1369e0d9c2ad185f0c0920b599b7187ed077/bot/exts/info/source.py#L19-L20
After looking at sir-lancebot, it might even be possible to remove these two lines altogether.
## Would you like to implement a fix?
***Note: For high-priority or critical bugs, fixes may be implemented by staff.***
<!-- Replace [ ] with [x] with your choice -->
- [ ] I'd like to implement the bug fix
- [x] Anyone can implement the bug fix
--- END ISSUE ---
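For orientation, a minimal sketch of the stricter check the issue points toward (hedged, since the repository's actual patch may differ): the converter would treat only the literal word `help` as the help command, so names such as `HelpChannel` fall through to the cog lookup.

```python
# Hypothetical illustration only, not the project's actual patch.
async def convert(ctx, argument):
    # Exact (case-insensitive) match instead of startswith(), so arguments
    # like "HelpChannel" are not swallowed by the help-command branch.
    if argument.lower() == "help":
        return ctx.bot.help_command
    # ...fall through to the existing cog/command/tag lookups.
```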
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/exts/info/source.py`
Content:
```
1 import inspect
2 from pathlib import Path
3 from typing import Optional, Tuple, Union
4
5 from discord import Embed, utils
6 from discord.ext import commands
7
8 from bot.bot import Bot
9 from bot.constants import URLs
10
11 SourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]
12
13
14 class SourceConverter(commands.Converter):
15 """Convert an argument into a help command, tag, command, or cog."""
16
17 async def convert(self, ctx: commands.Context, argument: str) -> SourceType:
18 """Convert argument into source object."""
19 if argument.lower().startswith("help"):
20 return ctx.bot.help_command
21
22 cog = ctx.bot.get_cog(argument)
23 if cog:
24 return cog
25
26 cmd = ctx.bot.get_command(argument)
27 if cmd:
28 return cmd
29
30 tags_cog = ctx.bot.get_cog("Tags")
31 show_tag = True
32
33 if not tags_cog:
34 show_tag = False
35 elif argument.lower() in tags_cog._cache:
36 return argument.lower()
37
38 escaped_arg = utils.escape_markdown(argument)
39
40 raise commands.BadArgument(
41 f"Unable to convert '{escaped_arg}' to valid command{', tag,' if show_tag else ''} or Cog."
42 )
43
44
45 class BotSource(commands.Cog):
46 """Displays information about the bot's source code."""
47
48 def __init__(self, bot: Bot):
49 self.bot = bot
50
51 @commands.command(name="source", aliases=("src",))
52 async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:
53 """Display information and a GitHub link to the source code of a command, tag, or cog."""
54 if not source_item:
55 embed = Embed(title="Bot's GitHub Repository")
56 embed.add_field(name="Repository", value=f"[Go to GitHub]({URLs.github_bot_repo})")
57 embed.set_thumbnail(url="https://avatars1.githubusercontent.com/u/9919")
58 await ctx.send(embed=embed)
59 return
60
61 embed = await self.build_embed(source_item)
62 await ctx.send(embed=embed)
63
64 def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:
65 """
66 Build GitHub link of source item, return this link, file location and first line number.
67
68 Raise BadArgument if `source_item` is a dynamically-created object (e.g. via internal eval).
69 """
70 if isinstance(source_item, commands.Command):
71 source_item = inspect.unwrap(source_item.callback)
72 src = source_item.__code__
73 filename = src.co_filename
74 elif isinstance(source_item, str):
75 tags_cog = self.bot.get_cog("Tags")
76 filename = tags_cog._cache[source_item]["location"]
77 else:
78 src = type(source_item)
79 try:
80 filename = inspect.getsourcefile(src)
81 except TypeError:
82 raise commands.BadArgument("Cannot get source for a dynamically-created object.")
83
84 if not isinstance(source_item, str):
85 try:
86 lines, first_line_no = inspect.getsourcelines(src)
87 except OSError:
88 raise commands.BadArgument("Cannot get source for a dynamically-created object.")
89
90 lines_extension = f"#L{first_line_no}-L{first_line_no+len(lines)-1}"
91 else:
92 first_line_no = None
93 lines_extension = ""
94
95 # Handle tag file location differently than others to avoid errors in some cases
96 if not first_line_no:
97 file_location = Path(filename).relative_to("/bot/")
98 else:
99 file_location = Path(filename).relative_to(Path.cwd()).as_posix()
100
101 url = f"{URLs.github_bot_repo}/blob/main/{file_location}{lines_extension}"
102
103 return url, file_location, first_line_no or None
104
105 async def build_embed(self, source_object: SourceType) -> Optional[Embed]:
106 """Build embed based on source object."""
107 url, location, first_line = self.get_source_link(source_object)
108
109 if isinstance(source_object, commands.HelpCommand):
110 title = "Help Command"
111 description = source_object.__doc__.splitlines()[1]
112 elif isinstance(source_object, commands.Command):
113 description = source_object.short_doc
114 title = f"Command: {source_object.qualified_name}"
115 elif isinstance(source_object, str):
116 title = f"Tag: {source_object}"
117 description = ""
118 else:
119 title = f"Cog: {source_object.qualified_name}"
120 description = source_object.description.splitlines()[0]
121
122 embed = Embed(title=title, description=description)
123 embed.add_field(name="Source Code", value=f"[Go to GitHub]({url})")
124 line_text = f":{first_line}" if first_line else ""
125 embed.set_footer(text=f"{location}{line_text}")
126
127 return embed
128
129
130 def setup(bot: Bot) -> None:
131 """Load the BotSource cog."""
132 bot.add_cog(BotSource(bot))
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/exts/info/source.py b/bot/exts/info/source.py
--- a/bot/exts/info/source.py
+++ b/bot/exts/info/source.py
@@ -14,9 +14,10 @@
class SourceConverter(commands.Converter):
"""Convert an argument into a help command, tag, command, or cog."""
- async def convert(self, ctx: commands.Context, argument: str) -> SourceType:
+ @staticmethod
+ async def convert(ctx: commands.Context, argument: str) -> SourceType:
"""Convert argument into source object."""
- if argument.lower().startswith("help"):
+ if argument.lower() == "help":
return ctx.bot.help_command
cog = ctx.bot.get_cog(argument)
| {"golden_diff": "diff --git a/bot/exts/info/source.py b/bot/exts/info/source.py\n--- a/bot/exts/info/source.py\n+++ b/bot/exts/info/source.py\n@@ -14,9 +14,10 @@\n class SourceConverter(commands.Converter):\n \"\"\"Convert an argument into a help command, tag, command, or cog.\"\"\"\n \n- async def convert(self, ctx: commands.Context, argument: str) -> SourceType:\n+ @staticmethod\n+ async def convert(ctx: commands.Context, argument: str) -> SourceType:\n \"\"\"Convert argument into source object.\"\"\"\n- if argument.lower().startswith(\"help\"):\n+ if argument.lower() == \"help\":\n return ctx.bot.help_command\n \n cog = ctx.bot.get_cog(argument)\n", "issue": "Logic in Source Command's Help Command Check\n## Description\r\n<!-- Describe the bug you've found -->\r\nIts impossible to get the source for the HelpChannel cog.\r\n\r\n## Steps to Reproduce\r\n<!-- Detail the exact list of steps to be able to encounter the bug -->\r\n`!src HelpChannel`\r\nDue to the logic of the command, all that needs to be said is `!src help` followed by anything.\r\n\r\n## Expected Behaviour\r\n<!-- Outline what you expect to occur, include screenshots if relevant -->\r\nSource link to the HelpChannel cog is shown.\r\n\r\n## Actual Behaviour\r\n<!-- Outline what occurs instead of the expected behavior, include screenshots if relevant -->\r\nHelp command source link is shown.\r\n\r\n## Known Impacted Platforms\r\n<!-- Replace [ ] with [x] to mark items -->\r\n\r\n- [x] Web\r\n- [x] Desktop\r\n- [x] Android App\r\n- [x] iOS App\r\n\r\n## Possible Solutions\r\n<!-- Detail any solutions you might have in mind to be able to resolve the bug -->\r\nThese lines are the probable cause.\r\nhttps://github.com/python-discord/bot/blob/51af1369e0d9c2ad185f0c0920b599b7187ed077/bot/exts/info/source.py#L19-L20\r\nAfter looking at sir-lancebot, it might even be possible to remove these two lines altogether.\r\n## Would you like to implement a fix?\r\n\r\n***Note: For high-priority or critical bugs, fixes may be implemented by staff.***\r\n<!-- Replace [ ] with [x] with your choice -->\r\n\r\n- [ ] I'd like to implement the bug fix\r\n- [x] Anyone can implement the bug fix\r\n\n", "before_files": [{"content": "import inspect\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Union\n\nfrom discord import Embed, utils\nfrom discord.ext import commands\n\nfrom bot.bot import Bot\nfrom bot.constants import URLs\n\nSourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]\n\n\nclass SourceConverter(commands.Converter):\n \"\"\"Convert an argument into a help command, tag, command, or cog.\"\"\"\n\n async def convert(self, ctx: commands.Context, argument: str) -> SourceType:\n \"\"\"Convert argument into source object.\"\"\"\n if argument.lower().startswith(\"help\"):\n return ctx.bot.help_command\n\n cog = ctx.bot.get_cog(argument)\n if cog:\n return cog\n\n cmd = ctx.bot.get_command(argument)\n if cmd:\n return cmd\n\n tags_cog = ctx.bot.get_cog(\"Tags\")\n show_tag = True\n\n if not tags_cog:\n show_tag = False\n elif argument.lower() in tags_cog._cache:\n return argument.lower()\n\n escaped_arg = utils.escape_markdown(argument)\n\n raise commands.BadArgument(\n f\"Unable to convert '{escaped_arg}' to valid command{', tag,' if show_tag else ''} or Cog.\"\n )\n\n\nclass BotSource(commands.Cog):\n \"\"\"Displays information about the bot's source code.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @commands.command(name=\"source\", aliases=(\"src\",))\n 
async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:\n \"\"\"Display information and a GitHub link to the source code of a command, tag, or cog.\"\"\"\n if not source_item:\n embed = Embed(title=\"Bot's GitHub Repository\")\n embed.add_field(name=\"Repository\", value=f\"[Go to GitHub]({URLs.github_bot_repo})\")\n embed.set_thumbnail(url=\"https://avatars1.githubusercontent.com/u/9919\")\n await ctx.send(embed=embed)\n return\n\n embed = await self.build_embed(source_item)\n await ctx.send(embed=embed)\n\n def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:\n \"\"\"\n Build GitHub link of source item, return this link, file location and first line number.\n\n Raise BadArgument if `source_item` is a dynamically-created object (e.g. via internal eval).\n \"\"\"\n if isinstance(source_item, commands.Command):\n source_item = inspect.unwrap(source_item.callback)\n src = source_item.__code__\n filename = src.co_filename\n elif isinstance(source_item, str):\n tags_cog = self.bot.get_cog(\"Tags\")\n filename = tags_cog._cache[source_item][\"location\"]\n else:\n src = type(source_item)\n try:\n filename = inspect.getsourcefile(src)\n except TypeError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n if not isinstance(source_item, str):\n try:\n lines, first_line_no = inspect.getsourcelines(src)\n except OSError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n lines_extension = f\"#L{first_line_no}-L{first_line_no+len(lines)-1}\"\n else:\n first_line_no = None\n lines_extension = \"\"\n\n # Handle tag file location differently than others to avoid errors in some cases\n if not first_line_no:\n file_location = Path(filename).relative_to(\"/bot/\")\n else:\n file_location = Path(filename).relative_to(Path.cwd()).as_posix()\n\n url = f\"{URLs.github_bot_repo}/blob/main/{file_location}{lines_extension}\"\n\n return url, file_location, first_line_no or None\n\n async def build_embed(self, source_object: SourceType) -> Optional[Embed]:\n \"\"\"Build embed based on source object.\"\"\"\n url, location, first_line = self.get_source_link(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n description = source_object.__doc__.splitlines()[1]\n elif isinstance(source_object, commands.Command):\n description = source_object.short_doc\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, str):\n title = f\"Tag: {source_object}\"\n description = \"\"\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = Embed(title=title, description=description)\n embed.add_field(name=\"Source Code\", value=f\"[Go to GitHub]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the BotSource cog.\"\"\"\n bot.add_cog(BotSource(bot))\n", "path": "bot/exts/info/source.py"}], "after_files": [{"content": "import inspect\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Union\n\nfrom discord import Embed, utils\nfrom discord.ext import commands\n\nfrom bot.bot import Bot\nfrom bot.constants import URLs\n\nSourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]\n\n\nclass SourceConverter(commands.Converter):\n \"\"\"Convert an 
argument into a help command, tag, command, or cog.\"\"\"\n\n @staticmethod\n async def convert(ctx: commands.Context, argument: str) -> SourceType:\n \"\"\"Convert argument into source object.\"\"\"\n if argument.lower() == \"help\":\n return ctx.bot.help_command\n\n cog = ctx.bot.get_cog(argument)\n if cog:\n return cog\n\n cmd = ctx.bot.get_command(argument)\n if cmd:\n return cmd\n\n tags_cog = ctx.bot.get_cog(\"Tags\")\n show_tag = True\n\n if not tags_cog:\n show_tag = False\n elif argument.lower() in tags_cog._cache:\n return argument.lower()\n\n escaped_arg = utils.escape_markdown(argument)\n\n raise commands.BadArgument(\n f\"Unable to convert '{escaped_arg}' to valid command{', tag,' if show_tag else ''} or Cog.\"\n )\n\n\nclass BotSource(commands.Cog):\n \"\"\"Displays information about the bot's source code.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @commands.command(name=\"source\", aliases=(\"src\",))\n async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:\n \"\"\"Display information and a GitHub link to the source code of a command, tag, or cog.\"\"\"\n if not source_item:\n embed = Embed(title=\"Bot's GitHub Repository\")\n embed.add_field(name=\"Repository\", value=f\"[Go to GitHub]({URLs.github_bot_repo})\")\n embed.set_thumbnail(url=\"https://avatars1.githubusercontent.com/u/9919\")\n await ctx.send(embed=embed)\n return\n\n embed = await self.build_embed(source_item)\n await ctx.send(embed=embed)\n\n def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:\n \"\"\"\n Build GitHub link of source item, return this link, file location and first line number.\n\n Raise BadArgument if `source_item` is a dynamically-created object (e.g. via internal eval).\n \"\"\"\n if isinstance(source_item, commands.Command):\n source_item = inspect.unwrap(source_item.callback)\n src = source_item.__code__\n filename = src.co_filename\n elif isinstance(source_item, str):\n tags_cog = self.bot.get_cog(\"Tags\")\n filename = tags_cog._cache[source_item][\"location\"]\n else:\n src = type(source_item)\n try:\n filename = inspect.getsourcefile(src)\n except TypeError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n if not isinstance(source_item, str):\n try:\n lines, first_line_no = inspect.getsourcelines(src)\n except OSError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n lines_extension = f\"#L{first_line_no}-L{first_line_no+len(lines)-1}\"\n else:\n first_line_no = None\n lines_extension = \"\"\n\n # Handle tag file location differently than others to avoid errors in some cases\n if not first_line_no:\n file_location = Path(filename).relative_to(\"/bot/\")\n else:\n file_location = Path(filename).relative_to(Path.cwd()).as_posix()\n\n url = f\"{URLs.github_bot_repo}/blob/main/{file_location}{lines_extension}\"\n\n return url, file_location, first_line_no or None\n\n async def build_embed(self, source_object: SourceType) -> Optional[Embed]:\n \"\"\"Build embed based on source object.\"\"\"\n url, location, first_line = self.get_source_link(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n description = source_object.__doc__.splitlines()[1]\n elif isinstance(source_object, commands.Command):\n description = source_object.short_doc\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, str):\n title = f\"Tag: {source_object}\"\n 
description = \"\"\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = Embed(title=title, description=description)\n embed.add_field(name=\"Source Code\", value=f\"[Go to GitHub]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the BotSource cog.\"\"\"\n bot.add_cog(BotSource(bot))\n", "path": "bot/exts/info/source.py"}]} | 1,969 | 166 |
gh_patches_debug_1323 | rasdani/github-patches | git_diff | ivy-llc__ivy-22920 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
eigvals
--- END ISSUE ---
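The one-word issue presumably asks for an `eigvals` routine in the NumPy frontend. A hedged sketch of what such a wrapper could look like, assuming `ivy.eig` returns an (eigenvalues, eigenvectors) pair as NumPy's `eig` does:

```python
# Hypothetical sketch; the decorator and import mirror the surrounding module.
@to_ivy_arrays_and_back
def eigvals(a):
    # Reuse the general eigendecomposition and keep only the eigenvalues.
    return ivy.eig(a)[0]
```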
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py`
Content:
```
1 # local
2 import ivy
3 from ivy.functional.frontends.numpy.func_wrapper import (
4 to_ivy_arrays_and_back,
5 from_zero_dim_arrays_to_scalar,
6 )
7
8
9 @to_ivy_arrays_and_back
10 def eig(a):
11 return ivy.eig(a)
12
13
14 @to_ivy_arrays_and_back
15 @from_zero_dim_arrays_to_scalar
16 def eigh(a, /, UPLO="L"):
17 return ivy.eigh(a, UPLO=UPLO)
18
19
20 @to_ivy_arrays_and_back
21 @from_zero_dim_arrays_to_scalar
22 def eigvalsh(a, /, UPLO="L"):
23 return ivy.eigvalsh(a, UPLO=UPLO)
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py
--- a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py
+++ b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py
@@ -17,6 +17,11 @@
return ivy.eigh(a, UPLO=UPLO)
+@to_ivy_arrays_and_back
+def eigvals(a):
+ return ivy.eig(a)[0]
+
+
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def eigvalsh(a, /, UPLO="L"):
| {"golden_diff": "diff --git a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n--- a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n+++ b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n@@ -17,6 +17,11 @@\n return ivy.eigh(a, UPLO=UPLO)\n \n \n+@to_ivy_arrays_and_back\n+def eigvals(a):\n+ return ivy.eig(a)[0]\n+\n+\n @to_ivy_arrays_and_back\n @from_zero_dim_arrays_to_scalar\n def eigvalsh(a, /, UPLO=\"L\"):\n", "issue": "eigvals\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n)\n\n\n@to_ivy_arrays_and_back\ndef eig(a):\n return ivy.eig(a)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigh(a, /, UPLO=\"L\"):\n return ivy.eigh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigvalsh(a, /, UPLO=\"L\"):\n return ivy.eigvalsh(a, UPLO=UPLO)\n", "path": "ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n)\n\n\n@to_ivy_arrays_and_back\ndef eig(a):\n return ivy.eig(a)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigh(a, /, UPLO=\"L\"):\n return ivy.eigh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\ndef eigvals(a):\n return ivy.eig(a)[0]\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigvalsh(a, /, UPLO=\"L\"):\n return ivy.eigvalsh(a, UPLO=UPLO)\n", "path": "ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py"}]} | 460 | 161 |
gh_patches_debug_27469 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-2963 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When deserializing core collection archives, don't throw generic exceptions
There's a condition where, during core collection, a file may be collected but not saved in the archive because it is empty or is not text or JSON. In a core-collection archive there may be metadata indicating that a file which was removed should be present in the archive. When insights-core tries to deserialize files from this type of archive, a generic exception is thrown that is not representative of the condition.
Here's an example of the log messages:
```
WARNING:insights.core.serde:Unrecognized type: insights.core.spec_factory.TextFileProvider
WARNING:insights.core.serde:Unrecognized type: insights.core.spec_factory.CommandOutputProvider
```
The `deserialize` and `hydrate` functions should distinguish between a `ContentException`, which is what occurs in this case, and other types of exceptions, and should log `ContentException` messages at DEBUG level rather than WARNING level.
--- END ISSUE ---
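A minimal, self-contained sketch of the behaviour the issue asks for (hedged, since the real patch may structure it differently): treat `ContentException` as an expected condition logged at DEBUG, and keep WARNING for everything else.

```python
import logging

log = logging.getLogger(__name__)


class ContentException(Exception):
    """Stand-in for insights.core.spec_factory.ContentException."""


def load_component(doc, hydrate_one, broker):
    """Hydrate one serialized component, demoting ContentException to DEBUG."""
    try:
        comp, results, exec_time, ser_time = hydrate_one(doc)
        if results:
            broker[comp] = results
    except ContentException as ex:
        log.debug(ex)      # expected when a file was dropped from the archive
    except Exception as ex:
        log.warning(ex)    # anything else is still surfaced loudly
```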
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/core/serde.py`
Content:
```
1 """
2 The serde module provides decorators that allow developers to register
3 serializer and deserializer functions for types. It also provides a
4 :py:class`Hydration` class that uses registered serde functions to save and
5 load objects from the file system. The Hydration class includes a
6 :py:func`Hydration.make_persister` method that returns a function appropriate
7 to register as an observer on a :py:class:`Broker`.
8 """
9 import json as ser
10 import logging
11 import os
12 import time
13 import traceback
14 from glob import glob
15 from functools import partial
16
17 from insights.core import dr
18 from insights.util import fs
19
20 log = logging.getLogger(__name__)
21
22 SERIALIZERS = {}
23 DESERIALIZERS = {}
24
25
26 def serializer(_type):
27 """
28 Decorator for serializers.
29
30 A serializer should accept two parameters: An object and a path which is
31 a directory on the filesystem where supplementary data can be stored. This
32 is most often useful for datasources. It should return a dictionary version
33 of the original object that contains only elements that can be serialized
34 to json.
35 """
36
37 def inner(func):
38 name = dr.get_name(_type)
39 if name in SERIALIZERS:
40 msg = "%s already has a serializer registered: %s"
41 raise Exception(msg % (name, dr.get_name(SERIALIZERS[name])))
42 SERIALIZERS[name] = func
43 return func
44 return inner
45
46
47 def deserializer(_type):
48 """
49 Decorator for deserializers.
50
51 A deserializer should accept three parameters: A type, a dictionary, and a
52 path that may contain supplementary data stored by its paired serializer.
53 If the serializer stores supplementary data, the relative path to it should
54 be somewhere in the dict of the second parameter.
55 """
56
57 def inner(func):
58 name = dr.get_name(_type)
59 if name in DESERIALIZERS:
60 msg = "%s already has a deserializer registered: %s"
61 raise Exception(msg % (dr.get_name(name), dr.get_name(DESERIALIZERS[name])))
62 DESERIALIZERS[name] = (_type, func)
63 return func
64 return inner
65
66
67 def get_serializer(obj):
68 """ Get a registered serializer for the given object.
69
70 This function walks the mro of obj looking for serializers.
71 Returns None if no valid serializer is found.
72 """
73 return SERIALIZERS.get(dr.get_name(type(obj)))
74
75
76 def get_deserializer(obj):
77 """ Returns a deserializer based on the fully qualified name string."""
78 return DESERIALIZERS.get(dr.get_name(type(obj)))
79
80
81 def serialize(obj, root=None):
82 to_dict = get_serializer(obj)
83 return {
84 "type": dr.get_name(type(obj)),
85 "object": to_dict(obj, root=root),
86 }
87
88
89 def deserialize(data, root=None):
90 try:
91 (_type, from_dict) = DESERIALIZERS.get(data["type"])
92 return from_dict(_type, data["object"], root=root)
93 except Exception:
94 raise Exception("Unrecognized type: %s" % data["type"])
95
96
97 def marshal(v, root=None, pool=None):
98 if v is None:
99 return
100 f = partial(serialize, root=root)
101 if isinstance(v, list):
102 if pool:
103 return list(pool.map(f, v))
104 else:
105 return [f(t) for t in v]
106 return f(v)
107
108
109 def unmarshal(data, root=None):
110 if data is None:
111 return
112 if isinstance(data, list):
113 return [deserialize(d, root=root) for d in data]
114 return deserialize(data, root=root)
115
116
117 class Hydration(object):
118 """
119 The Hydration class is responsible for saving and loading insights
120 components. It puts metadata about a component's evaluation in a metadata
121 file for the component and allows the serializer for a component to put raw
122 data beneath a working directory.
123 """
124 def __init__(self, root=None, meta_data="meta_data", data="data", pool=None):
125 self.root = root
126 self.meta_data = os.path.join(root, meta_data) if root else None
127 self.data = os.path.join(root, data) if root else None
128 self.ser_name = dr.get_base_module_name(ser)
129 self.created = False
130 self.pool = pool
131
132 def _hydrate_one(self, doc):
133 """ Returns (component, results, errors, duration) """
134 name = doc["name"]
135
136 key = dr.get_component_by_name(name)
137 if key is None:
138 raise ValueError("{} is not a loaded component.".format(name))
139 exec_time = doc["exec_time"]
140 ser_time = doc["ser_time"]
141 results = unmarshal(doc["results"], root=self.data)
142 return (key, results, exec_time, ser_time)
143
144 def hydrate(self, broker=None):
145 """
146 Loads a Broker from a previously saved one. A Broker is created if one
147 isn't provided.
148 """
149 broker = broker or dr.Broker()
150 for path in glob(os.path.join(self.meta_data, "*")):
151 try:
152 with open(path) as f:
153 doc = ser.load(f)
154 res = self._hydrate_one(doc)
155 comp, results, exec_time, ser_time = res
156 if results:
157 broker[comp] = results
158 broker.exec_times[comp] = exec_time + ser_time
159 except Exception as ex:
160 log.warning(ex)
161 return broker
162
163 def dehydrate(self, comp, broker):
164 """
165 Saves a component in the given broker to the file system.
166 """
167 if not self.meta_data:
168 raise Exception("Hydration meta_path not set. Can't dehydrate.")
169
170 if not self.created:
171 fs.ensure_path(self.meta_data, mode=0o770)
172 if self.data:
173 fs.ensure_path(self.data, mode=0o770)
174 self.created = True
175
176 c = comp
177 doc = None
178 try:
179 name = dr.get_name(c)
180 value = broker.get(c)
181 errors = [t for e in broker.exceptions.get(c, [])
182 for t in broker.tracebacks[e]]
183 doc = {
184 "name": name,
185 "exec_time": broker.exec_times.get(c),
186 "errors": errors
187 }
188
189 try:
190 start = time.time()
191 doc["results"] = marshal(value, root=self.data, pool=self.pool)
192 except Exception:
193 errors.append(traceback.format_exc())
194 doc["results"] = None
195 finally:
196 doc["ser_time"] = time.time() - start
197 except Exception as ex:
198 log.exception(ex)
199 else:
200 if doc is not None and (doc["results"] or doc["errors"]):
201 try:
202 path = os.path.join(self.meta_data, name + "." + self.ser_name)
203 with open(path, "w") as f:
204 ser.dump(doc, f)
205 except Exception as boom:
206 log.error("Could not serialize %s to %s: %r" % (name, self.ser_name, boom))
207 if path:
208 fs.remove(path)
209
210 def make_persister(self, to_persist):
211 """
212 Returns a function that hydrates components as they are evaluated. The
213 function should be registered as an observer on a Broker just before
214 execution.
215
216 Args:
217 to_persist (set): Set of components to persist. Skip everything
218 else.
219 """
220
221 if not self.meta_data:
222 raise Exception("Root not set. Can't create persister.")
223
224 def persister(c, broker):
225 if c in to_persist:
226 self.dehydrate(c, broker)
227 return persister
228
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/insights/core/serde.py b/insights/core/serde.py
--- a/insights/core/serde.py
+++ b/insights/core/serde.py
@@ -87,11 +87,11 @@
def deserialize(data, root=None):
- try:
- (_type, from_dict) = DESERIALIZERS.get(data["type"])
- return from_dict(_type, data["object"], root=root)
- except Exception:
+ type_data = DESERIALIZERS.get(data["type"])
+ if type_data is None:
raise Exception("Unrecognized type: %s" % data["type"])
+ (_type, from_dict) = type_data
+ return from_dict(_type, data["object"], root=root)
def marshal(v, root=None, pool=None):
@@ -146,6 +146,8 @@
Loads a Broker from a previously saved one. A Broker is created if one
isn't provided.
"""
+ from insights.core.spec_factory import ContentException
+
broker = broker or dr.Broker()
for path in glob(os.path.join(self.meta_data, "*")):
try:
@@ -156,6 +158,8 @@
if results:
broker[comp] = results
broker.exec_times[comp] = exec_time + ser_time
+ except ContentException as ex:
+ log.debug(ex)
except Exception as ex:
log.warning(ex)
return broker
| {"golden_diff": "diff --git a/insights/core/serde.py b/insights/core/serde.py\n--- a/insights/core/serde.py\n+++ b/insights/core/serde.py\n@@ -87,11 +87,11 @@\n \n \n def deserialize(data, root=None):\n- try:\n- (_type, from_dict) = DESERIALIZERS.get(data[\"type\"])\n- return from_dict(_type, data[\"object\"], root=root)\n- except Exception:\n+ type_data = DESERIALIZERS.get(data[\"type\"])\n+ if type_data is None:\n raise Exception(\"Unrecognized type: %s\" % data[\"type\"])\n+ (_type, from_dict) = type_data\n+ return from_dict(_type, data[\"object\"], root=root)\n \n \n def marshal(v, root=None, pool=None):\n@@ -146,6 +146,8 @@\n Loads a Broker from a previously saved one. A Broker is created if one\n isn't provided.\n \"\"\"\n+ from insights.core.spec_factory import ContentException\n+\n broker = broker or dr.Broker()\n for path in glob(os.path.join(self.meta_data, \"*\")):\n try:\n@@ -156,6 +158,8 @@\n if results:\n broker[comp] = results\n broker.exec_times[comp] = exec_time + ser_time\n+ except ContentException as ex:\n+ log.debug(ex)\n except Exception as ex:\n log.warning(ex)\n return broker\n", "issue": "When deserializing core collection archives don't throw generic exceptions\nThere's a condition where during core collection a file may be collected but it is not saved in the archive because it is empty or not text or json. In a core-collection archive there may be metadata that indicates a file that was removed should be present in the archive. When insights-core tries to deserialize files from this type of archive a generic exception will be thrown which is not representative of the condition.\r\n\r\nHere's an example of the log messages:\r\n\r\n```\r\nWARNING:insights.core.serde:Unrecognized type: insights.core.spec_factory.TextFileProvider\r\nWARNING:insights.core.serde:Unrecognized type: insights.core.spec_factory.CommandOutputProvider\r\n```\r\n\r\nThe `deserialize` and `hydrate` functions should distinguish between a `ContentException`, which is this case, or some other type of exception and only log `ContentException` messages at DEBUG level and not WARNING level.\n", "before_files": [{"content": "\"\"\"\nThe serde module provides decorators that allow developers to register\nserializer and deserializer functions for types. It also provides a\n:py:class`Hydration` class that uses registered serde functions to save and\nload objects from the file system. The Hydration class includes a\n:py:func`Hydration.make_persister` method that returns a function appropriate\nto register as an observer on a :py:class:`Broker`.\n\"\"\"\nimport json as ser\nimport logging\nimport os\nimport time\nimport traceback\nfrom glob import glob\nfrom functools import partial\n\nfrom insights.core import dr\nfrom insights.util import fs\n\nlog = logging.getLogger(__name__)\n\nSERIALIZERS = {}\nDESERIALIZERS = {}\n\n\ndef serializer(_type):\n \"\"\"\n Decorator for serializers.\n\n A serializer should accept two parameters: An object and a path which is\n a directory on the filesystem where supplementary data can be stored. This\n is most often useful for datasources. 
It should return a dictionary version\n of the original object that contains only elements that can be serialized\n to json.\n \"\"\"\n\n def inner(func):\n name = dr.get_name(_type)\n if name in SERIALIZERS:\n msg = \"%s already has a serializer registered: %s\"\n raise Exception(msg % (name, dr.get_name(SERIALIZERS[name])))\n SERIALIZERS[name] = func\n return func\n return inner\n\n\ndef deserializer(_type):\n \"\"\"\n Decorator for deserializers.\n\n A deserializer should accept three parameters: A type, a dictionary, and a\n path that may contain supplementary data stored by its paired serializer.\n If the serializer stores supplementary data, the relative path to it should\n be somewhere in the dict of the second parameter.\n \"\"\"\n\n def inner(func):\n name = dr.get_name(_type)\n if name in DESERIALIZERS:\n msg = \"%s already has a deserializer registered: %s\"\n raise Exception(msg % (dr.get_name(name), dr.get_name(DESERIALIZERS[name])))\n DESERIALIZERS[name] = (_type, func)\n return func\n return inner\n\n\ndef get_serializer(obj):\n \"\"\" Get a registered serializer for the given object.\n\n This function walks the mro of obj looking for serializers.\n Returns None if no valid serializer is found.\n \"\"\"\n return SERIALIZERS.get(dr.get_name(type(obj)))\n\n\ndef get_deserializer(obj):\n \"\"\" Returns a deserializer based on the fully qualified name string.\"\"\"\n return DESERIALIZERS.get(dr.get_name(type(obj)))\n\n\ndef serialize(obj, root=None):\n to_dict = get_serializer(obj)\n return {\n \"type\": dr.get_name(type(obj)),\n \"object\": to_dict(obj, root=root),\n }\n\n\ndef deserialize(data, root=None):\n try:\n (_type, from_dict) = DESERIALIZERS.get(data[\"type\"])\n return from_dict(_type, data[\"object\"], root=root)\n except Exception:\n raise Exception(\"Unrecognized type: %s\" % data[\"type\"])\n\n\ndef marshal(v, root=None, pool=None):\n if v is None:\n return\n f = partial(serialize, root=root)\n if isinstance(v, list):\n if pool:\n return list(pool.map(f, v))\n else:\n return [f(t) for t in v]\n return f(v)\n\n\ndef unmarshal(data, root=None):\n if data is None:\n return\n if isinstance(data, list):\n return [deserialize(d, root=root) for d in data]\n return deserialize(data, root=root)\n\n\nclass Hydration(object):\n \"\"\"\n The Hydration class is responsible for saving and loading insights\n components. It puts metadata about a component's evaluation in a metadata\n file for the component and allows the serializer for a component to put raw\n data beneath a working directory.\n \"\"\"\n def __init__(self, root=None, meta_data=\"meta_data\", data=\"data\", pool=None):\n self.root = root\n self.meta_data = os.path.join(root, meta_data) if root else None\n self.data = os.path.join(root, data) if root else None\n self.ser_name = dr.get_base_module_name(ser)\n self.created = False\n self.pool = pool\n\n def _hydrate_one(self, doc):\n \"\"\" Returns (component, results, errors, duration) \"\"\"\n name = doc[\"name\"]\n\n key = dr.get_component_by_name(name)\n if key is None:\n raise ValueError(\"{} is not a loaded component.\".format(name))\n exec_time = doc[\"exec_time\"]\n ser_time = doc[\"ser_time\"]\n results = unmarshal(doc[\"results\"], root=self.data)\n return (key, results, exec_time, ser_time)\n\n def hydrate(self, broker=None):\n \"\"\"\n Loads a Broker from a previously saved one. 
A Broker is created if one\n isn't provided.\n \"\"\"\n broker = broker or dr.Broker()\n for path in glob(os.path.join(self.meta_data, \"*\")):\n try:\n with open(path) as f:\n doc = ser.load(f)\n res = self._hydrate_one(doc)\n comp, results, exec_time, ser_time = res\n if results:\n broker[comp] = results\n broker.exec_times[comp] = exec_time + ser_time\n except Exception as ex:\n log.warning(ex)\n return broker\n\n def dehydrate(self, comp, broker):\n \"\"\"\n Saves a component in the given broker to the file system.\n \"\"\"\n if not self.meta_data:\n raise Exception(\"Hydration meta_path not set. Can't dehydrate.\")\n\n if not self.created:\n fs.ensure_path(self.meta_data, mode=0o770)\n if self.data:\n fs.ensure_path(self.data, mode=0o770)\n self.created = True\n\n c = comp\n doc = None\n try:\n name = dr.get_name(c)\n value = broker.get(c)\n errors = [t for e in broker.exceptions.get(c, [])\n for t in broker.tracebacks[e]]\n doc = {\n \"name\": name,\n \"exec_time\": broker.exec_times.get(c),\n \"errors\": errors\n }\n\n try:\n start = time.time()\n doc[\"results\"] = marshal(value, root=self.data, pool=self.pool)\n except Exception:\n errors.append(traceback.format_exc())\n doc[\"results\"] = None\n finally:\n doc[\"ser_time\"] = time.time() - start\n except Exception as ex:\n log.exception(ex)\n else:\n if doc is not None and (doc[\"results\"] or doc[\"errors\"]):\n try:\n path = os.path.join(self.meta_data, name + \".\" + self.ser_name)\n with open(path, \"w\") as f:\n ser.dump(doc, f)\n except Exception as boom:\n log.error(\"Could not serialize %s to %s: %r\" % (name, self.ser_name, boom))\n if path:\n fs.remove(path)\n\n def make_persister(self, to_persist):\n \"\"\"\n Returns a function that hydrates components as they are evaluated. The\n function should be registered as an observer on a Broker just before\n execution.\n\n Args:\n to_persist (set): Set of components to persist. Skip everything\n else.\n \"\"\"\n\n if not self.meta_data:\n raise Exception(\"Root not set. Can't create persister.\")\n\n def persister(c, broker):\n if c in to_persist:\n self.dehydrate(c, broker)\n return persister\n", "path": "insights/core/serde.py"}], "after_files": [{"content": "\"\"\"\nThe serde module provides decorators that allow developers to register\nserializer and deserializer functions for types. It also provides a\n:py:class`Hydration` class that uses registered serde functions to save and\nload objects from the file system. The Hydration class includes a\n:py:func`Hydration.make_persister` method that returns a function appropriate\nto register as an observer on a :py:class:`Broker`.\n\"\"\"\nimport json as ser\nimport logging\nimport os\nimport time\nimport traceback\nfrom glob import glob\nfrom functools import partial\n\nfrom insights.core import dr\nfrom insights.util import fs\n\nlog = logging.getLogger(__name__)\n\nSERIALIZERS = {}\nDESERIALIZERS = {}\n\n\ndef serializer(_type):\n \"\"\"\n Decorator for serializers.\n\n A serializer should accept two parameters: An object and a path which is\n a directory on the filesystem where supplementary data can be stored. This\n is most often useful for datasources. 
It should return a dictionary version\n of the original object that contains only elements that can be serialized\n to json.\n \"\"\"\n\n def inner(func):\n name = dr.get_name(_type)\n if name in SERIALIZERS:\n msg = \"%s already has a serializer registered: %s\"\n raise Exception(msg % (name, dr.get_name(SERIALIZERS[name])))\n SERIALIZERS[name] = func\n return func\n return inner\n\n\ndef deserializer(_type):\n \"\"\"\n Decorator for deserializers.\n\n A deserializer should accept three parameters: A type, a dictionary, and a\n path that may contain supplementary data stored by its paired serializer.\n If the serializer stores supplementary data, the relative path to it should\n be somewhere in the dict of the second parameter.\n \"\"\"\n\n def inner(func):\n name = dr.get_name(_type)\n if name in DESERIALIZERS:\n msg = \"%s already has a deserializer registered: %s\"\n raise Exception(msg % (dr.get_name(name), dr.get_name(DESERIALIZERS[name])))\n DESERIALIZERS[name] = (_type, func)\n return func\n return inner\n\n\ndef get_serializer(obj):\n \"\"\" Get a registered serializer for the given object.\n\n This function walks the mro of obj looking for serializers.\n Returns None if no valid serializer is found.\n \"\"\"\n return SERIALIZERS.get(dr.get_name(type(obj)))\n\n\ndef get_deserializer(obj):\n \"\"\" Returns a deserializer based on the fully qualified name string.\"\"\"\n return DESERIALIZERS.get(dr.get_name(type(obj)))\n\n\ndef serialize(obj, root=None):\n to_dict = get_serializer(obj)\n return {\n \"type\": dr.get_name(type(obj)),\n \"object\": to_dict(obj, root=root),\n }\n\n\ndef deserialize(data, root=None):\n type_data = DESERIALIZERS.get(data[\"type\"])\n if type_data is None:\n raise Exception(\"Unrecognized type: %s\" % data[\"type\"])\n (_type, from_dict) = type_data\n return from_dict(_type, data[\"object\"], root=root)\n\n\ndef marshal(v, root=None, pool=None):\n if v is None:\n return\n f = partial(serialize, root=root)\n if isinstance(v, list):\n if pool:\n return list(pool.map(f, v))\n else:\n return [f(t) for t in v]\n return f(v)\n\n\ndef unmarshal(data, root=None):\n if data is None:\n return\n if isinstance(data, list):\n return [deserialize(d, root=root) for d in data]\n return deserialize(data, root=root)\n\n\nclass Hydration(object):\n \"\"\"\n The Hydration class is responsible for saving and loading insights\n components. It puts metadata about a component's evaluation in a metadata\n file for the component and allows the serializer for a component to put raw\n data beneath a working directory.\n \"\"\"\n def __init__(self, root=None, meta_data=\"meta_data\", data=\"data\", pool=None):\n self.root = root\n self.meta_data = os.path.join(root, meta_data) if root else None\n self.data = os.path.join(root, data) if root else None\n self.ser_name = dr.get_base_module_name(ser)\n self.created = False\n self.pool = pool\n\n def _hydrate_one(self, doc):\n \"\"\" Returns (component, results, errors, duration) \"\"\"\n name = doc[\"name\"]\n\n key = dr.get_component_by_name(name)\n if key is None:\n raise ValueError(\"{} is not a loaded component.\".format(name))\n exec_time = doc[\"exec_time\"]\n ser_time = doc[\"ser_time\"]\n results = unmarshal(doc[\"results\"], root=self.data)\n return (key, results, exec_time, ser_time)\n\n def hydrate(self, broker=None):\n \"\"\"\n Loads a Broker from a previously saved one. 
A Broker is created if one\n isn't provided.\n \"\"\"\n from insights.core.spec_factory import ContentException\n\n broker = broker or dr.Broker()\n for path in glob(os.path.join(self.meta_data, \"*\")):\n try:\n with open(path) as f:\n doc = ser.load(f)\n res = self._hydrate_one(doc)\n comp, results, exec_time, ser_time = res\n if results:\n broker[comp] = results\n broker.exec_times[comp] = exec_time + ser_time\n except ContentException as ex:\n log.debug(ex)\n except Exception as ex:\n log.warning(ex)\n return broker\n\n def dehydrate(self, comp, broker):\n \"\"\"\n Saves a component in the given broker to the file system.\n \"\"\"\n if not self.meta_data:\n raise Exception(\"Hydration meta_path not set. Can't dehydrate.\")\n\n if not self.created:\n fs.ensure_path(self.meta_data, mode=0o770)\n if self.data:\n fs.ensure_path(self.data, mode=0o770)\n self.created = True\n\n c = comp\n doc = None\n try:\n name = dr.get_name(c)\n value = broker.get(c)\n errors = [t for e in broker.exceptions.get(c, [])\n for t in broker.tracebacks[e]]\n doc = {\n \"name\": name,\n \"exec_time\": broker.exec_times.get(c),\n \"errors\": errors\n }\n\n try:\n start = time.time()\n doc[\"results\"] = marshal(value, root=self.data, pool=self.pool)\n except Exception:\n errors.append(traceback.format_exc())\n doc[\"results\"] = None\n finally:\n doc[\"ser_time\"] = time.time() - start\n except Exception as ex:\n log.exception(ex)\n else:\n if doc is not None and (doc[\"results\"] or doc[\"errors\"]):\n try:\n path = os.path.join(self.meta_data, name + \".\" + self.ser_name)\n with open(path, \"w\") as f:\n ser.dump(doc, f)\n except Exception as boom:\n log.error(\"Could not serialize %s to %s: %r\" % (name, self.ser_name, boom))\n if path:\n fs.remove(path)\n\n def make_persister(self, to_persist):\n \"\"\"\n Returns a function that hydrates components as they are evaluated. The\n function should be registered as an observer on a Broker just before\n execution.\n\n Args:\n to_persist (set): Set of components to persist. Skip everything\n else.\n \"\"\"\n\n if not self.meta_data:\n raise Exception(\"Root not set. Can't create persister.\")\n\n def persister(c, broker):\n if c in to_persist:\n self.dehydrate(c, broker)\n return persister\n", "path": "insights/core/serde.py"}]} | 2,678 | 326 |
gh_patches_debug_25344 | rasdani/github-patches | git_diff | fedora-infra__bodhi-2358 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot run database migrations on the 3.7 branch
I built a beta out of the ```HEAD``` of the ```3.7``` branch, and the migrations fail to run:
```
[root@bodhi-backend01 bowlofeggs][STG]# /usr/bin/alembic -c /etc/bodhi/alembic.ini upgrade head
INFO [alembic.runtime.migration] Context impl PostgresqlImpl.
INFO [alembic.runtime.migration] Will assume transactional DDL.
INFO [alembic.env] Emitting SQL to allow for global DDL locking with BDR
/usr/lib/python2.7/site-packages/alembic/util/messaging.py:69: UserWarning: Revision be25565a1211 referenced from be25565a1211 -> 59c0f5fbc1b2 (head), Add a greenwave_unsatisfied_requirements column to the updates table. is not present
warnings.warn(msg)
Traceback (most recent call last):
File "/usr/bin/alembic", line 12, in <module>
sys.exit(load_entry_point('alembic', 'console_scripts', 'alembic')())
File "/usr/lib/python2.7/site-packages/alembic/config.py", line 479, in main
CommandLine(prog=prog).main(argv=argv)
File "/usr/lib/python2.7/site-packages/alembic/config.py", line 473, in main
self.run_cmd(cfg, options)
File "/usr/lib/python2.7/site-packages/alembic/config.py", line 456, in run_cmd
**dict((k, getattr(options, k, None)) for k in kwarg)
File "/usr/lib/python2.7/site-packages/alembic/command.py", line 254, in upgrade
script.run_env()
File "/usr/lib/python2.7/site-packages/alembic/script/base.py", line 425, in run_env
util.load_python_file(self.dir, 'env.py')
File "/usr/lib/python2.7/site-packages/alembic/util/pyfiles.py", line 81, in load_python_file
module = load_module_py(module_id, path)
File "/usr/lib/python2.7/site-packages/alembic/util/compat.py", line 141, in load_module_py
mod = imp.load_source(module_id, path, fp)
File "/usr/lib/python2.7/site-packages/bodhi/server/migrations/env.py", line 112, in <module>
run_migrations_online()
File "/usr/lib/python2.7/site-packages/bodhi/server/migrations/env.py", line 104, in run_migrations_online
context.run_migrations()
File "<string>", line 8, in run_migrations
File "/usr/lib/python2.7/site-packages/alembic/runtime/environment.py", line 836, in run_migrations
self.get_context().run_migrations(**kw)
File "/usr/lib/python2.7/site-packages/alembic/runtime/migration.py", line 321, in run_migrations
for step in self._migrations_fn(heads, self):
File "/usr/lib/python2.7/site-packages/alembic/command.py", line 243, in upgrade
return script._upgrade_revs(revision, rev)
File "/usr/lib/python2.7/site-packages/alembic/script/base.py", line 334, in _upgrade_revs
revs = list(revs)
File "/usr/lib/python2.7/site-packages/alembic/script/revision.py", line 645, in _iterate_revisions
requested_lowers = self.get_revisions(lower)
File "/usr/lib/python2.7/site-packages/alembic/script/revision.py", line 299, in get_revisions
return sum([self.get_revisions(id_elem) for id_elem in id_], ())
File "/usr/lib/python2.7/site-packages/alembic/script/revision.py", line 301, in get_revisions
resolved_id, branch_label = self._resolve_revision_number(id_)
File "/usr/lib/python2.7/site-packages/alembic/script/revision.py", line 437, in _resolve_revision_number
self._revision_map
File "/usr/lib/python2.7/site-packages/alembic/util/langhelpers.py", line 239, in __get__
obj.__dict__[self.__name__] = result = self.fget(obj)
File "/usr/lib/python2.7/site-packages/alembic/script/revision.py", line 152, in _revision_map
down_revision = map_[downrev]
KeyError: 'be25565a1211'
```
It sounds like there's a migration on ```develop``` that is not on the ```3.7``` branch, and when I cherry-picked the migration back to ```3.7``` it now references a migration that does not exist. To fix this, I'll need to shuffle the order of the migrations.
--- END ISSUE ---
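A hedged sketch of the re-parenting the reporter describes: each cherry-picked Alembic revision's `down_revision` must name a revision that actually exists on the 3.7 branch, so the chain has to be shuffled rather than copied verbatim from develop.

```python
# Hypothetical revision header; the concrete ids depend on which revisions
# are present on the 3.7 branch. The point is only that down_revision must
# not reference the missing 'be25565a1211'.
revision = '59c0f5fbc1b2'
down_revision = 'c21dd18b161a'  # an id that exists on 3.7
```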
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py`
Content:
```
1 # Copyright (c) 2018 Red Hat, Inc.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 """
19 Add a greenwave_unsatisfied_requirements column to the updates table.
20
21 Revision ID: 59c0f5fbc1b2
22 Revises: be25565a1211
23 Create Date: 2018-05-01 15:37:07.346034
24 """
25 from alembic import op
26 import sqlalchemy as sa
27
28
29 # revision identifiers, used by Alembic.
30 revision = '59c0f5fbc1b2'
31 down_revision = 'be25565a1211'
32
33
34 def upgrade():
35 """Add a greenwave_unsatisfied_requirements to the updates table."""
36 op.add_column('updates',
37 sa.Column('greenwave_unsatisfied_requirements', sa.UnicodeText(), nullable=True))
38
39
40 def downgrade():
41 """Drop the greenwave_unsatisfied_requirements from the updates table."""
42 op.drop_column('updates', 'greenwave_unsatisfied_requirements')
43
```
Path: `bodhi/server/migrations/versions/22858ba91115_add_new_side_tag_update_states.py`
Content:
```
1 # Copyright (c) 2018 Red Hat, Inc.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 """
19 Add new side tag update states.
20
21 Revision ID: 22858ba91115
22 Revises: c21dd18b161a
23 Create Date: 2018-04-10 21:49:26.209484
24 """
25 from alembic import op
26 from sqlalchemy import exc
27
28
29 # revision identifiers, used by Alembic.
30 revision = '22858ba91115'
31 down_revision = 'c21dd18b161a'
32
33
34 def upgrade():
35 """
36 Add side tag enums to the update_status enum.
37
38 Side tags add three new enums to the update_status enum: side_tag_active, side_tag_merging, and
39 side_tag_expired.
40 """
41 op.execute('COMMIT') # See https://bitbucket.org/zzzeek/alembic/issue/123
42 try:
43 # This will raise a ProgrammingError if the DB server doesn't use BDR.
44 op.execute('SHOW bdr.permit_ddl_locking')
45 # This server uses BDR, so let's ask for a DDL lock.
46 op.execute('SET LOCAL bdr.permit_ddl_locking = true')
47 except exc.ProgrammingError:
48 # This server doesn't use BDR, so no problem.
49 pass
50 op.execute("ALTER TYPE ck_update_status ADD VALUE 'side_tag_expired' AFTER 'processing'")
51 op.execute("ALTER TYPE ck_update_status ADD VALUE 'side_tag_active' AFTER 'processing'")
52
53
54 def downgrade():
55 """Raise an exception explaining that this migration cannot be reversed."""
56 raise NotImplemented('This migration cannot be reversed.')
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bodhi/server/migrations/versions/22858ba91115_add_new_side_tag_update_states.py b/bodhi/server/migrations/versions/22858ba91115_add_new_side_tag_update_states.py
--- a/bodhi/server/migrations/versions/22858ba91115_add_new_side_tag_update_states.py
+++ b/bodhi/server/migrations/versions/22858ba91115_add_new_side_tag_update_states.py
@@ -19,7 +19,7 @@
Add new side tag update states.
Revision ID: 22858ba91115
-Revises: c21dd18b161a
+Revises: 59c0f5fbc1b2
Create Date: 2018-04-10 21:49:26.209484
"""
from alembic import op
@@ -28,7 +28,7 @@
# revision identifiers, used by Alembic.
revision = '22858ba91115'
-down_revision = 'c21dd18b161a'
+down_revision = '59c0f5fbc1b2'
def upgrade():
diff --git a/bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py b/bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py
--- a/bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py
+++ b/bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py
@@ -19,7 +19,7 @@
Add a greenwave_unsatisfied_requirements column to the updates table.
Revision ID: 59c0f5fbc1b2
-Revises: be25565a1211
+Revises: c21dd18b161a
Create Date: 2018-05-01 15:37:07.346034
"""
from alembic import op
@@ -28,7 +28,7 @@
# revision identifiers, used by Alembic.
revision = '59c0f5fbc1b2'
-down_revision = 'be25565a1211'
+down_revision = 'c21dd18b161a'
def upgrade():
| {"golden_diff": "diff --git a/bodhi/server/migrations/versions/22858ba91115_add_new_side_tag_update_states.py b/bodhi/server/migrations/versions/22858ba91115_add_new_side_tag_update_states.py\n--- a/bodhi/server/migrations/versions/22858ba91115_add_new_side_tag_update_states.py\n+++ b/bodhi/server/migrations/versions/22858ba91115_add_new_side_tag_update_states.py\n@@ -19,7 +19,7 @@\n Add new side tag update states.\n \n Revision ID: 22858ba91115\n-Revises: c21dd18b161a\n+Revises: 59c0f5fbc1b2\n Create Date: 2018-04-10 21:49:26.209484\n \"\"\"\n from alembic import op\n@@ -28,7 +28,7 @@\n \n # revision identifiers, used by Alembic.\n revision = '22858ba91115'\n-down_revision = 'c21dd18b161a'\n+down_revision = '59c0f5fbc1b2'\n \n \n def upgrade():\ndiff --git a/bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py b/bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py\n--- a/bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py\n+++ b/bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py\n@@ -19,7 +19,7 @@\n Add a greenwave_unsatisfied_requirements column to the updates table.\n \n Revision ID: 59c0f5fbc1b2\n-Revises: be25565a1211\n+Revises: c21dd18b161a\n Create Date: 2018-05-01 15:37:07.346034\n \"\"\"\n from alembic import op\n@@ -28,7 +28,7 @@\n \n # revision identifiers, used by Alembic.\n revision = '59c0f5fbc1b2'\n-down_revision = 'be25565a1211'\n+down_revision = 'c21dd18b161a'\n \n \n def upgrade():\n", "issue": "Cannot run database migrations on the 3.7 branch\nI built a beta out of the ```HEAD``` of the ```3.7``` branch, and the migrations fail to run:\r\n\r\n```\r\n[root@bodhi-backend01 bowlofeggs][STG]# /usr/bin/alembic -c /etc/bodhi/alembic.ini upgrade head\r\nINFO [alembic.runtime.migration] Context impl PostgresqlImpl.\r\nINFO [alembic.runtime.migration] Will assume transactional DDL.\r\nINFO [alembic.env] Emitting SQL to allow for global DDL locking with BDR\r\n/usr/lib/python2.7/site-packages/alembic/util/messaging.py:69: UserWarning: Revision be25565a1211 referenced from be25565a1211 -> 59c0f5fbc1b2 (head), Add a greenwave_unsatisfied_requirements column to the updates table. 
is not present\r\n warnings.warn(msg)\r\nTraceback (most recent call last):\r\n File \"/usr/bin/alembic\", line 12, in <module>\r\n sys.exit(load_entry_point('alembic', 'console_scripts', 'alembic')())\r\n File \"/usr/lib/python2.7/site-packages/alembic/config.py\", line 479, in main\r\n CommandLine(prog=prog).main(argv=argv)\r\n File \"/usr/lib/python2.7/site-packages/alembic/config.py\", line 473, in main\r\n self.run_cmd(cfg, options)\r\n File \"/usr/lib/python2.7/site-packages/alembic/config.py\", line 456, in run_cmd\r\n **dict((k, getattr(options, k, None)) for k in kwarg)\r\n File \"/usr/lib/python2.7/site-packages/alembic/command.py\", line 254, in upgrade\r\n script.run_env()\r\n File \"/usr/lib/python2.7/site-packages/alembic/script/base.py\", line 425, in run_env\r\n util.load_python_file(self.dir, 'env.py')\r\n File \"/usr/lib/python2.7/site-packages/alembic/util/pyfiles.py\", line 81, in load_python_file\r\n module = load_module_py(module_id, path)\r\n File \"/usr/lib/python2.7/site-packages/alembic/util/compat.py\", line 141, in load_module_py\r\n mod = imp.load_source(module_id, path, fp)\r\n File \"/usr/lib/python2.7/site-packages/bodhi/server/migrations/env.py\", line 112, in <module>\r\n run_migrations_online()\r\n File \"/usr/lib/python2.7/site-packages/bodhi/server/migrations/env.py\", line 104, in run_migrations_online\r\n context.run_migrations()\r\n File \"<string>\", line 8, in run_migrations\r\n File \"/usr/lib/python2.7/site-packages/alembic/runtime/environment.py\", line 836, in run_migrations\r\n self.get_context().run_migrations(**kw)\r\n File \"/usr/lib/python2.7/site-packages/alembic/runtime/migration.py\", line 321, in run_migrations\r\n for step in self._migrations_fn(heads, self):\r\n File \"/usr/lib/python2.7/site-packages/alembic/command.py\", line 243, in upgrade\r\n return script._upgrade_revs(revision, rev)\r\n File \"/usr/lib/python2.7/site-packages/alembic/script/base.py\", line 334, in _upgrade_revs\r\n revs = list(revs)\r\n File \"/usr/lib/python2.7/site-packages/alembic/script/revision.py\", line 645, in _iterate_revisions\r\n requested_lowers = self.get_revisions(lower)\r\n File \"/usr/lib/python2.7/site-packages/alembic/script/revision.py\", line 299, in get_revisions\r\n return sum([self.get_revisions(id_elem) for id_elem in id_], ())\r\n File \"/usr/lib/python2.7/site-packages/alembic/script/revision.py\", line 301, in get_revisions\r\n resolved_id, branch_label = self._resolve_revision_number(id_)\r\n File \"/usr/lib/python2.7/site-packages/alembic/script/revision.py\", line 437, in _resolve_revision_number\r\n self._revision_map\r\n File \"/usr/lib/python2.7/site-packages/alembic/util/langhelpers.py\", line 239, in __get__\r\n obj.__dict__[self.__name__] = result = self.fget(obj)\r\n File \"/usr/lib/python2.7/site-packages/alembic/script/revision.py\", line 152, in _revision_map\r\n down_revision = map_[downrev]\r\nKeyError: 'be25565a1211'\r\n```\r\n\r\nIt sounds like there's a migration on ```develop``` that is not on the ```3.7``` branch, and when I cherry-picked the migration back to ```3.7``` it now references a migration that does not exist. 
To fix this, I'll need to shuffle the order of the migrations.\n", "before_files": [{"content": "# Copyright (c) 2018 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nAdd a greenwave_unsatisfied_requirements column to the updates table.\n\nRevision ID: 59c0f5fbc1b2\nRevises: be25565a1211\nCreate Date: 2018-05-01 15:37:07.346034\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '59c0f5fbc1b2'\ndown_revision = 'be25565a1211'\n\n\ndef upgrade():\n \"\"\"Add a greenwave_unsatisfied_requirements to the updates table.\"\"\"\n op.add_column('updates',\n sa.Column('greenwave_unsatisfied_requirements', sa.UnicodeText(), nullable=True))\n\n\ndef downgrade():\n \"\"\"Drop the greenwave_unsatisfied_requirements from the updates table.\"\"\"\n op.drop_column('updates', 'greenwave_unsatisfied_requirements')\n", "path": "bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py"}, {"content": "# Copyright (c) 2018 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nAdd new side tag update states.\n\nRevision ID: 22858ba91115\nRevises: c21dd18b161a\nCreate Date: 2018-04-10 21:49:26.209484\n\"\"\"\nfrom alembic import op\nfrom sqlalchemy import exc\n\n\n# revision identifiers, used by Alembic.\nrevision = '22858ba91115'\ndown_revision = 'c21dd18b161a'\n\n\ndef upgrade():\n \"\"\"\n Add side tag enums to the update_status enum.\n\n Side tags add three new enums to the update_status enum: side_tag_active, side_tag_merging, and\n side_tag_expired.\n \"\"\"\n op.execute('COMMIT') # See https://bitbucket.org/zzzeek/alembic/issue/123\n try:\n # This will raise a ProgrammingError if the DB server doesn't use BDR.\n op.execute('SHOW bdr.permit_ddl_locking')\n # This server uses BDR, so let's ask for a DDL lock.\n op.execute('SET LOCAL bdr.permit_ddl_locking = true')\n except exc.ProgrammingError:\n # This server doesn't use BDR, so no problem.\n pass\n op.execute(\"ALTER TYPE ck_update_status ADD VALUE 'side_tag_expired' AFTER 'processing'\")\n op.execute(\"ALTER TYPE ck_update_status ADD VALUE 'side_tag_active' AFTER 'processing'\")\n\n\ndef downgrade():\n \"\"\"Raise an exception explaining that this migration cannot be reversed.\"\"\"\n raise NotImplemented('This migration cannot be reversed.')\n", "path": "bodhi/server/migrations/versions/22858ba91115_add_new_side_tag_update_states.py"}], "after_files": [{"content": "# Copyright (c) 2018 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nAdd a greenwave_unsatisfied_requirements column to the updates table.\n\nRevision ID: 59c0f5fbc1b2\nRevises: c21dd18b161a\nCreate Date: 2018-05-01 15:37:07.346034\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '59c0f5fbc1b2'\ndown_revision = 'c21dd18b161a'\n\n\ndef upgrade():\n \"\"\"Add a greenwave_unsatisfied_requirements to the updates table.\"\"\"\n op.add_column('updates',\n sa.Column('greenwave_unsatisfied_requirements', sa.UnicodeText(), nullable=True))\n\n\ndef downgrade():\n \"\"\"Drop the greenwave_unsatisfied_requirements from the updates table.\"\"\"\n op.drop_column('updates', 'greenwave_unsatisfied_requirements')\n", "path": "bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py"}, {"content": "# Copyright (c) 2018 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nAdd new side tag update states.\n\nRevision ID: 22858ba91115\nRevises: 59c0f5fbc1b2\nCreate Date: 2018-04-10 21:49:26.209484\n\"\"\"\nfrom alembic import op\nfrom sqlalchemy import exc\n\n\n# revision identifiers, used by Alembic.\nrevision = '22858ba91115'\ndown_revision = '59c0f5fbc1b2'\n\n\ndef upgrade():\n \"\"\"\n Add side tag enums to the update_status enum.\n\n Side tags add three new enums to the update_status enum: side_tag_active, side_tag_merging, and\n side_tag_expired.\n \"\"\"\n op.execute('COMMIT') # See https://bitbucket.org/zzzeek/alembic/issue/123\n try:\n # This will raise a ProgrammingError if the DB server doesn't use BDR.\n op.execute('SHOW bdr.permit_ddl_locking')\n # This server uses BDR, so let's ask for a DDL lock.\n op.execute('SET LOCAL bdr.permit_ddl_locking = true')\n except exc.ProgrammingError:\n # This server doesn't use BDR, so no problem.\n pass\n op.execute(\"ALTER TYPE ck_update_status ADD VALUE 'side_tag_expired' AFTER 'processing'\")\n op.execute(\"ALTER TYPE ck_update_status ADD VALUE 'side_tag_active' AFTER 'processing'\")\n\n\ndef downgrade():\n \"\"\"Raise an exception explaining that this migration cannot be reversed.\"\"\"\n raise NotImplemented('This migration cannot be reversed.')\n", "path": "bodhi/server/migrations/versions/22858ba91115_add_new_side_tag_update_states.py"}]} | 2,622 | 622 |
gh_patches_debug_38903 | rasdani/github-patches | git_diff | facebookresearch__fairscale-89 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[feat] OSS: make it compatible with nvidia's AMP
## 🚀 Feature
AMP is using a load(get()) trick to make sure that the state tensors are cast in the right format, which breaks OSS assumptions; see https://github.com/NVIDIA/apex/blob/2ec84ebdca59278eaf15e8ddf32476d9d6d8b904/apex/amp/_initialize.py#L205
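For reference, the trick boils down to round-tripping the optimizer state through `state_dict()`/`load_state_dict()`. A rough sketch of the pattern (not the exact apex code):

```python
# Rough sketch of the re-cast trick apex relies on (see the link above), not the exact code.
state = optimizer.state_dict()    # with OSS this currently asserts unless
                                  # consolidate_state_dict() was called first, and then
                                  # returns the consolidated, partitioned layout
optimizer.load_state_dict(state)  # with OSS this expects that partitioned layout, so the
                                  # plain round-trip breaks the assumptions mentioned above
```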
## Motivation
nvidia's AMP is getting deprecated in favour of PyTorch's native AMP, but some of its features are not there yet. It brings a lot of speed to the table, which is necessary for big jobs.
## Pitch
Make it happen (tm)
## Alternatives
Do not support AMP
## Additional context
<!-- Add any other context or screenshots about the feature request here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fairscale/optim/oss.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 import copy
7 from itertools import chain
8 import logging
9 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type
10
11 import torch
12 import torch.distributed as dist
13 from torch.optim import SGD, Optimizer
14
15 from .utils import broadcast_object, recursive_copy_to_device
16
17 if TYPE_CHECKING: # pragma: no cover
18 from torch.optim.optimizer import _params_t
19 else:
20 _params_t = Any
21
22
23 class OSS(Optimizer):
24 """Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`
25 optimizer and shards its state as described by ZeRO_.
26 ::
27 opt = OSS(params, optim=torch.optim.Adam, lr=0.01)
28
29 .. _ZeRO: https://arxiv.org/abs/1910.02054
30
31 We use a greedy algorithm to pack a number of parameters
32 at each rank. Each parameter belongs to a single rank and
33 is not divided among rank.
34
35 After each rank completed their parameter update, they broadcast
36 the new version of the parameters to all other ranks to synchronize
37 the parameters for next round forward/backward computation.
38
39 Args:
40 params (list of tensors):
41 parameters to be optimized
42 Keyword Args:
43 optim (torch.nn.Optimizer):
44 optimizer to shard (default: SGD)
45 group (group):
46 torch.distributed group (default: group.WORLD)
47 """
48
49 optim: Optimizer
50 in_super_constructor: bool
51
52 def __init__(self, params: _params_t, optim: Type[Optimizer] = SGD, group: Any = dist.group.WORLD, **defaults: Any):
53 # Hold all the model params in the root .param_groups
54 self.in_super_constructor = True
55 super().__init__(params, defaults)
56 self.in_super_constructor = False
57
58 # Build the wrapped optimizer, responsible for a shard of the params
59 self.group = group
60 self.rank = dist.get_rank(group)
61 split_param_groups = self.partition_parameters()
62 self.optim = optim(split_param_groups[self.rank], **defaults)
63
64 # Optional consolidated optimizer state
65 self._all_states: List[Dict[str, Any]] = []
66
67 # Current device is set by the parameters allocated to this rank
68 self._device = split_param_groups[self.rank][0]["params"][0].device
69
70 # Sync local and global param_groups keys
71 for global_group, local_group in zip(self.param_groups, self.optim.param_groups):
72 for k, v in local_group.items():
73 if k != "params":
74 global_group[k] = v
75
76 def partition_parameters(self) -> List[List[dict]]:
77 """Partitions parameters across distributed ranks.
78
79 Returns a list of param_groups (which is a list of dict) where each
80 element of the list contains the param_groups for a rank. Element 0
81 corresponds to rank 0, etc. We need all the ranks for the broadcast
82 inside step().
83 """
84 world_size = dist.get_world_size(self.group)
85 param_groups: List[List] = [list() for _ in range(world_size)]
86 sizes = [0] * world_size
87 for param_group in self.param_groups:
88 param_lists: List[List] = [list() for _ in range(world_size)]
89 for param in param_group["params"]:
90 # Add this param to rank with smallest size.
91 rank = sizes.index(min(sizes))
92 param_lists[rank].append(param)
93 sizes[rank] += param.numel()
94 for rank, params in enumerate(param_lists):
95 param_group_rank = copy.copy(param_group)
96 param_group_rank["params"] = params
97 param_groups[rank].append(param_group_rank)
98 return param_groups
99
100 # NOTE(msb) We add a kwargs in order to support Optimizer sub-classes that support extra kwargs.
101 # For example, the apex library contains fused optimizers with a step that supports extra kwargs.
102 def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:
103 # Sync oss param_groups attributes in case they've been updated by a scheduler.
104 self._sync_param_groups()
105
106 # Run the optimizer step on this shard only:
107 if closure is not None:
108 loss = self.optim.step(closure=closure, **kwargs) # type: ignore
109 else:
110 loss = self.optim.step(**kwargs)
111
112 # Sync all the states. Broadcast requests are issued async, we check completeness before moving on
113 requests = []
114 for rank, param_groups in enumerate(self.partition_parameters()):
115 for param_group in param_groups:
116 for param in param_group["params"]:
117 requests.append(dist.broadcast(tensor=param, src=rank, group=self.group, async_op=True))
118
119 _ = list(map(lambda x: x.wait(), requests))
120 return loss
121
122 def local_state_dict(self) -> dict:
123 """ Gets this rank's state_dict. """
124 return self.optim.state_dict()
125
126 def consolidate_state_dict(self, recipient_rank: int = 0) -> None:
127 """ Update the consolidated state_dict list, one per rank.
128
129 This needs to be called on all replicas """
130
131 # Sync lr and other attributes in case its been updated
132 self._sync_param_groups()
133
134 if self.rank == recipient_rank:
135 # Pull the sharded state from all the other replicas
136 # Store all the states in order, rank by rank
137 logging.debug("Pulling the sharded optimizer state from all replicas")
138 self._all_states = self._collect_sharded_states()
139 else:
140 # Acknowledge broadcasts, and send this rank's shard when needed
141 self._broadcast_state_dict()
142
143 def state_dict(self) -> Dict[str, Any]:
144 """
145 Return the last known global optimizer state, which consist of a list of the shards.
146
147 NOTE: This is limited to the replica which was responsible for the consolidation.
148 The state may also not be up to date, depending on when `consolidate_state_dict` was last called.
149 """
150
151 assert (
152 len(self._all_states) > 0
153 ), "The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand"
154
155 # Flatten the param_groups, save the partition which logs the rank <> shard correspondence
156 partition: List[Tuple[int, int]] = []
157 param_groups: List[Dict[Any, Any]] = []
158
159 start = 0
160 for i, s in enumerate(self._all_states):
161 param_groups.extend(s["param_groups"])
162 end = start + len(s["param_groups"])
163 partition.append((start, end))
164 start = end
165
166 return {
167 "state": [s["state"] for s in self._all_states],
168 "param_groups": param_groups,
169 "partition": partition,
170 }
171
172 def load_local_state_dict(self, state_dict: dict) -> None:
173 """ Loads this rank's state_dict. """
174
175 self.optim.load_state_dict(state_dict)
176
177 # Workaround PyTorch bug that casts state (https://github.com/pytorch/pytorch/issues/43706)
178 # Copied from https://github.com/pytorch/fairseq/blob/v0.9.0/fairseq/optim/fp16_optimizer.py#L251-L268
179 groups = self.optim.param_groups
180 saved_groups = state_dict["param_groups"]
181 id_map = {
182 old_id: p
183 for old_id, p in zip(chain(*(g["params"] for g in saved_groups)), chain(*(g["params"] for g in groups)))
184 }
185 for k, v in state_dict["state"].items():
186 if k in id_map:
187 param = id_map[k]
188 self.optim.state[param] = recursive_copy_to_device(v, non_blocking=True, device=param.device)
189
190 # Restore the global param_groups (the params themselves are already correct)
191 for global_group, local_group in zip(self.param_groups, groups):
192 for k, v in local_group.items():
193 if k != "params":
194 global_group[k] = v
195
196 def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
197 """ Restore the global parameter groups as well as the shard """
198
199 # Get this optimizer's param_groups shard
200 param_groups = state_dict["param_groups"][
201 state_dict["partition"][self.rank][0] : state_dict["partition"][self.rank][1]
202 ]
203 # Dispatch this rank's state dictionary to the wrapped shard optimizer
204 self.load_local_state_dict({"state": state_dict["state"][self.rank], "param_groups": param_groups})
205
206 def add_param_group(self, param_group: dict) -> None:
207 super().add_param_group(param_group)
208 if not self.in_super_constructor:
209 param_groups = self.partition_parameters()[self.rank]
210 if len(param_groups) == len(self.optim.param_groups) + 1:
211 self.optim.add_param_group(param_groups[-1])
212
213 def _sync_param_groups(self) -> None:
214 """Sync learning rate and other optimizer attributes (needed to support schedulers)."""
215 for global_group, local_group in zip(self.param_groups, self.optim.param_groups):
216 for k in local_group.keys():
217 if k != "params":
218 # Params have been sharded and should not be synced here
219 local_group[k] = global_group[k]
220
221 def _collect_sharded_states(self) -> List[Dict[str, Any]]:
222 """
223 Collect all the state shards, in CPU memory.
224 """
225 empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)
226 all_states: List[Dict[str, Any]] = []
227
228 for rank in range(dist.get_world_size(group=self.group)):
229 if rank == self.rank:
230 logging.debug("Saving self state")
231 all_states.append(
232 recursive_copy_to_device(self.local_state_dict(), non_blocking=True, device=torch.device("cpu"))
233 )
234
235 # Sync with other replicas
236 broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)
237 else:
238 # Fetch the optim state from the other replicas
239 logging.debug("Receiving state from rank %s ", rank)
240 replica_state = broadcast_object(
241 empty_buffer, src_rank=rank, group=self.group, dist_device=self._device
242 )
243
244 all_states.append(
245 recursive_copy_to_device(replica_state, non_blocking=True, device=torch.device("cpu"))
246 )
247
248 logging.debug("State from rank %s received", rank)
249
250 return all_states
251
252 def _broadcast_state_dict(self) -> None:
253 """
254 Broadcast this rank's state shard, discard others
255 """
256 empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)
257
258 for rank in range(dist.get_world_size(group=self.group)):
259 if rank == self.rank:
260 # Send the state to the reference replica
261 logging.debug(
262 "Sending the sharded optimizer state to the reference replica from rank %s", rank,
263 )
264 broadcast_object(self.local_state_dict(), src_rank=rank, group=self.group, dist_device=self._device)
265 else:
266 # Discard this tensor/rank, broadcast necessary for syncing
267 logging.debug("Discarding broadcast from rank %s", rank)
268 broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)
269
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fairscale/optim/oss.py b/fairscale/optim/oss.py
--- a/fairscale/optim/oss.py
+++ b/fairscale/optim/oss.py
@@ -141,13 +141,18 @@
"""
Return the last known global optimizer state, which consist of a list of the shards.
- NOTE: This is limited to the replica which was responsible for the consolidation.
+ NOTE:
+ - If the state has not been consolidated, this returns a shard's worth, not the global state.
+ - Returning the global state is limited to the replica which was responsible for the consolidation.
The state may also not be up to date, depending on when `consolidate_state_dict` was last called.
"""
- assert (
- len(self._all_states) > 0
- ), "The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand"
+ if len(self._all_states) == 0:
+ logging.warning("Optimizer state has not been consolidated. Returning the local state")
+ logging.warning("Please call `consolidate_state_dict()` beforehand if you meant to save the global state")
+ state_dict = self.local_state_dict()
+ state_dict["local_state_dict"] = True
+ return state_dict
# Flatten the param_groups, save the partition which logs the rank <> shard correspondence
partition: List[Tuple[int, int]] = []
@@ -164,6 +169,7 @@
"state": [s["state"] for s in self._all_states],
"param_groups": param_groups,
"partition": partition,
+ "local_state_dict": False,
}
def load_local_state_dict(self, state_dict: dict) -> None:
@@ -193,12 +199,16 @@
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
""" Restore the global parameter groups as well as the shard """
- # Get this optimizer's param_groups shard
- param_groups = state_dict["param_groups"][
- state_dict["partition"][self.rank][0] : state_dict["partition"][self.rank][1]
- ]
- # Dispatch this rank's state dictionary to the wrapped shard optimizer
- self.load_local_state_dict({"state": state_dict["state"][self.rank], "param_groups": param_groups})
+ # Check whether we got a local or global dict
+ if state_dict["local_state_dict"]:
+ self.load_local_state_dict(state_dict)
+ else:
+ # Get this optimizer's param_groups shard
+ param_groups = state_dict["param_groups"][
+ state_dict["partition"][self.rank][0] : state_dict["partition"][self.rank][1]
+ ]
+ # Dispatch this rank's state dictionary to the wrapped shard optimizer
+ self.load_local_state_dict({"state": state_dict["state"][self.rank], "param_groups": param_groups})
def add_param_group(self, param_group: dict) -> None:
super().add_param_group(param_group)
| {"golden_diff": "diff --git a/fairscale/optim/oss.py b/fairscale/optim/oss.py\n--- a/fairscale/optim/oss.py\n+++ b/fairscale/optim/oss.py\n@@ -141,13 +141,18 @@\n \"\"\"\n Return the last known global optimizer state, which consist of a list of the shards.\n \n- NOTE: This is limited to the replica which was responsible for the consolidation.\n+ NOTE:\n+ - If the state has not been consolidated, this returns a shard's worth, not the global state.\n+ - Returning the global state is limited to the replica which was responsible for the consolidation.\n The state may also not be up to date, depending on when `consolidate_state_dict` was last called.\n \"\"\"\n \n- assert (\n- len(self._all_states) > 0\n- ), \"The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand\"\n+ if len(self._all_states) == 0:\n+ logging.warning(\"Optimizer state has not been consolidated. Returning the local state\")\n+ logging.warning(\"Please call `consolidate_state_dict()` beforehand if you meant to save the global state\")\n+ state_dict = self.local_state_dict()\n+ state_dict[\"local_state_dict\"] = True\n+ return state_dict\n \n # Flatten the param_groups, save the partition which logs the rank <> shard correspondence\n partition: List[Tuple[int, int]] = []\n@@ -164,6 +169,7 @@\n \"state\": [s[\"state\"] for s in self._all_states],\n \"param_groups\": param_groups,\n \"partition\": partition,\n+ \"local_state_dict\": False,\n }\n \n def load_local_state_dict(self, state_dict: dict) -> None:\n@@ -193,12 +199,16 @@\n def load_state_dict(self, state_dict: Dict[str, Any]) -> None:\n \"\"\" Restore the global parameter groups as well as the shard \"\"\"\n \n- # Get this optimizer's param_groups shard\n- param_groups = state_dict[\"param_groups\"][\n- state_dict[\"partition\"][self.rank][0] : state_dict[\"partition\"][self.rank][1]\n- ]\n- # Dispatch this rank's state dictionary to the wrapped shard optimizer\n- self.load_local_state_dict({\"state\": state_dict[\"state\"][self.rank], \"param_groups\": param_groups})\n+ # Check whether we got a local or global dict\n+ if state_dict[\"local_state_dict\"]:\n+ self.load_local_state_dict(state_dict)\n+ else:\n+ # Get this optimizer's param_groups shard\n+ param_groups = state_dict[\"param_groups\"][\n+ state_dict[\"partition\"][self.rank][0] : state_dict[\"partition\"][self.rank][1]\n+ ]\n+ # Dispatch this rank's state dictionary to the wrapped shard optimizer\n+ self.load_local_state_dict({\"state\": state_dict[\"state\"][self.rank], \"param_groups\": param_groups})\n \n def add_param_group(self, param_group: dict) -> None:\n super().add_param_group(param_group)\n", "issue": "[feat] OSS: make it compatible with nvidia's AMP\n## \ud83d\ude80 Feature\r\nAMP is using a load(get()) trick to make sure that the state tensors are cast in the right format, which breaks OSS assumptions, see https://github.com/NVIDIA/apex/blob/2ec84ebdca59278eaf15e8ddf32476d9d6d8b904/apex/amp/_initialize.py#L205\r\n\r\n## Motivation\r\nnvidia's AMP is getting deprecated in favour of Pytorch, but some of its features are not yet there. It brings a lot of speed to the table which is necessary for big jobs\r\n\r\n## Pitch\r\nMake it happen (tm)\r\n\r\n## Alternatives\r\nDo not support AMP\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nfrom itertools import chain\nimport logging\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type\n\nimport torch\nimport torch.distributed as dist\nfrom torch.optim import SGD, Optimizer\n\nfrom .utils import broadcast_object, recursive_copy_to_device\n\nif TYPE_CHECKING: # pragma: no cover\n from torch.optim.optimizer import _params_t\nelse:\n _params_t = Any\n\n\nclass OSS(Optimizer):\n \"\"\"Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`\n optimizer and shards its state as described by ZeRO_.\n ::\n opt = OSS(params, optim=torch.optim.Adam, lr=0.01)\n\n .. _ZeRO: https://arxiv.org/abs/1910.02054\n\n We use a greedy algorithm to pack a number of parameters\n at each rank. Each parameter belongs to a single rank and\n is not divided among rank.\n\n After each rank completed their parameter update, they broadcast\n the new version of the parameters to all other ranks to synchronize\n the parameters for next round forward/backward computation.\n\n Args:\n params (list of tensors):\n parameters to be optimized\n Keyword Args:\n optim (torch.nn.Optimizer):\n optimizer to shard (default: SGD)\n group (group):\n torch.distributed group (default: group.WORLD)\n \"\"\"\n\n optim: Optimizer\n in_super_constructor: bool\n\n def __init__(self, params: _params_t, optim: Type[Optimizer] = SGD, group: Any = dist.group.WORLD, **defaults: Any):\n # Hold all the model params in the root .param_groups\n self.in_super_constructor = True\n super().__init__(params, defaults)\n self.in_super_constructor = False\n\n # Build the wrapped optimizer, responsible for a shard of the params\n self.group = group\n self.rank = dist.get_rank(group)\n split_param_groups = self.partition_parameters()\n self.optim = optim(split_param_groups[self.rank], **defaults)\n\n # Optional consolidated optimizer state\n self._all_states: List[Dict[str, Any]] = []\n\n # Current device is set by the parameters allocated to this rank\n self._device = split_param_groups[self.rank][0][\"params\"][0].device\n\n # Sync local and global param_groups keys\n for global_group, local_group in zip(self.param_groups, self.optim.param_groups):\n for k, v in local_group.items():\n if k != \"params\":\n global_group[k] = v\n\n def partition_parameters(self) -> List[List[dict]]:\n \"\"\"Partitions parameters across distributed ranks.\n\n Returns a list of param_groups (which is a list of dict) where each\n element of the list contains the param_groups for a rank. Element 0\n corresponds to rank 0, etc. 
We need all the ranks for the broadcast\n inside step().\n \"\"\"\n world_size = dist.get_world_size(self.group)\n param_groups: List[List] = [list() for _ in range(world_size)]\n sizes = [0] * world_size\n for param_group in self.param_groups:\n param_lists: List[List] = [list() for _ in range(world_size)]\n for param in param_group[\"params\"]:\n # Add this param to rank with smallest size.\n rank = sizes.index(min(sizes))\n param_lists[rank].append(param)\n sizes[rank] += param.numel()\n for rank, params in enumerate(param_lists):\n param_group_rank = copy.copy(param_group)\n param_group_rank[\"params\"] = params\n param_groups[rank].append(param_group_rank)\n return param_groups\n\n # NOTE(msb) We add a kwargs in order to support Optimizer sub-classes that support extra kwargs.\n # For example, the apex library contains fused optimizers with a step that supports extra kwargs.\n def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:\n # Sync oss param_groups attributes in case they've been updated by a scheduler.\n self._sync_param_groups()\n\n # Run the optimizer step on this shard only:\n if closure is not None:\n loss = self.optim.step(closure=closure, **kwargs) # type: ignore\n else:\n loss = self.optim.step(**kwargs)\n\n # Sync all the states. Broadcast requests are issued async, we check completeness before moving on\n requests = []\n for rank, param_groups in enumerate(self.partition_parameters()):\n for param_group in param_groups:\n for param in param_group[\"params\"]:\n requests.append(dist.broadcast(tensor=param, src=rank, group=self.group, async_op=True))\n\n _ = list(map(lambda x: x.wait(), requests))\n return loss\n\n def local_state_dict(self) -> dict:\n \"\"\" Gets this rank's state_dict. 
\"\"\"\n return self.optim.state_dict()\n\n def consolidate_state_dict(self, recipient_rank: int = 0) -> None:\n \"\"\" Update the consolidated state_dict list, one per rank.\n\n This needs to be called on all replicas \"\"\"\n\n # Sync lr and other attributes in case its been updated\n self._sync_param_groups()\n\n if self.rank == recipient_rank:\n # Pull the sharded state from all the other replicas\n # Store all the states in order, rank by rank\n logging.debug(\"Pulling the sharded optimizer state from all replicas\")\n self._all_states = self._collect_sharded_states()\n else:\n # Acknowledge broadcasts, and send this rank's shard when needed\n self._broadcast_state_dict()\n\n def state_dict(self) -> Dict[str, Any]:\n \"\"\"\n Return the last known global optimizer state, which consist of a list of the shards.\n\n NOTE: This is limited to the replica which was responsible for the consolidation.\n The state may also not be up to date, depending on when `consolidate_state_dict` was last called.\n \"\"\"\n\n assert (\n len(self._all_states) > 0\n ), \"The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand\"\n\n # Flatten the param_groups, save the partition which logs the rank <> shard correspondence\n partition: List[Tuple[int, int]] = []\n param_groups: List[Dict[Any, Any]] = []\n\n start = 0\n for i, s in enumerate(self._all_states):\n param_groups.extend(s[\"param_groups\"])\n end = start + len(s[\"param_groups\"])\n partition.append((start, end))\n start = end\n\n return {\n \"state\": [s[\"state\"] for s in self._all_states],\n \"param_groups\": param_groups,\n \"partition\": partition,\n }\n\n def load_local_state_dict(self, state_dict: dict) -> None:\n \"\"\" Loads this rank's state_dict. \"\"\"\n\n self.optim.load_state_dict(state_dict)\n\n # Workaround PyTorch bug that casts state (https://github.com/pytorch/pytorch/issues/43706)\n # Copied from https://github.com/pytorch/fairseq/blob/v0.9.0/fairseq/optim/fp16_optimizer.py#L251-L268\n groups = self.optim.param_groups\n saved_groups = state_dict[\"param_groups\"]\n id_map = {\n old_id: p\n for old_id, p in zip(chain(*(g[\"params\"] for g in saved_groups)), chain(*(g[\"params\"] for g in groups)))\n }\n for k, v in state_dict[\"state\"].items():\n if k in id_map:\n param = id_map[k]\n self.optim.state[param] = recursive_copy_to_device(v, non_blocking=True, device=param.device)\n\n # Restore the global param_groups (the params themselves are already correct)\n for global_group, local_group in zip(self.param_groups, groups):\n for k, v in local_group.items():\n if k != \"params\":\n global_group[k] = v\n\n def load_state_dict(self, state_dict: Dict[str, Any]) -> None:\n \"\"\" Restore the global parameter groups as well as the shard \"\"\"\n\n # Get this optimizer's param_groups shard\n param_groups = state_dict[\"param_groups\"][\n state_dict[\"partition\"][self.rank][0] : state_dict[\"partition\"][self.rank][1]\n ]\n # Dispatch this rank's state dictionary to the wrapped shard optimizer\n self.load_local_state_dict({\"state\": state_dict[\"state\"][self.rank], \"param_groups\": param_groups})\n\n def add_param_group(self, param_group: dict) -> None:\n super().add_param_group(param_group)\n if not self.in_super_constructor:\n param_groups = self.partition_parameters()[self.rank]\n if len(param_groups) == len(self.optim.param_groups) + 1:\n self.optim.add_param_group(param_groups[-1])\n\n def _sync_param_groups(self) -> None:\n \"\"\"Sync learning rate and other optimizer attributes 
(needed to support schedulers).\"\"\"\n for global_group, local_group in zip(self.param_groups, self.optim.param_groups):\n for k in local_group.keys():\n if k != \"params\":\n # Params have been sharded and should not be synced here\n local_group[k] = global_group[k]\n\n def _collect_sharded_states(self) -> List[Dict[str, Any]]:\n \"\"\"\n Collect all the state shards, in CPU memory.\n \"\"\"\n empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)\n all_states: List[Dict[str, Any]] = []\n\n for rank in range(dist.get_world_size(group=self.group)):\n if rank == self.rank:\n logging.debug(\"Saving self state\")\n all_states.append(\n recursive_copy_to_device(self.local_state_dict(), non_blocking=True, device=torch.device(\"cpu\"))\n )\n\n # Sync with other replicas\n broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)\n else:\n # Fetch the optim state from the other replicas\n logging.debug(\"Receiving state from rank %s \", rank)\n replica_state = broadcast_object(\n empty_buffer, src_rank=rank, group=self.group, dist_device=self._device\n )\n\n all_states.append(\n recursive_copy_to_device(replica_state, non_blocking=True, device=torch.device(\"cpu\"))\n )\n\n logging.debug(\"State from rank %s received\", rank)\n\n return all_states\n\n def _broadcast_state_dict(self) -> None:\n \"\"\"\n Broadcast this rank's state shard, discard others\n \"\"\"\n empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)\n\n for rank in range(dist.get_world_size(group=self.group)):\n if rank == self.rank:\n # Send the state to the reference replica\n logging.debug(\n \"Sending the sharded optimizer state to the reference replica from rank %s\", rank,\n )\n broadcast_object(self.local_state_dict(), src_rank=rank, group=self.group, dist_device=self._device)\n else:\n # Discard this tensor/rank, broadcast necessary for syncing\n logging.debug(\"Discarding broadcast from rank %s\", rank)\n broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)\n", "path": "fairscale/optim/oss.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nfrom itertools import chain\nimport logging\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type\n\nimport torch\nimport torch.distributed as dist\nfrom torch.optim import SGD, Optimizer\n\nfrom .utils import broadcast_object, recursive_copy_to_device\n\nif TYPE_CHECKING: # pragma: no cover\n from torch.optim.optimizer import _params_t\nelse:\n _params_t = Any\n\n\nclass OSS(Optimizer):\n \"\"\"Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`\n optimizer and shards its state as described by ZeRO_.\n ::\n opt = OSS(params, optim=torch.optim.Adam, lr=0.01)\n\n .. _ZeRO: https://arxiv.org/abs/1910.02054\n\n We use a greedy algorithm to pack a number of parameters\n at each rank. 
Each parameter belongs to a single rank and\n is not divided among rank.\n\n After each rank completed their parameter update, they broadcast\n the new version of the parameters to all other ranks to synchronize\n the parameters for next round forward/backward computation.\n\n Args:\n params (list of tensors):\n parameters to be optimized\n Keyword Args:\n optim (torch.nn.Optimizer):\n optimizer to shard (default: SGD)\n group (group):\n torch.distributed group (default: group.WORLD)\n \"\"\"\n\n optim: Optimizer\n in_super_constructor: bool\n\n def __init__(self, params: _params_t, optim: Type[Optimizer] = SGD, group: Any = dist.group.WORLD, **defaults: Any):\n # Hold all the model params in the root .param_groups\n self.in_super_constructor = True\n super().__init__(params, defaults)\n self.in_super_constructor = False\n\n # Build the wrapped optimizer, responsible for a shard of the params\n self.group = group\n self.rank = dist.get_rank(group)\n split_param_groups = self.partition_parameters()\n self.optim = optim(split_param_groups[self.rank], **defaults)\n\n # Optional consolidated optimizer state\n self._all_states: List[Dict[str, Any]] = []\n\n # Current device is set by the parameters allocated to this rank\n self._device = split_param_groups[self.rank][0][\"params\"][0].device\n\n # Sync local and global param_groups keys\n for global_group, local_group in zip(self.param_groups, self.optim.param_groups):\n for k, v in local_group.items():\n if k != \"params\":\n global_group[k] = v\n\n def partition_parameters(self) -> List[List[dict]]:\n \"\"\"Partitions parameters across distributed ranks.\n\n Returns a list of param_groups (which is a list of dict) where each\n element of the list contains the param_groups for a rank. Element 0\n corresponds to rank 0, etc. We need all the ranks for the broadcast\n inside step().\n \"\"\"\n world_size = dist.get_world_size(self.group)\n param_groups: List[List] = [list() for _ in range(world_size)]\n sizes = [0] * world_size\n for param_group in self.param_groups:\n param_lists: List[List] = [list() for _ in range(world_size)]\n for param in param_group[\"params\"]:\n # Add this param to rank with smallest size.\n rank = sizes.index(min(sizes))\n param_lists[rank].append(param)\n sizes[rank] += param.numel()\n for rank, params in enumerate(param_lists):\n param_group_rank = copy.copy(param_group)\n param_group_rank[\"params\"] = params\n param_groups[rank].append(param_group_rank)\n return param_groups\n\n # NOTE(msb) We add a kwargs in order to support Optimizer sub-classes that support extra kwargs.\n # For example, the apex library contains fused optimizers with a step that supports extra kwargs.\n def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:\n # Sync oss param_groups attributes in case they've been updated by a scheduler.\n self._sync_param_groups()\n\n # Run the optimizer step on this shard only\n loss = self.optim.step(closure=closure, **kwargs) # type: ignore\n\n # Sync all the states. Broadcast requests are issued async, we check completeness before moving on\n requests = []\n for rank, param_groups in enumerate(self.partition_parameters()):\n for param_group in param_groups:\n for param in param_group[\"params\"]:\n requests.append(dist.broadcast(tensor=param, src=rank, group=self.group, async_op=True))\n\n _ = list(map(lambda x: x.wait(), requests))\n return loss\n\n def local_state_dict(self) -> dict:\n \"\"\" Gets this rank's state_dict. 
\"\"\"\n return self.optim.state_dict()\n\n def consolidate_state_dict(self, recipient_rank: int = 0) -> None:\n \"\"\" Update the consolidated state_dict list, one per rank.\n\n This needs to be called on all replicas \"\"\"\n\n # Sync lr and other attributes in case its been updated\n self._sync_param_groups()\n\n if self.rank == recipient_rank:\n # Pull the sharded state from all the other replicas\n # Store all the states in order, rank by rank\n logging.debug(\"Pulling the sharded optimizer state from all replicas\")\n self._all_states = self._collect_sharded_states()\n else:\n # Acknowledge broadcasts, and send this rank's shard when needed\n self._broadcast_state_dict()\n\n def state_dict(self) -> Dict[str, Any]:\n \"\"\"\n Return the last known global optimizer state, which consist of a list of the shards.\n\n NOTE:\n - If the state has not been consolidated, this returns a shard's worth, not the global state.\n - Returning the global state is limited to the replica which was responsible for the consolidation.\n The state may also not be up to date, depending on when `consolidate_state_dict` was last called.\n \"\"\"\n\n if len(self._all_states) == 0:\n logging.warning(\"Optimizer state has not been consolidated. Returning the local state\")\n logging.warning(\"Please call `consolidate_state_dict()` beforehand if you meant to save the global state\")\n state_dict = self.local_state_dict()\n state_dict[\"local_state_dict\"] = True\n return state_dict\n\n # Flatten the param_groups, save the partition which logs the rank <> shard correspondence\n partition: List[Tuple[int, int]] = []\n param_groups: List[Dict[Any, Any]] = []\n\n start = 0\n for i, s in enumerate(self._all_states):\n param_groups.extend(s[\"param_groups\"])\n end = start + len(s[\"param_groups\"])\n partition.append((start, end))\n start = end\n\n return {\n \"state\": [s[\"state\"] for s in self._all_states],\n \"param_groups\": param_groups,\n \"partition\": partition,\n \"local_state_dict\": False,\n }\n\n def load_local_state_dict(self, state_dict: dict) -> None:\n \"\"\" Loads this rank's state_dict. 
\"\"\"\n\n self.optim.load_state_dict(state_dict)\n\n # Workaround PyTorch bug that casts state (https://github.com/pytorch/pytorch/issues/43706)\n # Copied from https://github.com/pytorch/fairseq/blob/v0.9.0/fairseq/optim/fp16_optimizer.py#L251-L268\n groups = self.optim.param_groups\n saved_groups = state_dict[\"param_groups\"]\n id_map = {\n old_id: p\n for old_id, p in zip(chain(*(g[\"params\"] for g in saved_groups)), chain(*(g[\"params\"] for g in groups)))\n }\n for k, v in state_dict[\"state\"].items():\n if k in id_map:\n param = id_map[k]\n self.optim.state[param] = recursive_copy_to_device(v, non_blocking=True, device=param.device)\n\n # Restore the global param_groups (the params themselves are already correct)\n for global_group, local_group in zip(self.param_groups, groups):\n for k, v in local_group.items():\n if k != \"params\":\n global_group[k] = v\n\n def load_state_dict(self, state_dict: Dict[str, Any]) -> None:\n \"\"\" Restore the global parameter groups as well as the shard \"\"\"\n\n # Check whether we got a local or global dict\n if state_dict[\"local_state_dict\"]:\n self.load_local_state_dict(state_dict)\n else:\n # Get this optimizer's param_groups shard\n param_groups = state_dict[\"param_groups\"][\n state_dict[\"partition\"][self.rank][0] : state_dict[\"partition\"][self.rank][1]\n ]\n # Dispatch this rank's state dictionary to the wrapped shard optimizer\n self.load_local_state_dict({\"state\": state_dict[\"state\"][self.rank], \"param_groups\": param_groups})\n\n def add_param_group(self, param_group: dict) -> None:\n super().add_param_group(param_group)\n if not self.in_super_constructor:\n param_groups = self.partition_parameters()[self.rank]\n if len(param_groups) == len(self.optim.param_groups) + 1:\n self.optim.add_param_group(param_groups[-1])\n\n def _sync_param_groups(self) -> None:\n \"\"\"Sync learning rate and other optimizer attributes (needed to support schedulers).\"\"\"\n for global_group, local_group in zip(self.param_groups, self.optim.param_groups):\n for k in local_group.keys():\n if k != \"params\":\n # Params have been sharded and should not be synced here\n local_group[k] = global_group[k]\n\n def _collect_sharded_states(self) -> List[Dict[str, Any]]:\n \"\"\"\n Collect all the state shards, in CPU memory.\n \"\"\"\n empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)\n all_states: List[Dict[str, Any]] = []\n\n for rank in range(dist.get_world_size(group=self.group)):\n if rank == self.rank:\n logging.debug(\"Saving self state\")\n all_states.append(\n recursive_copy_to_device(self.local_state_dict(), non_blocking=True, device=torch.device(\"cpu\"))\n )\n\n # Sync with other replicas\n broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)\n else:\n # Fetch the optim state from the other replicas\n logging.debug(\"Receiving state from rank %s \", rank)\n replica_state = broadcast_object(\n empty_buffer, src_rank=rank, group=self.group, dist_device=self._device\n )\n\n all_states.append(\n recursive_copy_to_device(replica_state, non_blocking=True, device=torch.device(\"cpu\"))\n )\n\n logging.debug(\"State from rank %s received\", rank)\n\n return all_states\n\n def _broadcast_state_dict(self) -> None:\n \"\"\"\n Broadcast this rank's state shard, discard others\n \"\"\"\n empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)\n\n for rank in range(dist.get_world_size(group=self.group)):\n if rank == self.rank:\n # Send the state to the reference replica\n 
logging.debug(\n \"Sending the sharded optimizer state to the reference replica from rank %s\", rank,\n )\n broadcast_object(self.local_state_dict(), src_rank=rank, group=self.group, dist_device=self._device)\n else:\n # Discard this tensor/rank, broadcast necessary for syncing\n logging.debug(\"Discarding broadcast from rank %s\", rank)\n broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)\n", "path": "fairscale/optim/oss.py"}]} | 3,648 | 683 |
gh_patches_debug_37040 | rasdani/github-patches | git_diff | opsdroid__opsdroid-522 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Switch CLI to use click
I recently discovered [`click`](http://click.pocoo.org/5/), which makes adding command-line options and arguments to your Python application super simple and much more robust.
We should look at replacing the [current argparse code](https://github.com/opsdroid/opsdroid/blob/master/opsdroid/__main__.py#L88) with `click`!
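
For orientation, a minimal `click`-based entry point could look like the sketch below. This is illustrative only; the option name and messages are assumptions, not opsdroid's actual interface.

```python
import click

@click.command()
@click.option('--gen-config', is_flag=True, default=False,
              help='Print an example configuration file and exit.')
def main(gen_config):
    """Toy entry point showing the click style of option handling."""
    if gen_config:
        click.echo('# example configuration would go here')
        return
    click.echo('Starting the bot...')

if __name__ == '__main__':
    main()
```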
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/__main__.py`
Content:
```
1 """Starts opsdroid."""
2
3 import os
4 import sys
5 import logging
6 import argparse
7 import gettext
8
9 from opsdroid.core import OpsDroid
10 from opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE,\
11 DEFAULT_LANGUAGE, LOCALE_DIR
12 from opsdroid.web import Web
13
14
15 gettext.install('opsdroid')
16 _LOGGER = logging.getLogger("opsdroid")
17
18
19 def configure_lang(config):
20 """Configure app language based on user config."""
21 lang_code = config.get("lang", DEFAULT_LANGUAGE)
22 if lang_code != DEFAULT_LANGUAGE:
23 lang = gettext.translation(
24 'opsdroid', LOCALE_DIR, (lang_code,), fallback=True)
25 lang.install()
26
27
28 def configure_logging(config):
29 """Configure the root logger based on user config."""
30 rootlogger = logging.getLogger()
31 while rootlogger.handlers:
32 rootlogger.handlers.pop()
33
34 try:
35 if config["logging"]["path"]:
36 logfile_path = os.path.expanduser(config["logging"]["path"])
37 else:
38 logfile_path = config["logging"]["path"]
39 except KeyError:
40 logfile_path = DEFAULT_LOG_FILENAME
41
42 try:
43 log_level = get_logging_level(
44 config["logging"]["level"])
45 except KeyError:
46 log_level = logging.INFO
47
48 rootlogger.setLevel(log_level)
49 formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')
50
51 console_handler = logging.StreamHandler()
52 console_handler.setLevel(log_level)
53 console_handler.setFormatter(formatter)
54 rootlogger.addHandler(console_handler)
55
56 try:
57 if not config["logging"]["console"]:
58 console_handler.setLevel(logging.CRITICAL)
59 except KeyError:
60 pass
61
62 if logfile_path:
63 logdir = os.path.dirname(os.path.realpath(logfile_path))
64 if not os.path.isdir(logdir):
65 os.makedirs(logdir)
66 file_handler = logging.FileHandler(logfile_path)
67 file_handler.setLevel(log_level)
68 file_handler.setFormatter(formatter)
69 rootlogger.addHandler(file_handler)
70 _LOGGER.info("="*40)
71 _LOGGER.info(_("Started application"))
72
73
74 def get_logging_level(logging_level):
75 """Get the logger level based on the user configuration."""
76 if logging_level == 'critical':
77 return logging.CRITICAL
78 elif logging_level == 'error':
79 return logging.ERROR
80 elif logging_level == 'warning':
81 return logging.WARNING
82 elif logging_level == 'debug':
83 return logging.DEBUG
84
85 return logging.INFO
86
87
88 def parse_args(args):
89 """Parse command line arguments."""
90 parser = argparse.ArgumentParser(description='Run opsdroid.')
91 parser.add_argument('--gen-config', action="store_true",
92 help='prints out an example configuration file')
93 return parser.parse_args(args)
94
95
96 def check_dependencies():
97 """Check for system dependencies required by opsdroid."""
98 if sys.version_info.major < 3 or sys.version_info.minor < 5:
99 logging.critical(_("Whoops! opsdroid requires python 3.5 or above."))
100 sys.exit(1)
101
102
103 def welcome_message(config):
104 """Add welcome message if set to true in configuration."""
105 try:
106 if config['welcome-message']:
107 _LOGGER.info("=" * 40)
108 _LOGGER.info(_("You can customise your opsdroid by modifying "
109 "your configuration.yaml"))
110 _LOGGER.info(_("Read more at: "
111 "http://opsdroid.readthedocs.io/#configuration"))
112 _LOGGER.info(_("Watch the Get Started Videos at: "
113 "http://bit.ly/2fnC0Fh"))
114 _LOGGER.info(_("Install Opsdroid Desktop at: \n"
115 "https://github.com/opsdroid/opsdroid-desktop/"
116 "releases"))
117 _LOGGER.info("=" * 40)
118 except KeyError:
119 _LOGGER.warning(_("'welcome-message: true/false' is missing in "
120 "configuration.yaml"))
121
122
123 def main():
124 """Parse the args and then start the application."""
125 args = parse_args(sys.argv[1:])
126
127 if args.gen_config:
128 with open(EXAMPLE_CONFIG_FILE, 'r') as conf:
129 print(conf.read())
130 sys.exit(0)
131
132 check_dependencies()
133
134 with OpsDroid() as opsdroid:
135 opsdroid.load()
136 configure_lang(opsdroid.config)
137 configure_logging(opsdroid.config)
138 welcome_message(opsdroid.config)
139 opsdroid.web_server = Web(opsdroid)
140 opsdroid.start_loop()
141
142
143 def init():
144 """Enter the application."""
145 if __name__ == "__main__":
146 main()
147
148
149 init()
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py
--- a/opsdroid/__main__.py
+++ b/opsdroid/__main__.py
@@ -3,12 +3,13 @@
import os
import sys
import logging
-import argparse
import gettext
+import click
+
from opsdroid.core import OpsDroid
-from opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE,\
- DEFAULT_LANGUAGE, LOCALE_DIR
+from opsdroid.const import __version__, DEFAULT_LOG_FILENAME, \
+ EXAMPLE_CONFIG_FILE, DEFAULT_LANGUAGE, LOCALE_DIR
from opsdroid.web import Web
@@ -85,14 +86,6 @@
return logging.INFO
-def parse_args(args):
- """Parse command line arguments."""
- parser = argparse.ArgumentParser(description='Run opsdroid.')
- parser.add_argument('--gen-config', action="store_true",
- help='prints out an example configuration file')
- return parser.parse_args(args)
-
-
def check_dependencies():
"""Check for system dependencies required by opsdroid."""
if sys.version_info.major < 3 or sys.version_info.minor < 5:
@@ -100,6 +93,23 @@
sys.exit(1)
+def print_version(ctx, param, value):
+ """Print out the version of opsdroid that is installed."""
+ if not value or ctx.resilient_parsing:
+ return
+ click.echo('opsdroid v{version}'.format(version=__version__))
+ ctx.exit(0)
+
+
+def print_example_config(ctx, param, value):
+ """Print out the example config."""
+ if not value or ctx.resilient_parsing:
+ return
+ with open(EXAMPLE_CONFIG_FILE, 'r') as conf:
+ click.echo(conf.read())
+ ctx.exit(0)
+
+
def welcome_message(config):
"""Add welcome message if set to true in configuration."""
try:
@@ -120,15 +130,19 @@
"configuration.yaml"))
[email protected]()
[email protected]('--gen-config', is_flag=True, callback=print_example_config,
+ expose_value=False, default=False,
+ help='Print an example config and exit.')
[email protected]('--version', '-v', is_flag=True, callback=print_version,
+ expose_value=False, default=False, is_eager=True,
+ help='Print the version and exit.')
def main():
- """Parse the args and then start the application."""
- args = parse_args(sys.argv[1:])
-
- if args.gen_config:
- with open(EXAMPLE_CONFIG_FILE, 'r') as conf:
- print(conf.read())
- sys.exit(0)
+ """Opsdroid is a chat bot framework written in Python.
+ It is designed to be extendable, scalable and simple.
+ See https://opsdroid.github.io/ for more information.
+ """
check_dependencies()
with OpsDroid() as opsdroid:
| {"golden_diff": "diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py\n--- a/opsdroid/__main__.py\n+++ b/opsdroid/__main__.py\n@@ -3,12 +3,13 @@\n import os\n import sys\n import logging\n-import argparse\n import gettext\n \n+import click\n+\n from opsdroid.core import OpsDroid\n-from opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE,\\\n- DEFAULT_LANGUAGE, LOCALE_DIR\n+from opsdroid.const import __version__, DEFAULT_LOG_FILENAME, \\\n+ EXAMPLE_CONFIG_FILE, DEFAULT_LANGUAGE, LOCALE_DIR\n from opsdroid.web import Web\n \n \n@@ -85,14 +86,6 @@\n return logging.INFO\n \n \n-def parse_args(args):\n- \"\"\"Parse command line arguments.\"\"\"\n- parser = argparse.ArgumentParser(description='Run opsdroid.')\n- parser.add_argument('--gen-config', action=\"store_true\",\n- help='prints out an example configuration file')\n- return parser.parse_args(args)\n-\n-\n def check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info.major < 3 or sys.version_info.minor < 5:\n@@ -100,6 +93,23 @@\n sys.exit(1)\n \n \n+def print_version(ctx, param, value):\n+ \"\"\"Print out the version of opsdroid that is installed.\"\"\"\n+ if not value or ctx.resilient_parsing:\n+ return\n+ click.echo('opsdroid v{version}'.format(version=__version__))\n+ ctx.exit(0)\n+\n+\n+def print_example_config(ctx, param, value):\n+ \"\"\"Print out the example config.\"\"\"\n+ if not value or ctx.resilient_parsing:\n+ return\n+ with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n+ click.echo(conf.read())\n+ ctx.exit(0)\n+\n+\n def welcome_message(config):\n \"\"\"Add welcome message if set to true in configuration.\"\"\"\n try:\n@@ -120,15 +130,19 @@\n \"configuration.yaml\"))\n \n \[email protected]()\[email protected]('--gen-config', is_flag=True, callback=print_example_config,\n+ expose_value=False, default=False,\n+ help='Print an example config and exit.')\[email protected]('--version', '-v', is_flag=True, callback=print_version,\n+ expose_value=False, default=False, is_eager=True,\n+ help='Print the version and exit.')\n def main():\n- \"\"\"Parse the args and then start the application.\"\"\"\n- args = parse_args(sys.argv[1:])\n-\n- if args.gen_config:\n- with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n- print(conf.read())\n- sys.exit(0)\n+ \"\"\"Opsdroid is a chat bot framework written in Python.\n \n+ It is designed to be extendable, scalable and simple.\n+ See https://opsdroid.github.io/ for more information.\n+ \"\"\"\n check_dependencies()\n \n with OpsDroid() as opsdroid:\n", "issue": "Switch CLI to use click\nI recently discovered [`click`](http://click.pocoo.org/5/) which makes adding command line options and arguments to your python application super simple and much more robust.\r\n\r\nWe should look at replacing the [current argparse code](https://github.com/opsdroid/opsdroid/blob/master/opsdroid/__main__.py#L88) with `click`!\n", "before_files": [{"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport os\nimport sys\nimport logging\nimport argparse\nimport gettext\n\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE,\\\n DEFAULT_LANGUAGE, LOCALE_DIR\nfrom opsdroid.web import Web\n\n\ngettext.install('opsdroid')\n_LOGGER = logging.getLogger(\"opsdroid\")\n\n\ndef configure_lang(config):\n \"\"\"Configure app language based on user config.\"\"\"\n lang_code = config.get(\"lang\", DEFAULT_LANGUAGE)\n if lang_code != DEFAULT_LANGUAGE:\n lang = gettext.translation(\n 'opsdroid', LOCALE_DIR, (lang_code,), 
fallback=True)\n lang.install()\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n if config[\"logging\"][\"path\"]:\n logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n else:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = DEFAULT_LOG_FILENAME\n\n try:\n log_level = get_logging_level(\n config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n rootlogger.addHandler(console_handler)\n\n try:\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n except KeyError:\n pass\n\n if logfile_path:\n logdir = os.path.dirname(os.path.realpath(logfile_path))\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n file_handler = logging.FileHandler(logfile_path)\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n rootlogger.addHandler(file_handler)\n _LOGGER.info(\"=\"*40)\n _LOGGER.info(_(\"Started application\"))\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\"\"\"\n if logging_level == 'critical':\n return logging.CRITICAL\n elif logging_level == 'error':\n return logging.ERROR\n elif logging_level == 'warning':\n return logging.WARNING\n elif logging_level == 'debug':\n return logging.DEBUG\n\n return logging.INFO\n\n\ndef parse_args(args):\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description='Run opsdroid.')\n parser.add_argument('--gen-config', action=\"store_true\",\n help='prints out an example configuration file')\n return parser.parse_args(args)\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info.major < 3 or sys.version_info.minor < 5:\n logging.critical(_(\"Whoops! 
opsdroid requires python 3.5 or above.\"))\n sys.exit(1)\n\n\ndef welcome_message(config):\n \"\"\"Add welcome message if set to true in configuration.\"\"\"\n try:\n if config['welcome-message']:\n _LOGGER.info(\"=\" * 40)\n _LOGGER.info(_(\"You can customise your opsdroid by modifying \"\n \"your configuration.yaml\"))\n _LOGGER.info(_(\"Read more at: \"\n \"http://opsdroid.readthedocs.io/#configuration\"))\n _LOGGER.info(_(\"Watch the Get Started Videos at: \"\n \"http://bit.ly/2fnC0Fh\"))\n _LOGGER.info(_(\"Install Opsdroid Desktop at: \\n\"\n \"https://github.com/opsdroid/opsdroid-desktop/\"\n \"releases\"))\n _LOGGER.info(\"=\" * 40)\n except KeyError:\n _LOGGER.warning(_(\"'welcome-message: true/false' is missing in \"\n \"configuration.yaml\"))\n\n\ndef main():\n \"\"\"Parse the args and then start the application.\"\"\"\n args = parse_args(sys.argv[1:])\n\n if args.gen_config:\n with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n print(conf.read())\n sys.exit(0)\n\n check_dependencies()\n\n with OpsDroid() as opsdroid:\n opsdroid.load()\n configure_lang(opsdroid.config)\n configure_logging(opsdroid.config)\n welcome_message(opsdroid.config)\n opsdroid.web_server = Web(opsdroid)\n opsdroid.start_loop()\n\n\ndef init():\n \"\"\"Enter the application.\"\"\"\n if __name__ == \"__main__\":\n main()\n\n\ninit()\n", "path": "opsdroid/__main__.py"}], "after_files": [{"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport os\nimport sys\nimport logging\nimport gettext\n\nimport click\n\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.const import __version__, DEFAULT_LOG_FILENAME, \\\n EXAMPLE_CONFIG_FILE, DEFAULT_LANGUAGE, LOCALE_DIR\nfrom opsdroid.web import Web\n\n\ngettext.install('opsdroid')\n_LOGGER = logging.getLogger(\"opsdroid\")\n\n\ndef configure_lang(config):\n \"\"\"Configure app language based on user config.\"\"\"\n lang_code = config.get(\"lang\", DEFAULT_LANGUAGE)\n if lang_code != DEFAULT_LANGUAGE:\n lang = gettext.translation(\n 'opsdroid', LOCALE_DIR, (lang_code,), fallback=True)\n lang.install()\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n if config[\"logging\"][\"path\"]:\n logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n else:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = DEFAULT_LOG_FILENAME\n\n try:\n log_level = get_logging_level(\n config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n rootlogger.addHandler(console_handler)\n\n try:\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n except KeyError:\n pass\n\n if logfile_path:\n logdir = os.path.dirname(os.path.realpath(logfile_path))\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n file_handler = logging.FileHandler(logfile_path)\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n rootlogger.addHandler(file_handler)\n _LOGGER.info(\"=\"*40)\n _LOGGER.info(_(\"Started application\"))\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\"\"\"\n if logging_level == 'critical':\n return logging.CRITICAL\n elif logging_level == 'error':\n return 
logging.ERROR\n elif logging_level == 'warning':\n return logging.WARNING\n elif logging_level == 'debug':\n return logging.DEBUG\n\n return logging.INFO\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info.major < 3 or sys.version_info.minor < 5:\n logging.critical(_(\"Whoops! opsdroid requires python 3.5 or above.\"))\n sys.exit(1)\n\n\ndef print_version(ctx, param, value):\n \"\"\"Print out the version of opsdroid that is installed.\"\"\"\n if not value or ctx.resilient_parsing:\n return\n click.echo('opsdroid v{version}'.format(version=__version__))\n ctx.exit(0)\n\n\ndef print_example_config(ctx, param, value):\n \"\"\"Print out the example config.\"\"\"\n if not value or ctx.resilient_parsing:\n return\n with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n click.echo(conf.read())\n ctx.exit(0)\n\n\ndef welcome_message(config):\n \"\"\"Add welcome message if set to true in configuration.\"\"\"\n try:\n if config['welcome-message']:\n _LOGGER.info(\"=\" * 40)\n _LOGGER.info(_(\"You can customise your opsdroid by modifying \"\n \"your configuration.yaml\"))\n _LOGGER.info(_(\"Read more at: \"\n \"http://opsdroid.readthedocs.io/#configuration\"))\n _LOGGER.info(_(\"Watch the Get Started Videos at: \"\n \"http://bit.ly/2fnC0Fh\"))\n _LOGGER.info(_(\"Install Opsdroid Desktop at: \\n\"\n \"https://github.com/opsdroid/opsdroid-desktop/\"\n \"releases\"))\n _LOGGER.info(\"=\" * 40)\n except KeyError:\n _LOGGER.warning(_(\"'welcome-message: true/false' is missing in \"\n \"configuration.yaml\"))\n\n\[email protected]()\[email protected]('--gen-config', is_flag=True, callback=print_example_config,\n expose_value=False, default=False,\n help='Print an example config and exit.')\[email protected]('--version', '-v', is_flag=True, callback=print_version,\n expose_value=False, default=False, is_eager=True,\n help='Print the version and exit.')\ndef main():\n \"\"\"Opsdroid is a chat bot framework written in Python.\n\n It is designed to be extendable, scalable and simple.\n See https://opsdroid.github.io/ for more information.\n \"\"\"\n check_dependencies()\n\n with OpsDroid() as opsdroid:\n opsdroid.load()\n configure_lang(opsdroid.config)\n configure_logging(opsdroid.config)\n welcome_message(opsdroid.config)\n opsdroid.web_server = Web(opsdroid)\n opsdroid.start_loop()\n\n\ndef init():\n \"\"\"Enter the application.\"\"\"\n if __name__ == \"__main__\":\n main()\n\n\ninit()\n", "path": "opsdroid/__main__.py"}]} | 1,657 | 672 |
gh_patches_debug_6492 | rasdani/github-patches | git_diff | mozilla__kitsune-2981 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add whitenoise and serve static files from the app
Add whitenoise to serve static files in the dev (and stage?) instances.
In production we'll probably go with another more efficient approach which is being investigated in #2949.
Whitenoise activation should be configurable via the `ENABLE_WHITENOISE` env variable.
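
A rough sketch of the intended toggle, assuming the generic `whitenoise.WhiteNoise` WSGI wrapper and python-decouple's `config` helper (the exact integration used here may differ):

```python
from decouple import config
from django.core.wsgi import get_wsgi_application

application = get_wsgi_application()

# Wrap the WSGI app with WhiteNoise only when the env variable is set,
# e.g. in dev or stage instances.
if config('ENABLE_WHITENOISE', default=False, cast=bool):
    from whitenoise import WhiteNoise
    application = WhiteNoise(application, root='static/')
```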
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wsgi/app.py`
Content:
```
1 """
2 WSGI config for kitsune project.
3
4 It exposes the WSGI callable as a module-level variable named ``application``.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
8 """
9 import os
10 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kitsune.settings') # NOQA
11
12 from django.core.wsgi import get_wsgi_application
13
14 import newrelic.agent
15 from decouple import config
16 from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
17
18 # For django-celery
19 os.environ['CELERY_LOADER'] = 'django'
20
21 application = get_wsgi_application()
22 application = Sentry(application)
23
24 # Add NewRelic
25 newrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')
26 newrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)
27 if newrelic_ini and newrelic_license_key:
28 newrelic.agent.initialize(newrelic_ini)
29 application = newrelic.agent.wsgi_application()(application)
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wsgi/app.py b/wsgi/app.py
--- a/wsgi/app.py
+++ b/wsgi/app.py
@@ -21,6 +21,10 @@
application = get_wsgi_application()
application = Sentry(application)
+if config('ENABLE_WHITENOISE', default=False, cast=bool):
+ from whitenoise.django import DjangoWhiteNoise
+ application = DjangoWhiteNoise(application)
+
# Add NewRelic
newrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')
newrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)
| {"golden_diff": "diff --git a/wsgi/app.py b/wsgi/app.py\n--- a/wsgi/app.py\n+++ b/wsgi/app.py\n@@ -21,6 +21,10 @@\n application = get_wsgi_application()\n application = Sentry(application)\n \n+if config('ENABLE_WHITENOISE', default=False, cast=bool):\n+ from whitenoise.django import DjangoWhiteNoise\n+ application = DjangoWhiteNoise(application)\n+\n # Add NewRelic\n newrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')\n newrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)\n", "issue": "Add whitenoise and serve static files from the app\nAdd whitenoise to serve static files in the dev (and stage?) instances. \r\n\r\nIn production we'll probably go with another more efficient approach which is being investigated in #2949.\r\n\r\nWhitenoise activation should be configurable via the `ENABLE_WHITENOISE` env variable.\n", "before_files": [{"content": "\"\"\"\nWSGI config for kitsune project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/\n\"\"\"\nimport os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kitsune.settings') # NOQA\n\nfrom django.core.wsgi import get_wsgi_application\n\nimport newrelic.agent\nfrom decouple import config\nfrom raven.contrib.django.raven_compat.middleware.wsgi import Sentry\n\n# For django-celery\nos.environ['CELERY_LOADER'] = 'django'\n\napplication = get_wsgi_application()\napplication = Sentry(application)\n\n# Add NewRelic\nnewrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')\nnewrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)\nif newrelic_ini and newrelic_license_key:\n newrelic.agent.initialize(newrelic_ini)\n application = newrelic.agent.wsgi_application()(application)\n", "path": "wsgi/app.py"}], "after_files": [{"content": "\"\"\"\nWSGI config for kitsune project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/\n\"\"\"\nimport os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kitsune.settings') # NOQA\n\nfrom django.core.wsgi import get_wsgi_application\n\nimport newrelic.agent\nfrom decouple import config\nfrom raven.contrib.django.raven_compat.middleware.wsgi import Sentry\n\n# For django-celery\nos.environ['CELERY_LOADER'] = 'django'\n\napplication = get_wsgi_application()\napplication = Sentry(application)\n\nif config('ENABLE_WHITENOISE', default=False, cast=bool):\n from whitenoise.django import DjangoWhiteNoise\n application = DjangoWhiteNoise(application)\n\n# Add NewRelic\nnewrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')\nnewrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)\nif newrelic_ini and newrelic_license_key:\n newrelic.agent.initialize(newrelic_ini)\n application = newrelic.agent.wsgi_application()(application)\n", "path": "wsgi/app.py"}]} | 612 | 136 |
gh_patches_debug_36466 | rasdani/github-patches | git_diff | google__timesketch-1741 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
client.get_sigma_rule different result to client.get_sigma_rule_by_text
Running the following command:
```
rule = client.get_sigma_rule(rule_uuid='5266a592-b793-11ea-b3de-0242ac130004')
rule.es_query
```
Gives:
```
'*apt\\-get\\ install\\ zmap*'
```
Vs:
```
rule_text = """title: Suspicious Installation of Zenmap
id: 5266a592-b793-11ea-b3de-0242ac130004
description: Detects suspicious installation of Zenmap
references:
- https://rmusser.net/docs/ATT&CK-Stuff/ATT&CK/Discovery.html
author: Alexander Jaeger
date: 2020/06/26
modified: 2020/06/26
tags:
- attack.discovery
- attack.t1046
logsource:
product: linux
service: shell
detection:
keywords:
# Generic suspicious commands
- '*apt-get install zmap*'
condition: keywords
falsepositives:
- Unknown
level: high"""
rule2 = client.get_sigma_rule_by_text(rule_text)
rule2.es_query
```
Gives:
```
'(data_type:("shell\\:zsh\\:history" OR "bash\\:history\\:command" OR "apt\\:history\\:line" OR "selinux\\:line") AND "*apt\\-get\\ install\\ zmap*")'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `timesketch/lib/sigma_util.py`
Content:
```
1 # Copyright 2020 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Timesketch Sigma lib functions."""
15
16 import os
17 import codecs
18 import logging
19 import yaml
20
21 from flask import current_app
22
23 import sigma.configuration as sigma_configuration
24
25 from sigma.backends import elasticsearch as sigma_es
26 from sigma.parser import collection as sigma_collection
27 from sigma.parser import exceptions as sigma_exceptions
28 from sigma.config.exceptions import SigmaConfigParseError
29
30 logger = logging.getLogger('timesketch.lib.sigma')
31
32
33 def get_sigma_config_file(config_file=None):
34 """Get a sigma.configuration.SigmaConfiguration object.
35
36 Args:
37 config_file: Optional path to a config file
38 Returns:
39 A sigma.configuration.SigmaConfiguration object
40 Raises:
41 ValueError: If SIGMA_CONFIG is not found in the config file.
42 or the Sigma config file is not readabale.
43 SigmaConfigParseError: If config file could not be parsed.
44 """
45 if config_file:
46 config_file_path = config_file
47 else:
48 config_file_path = current_app.config.get(
49 'SIGMA_CONFIG', './data/sigma_config.yaml')
50
51 if not config_file_path:
52 raise ValueError('No config_file_path set via param or config file')
53
54 if not os.path.isfile(config_file_path):
55 raise ValueError(
56 'Unable to open file: [{0:s}], it does not exist.'.format(
57 config_file_path))
58
59 if not os.access(config_file_path, os.R_OK):
60 raise ValueError(
61 'Unable to open file: [{0:s}], cannot open it for '
62 'read, please check permissions.'.format(config_file_path))
63
64 with open(config_file_path, 'r') as config_file_read:
65 sigma_config_file = config_file_read.read()
66
67 try:
68 sigma_config = sigma_configuration.SigmaConfiguration(sigma_config_file)
69 except SigmaConfigParseError:
70 logger.error('Parsing error with {0:s}'.format(sigma_config_file))
71 raise
72
73 return sigma_config
74
75 def get_sigma_rules_path():
76 """Get Sigma rules paths.
77
78 Returns:
79 A list of strings to the Sigma rules
80
81 Raises:
82 ValueError: If SIGMA_RULES_FOLDERS is not found in the config file.
83 or the folders are not readabale.
84 """
85 try:
86 rules_path = current_app.config.get('SIGMA_RULES_FOLDERS', [])
87 except RuntimeError as e:
88 raise ValueError(
89 'SIGMA_RULES_FOLDERS not found in config file') from e
90
91 if not rules_path:
92 raise ValueError(
93 'SIGMA_RULES_FOLDERS not found in config file')
94
95 for folder in rules_path:
96 if not os.path.isdir(folder):
97 raise ValueError(
98 'Unable to open dir: [{0:s}], it does not exist.'.format(
99 folder))
100
101 if not os.access(folder, os.R_OK):
102 raise ValueError(
103 'Unable to open dir: [{0:s}], cannot open it for '
104 'read, please check permissions.'.format(folder))
105
106 return rules_path
107
108
109 def get_sigma_rules(rule_folder, sigma_config=None):
110 """Returns the Sigma rules for a folder including subfolders.
111 Args:
112 rule_folder: folder to be checked for rules
113 sigma_config: optional argument to pass a
114 sigma.configuration.SigmaConfiguration object
115 Returns:
116 A array of Sigma rules as JSON
117 Raises:
118 ValueError: If SIGMA_RULES_FOLDERS is not found in the config file.
119 or the folders are not readabale.
120 """
121 return_array = []
122
123 for dirpath, dirnames, files in os.walk(rule_folder):
124 if 'deprecated' in [x.lower() for x in dirnames]:
125 dirnames.remove('deprecated')
126
127 for rule_filename in files:
128 if rule_filename.lower().endswith('.yml'):
129 # if a sub dir is found, do not try to parse it.
130 if os.path.isdir(os.path.join(dirpath, rule_filename)):
131 continue
132
133 rule_file_path = os.path.join(dirpath, rule_filename)
134 parsed_rule = get_sigma_rule(rule_file_path, sigma_config)
135 if parsed_rule:
136 return_array.append(parsed_rule)
137
138 return return_array
139
140
141 def get_all_sigma_rules():
142 """Returns all Sigma rules
143
144 Returns:
145 A array of Sigma rules
146
147 Raises:
148 ValueError: If SIGMA_RULES_FOLDERS is not found in the config file.
149 or the folders are not readabale.
150 """
151 sigma_rules = []
152
153 rules_paths = get_sigma_rules_path()
154
155 for folder in rules_paths:
156 sigma_rules.extend(get_sigma_rules(folder))
157
158 return sigma_rules
159
160
161 def get_sigma_rule(filepath, sigma_config=None):
162 """Returns a JSON represenation for a rule
163 Args:
164 filepath: path to the sigma rule to be parsed
165 sigma_config: optional argument to pass a
166 sigma.configuration.SigmaConfiguration object
167 Returns:
168 Json representation of the parsed rule
169 Raises:
170 ValueError: Parsing error
171 IsADirectoryError: If a directory is passed as filepath
172 """
173 try:
174 if sigma_config:
175 sigma_conf_obj = sigma_config
176 else:
177 sigma_conf_obj = get_sigma_config_file()
178 except ValueError as e:
179 logger.error(
180 'Problem reading the Sigma config', exc_info=True)
181 raise ValueError('Problem reading the Sigma config') from e
182
183
184 sigma_backend = sigma_es.ElasticsearchQuerystringBackend(sigma_conf_obj, {})
185
186 try:
187 sigma_rules_paths = get_sigma_rules_path()
188 except ValueError:
189 sigma_rules_paths = None
190
191 if not filepath.lower().endswith('.yml'):
192 raise ValueError(f'{filepath} does not end with .yml')
193
194 # if a sub dir is found, nothing can be parsed
195 if os.path.isdir(filepath):
196 raise IsADirectoryError(f'{filepath} is a directory - must be a file')
197
198 abs_path = os.path.abspath(filepath)
199
200 with codecs.open(
201 abs_path, 'r', encoding='utf-8', errors='replace') as file:
202 try:
203 rule_return = {}
204 rule_yaml_data = yaml.safe_load_all(file.read())
205 for doc in rule_yaml_data:
206 rule_return.update(doc)
207 parser = sigma_collection.SigmaCollectionParser(
208 str(doc), sigma_config, None)
209 parsed_sigma_rules = parser.generate(sigma_backend)
210
211 except NotImplementedError as exception:
212 logger.error(
213 'Error generating rule in file {0:s}: {1!s}'
214 .format(abs_path, exception))
215 raise
216
217 except sigma_exceptions.SigmaParseError as exception:
218 logger.error(
219 'Sigma parsing error generating rule in file {0:s}: {1!s}'
220 .format(abs_path, exception))
221 raise
222
223 except yaml.parser.ParserError as exception:
224 logger.error(
225 'Yaml parsing error generating rule in file {0:s}: {1!s}'
226 .format(abs_path, exception))
227 return None
228
229 sigma_es_query = ''
230
231 for sigma_rule in parsed_sigma_rules:
232 sigma_es_query = sigma_rule
233
234 rule_return.update(
235 {'es_query':sigma_es_query})
236 rule_return.update(
237 {'file_name':os.path.basename(filepath)})
238
239 # in case multiple folders are in the config, need to remove them
240 if sigma_rules_paths:
241 for rule_path in sigma_rules_paths:
242 file_relpath = os.path.relpath(filepath, rule_path)
243 else:
244 file_relpath = 'N/A'
245
246 rule_return.update(
247 {'file_relpath':file_relpath})
248
249 return rule_return
250
251 def get_sigma_rule_by_text(rule_text, sigma_config=None):
252 """Returns a JSON represenation for a rule
253
254 Args:
255 rule_text: Text of the sigma rule to be parsed
256 sigma_config: config file object
257
258 Returns:
259 Json representation of the parsed rule
260 Raises:
261 sigma_exceptions.SigmaParseError: Issue with parsing the given rule
262 yaml.parser.ParserError: Not a correct YAML text provided
263 NotImplementedError: A feature in the provided Sigma rule is not
264 implemented in Sigma for Timesketch
265 """
266 if sigma_config is None:
267 sigma_config = get_sigma_config_file()
268
269 sigma_backend = sigma_es.ElasticsearchQuerystringBackend(sigma_config, {})
270
271 rule_return = {}
272
273 # TODO check if input validation is needed / useful.
274 try:
275 parser = sigma_collection.SigmaCollectionParser(
276 rule_text, sigma_config, None)
277 parsed_sigma_rules = parser.generate(sigma_backend)
278 rule_yaml_data = yaml.safe_load_all(rule_text)
279 for doc in rule_yaml_data:
280 rule_return.update(doc)
281
282 except NotImplementedError as exception:
283 logger.error(
284 'Error generating rule {0!s}'.format(exception))
285 raise
286
287 except sigma_exceptions.SigmaParseError as exception:
288 logger.error(
289 'Sigma parsing error generating rule {0!s}'
290 .format(exception))
291 raise
292
293 except yaml.parser.ParserError as exception:
294 logger.error(
295 'Yaml parsing error generating rule {0!s}'.format(exception))
296 raise
297
298 sigma_es_query = ''
299
300 for sigma_rule in parsed_sigma_rules:
301 sigma_es_query = sigma_rule
302
303 rule_return.update(
304 {'es_query':sigma_es_query})
305 rule_return.update(
306 {'file_name':'N/A'})
307 rule_return.update(
308 {'file_relpath':'N/A'})
309
310 return rule_return
311
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/timesketch/lib/sigma_util.py b/timesketch/lib/sigma_util.py
--- a/timesketch/lib/sigma_util.py
+++ b/timesketch/lib/sigma_util.py
@@ -171,8 +171,10 @@
IsADirectoryError: If a directory is passed as filepath
"""
try:
- if sigma_config:
+ if isinstance(sigma_config, sigma_configuration.SigmaConfiguration):
sigma_conf_obj = sigma_config
+ elif isinstance(sigma_config, str):
+ sigma_conf_obj = get_sigma_config_file(sigma_config)
else:
sigma_conf_obj = get_sigma_config_file()
except ValueError as e:
@@ -205,7 +207,7 @@
for doc in rule_yaml_data:
rule_return.update(doc)
parser = sigma_collection.SigmaCollectionParser(
- str(doc), sigma_config, None)
+ str(doc), sigma_conf_obj, None)
parsed_sigma_rules = parser.generate(sigma_backend)
except NotImplementedError as exception:
@@ -263,17 +265,26 @@
NotImplementedError: A feature in the provided Sigma rule is not
implemented in Sigma for Timesketch
"""
- if sigma_config is None:
- sigma_config = get_sigma_config_file()
+ try:
+ if isinstance(sigma_config, sigma_configuration.SigmaConfiguration):
+ sigma_conf_obj = sigma_config
+ elif isinstance(sigma_config, str):
+ sigma_conf_obj = get_sigma_config_file(sigma_config)
+ else:
+ sigma_conf_obj = get_sigma_config_file()
+ except ValueError as e:
+ logger.error(
+ 'Problem reading the Sigma config', exc_info=True)
+ raise ValueError('Problem reading the Sigma config') from e
- sigma_backend = sigma_es.ElasticsearchQuerystringBackend(sigma_config, {})
+ sigma_backend = sigma_es.ElasticsearchQuerystringBackend(sigma_conf_obj, {})
rule_return = {}
# TODO check if input validation is needed / useful.
try:
parser = sigma_collection.SigmaCollectionParser(
- rule_text, sigma_config, None)
+ rule_text, sigma_conf_obj, None)
parsed_sigma_rules = parser.generate(sigma_backend)
rule_yaml_data = yaml.safe_load_all(rule_text)
for doc in rule_yaml_data:
| {"golden_diff": "diff --git a/timesketch/lib/sigma_util.py b/timesketch/lib/sigma_util.py\n--- a/timesketch/lib/sigma_util.py\n+++ b/timesketch/lib/sigma_util.py\n@@ -171,8 +171,10 @@\n IsADirectoryError: If a directory is passed as filepath\n \"\"\"\n try:\n- if sigma_config:\n+ if isinstance(sigma_config, sigma_configuration.SigmaConfiguration):\n sigma_conf_obj = sigma_config\n+ elif isinstance(sigma_config, str):\n+ sigma_conf_obj = get_sigma_config_file(sigma_config)\n else:\n sigma_conf_obj = get_sigma_config_file()\n except ValueError as e:\n@@ -205,7 +207,7 @@\n for doc in rule_yaml_data:\n rule_return.update(doc)\n parser = sigma_collection.SigmaCollectionParser(\n- str(doc), sigma_config, None)\n+ str(doc), sigma_conf_obj, None)\n parsed_sigma_rules = parser.generate(sigma_backend)\n \n except NotImplementedError as exception:\n@@ -263,17 +265,26 @@\n NotImplementedError: A feature in the provided Sigma rule is not\n implemented in Sigma for Timesketch\n \"\"\"\n- if sigma_config is None:\n- sigma_config = get_sigma_config_file()\n+ try:\n+ if isinstance(sigma_config, sigma_configuration.SigmaConfiguration):\n+ sigma_conf_obj = sigma_config\n+ elif isinstance(sigma_config, str):\n+ sigma_conf_obj = get_sigma_config_file(sigma_config)\n+ else:\n+ sigma_conf_obj = get_sigma_config_file()\n+ except ValueError as e:\n+ logger.error(\n+ 'Problem reading the Sigma config', exc_info=True)\n+ raise ValueError('Problem reading the Sigma config') from e\n \n- sigma_backend = sigma_es.ElasticsearchQuerystringBackend(sigma_config, {})\n+ sigma_backend = sigma_es.ElasticsearchQuerystringBackend(sigma_conf_obj, {})\n \n rule_return = {}\n \n # TODO check if input validation is needed / useful.\n try:\n parser = sigma_collection.SigmaCollectionParser(\n- rule_text, sigma_config, None)\n+ rule_text, sigma_conf_obj, None)\n parsed_sigma_rules = parser.generate(sigma_backend)\n rule_yaml_data = yaml.safe_load_all(rule_text)\n for doc in rule_yaml_data:\n", "issue": "client.get_sigma_rule different result to client.get_sigma_rule_by_text\nRunning the following command:\r\n\r\n```\r\nrule = client.get_sigma_rule(rule_uuid='5266a592-b793-11ea-b3de-0242ac130004')\r\nrule.es_query\r\n``` \r\n\r\nGives:\r\n```\r\n'*apt\\\\-get\\\\ install\\\\ zmap*'\r\n```\r\n\r\nVs:\r\n```\r\nrule_text = \"\"\"title: Suspicious Installation of Zenmap\r\nid: 5266a592-b793-11ea-b3de-0242ac130004\r\ndescription: Detects suspicious installation of Zenmap\r\nreferences:\r\n - https://rmusser.net/docs/ATT&CK-Stuff/ATT&CK/Discovery.html\r\nauthor: Alexander Jaeger\r\ndate: 2020/06/26\r\nmodified: 2020/06/26\r\ntags:\r\n - attack.discovery\r\n - attack.t1046\r\nlogsource:\r\n product: linux\r\n service: shell\r\ndetection:\r\n keywords:\r\n # Generic suspicious commands\r\n - '*apt-get install zmap*'\r\n condition: keywords\r\nfalsepositives:\r\n - Unknown\r\nlevel: high\"\"\"\r\nrule2 = client.get_sigma_rule_by_text(rule_text)\r\nrule2.es_query\r\n```\r\n\r\nGives:\r\n\r\n```'(data_type:(\"shell\\\\:zsh\\\\:history\" OR \"bash\\\\:history\\\\:command\" OR \"apt\\\\:history\\\\:line\" OR \"selinux\\\\:line\") AND \"*apt\\\\-get\\\\ install\\\\ zmap*\")'\r\n```\n", "before_files": [{"content": "# Copyright 2020 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Timesketch Sigma lib functions.\"\"\"\n\nimport os\nimport codecs\nimport logging\nimport yaml\n\nfrom flask import current_app\n\nimport sigma.configuration as sigma_configuration\n\nfrom sigma.backends import elasticsearch as sigma_es\nfrom sigma.parser import collection as sigma_collection\nfrom sigma.parser import exceptions as sigma_exceptions\nfrom sigma.config.exceptions import SigmaConfigParseError\n\nlogger = logging.getLogger('timesketch.lib.sigma')\n\n\ndef get_sigma_config_file(config_file=None):\n \"\"\"Get a sigma.configuration.SigmaConfiguration object.\n\n Args:\n config_file: Optional path to a config file\n Returns:\n A sigma.configuration.SigmaConfiguration object\n Raises:\n ValueError: If SIGMA_CONFIG is not found in the config file.\n or the Sigma config file is not readabale.\n SigmaConfigParseError: If config file could not be parsed.\n \"\"\"\n if config_file:\n config_file_path = config_file\n else:\n config_file_path = current_app.config.get(\n 'SIGMA_CONFIG', './data/sigma_config.yaml')\n\n if not config_file_path:\n raise ValueError('No config_file_path set via param or config file')\n\n if not os.path.isfile(config_file_path):\n raise ValueError(\n 'Unable to open file: [{0:s}], it does not exist.'.format(\n config_file_path))\n\n if not os.access(config_file_path, os.R_OK):\n raise ValueError(\n 'Unable to open file: [{0:s}], cannot open it for '\n 'read, please check permissions.'.format(config_file_path))\n\n with open(config_file_path, 'r') as config_file_read:\n sigma_config_file = config_file_read.read()\n\n try:\n sigma_config = sigma_configuration.SigmaConfiguration(sigma_config_file)\n except SigmaConfigParseError:\n logger.error('Parsing error with {0:s}'.format(sigma_config_file))\n raise\n\n return sigma_config\n\ndef get_sigma_rules_path():\n \"\"\"Get Sigma rules paths.\n\n Returns:\n A list of strings to the Sigma rules\n\n Raises:\n ValueError: If SIGMA_RULES_FOLDERS is not found in the config file.\n or the folders are not readabale.\n \"\"\"\n try:\n rules_path = current_app.config.get('SIGMA_RULES_FOLDERS', [])\n except RuntimeError as e:\n raise ValueError(\n 'SIGMA_RULES_FOLDERS not found in config file') from e\n\n if not rules_path:\n raise ValueError(\n 'SIGMA_RULES_FOLDERS not found in config file')\n\n for folder in rules_path:\n if not os.path.isdir(folder):\n raise ValueError(\n 'Unable to open dir: [{0:s}], it does not exist.'.format(\n folder))\n\n if not os.access(folder, os.R_OK):\n raise ValueError(\n 'Unable to open dir: [{0:s}], cannot open it for '\n 'read, please check permissions.'.format(folder))\n\n return rules_path\n\n\ndef get_sigma_rules(rule_folder, sigma_config=None):\n \"\"\"Returns the Sigma rules for a folder including subfolders.\n Args:\n rule_folder: folder to be checked for rules\n sigma_config: optional argument to pass a\n sigma.configuration.SigmaConfiguration object\n Returns:\n A array of Sigma rules as JSON\n Raises:\n 
ValueError: If SIGMA_RULES_FOLDERS is not found in the config file.\n or the folders are not readabale.\n \"\"\"\n return_array = []\n\n for dirpath, dirnames, files in os.walk(rule_folder):\n if 'deprecated' in [x.lower() for x in dirnames]:\n dirnames.remove('deprecated')\n\n for rule_filename in files:\n if rule_filename.lower().endswith('.yml'):\n # if a sub dir is found, do not try to parse it.\n if os.path.isdir(os.path.join(dirpath, rule_filename)):\n continue\n\n rule_file_path = os.path.join(dirpath, rule_filename)\n parsed_rule = get_sigma_rule(rule_file_path, sigma_config)\n if parsed_rule:\n return_array.append(parsed_rule)\n\n return return_array\n\n\ndef get_all_sigma_rules():\n \"\"\"Returns all Sigma rules\n\n Returns:\n A array of Sigma rules\n\n Raises:\n ValueError: If SIGMA_RULES_FOLDERS is not found in the config file.\n or the folders are not readabale.\n \"\"\"\n sigma_rules = []\n\n rules_paths = get_sigma_rules_path()\n\n for folder in rules_paths:\n sigma_rules.extend(get_sigma_rules(folder))\n\n return sigma_rules\n\n\ndef get_sigma_rule(filepath, sigma_config=None):\n \"\"\"Returns a JSON represenation for a rule\n Args:\n filepath: path to the sigma rule to be parsed\n sigma_config: optional argument to pass a\n sigma.configuration.SigmaConfiguration object\n Returns:\n Json representation of the parsed rule\n Raises:\n ValueError: Parsing error\n IsADirectoryError: If a directory is passed as filepath\n \"\"\"\n try:\n if sigma_config:\n sigma_conf_obj = sigma_config\n else:\n sigma_conf_obj = get_sigma_config_file()\n except ValueError as e:\n logger.error(\n 'Problem reading the Sigma config', exc_info=True)\n raise ValueError('Problem reading the Sigma config') from e\n\n\n sigma_backend = sigma_es.ElasticsearchQuerystringBackend(sigma_conf_obj, {})\n\n try:\n sigma_rules_paths = get_sigma_rules_path()\n except ValueError:\n sigma_rules_paths = None\n\n if not filepath.lower().endswith('.yml'):\n raise ValueError(f'{filepath} does not end with .yml')\n\n # if a sub dir is found, nothing can be parsed\n if os.path.isdir(filepath):\n raise IsADirectoryError(f'{filepath} is a directory - must be a file')\n\n abs_path = os.path.abspath(filepath)\n\n with codecs.open(\n abs_path, 'r', encoding='utf-8', errors='replace') as file:\n try:\n rule_return = {}\n rule_yaml_data = yaml.safe_load_all(file.read())\n for doc in rule_yaml_data:\n rule_return.update(doc)\n parser = sigma_collection.SigmaCollectionParser(\n str(doc), sigma_config, None)\n parsed_sigma_rules = parser.generate(sigma_backend)\n\n except NotImplementedError as exception:\n logger.error(\n 'Error generating rule in file {0:s}: {1!s}'\n .format(abs_path, exception))\n raise\n\n except sigma_exceptions.SigmaParseError as exception:\n logger.error(\n 'Sigma parsing error generating rule in file {0:s}: {1!s}'\n .format(abs_path, exception))\n raise\n\n except yaml.parser.ParserError as exception:\n logger.error(\n 'Yaml parsing error generating rule in file {0:s}: {1!s}'\n .format(abs_path, exception))\n return None\n\n sigma_es_query = ''\n\n for sigma_rule in parsed_sigma_rules:\n sigma_es_query = sigma_rule\n\n rule_return.update(\n {'es_query':sigma_es_query})\n rule_return.update(\n {'file_name':os.path.basename(filepath)})\n\n # in case multiple folders are in the config, need to remove them\n if sigma_rules_paths:\n for rule_path in sigma_rules_paths:\n file_relpath = os.path.relpath(filepath, rule_path)\n else:\n file_relpath = 'N/A'\n\n rule_return.update(\n {'file_relpath':file_relpath})\n\n 
return rule_return\n\ndef get_sigma_rule_by_text(rule_text, sigma_config=None):\n \"\"\"Returns a JSON represenation for a rule\n\n Args:\n rule_text: Text of the sigma rule to be parsed\n sigma_config: config file object\n\n Returns:\n Json representation of the parsed rule\n Raises:\n sigma_exceptions.SigmaParseError: Issue with parsing the given rule\n yaml.parser.ParserError: Not a correct YAML text provided\n NotImplementedError: A feature in the provided Sigma rule is not\n implemented in Sigma for Timesketch\n \"\"\"\n if sigma_config is None:\n sigma_config = get_sigma_config_file()\n\n sigma_backend = sigma_es.ElasticsearchQuerystringBackend(sigma_config, {})\n\n rule_return = {}\n\n # TODO check if input validation is needed / useful.\n try:\n parser = sigma_collection.SigmaCollectionParser(\n rule_text, sigma_config, None)\n parsed_sigma_rules = parser.generate(sigma_backend)\n rule_yaml_data = yaml.safe_load_all(rule_text)\n for doc in rule_yaml_data:\n rule_return.update(doc)\n\n except NotImplementedError as exception:\n logger.error(\n 'Error generating rule {0!s}'.format(exception))\n raise\n\n except sigma_exceptions.SigmaParseError as exception:\n logger.error(\n 'Sigma parsing error generating rule {0!s}'\n .format(exception))\n raise\n\n except yaml.parser.ParserError as exception:\n logger.error(\n 'Yaml parsing error generating rule {0!s}'.format(exception))\n raise\n\n sigma_es_query = ''\n\n for sigma_rule in parsed_sigma_rules:\n sigma_es_query = sigma_rule\n\n rule_return.update(\n {'es_query':sigma_es_query})\n rule_return.update(\n {'file_name':'N/A'})\n rule_return.update(\n {'file_relpath':'N/A'})\n\n return rule_return\n", "path": "timesketch/lib/sigma_util.py"}], "after_files": [{"content": "# Copyright 2020 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Timesketch Sigma lib functions.\"\"\"\n\nimport os\nimport codecs\nimport logging\nimport yaml\n\nfrom flask import current_app\n\nimport sigma.configuration as sigma_configuration\n\nfrom sigma.backends import elasticsearch as sigma_es\nfrom sigma.parser import collection as sigma_collection\nfrom sigma.parser import exceptions as sigma_exceptions\nfrom sigma.config.exceptions import SigmaConfigParseError\n\nlogger = logging.getLogger('timesketch.lib.sigma')\n\n\ndef get_sigma_config_file(config_file=None):\n \"\"\"Get a sigma.configuration.SigmaConfiguration object.\n\n Args:\n config_file: Optional path to a config file\n Returns:\n A sigma.configuration.SigmaConfiguration object\n Raises:\n ValueError: If SIGMA_CONFIG is not found in the config file.\n or the Sigma config file is not readabale.\n SigmaConfigParseError: If config file could not be parsed.\n \"\"\"\n if config_file:\n config_file_path = config_file\n else:\n config_file_path = current_app.config.get(\n 'SIGMA_CONFIG', './data/sigma_config.yaml')\n\n if not config_file_path:\n raise ValueError('No config_file_path set via param or config file')\n\n if not os.path.isfile(config_file_path):\n raise ValueError(\n 'Unable to open file: [{0:s}], it does not exist.'.format(\n config_file_path))\n\n if not os.access(config_file_path, os.R_OK):\n raise ValueError(\n 'Unable to open file: [{0:s}], cannot open it for '\n 'read, please check permissions.'.format(config_file_path))\n\n with open(config_file_path, 'r') as config_file_read:\n sigma_config_file = config_file_read.read()\n\n try:\n sigma_config = sigma_configuration.SigmaConfiguration(sigma_config_file)\n except SigmaConfigParseError:\n logger.error('Parsing error with {0:s}'.format(sigma_config_file))\n raise\n\n return sigma_config\n\ndef get_sigma_rules_path():\n \"\"\"Get Sigma rules paths.\n\n Returns:\n A list of strings to the Sigma rules\n\n Raises:\n ValueError: If SIGMA_RULES_FOLDERS is not found in the config file.\n or the folders are not readabale.\n \"\"\"\n try:\n rules_path = current_app.config.get('SIGMA_RULES_FOLDERS', [])\n except RuntimeError as e:\n raise ValueError(\n 'SIGMA_RULES_FOLDERS not found in config file') from e\n\n if not rules_path:\n raise ValueError(\n 'SIGMA_RULES_FOLDERS not found in config file')\n\n for folder in rules_path:\n if not os.path.isdir(folder):\n raise ValueError(\n 'Unable to open dir: [{0:s}], it does not exist.'.format(\n folder))\n\n if not os.access(folder, os.R_OK):\n raise ValueError(\n 'Unable to open dir: [{0:s}], cannot open it for '\n 'read, please check permissions.'.format(folder))\n\n return rules_path\n\n\ndef get_sigma_rules(rule_folder, sigma_config=None):\n \"\"\"Returns the Sigma rules for a folder including subfolders.\n Args:\n rule_folder: folder to be checked for rules\n sigma_config: optional argument to pass a\n sigma.configuration.SigmaConfiguration object\n Returns:\n A array of Sigma rules as JSON\n Raises:\n 
ValueError: If SIGMA_RULES_FOLDERS is not found in the config file.\n or the folders are not readabale.\n \"\"\"\n return_array = []\n\n for dirpath, dirnames, files in os.walk(rule_folder):\n if 'deprecated' in [x.lower() for x in dirnames]:\n dirnames.remove('deprecated')\n\n for rule_filename in files:\n if rule_filename.lower().endswith('.yml'):\n # if a sub dir is found, do not try to parse it.\n if os.path.isdir(os.path.join(dirpath, rule_filename)):\n continue\n\n rule_file_path = os.path.join(dirpath, rule_filename)\n parsed_rule = get_sigma_rule(rule_file_path, sigma_config)\n if parsed_rule:\n return_array.append(parsed_rule)\n\n return return_array\n\n\ndef get_all_sigma_rules():\n \"\"\"Returns all Sigma rules\n\n Returns:\n A array of Sigma rules\n\n Raises:\n ValueError: If SIGMA_RULES_FOLDERS is not found in the config file.\n or the folders are not readabale.\n \"\"\"\n sigma_rules = []\n\n rules_paths = get_sigma_rules_path()\n\n for folder in rules_paths:\n sigma_rules.extend(get_sigma_rules(folder))\n\n return sigma_rules\n\n\ndef get_sigma_rule(filepath, sigma_config=None):\n \"\"\"Returns a JSON represenation for a rule\n Args:\n filepath: path to the sigma rule to be parsed\n sigma_config: optional argument to pass a\n sigma.configuration.SigmaConfiguration object\n Returns:\n Json representation of the parsed rule\n Raises:\n ValueError: Parsing error\n IsADirectoryError: If a directory is passed as filepath\n \"\"\"\n try:\n if isinstance(sigma_config, sigma_configuration.SigmaConfiguration):\n sigma_conf_obj = sigma_config\n elif isinstance(sigma_config, str):\n sigma_conf_obj = get_sigma_config_file(sigma_config)\n else:\n sigma_conf_obj = get_sigma_config_file()\n except ValueError as e:\n logger.error(\n 'Problem reading the Sigma config', exc_info=True)\n raise ValueError('Problem reading the Sigma config') from e\n\n\n sigma_backend = sigma_es.ElasticsearchQuerystringBackend(sigma_conf_obj, {})\n\n try:\n sigma_rules_paths = get_sigma_rules_path()\n except ValueError:\n sigma_rules_paths = None\n\n if not filepath.lower().endswith('.yml'):\n raise ValueError(f'{filepath} does not end with .yml')\n\n # if a sub dir is found, nothing can be parsed\n if os.path.isdir(filepath):\n raise IsADirectoryError(f'{filepath} is a directory - must be a file')\n\n abs_path = os.path.abspath(filepath)\n\n with codecs.open(\n abs_path, 'r', encoding='utf-8', errors='replace') as file:\n try:\n rule_return = {}\n rule_yaml_data = yaml.safe_load_all(file.read())\n for doc in rule_yaml_data:\n rule_return.update(doc)\n parser = sigma_collection.SigmaCollectionParser(\n str(doc), sigma_conf_obj, None)\n parsed_sigma_rules = parser.generate(sigma_backend)\n\n except NotImplementedError as exception:\n logger.error(\n 'Error generating rule in file {0:s}: {1!s}'\n .format(abs_path, exception))\n raise\n\n except sigma_exceptions.SigmaParseError as exception:\n logger.error(\n 'Sigma parsing error generating rule in file {0:s}: {1!s}'\n .format(abs_path, exception))\n raise\n\n except yaml.parser.ParserError as exception:\n logger.error(\n 'Yaml parsing error generating rule in file {0:s}: {1!s}'\n .format(abs_path, exception))\n return None\n\n sigma_es_query = ''\n\n for sigma_rule in parsed_sigma_rules:\n sigma_es_query = sigma_rule\n\n rule_return.update(\n {'es_query':sigma_es_query})\n rule_return.update(\n {'file_name':os.path.basename(filepath)})\n\n # in case multiple folders are in the config, need to remove them\n if sigma_rules_paths:\n for rule_path in 
sigma_rules_paths:\n file_relpath = os.path.relpath(filepath, rule_path)\n else:\n file_relpath = 'N/A'\n\n rule_return.update(\n {'file_relpath':file_relpath})\n\n return rule_return\n\ndef get_sigma_rule_by_text(rule_text, sigma_config=None):\n \"\"\"Returns a JSON represenation for a rule\n\n Args:\n rule_text: Text of the sigma rule to be parsed\n sigma_config: config file object\n\n Returns:\n Json representation of the parsed rule\n Raises:\n sigma_exceptions.SigmaParseError: Issue with parsing the given rule\n yaml.parser.ParserError: Not a correct YAML text provided\n NotImplementedError: A feature in the provided Sigma rule is not\n implemented in Sigma for Timesketch\n \"\"\"\n try:\n if isinstance(sigma_config, sigma_configuration.SigmaConfiguration):\n sigma_conf_obj = sigma_config\n elif isinstance(sigma_config, str):\n sigma_conf_obj = get_sigma_config_file(sigma_config)\n else:\n sigma_conf_obj = get_sigma_config_file()\n except ValueError as e:\n logger.error(\n 'Problem reading the Sigma config', exc_info=True)\n raise ValueError('Problem reading the Sigma config') from e\n\n sigma_backend = sigma_es.ElasticsearchQuerystringBackend(sigma_conf_obj, {})\n\n rule_return = {}\n\n # TODO check if input validation is needed / useful.\n try:\n parser = sigma_collection.SigmaCollectionParser(\n rule_text, sigma_conf_obj, None)\n parsed_sigma_rules = parser.generate(sigma_backend)\n rule_yaml_data = yaml.safe_load_all(rule_text)\n for doc in rule_yaml_data:\n rule_return.update(doc)\n\n except NotImplementedError as exception:\n logger.error(\n 'Error generating rule {0!s}'.format(exception))\n raise\n\n except sigma_exceptions.SigmaParseError as exception:\n logger.error(\n 'Sigma parsing error generating rule {0!s}'\n .format(exception))\n raise\n\n except yaml.parser.ParserError as exception:\n logger.error(\n 'Yaml parsing error generating rule {0!s}'.format(exception))\n raise\n\n sigma_es_query = ''\n\n for sigma_rule in parsed_sigma_rules:\n sigma_es_query = sigma_rule\n\n rule_return.update(\n {'es_query':sigma_es_query})\n rule_return.update(\n {'file_name':'N/A'})\n rule_return.update(\n {'file_relpath':'N/A'})\n\n return rule_return\n", "path": "timesketch/lib/sigma_util.py"}]} | 3,589 | 504 |
gh_patches_debug_35399 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3107 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Does not work on Windows
## Description
This cannot work on Windows because of the pglast dependency. Unfortunately, this appears to be an issue in the underlying libpg_query library and will probably never be fixed there: https://github.com/pganalyze/libpg_query/issues/44
## Expected behavior
To be able to install the dependencies on Windows
## To Reproduce
Try to install the requirements on Windows
## Environment
Windows 10, Python 3.9.
## Additional context
If I remove the pglast dependency, `pip install -r requirements-dev.txt` works fine. I believe this can be resolved by removing pglast *completely*, since it is used only once. I'd be happy to discuss this further.
--- END ISSUE ---
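For reference, the single pglast usage the issue mentions is the `_is_default_expr_dynamic` helper near the end of `db/columns/operations/select.py` (shown in the files below). A minimal sketch of what that helper does with pglast — the sample expression and the resulting tag set are illustrative only, and pglast itself wraps the native libpg_query parser, which is what fails to build on Windows:

```python
# Sketch of the pglast-based "is this column default dynamic?" check.
# Mirrors _is_default_expr_dynamic in db/columns/operations/select.py.
from pglast import Node, parse_sql

prepared_expr = "SELECT now();"  # a server default such as now() is "dynamic"
expr_ast_root = Node(parse_sql(prepared_expr))
ast_node_tags = {
    n.node_tag for n in expr_ast_root.traverse() if isinstance(n, Node)
}
# now() parses to a FuncCall node, so this default would be flagged as dynamic.
print(not ast_node_tags.isdisjoint({"SQLValueFunction", "FuncCall"}))  # True
```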
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/columns/operations/select.py`
Content:
```
1 import warnings
2
3 from pglast import Node, parse_sql
4 from sqlalchemy import and_, asc, cast, select, text, exists
5
6 from db.columns.exceptions import DynamicDefaultWarning
7 from db.tables.operations.select import reflect_table_from_oid
8 from db.utils import execute_statement, get_pg_catalog_table
9
10 # These tags define which nodes in the AST built by pglast we consider to be
11 # "dynamic" when found in a column default clause. The nodes are best
12 # documented by C header files that define the underlying structs:
13 # https://github.com/pganalyze/libpg_query/blob/13-latest/src/postgres/include/nodes/parsenodes.h
14 # https://github.com/pganalyze/libpg_query/blob/13-latest/src/postgres/include/nodes/primnodes.h
15 # It's possible that more dynamic nodes will be found. Their tags should be
16 # added to this set.
17 DYNAMIC_NODE_TAGS = {"SQLValueFunction", "FuncCall"}
18
19
20 def get_column_attnum_from_names_as_map(table_oid, column_names, engine, metadata, connection_to_use=None):
21 statement = _get_columns_attnum_from_names(table_oid, column_names, engine, metadata=metadata)
22 attnums_tuple = execute_statement(engine, statement, connection_to_use).fetchall()
23 name_attnum_map = {attnum_tuple['attname']: attnum_tuple['attnum'] for attnum_tuple in attnums_tuple}
24 return name_attnum_map
25
26
27 def get_columns_attnum_from_names(table_oid, column_names, engine, metadata, connection_to_use=None):
28 """
29 Returns the respective list of attnum of the column names passed.
30 The order is based on the column order in the table and not by the order of the column names argument.
31 """
32 statement = _get_columns_attnum_from_names(table_oid, column_names, engine=engine, metadata=metadata)
33 attnums_tuple = execute_statement(engine, statement, connection_to_use).fetchall()
34 attnums = [attnum_tuple[0] for attnum_tuple in attnums_tuple]
35 return attnums
36
37
38 def get_column_attnum_from_name(table_oid, column_name, engine, metadata, connection_to_use=None):
39 statement = _get_columns_attnum_from_names(table_oid, [column_name], engine=engine, metadata=metadata)
40 return execute_statement(engine, statement, connection_to_use).scalar()
41
42
43 def _get_columns_attnum_from_names(table_oid, column_names, engine, metadata):
44 pg_attribute = get_pg_catalog_table("pg_attribute", engine=engine, metadata=metadata)
45 sel = select(pg_attribute.c.attnum, pg_attribute.c.attname).where(
46 and_(
47 pg_attribute.c.attrelid == table_oid,
48 pg_attribute.c.attname.in_(column_names)
49 )
50 ).order_by(asc(pg_attribute.c.attnum))
51 return sel
52
53
54 def get_column_attnums_from_tables(table_oids, engine, metadata, connection_to_use=None):
55 pg_attribute = get_pg_catalog_table("pg_attribute", engine, metadata=metadata)
56 sel = select(pg_attribute.c.attnum, pg_attribute.c.attrelid.label('table_oid')).where(
57 and_(
58 pg_attribute.c.attrelid.in_(table_oids),
59 # Ignore system columns
60 pg_attribute.c.attnum > 0,
61 # Ignore removed columns
62 pg_attribute.c.attisdropped.is_(False)
63 )
64 )
65 results = execute_statement(engine, sel, connection_to_use).fetchall()
66 return results
67
68
69 def get_map_of_attnum_and_table_oid_to_column_name(table_oids, engine, metadata, connection_to_use=None):
70 """
71 Order determined by the column order in the table.
72 """
73 triples_of_col_info = _get_triples_of_column_name_and_attnum_and_table_oid(
74 table_oids, None, engine, metadata, connection_to_use
75 )
76 return {
77 (attnum, table_oid): column_name
78 for column_name, attnum, table_oid
79 in triples_of_col_info
80 }
81
82
83 def get_column_names_from_attnums(table_oid, attnums, engine, metadata, connection_to_use=None):
84 return list(get_map_of_attnum_to_column_name(table_oid, attnums, engine, metadata, connection_to_use).values())
85
86
87 def get_map_of_attnum_to_column_name(table_oid, attnums, engine, metadata, connection_to_use=None):
88 """
89 Order determined by the column order in the table.
90 """
91 triples_of_col_info = _get_triples_of_column_name_and_attnum_and_table_oid(
92 [table_oid], attnums, engine, metadata, connection_to_use
93 )
94 return {
95 attnum: column_name
96 for column_name, attnum, _
97 in triples_of_col_info
98 }
99
100
101 def _get_triples_of_column_name_and_attnum_and_table_oid(
102 table_oids, attnums, engine, metadata, connection_to_use
103 ):
104 statement = _statement_for_triples_of_column_name_and_attnum_and_table_oid(
105 table_oids, attnums, engine, metadata
106 )
107 return execute_statement(engine, statement, connection_to_use).fetchall()
108
109
110 def get_column_default(table_oid, attnum, engine, metadata, connection_to_use=None):
111 default_dict = get_column_default_dict(
112 table_oid,
113 attnum,
114 engine,
115 metadata=metadata,
116 connection_to_use=connection_to_use,
117 )
118 if default_dict is not None:
119 return default_dict['value']
120
121
122 def get_column_default_dict(table_oid, attnum, engine, metadata, connection_to_use=None):
123 column = get_column_from_oid_and_attnum(
124 table_oid=table_oid,
125 attnum=attnum,
126 engine=engine,
127 metadata=metadata,
128 connection_to_use=connection_to_use,
129 )
130 if column.server_default is None:
131 return
132
133 is_dynamic = _is_default_expr_dynamic(column.server_default)
134 sql_text = str(column.server_default.arg)
135
136 if is_dynamic:
137 warnings.warn(
138 "Dynamic column defaults are read only", DynamicDefaultWarning
139 )
140 default_value = sql_text
141 else:
142 # Defaults are often stored as text with SQL casts appended
143 # Ex: "'test default string'::character varying" or "'2020-01-01'::date"
144 # Here, we execute the cast to get the proper python value
145 default_value = execute_statement(
146 engine,
147 select(cast(text(sql_text), column.type)),
148 connection_to_use
149 ).scalar()
150
151 return {"value": default_value, "is_dynamic": is_dynamic}
152
153
154 def determine_whether_column_contains_data(
155 table_oid, column_name, engine, metadata, connection_to_use=None
156 ):
157 """
158 Given a column, return True if it contains data, False otherwise.
159 """
160 sa_table = reflect_table_from_oid(
161 table_oid, engine, metadata=metadata, connection_to_use=connection_to_use,
162 )
163 sel = select(exists(1).where(sa_table.columns[column_name] != None)) # noqa
164 contains_data = execute_statement(engine, sel, connection_to_use).scalar()
165 return contains_data
166
167
168 def get_column_from_oid_and_attnum(table_oid, attnum, engine, metadata, connection_to_use=None):
169 sa_table = reflect_table_from_oid(table_oid, engine, metadata=metadata, connection_to_use=connection_to_use)
170 column_name = get_column_name_from_attnum(table_oid, attnum, engine, metadata=metadata, connection_to_use=connection_to_use)
171 sa_column = sa_table.columns[column_name]
172 return sa_column
173
174
175 def get_column_name_from_attnum(table_oid, attnum, engine, metadata, connection_to_use=None):
176 statement = _statement_for_triples_of_column_name_and_attnum_and_table_oid(
177 [table_oid], [attnum], engine, metadata=metadata,
178 )
179 column_name = execute_statement(engine, statement, connection_to_use).scalar()
180 return column_name
181
182
183 def _statement_for_triples_of_column_name_and_attnum_and_table_oid(
184 table_oids, attnums, engine, metadata
185 ):
186 """
187 Returns (column name, column attnum, column table's oid) tuples for each column that's in the
188 tables specified via `table_oids`, and, when `attnums` is not None, that has an attnum
189 specified in `attnums`.
190
191 The order is based on the column order in the table and not on the order of the arguments.
192 """
193 pg_attribute = get_pg_catalog_table("pg_attribute", engine, metadata=metadata)
194 sel = select(pg_attribute.c.attname, pg_attribute.c.attnum, pg_attribute.c.attrelid)
195 wasnt_dropped = pg_attribute.c.attisdropped.is_(False)
196 table_oid_matches = pg_attribute.c.attrelid.in_(table_oids)
197 conditions = [wasnt_dropped, table_oid_matches]
198 if attnums is not None:
199 attnum_matches = pg_attribute.c.attnum.in_(attnums)
200 conditions.append(attnum_matches)
201 else:
202 attnum_positive = pg_attribute.c.attnum > 0
203 conditions.append(attnum_positive)
204 sel = sel.where(and_(*conditions))
205 return sel
206
207
208 def _is_default_expr_dynamic(server_default):
209 prepared_expr = f"""SELECT {server_default.arg.text};"""
210 expr_ast_root = Node(parse_sql(prepared_expr))
211 ast_nodes = {
212 n.node_tag for n in expr_ast_root.traverse() if isinstance(n, Node)
213 }
214 return not ast_nodes.isdisjoint(DYNAMIC_NODE_TAGS)
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/columns/operations/select.py b/db/columns/operations/select.py
--- a/db/columns/operations/select.py
+++ b/db/columns/operations/select.py
@@ -1,21 +1,12 @@
import warnings
-from pglast import Node, parse_sql
from sqlalchemy import and_, asc, cast, select, text, exists
from db.columns.exceptions import DynamicDefaultWarning
+from db.connection import execute_msar_func_with_engine
from db.tables.operations.select import reflect_table_from_oid
from db.utils import execute_statement, get_pg_catalog_table
-# These tags define which nodes in the AST built by pglast we consider to be
-# "dynamic" when found in a column default clause. The nodes are best
-# documented by C header files that define the underlying structs:
-# https://github.com/pganalyze/libpg_query/blob/13-latest/src/postgres/include/nodes/parsenodes.h
-# https://github.com/pganalyze/libpg_query/blob/13-latest/src/postgres/include/nodes/primnodes.h
-# It's possible that more dynamic nodes will be found. Their tags should be
-# added to this set.
-DYNAMIC_NODE_TAGS = {"SQLValueFunction", "FuncCall"}
-
def get_column_attnum_from_names_as_map(table_oid, column_names, engine, metadata, connection_to_use=None):
statement = _get_columns_attnum_from_names(table_oid, column_names, engine, metadata=metadata)
@@ -127,10 +118,13 @@
metadata=metadata,
connection_to_use=connection_to_use,
)
+
if column.server_default is None:
return
- is_dynamic = _is_default_expr_dynamic(column.server_default)
+ is_dynamic = execute_msar_func_with_engine(
+ engine, 'is_default_possibly_dynamic', table_oid, attnum
+ ).fetchone()[0]
sql_text = str(column.server_default.arg)
if is_dynamic:
@@ -203,12 +197,3 @@
conditions.append(attnum_positive)
sel = sel.where(and_(*conditions))
return sel
-
-
-def _is_default_expr_dynamic(server_default):
- prepared_expr = f"""SELECT {server_default.arg.text};"""
- expr_ast_root = Node(parse_sql(prepared_expr))
- ast_nodes = {
- n.node_tag for n in expr_ast_root.traverse() if isinstance(n, Node)
- }
- return not ast_nodes.isdisjoint(DYNAMIC_NODE_TAGS)
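In the patched version, the question is answered by the database instead of by parsing SQL in Python, which removes the only dependency that cannot be built on Windows. A minimal sketch of the new call path, mirroring the diff above — the `is_default_possibly_dynamic` SQL function itself is defined elsewhere in the repository and is not shown here:

```python
# Sketch: delegate the dynamic-default check to a SQL function shipped with the
# project, invoked through the existing msar helper (as in the diff above).
from db.connection import execute_msar_func_with_engine

def default_is_dynamic(engine, table_oid, attnum):
    # Returns True when the column default may evaluate differently over time
    # (e.g. now() or nextval()), matching the old pglast-based behaviour.
    return execute_msar_func_with_engine(
        engine, "is_default_possibly_dynamic", table_oid, attnum
    ).fetchone()[0]
```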
| {"golden_diff": "diff --git a/db/columns/operations/select.py b/db/columns/operations/select.py\n--- a/db/columns/operations/select.py\n+++ b/db/columns/operations/select.py\n@@ -1,21 +1,12 @@\n import warnings\n \n-from pglast import Node, parse_sql\n from sqlalchemy import and_, asc, cast, select, text, exists\n \n from db.columns.exceptions import DynamicDefaultWarning\n+from db.connection import execute_msar_func_with_engine\n from db.tables.operations.select import reflect_table_from_oid\n from db.utils import execute_statement, get_pg_catalog_table\n \n-# These tags define which nodes in the AST built by pglast we consider to be\n-# \"dynamic\" when found in a column default clause. The nodes are best\n-# documented by C header files that define the underlying structs:\n-# https://github.com/pganalyze/libpg_query/blob/13-latest/src/postgres/include/nodes/parsenodes.h\n-# https://github.com/pganalyze/libpg_query/blob/13-latest/src/postgres/include/nodes/primnodes.h\n-# It's possible that more dynamic nodes will be found. Their tags should be\n-# added to this set.\n-DYNAMIC_NODE_TAGS = {\"SQLValueFunction\", \"FuncCall\"}\n-\n \n def get_column_attnum_from_names_as_map(table_oid, column_names, engine, metadata, connection_to_use=None):\n statement = _get_columns_attnum_from_names(table_oid, column_names, engine, metadata=metadata)\n@@ -127,10 +118,13 @@\n metadata=metadata,\n connection_to_use=connection_to_use,\n )\n+\n if column.server_default is None:\n return\n \n- is_dynamic = _is_default_expr_dynamic(column.server_default)\n+ is_dynamic = execute_msar_func_with_engine(\n+ engine, 'is_default_possibly_dynamic', table_oid, attnum\n+ ).fetchone()[0]\n sql_text = str(column.server_default.arg)\n \n if is_dynamic:\n@@ -203,12 +197,3 @@\n conditions.append(attnum_positive)\n sel = sel.where(and_(*conditions))\n return sel\n-\n-\n-def _is_default_expr_dynamic(server_default):\n- prepared_expr = f\"\"\"SELECT {server_default.arg.text};\"\"\"\n- expr_ast_root = Node(parse_sql(prepared_expr))\n- ast_nodes = {\n- n.node_tag for n in expr_ast_root.traverse() if isinstance(n, Node)\n- }\n- return not ast_nodes.isdisjoint(DYNAMIC_NODE_TAGS)\n", "issue": "Does not work on windows\n## Description\r\nThis cannot work on Windows because of the pglast dependency. Unfortunately it seems that this is an issue of the libpg_query library and probably will never be fixed: https://github.com/pganalyze/libpg_query/issues/44\r\n\r\n## Expected behavior\r\nTo be able to install the dependencies on windows\r\n\r\n## To Reproduce\r\nTry to instal requirements on windows\r\n\r\n## Environment\r\nWindows 10, python 3.9.\r\n\r\n## Additional context\r\nIf I remove the pglast dependency `pip install -r requirements-dev.txt` works fine. I believe that this can be resolved by removing pglast *completely* since it's used only once. I'd be happy to discuss this more.\r\n\r\n\n", "before_files": [{"content": "import warnings\n\nfrom pglast import Node, parse_sql\nfrom sqlalchemy import and_, asc, cast, select, text, exists\n\nfrom db.columns.exceptions import DynamicDefaultWarning\nfrom db.tables.operations.select import reflect_table_from_oid\nfrom db.utils import execute_statement, get_pg_catalog_table\n\n# These tags define which nodes in the AST built by pglast we consider to be\n# \"dynamic\" when found in a column default clause. 
The nodes are best\n# documented by C header files that define the underlying structs:\n# https://github.com/pganalyze/libpg_query/blob/13-latest/src/postgres/include/nodes/parsenodes.h\n# https://github.com/pganalyze/libpg_query/blob/13-latest/src/postgres/include/nodes/primnodes.h\n# It's possible that more dynamic nodes will be found. Their tags should be\n# added to this set.\nDYNAMIC_NODE_TAGS = {\"SQLValueFunction\", \"FuncCall\"}\n\n\ndef get_column_attnum_from_names_as_map(table_oid, column_names, engine, metadata, connection_to_use=None):\n statement = _get_columns_attnum_from_names(table_oid, column_names, engine, metadata=metadata)\n attnums_tuple = execute_statement(engine, statement, connection_to_use).fetchall()\n name_attnum_map = {attnum_tuple['attname']: attnum_tuple['attnum'] for attnum_tuple in attnums_tuple}\n return name_attnum_map\n\n\ndef get_columns_attnum_from_names(table_oid, column_names, engine, metadata, connection_to_use=None):\n \"\"\"\n Returns the respective list of attnum of the column names passed.\n The order is based on the column order in the table and not by the order of the column names argument.\n \"\"\"\n statement = _get_columns_attnum_from_names(table_oid, column_names, engine=engine, metadata=metadata)\n attnums_tuple = execute_statement(engine, statement, connection_to_use).fetchall()\n attnums = [attnum_tuple[0] for attnum_tuple in attnums_tuple]\n return attnums\n\n\ndef get_column_attnum_from_name(table_oid, column_name, engine, metadata, connection_to_use=None):\n statement = _get_columns_attnum_from_names(table_oid, [column_name], engine=engine, metadata=metadata)\n return execute_statement(engine, statement, connection_to_use).scalar()\n\n\ndef _get_columns_attnum_from_names(table_oid, column_names, engine, metadata):\n pg_attribute = get_pg_catalog_table(\"pg_attribute\", engine=engine, metadata=metadata)\n sel = select(pg_attribute.c.attnum, pg_attribute.c.attname).where(\n and_(\n pg_attribute.c.attrelid == table_oid,\n pg_attribute.c.attname.in_(column_names)\n )\n ).order_by(asc(pg_attribute.c.attnum))\n return sel\n\n\ndef get_column_attnums_from_tables(table_oids, engine, metadata, connection_to_use=None):\n pg_attribute = get_pg_catalog_table(\"pg_attribute\", engine, metadata=metadata)\n sel = select(pg_attribute.c.attnum, pg_attribute.c.attrelid.label('table_oid')).where(\n and_(\n pg_attribute.c.attrelid.in_(table_oids),\n # Ignore system columns\n pg_attribute.c.attnum > 0,\n # Ignore removed columns\n pg_attribute.c.attisdropped.is_(False)\n )\n )\n results = execute_statement(engine, sel, connection_to_use).fetchall()\n return results\n\n\ndef get_map_of_attnum_and_table_oid_to_column_name(table_oids, engine, metadata, connection_to_use=None):\n \"\"\"\n Order determined by the column order in the table.\n \"\"\"\n triples_of_col_info = _get_triples_of_column_name_and_attnum_and_table_oid(\n table_oids, None, engine, metadata, connection_to_use\n )\n return {\n (attnum, table_oid): column_name\n for column_name, attnum, table_oid\n in triples_of_col_info\n }\n\n\ndef get_column_names_from_attnums(table_oid, attnums, engine, metadata, connection_to_use=None):\n return list(get_map_of_attnum_to_column_name(table_oid, attnums, engine, metadata, connection_to_use).values())\n\n\ndef get_map_of_attnum_to_column_name(table_oid, attnums, engine, metadata, connection_to_use=None):\n \"\"\"\n Order determined by the column order in the table.\n \"\"\"\n triples_of_col_info = _get_triples_of_column_name_and_attnum_and_table_oid(\n 
[table_oid], attnums, engine, metadata, connection_to_use\n )\n return {\n attnum: column_name\n for column_name, attnum, _\n in triples_of_col_info\n }\n\n\ndef _get_triples_of_column_name_and_attnum_and_table_oid(\n table_oids, attnums, engine, metadata, connection_to_use\n):\n statement = _statement_for_triples_of_column_name_and_attnum_and_table_oid(\n table_oids, attnums, engine, metadata\n )\n return execute_statement(engine, statement, connection_to_use).fetchall()\n\n\ndef get_column_default(table_oid, attnum, engine, metadata, connection_to_use=None):\n default_dict = get_column_default_dict(\n table_oid,\n attnum,\n engine,\n metadata=metadata,\n connection_to_use=connection_to_use,\n )\n if default_dict is not None:\n return default_dict['value']\n\n\ndef get_column_default_dict(table_oid, attnum, engine, metadata, connection_to_use=None):\n column = get_column_from_oid_and_attnum(\n table_oid=table_oid,\n attnum=attnum,\n engine=engine,\n metadata=metadata,\n connection_to_use=connection_to_use,\n )\n if column.server_default is None:\n return\n\n is_dynamic = _is_default_expr_dynamic(column.server_default)\n sql_text = str(column.server_default.arg)\n\n if is_dynamic:\n warnings.warn(\n \"Dynamic column defaults are read only\", DynamicDefaultWarning\n )\n default_value = sql_text\n else:\n # Defaults are often stored as text with SQL casts appended\n # Ex: \"'test default string'::character varying\" or \"'2020-01-01'::date\"\n # Here, we execute the cast to get the proper python value\n default_value = execute_statement(\n engine,\n select(cast(text(sql_text), column.type)),\n connection_to_use\n ).scalar()\n\n return {\"value\": default_value, \"is_dynamic\": is_dynamic}\n\n\ndef determine_whether_column_contains_data(\n table_oid, column_name, engine, metadata, connection_to_use=None\n):\n \"\"\"\n Given a column, return True if it contains data, False otherwise.\n \"\"\"\n sa_table = reflect_table_from_oid(\n table_oid, engine, metadata=metadata, connection_to_use=connection_to_use,\n )\n sel = select(exists(1).where(sa_table.columns[column_name] != None)) # noqa\n contains_data = execute_statement(engine, sel, connection_to_use).scalar()\n return contains_data\n\n\ndef get_column_from_oid_and_attnum(table_oid, attnum, engine, metadata, connection_to_use=None):\n sa_table = reflect_table_from_oid(table_oid, engine, metadata=metadata, connection_to_use=connection_to_use)\n column_name = get_column_name_from_attnum(table_oid, attnum, engine, metadata=metadata, connection_to_use=connection_to_use)\n sa_column = sa_table.columns[column_name]\n return sa_column\n\n\ndef get_column_name_from_attnum(table_oid, attnum, engine, metadata, connection_to_use=None):\n statement = _statement_for_triples_of_column_name_and_attnum_and_table_oid(\n [table_oid], [attnum], engine, metadata=metadata,\n )\n column_name = execute_statement(engine, statement, connection_to_use).scalar()\n return column_name\n\n\ndef _statement_for_triples_of_column_name_and_attnum_and_table_oid(\n table_oids, attnums, engine, metadata\n):\n \"\"\"\n Returns (column name, column attnum, column table's oid) tuples for each column that's in the\n tables specified via `table_oids`, and, when `attnums` is not None, that has an attnum\n specified in `attnums`.\n\n The order is based on the column order in the table and not on the order of the arguments.\n \"\"\"\n pg_attribute = get_pg_catalog_table(\"pg_attribute\", engine, metadata=metadata)\n sel = select(pg_attribute.c.attname, pg_attribute.c.attnum, 
pg_attribute.c.attrelid)\n wasnt_dropped = pg_attribute.c.attisdropped.is_(False)\n table_oid_matches = pg_attribute.c.attrelid.in_(table_oids)\n conditions = [wasnt_dropped, table_oid_matches]\n if attnums is not None:\n attnum_matches = pg_attribute.c.attnum.in_(attnums)\n conditions.append(attnum_matches)\n else:\n attnum_positive = pg_attribute.c.attnum > 0\n conditions.append(attnum_positive)\n sel = sel.where(and_(*conditions))\n return sel\n\n\ndef _is_default_expr_dynamic(server_default):\n prepared_expr = f\"\"\"SELECT {server_default.arg.text};\"\"\"\n expr_ast_root = Node(parse_sql(prepared_expr))\n ast_nodes = {\n n.node_tag for n in expr_ast_root.traverse() if isinstance(n, Node)\n }\n return not ast_nodes.isdisjoint(DYNAMIC_NODE_TAGS)\n", "path": "db/columns/operations/select.py"}], "after_files": [{"content": "import warnings\n\nfrom sqlalchemy import and_, asc, cast, select, text, exists\n\nfrom db.columns.exceptions import DynamicDefaultWarning\nfrom db.connection import execute_msar_func_with_engine\nfrom db.tables.operations.select import reflect_table_from_oid\nfrom db.utils import execute_statement, get_pg_catalog_table\n\n\ndef get_column_attnum_from_names_as_map(table_oid, column_names, engine, metadata, connection_to_use=None):\n statement = _get_columns_attnum_from_names(table_oid, column_names, engine, metadata=metadata)\n attnums_tuple = execute_statement(engine, statement, connection_to_use).fetchall()\n name_attnum_map = {attnum_tuple['attname']: attnum_tuple['attnum'] for attnum_tuple in attnums_tuple}\n return name_attnum_map\n\n\ndef get_columns_attnum_from_names(table_oid, column_names, engine, metadata, connection_to_use=None):\n \"\"\"\n Returns the respective list of attnum of the column names passed.\n The order is based on the column order in the table and not by the order of the column names argument.\n \"\"\"\n statement = _get_columns_attnum_from_names(table_oid, column_names, engine=engine, metadata=metadata)\n attnums_tuple = execute_statement(engine, statement, connection_to_use).fetchall()\n attnums = [attnum_tuple[0] for attnum_tuple in attnums_tuple]\n return attnums\n\n\ndef get_column_attnum_from_name(table_oid, column_name, engine, metadata, connection_to_use=None):\n statement = _get_columns_attnum_from_names(table_oid, [column_name], engine=engine, metadata=metadata)\n return execute_statement(engine, statement, connection_to_use).scalar()\n\n\ndef _get_columns_attnum_from_names(table_oid, column_names, engine, metadata):\n pg_attribute = get_pg_catalog_table(\"pg_attribute\", engine=engine, metadata=metadata)\n sel = select(pg_attribute.c.attnum, pg_attribute.c.attname).where(\n and_(\n pg_attribute.c.attrelid == table_oid,\n pg_attribute.c.attname.in_(column_names)\n )\n ).order_by(asc(pg_attribute.c.attnum))\n return sel\n\n\ndef get_column_attnums_from_tables(table_oids, engine, metadata, connection_to_use=None):\n pg_attribute = get_pg_catalog_table(\"pg_attribute\", engine, metadata=metadata)\n sel = select(pg_attribute.c.attnum, pg_attribute.c.attrelid.label('table_oid')).where(\n and_(\n pg_attribute.c.attrelid.in_(table_oids),\n # Ignore system columns\n pg_attribute.c.attnum > 0,\n # Ignore removed columns\n pg_attribute.c.attisdropped.is_(False)\n )\n )\n results = execute_statement(engine, sel, connection_to_use).fetchall()\n return results\n\n\ndef get_map_of_attnum_and_table_oid_to_column_name(table_oids, engine, metadata, connection_to_use=None):\n \"\"\"\n Order determined by the column order in the table.\n \"\"\"\n 
triples_of_col_info = _get_triples_of_column_name_and_attnum_and_table_oid(\n table_oids, None, engine, metadata, connection_to_use\n )\n return {\n (attnum, table_oid): column_name\n for column_name, attnum, table_oid\n in triples_of_col_info\n }\n\n\ndef get_column_names_from_attnums(table_oid, attnums, engine, metadata, connection_to_use=None):\n return list(get_map_of_attnum_to_column_name(table_oid, attnums, engine, metadata, connection_to_use).values())\n\n\ndef get_map_of_attnum_to_column_name(table_oid, attnums, engine, metadata, connection_to_use=None):\n \"\"\"\n Order determined by the column order in the table.\n \"\"\"\n triples_of_col_info = _get_triples_of_column_name_and_attnum_and_table_oid(\n [table_oid], attnums, engine, metadata, connection_to_use\n )\n return {\n attnum: column_name\n for column_name, attnum, _\n in triples_of_col_info\n }\n\n\ndef _get_triples_of_column_name_and_attnum_and_table_oid(\n table_oids, attnums, engine, metadata, connection_to_use\n):\n statement = _statement_for_triples_of_column_name_and_attnum_and_table_oid(\n table_oids, attnums, engine, metadata\n )\n return execute_statement(engine, statement, connection_to_use).fetchall()\n\n\ndef get_column_default(table_oid, attnum, engine, metadata, connection_to_use=None):\n default_dict = get_column_default_dict(\n table_oid,\n attnum,\n engine,\n metadata=metadata,\n connection_to_use=connection_to_use,\n )\n if default_dict is not None:\n return default_dict['value']\n\n\ndef get_column_default_dict(table_oid, attnum, engine, metadata, connection_to_use=None):\n column = get_column_from_oid_and_attnum(\n table_oid=table_oid,\n attnum=attnum,\n engine=engine,\n metadata=metadata,\n connection_to_use=connection_to_use,\n )\n\n if column.server_default is None:\n return\n\n is_dynamic = execute_msar_func_with_engine(\n engine, 'is_default_possibly_dynamic', table_oid, attnum\n ).fetchone()[0]\n sql_text = str(column.server_default.arg)\n\n if is_dynamic:\n warnings.warn(\n \"Dynamic column defaults are read only\", DynamicDefaultWarning\n )\n default_value = sql_text\n else:\n # Defaults are often stored as text with SQL casts appended\n # Ex: \"'test default string'::character varying\" or \"'2020-01-01'::date\"\n # Here, we execute the cast to get the proper python value\n default_value = execute_statement(\n engine,\n select(cast(text(sql_text), column.type)),\n connection_to_use\n ).scalar()\n\n return {\"value\": default_value, \"is_dynamic\": is_dynamic}\n\n\ndef determine_whether_column_contains_data(\n table_oid, column_name, engine, metadata, connection_to_use=None\n):\n \"\"\"\n Given a column, return True if it contains data, False otherwise.\n \"\"\"\n sa_table = reflect_table_from_oid(\n table_oid, engine, metadata=metadata, connection_to_use=connection_to_use,\n )\n sel = select(exists(1).where(sa_table.columns[column_name] != None)) # noqa\n contains_data = execute_statement(engine, sel, connection_to_use).scalar()\n return contains_data\n\n\ndef get_column_from_oid_and_attnum(table_oid, attnum, engine, metadata, connection_to_use=None):\n sa_table = reflect_table_from_oid(table_oid, engine, metadata=metadata, connection_to_use=connection_to_use)\n column_name = get_column_name_from_attnum(table_oid, attnum, engine, metadata=metadata, connection_to_use=connection_to_use)\n sa_column = sa_table.columns[column_name]\n return sa_column\n\n\ndef get_column_name_from_attnum(table_oid, attnum, engine, metadata, connection_to_use=None):\n statement = 
_statement_for_triples_of_column_name_and_attnum_and_table_oid(\n [table_oid], [attnum], engine, metadata=metadata,\n )\n column_name = execute_statement(engine, statement, connection_to_use).scalar()\n return column_name\n\n\ndef _statement_for_triples_of_column_name_and_attnum_and_table_oid(\n table_oids, attnums, engine, metadata\n):\n \"\"\"\n Returns (column name, column attnum, column table's oid) tuples for each column that's in the\n tables specified via `table_oids`, and, when `attnums` is not None, that has an attnum\n specified in `attnums`.\n\n The order is based on the column order in the table and not on the order of the arguments.\n \"\"\"\n pg_attribute = get_pg_catalog_table(\"pg_attribute\", engine, metadata=metadata)\n sel = select(pg_attribute.c.attname, pg_attribute.c.attnum, pg_attribute.c.attrelid)\n wasnt_dropped = pg_attribute.c.attisdropped.is_(False)\n table_oid_matches = pg_attribute.c.attrelid.in_(table_oids)\n conditions = [wasnt_dropped, table_oid_matches]\n if attnums is not None:\n attnum_matches = pg_attribute.c.attnum.in_(attnums)\n conditions.append(attnum_matches)\n else:\n attnum_positive = pg_attribute.c.attnum > 0\n conditions.append(attnum_positive)\n sel = sel.where(and_(*conditions))\n return sel\n", "path": "db/columns/operations/select.py"}]} | 3,038 | 552 |
gh_patches_debug_5181 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1067 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix lookup_embedding call in embedding layer
elasticdl.layers.Embedding.lookup_embedding still uses old worker.lookup_embedding method.
We need to update it to the revised version.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/elasticdl/layers/embedding.py`
Content:
```
1 import tensorflow as tf
2 from tensorflow.python.keras.utils import tf_utils
3
4
5 class Embedding(tf.keras.layers.Layer):
6 """
7 Input: indexes for the embedding entries with a shape of
8 (batch_size, input_length). Input can be either dense tensor
9 or SparseTensor.
10 Output:
11 corresponding (combined) embeddings with a shape of
12 (batch_size, input_length, output_dim) if combiner is None
13 (batch_size, output_dim) if combiner is not None
14 Arguments:
15 output_dim: the dimension of the embedding vector
16 embedding_initializer: Initializer for embedding table
17 mask_zero: Whether or not the input value 0 is a special "padding"
18 value that should be masked out.
19 If input is SparseTensor, mask_zero must be False.
20 input_length: Length of input sequences, when it is constant.
21 This argument is required if you are going to connect
22 `Flatten` then `Dense` layers upstream
23 (without it, the shape of the dense outputs cannot be computed).
24 combiner: A string specifying the reduction op or None if not used.
25 "mean", "sqrtn" and "sum" are supported for the reduction op.
26 If input is SparseTensor, combiner must set as a reduction op.
27 """
28
29 def __init__(
30 self,
31 output_dim,
32 embedding_initializer="uniform",
33 mask_zero=False,
34 input_length=None,
35 combiner=None,
36 **kwargs
37 ):
38 if "input_shape" not in kwargs and input_length:
39 kwargs["input_shape"] = (input_length,)
40 super(Embedding, self).__init__(**kwargs)
41
42 self.output_dim = output_dim
43 self.embedding_initializer = embedding_initializer
44 self.supports_masking = mask_zero
45 self.input_length = input_length
46 self.combiner = combiner
47 self.tape = None
48 self.worker = None
49 self.bet_ids_pair = []
50
51 @tf_utils.shape_type_conversion
52 def compute_output_shape(self, input_shape):
53 # this function is taken from
54 # tf.keras.layers.Embedding.compute_output_shape
55 # https://github.com/tensorflow/tensorflow/blob/3f3c728bf80e0fd6653744318cbbfe1454c6ddca/tensorflow/python/keras/layers/embeddings.py#L156
56 if self.input_length is None:
57 return input_shape + (self.output_dim,)
58 else:
59 if isinstance(self.input_length, (list, tuple)):
60 in_lens = list(self.input_length)
61 else:
62 in_lens = [self.input_length]
63 if len(in_lens) != len(input_shape) - 1:
64 raise ValueError(
65 '"input_length" is %s, '
66 "but received input has shape %s"
67 % (str(self.input_length), str(input_shape))
68 )
69 else:
70 for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):
71 if s1 is not None and s2 is not None and s1 != s2:
72 raise ValueError(
73 '"input_length" is %s, '
74 "but received input has shape %s"
75 % (str(self.input_length), str(input_shape))
76 )
77 elif s1 is None:
78 in_lens[i] = s2
79 return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)
80
81 @property
82 def name(self):
83 return self._name
84
85 @staticmethod
86 def get_key(name_list):
87 return "-".join(map(str, name_list))
88
89 def lookup_embedding(self, unique_ids):
90 batch_embedding = self.worker.embedding_lookup(
91 unique_ids, self._name, self.embedding_initializer
92 )
93 return batch_embedding
94
95 def call(self, input):
96 if isinstance(input, tf.SparseTensor):
97 return self._sparse_input_call(input)
98
99 ids = tf.convert_to_tensor(input, name="embedding_ids")
100 flat_ids = tf.reshape(ids, [-1])
101 unique_ids, idx = tf.unique(flat_ids)
102 batch_embedding_tensor = tf.py_function(
103 self.lookup_embedding, inp=[unique_ids], Tout=tf.float32
104 )
105 if self.tape:
106 # tape.watch works with eager mode only.
107 # Gradient for embeddings is SparseTensor here due to tf.gather op.
108 # tf.gather accesses tensor slices, resulting in sparse tensor
109 # gradient.
110 if not tf.executing_eagerly():
111 raise RuntimeError("tape.watch only works with eager mode")
112 self.tape.watch(batch_embedding_tensor)
113 self.bet_ids_pair.append((batch_embedding_tensor, unique_ids))
114 outputs = tf.gather(batch_embedding_tensor, idx)
115 outputs = tf.reshape(
116 outputs, ids.get_shape().concatenate(self.output_dim)
117 )
118 # TODO: support combiner for dense input
119 return outputs
120
121 def _sparse_input_call(self, sparse_input):
122 if self.combiner not in ["sum", "mean", "sqrtn"]:
123 raise ValueError(
124 "combiner must set sum, mean or sqrtn for sparse input"
125 )
126 unique_ids, idx = tf.unique(sparse_input.values)
127 embeddings = tf.py_function(
128 self.lookup_embedding, inp=[unique_ids], Tout=tf.float32
129 )
130 if self.tape:
131 # tape.watch works with eager mode only
132 # gradient for embeddings is dense tensor for sparse_input_call
133 if not tf.executing_eagerly():
134 raise RuntimeError("tape.watch only works with eager mode")
135 self.tape.watch(embeddings)
136 self.bet_ids_pair.append((embeddings, unique_ids))
137 segment_ids = sparse_input.indices[:, 0]
138 if segment_ids.dtype != tf.int32:
139 segment_ids = tf.cast(segment_ids, tf.int32)
140
141 if self.combiner == "sum":
142 embeddings = tf.sparse.segment_sum(embeddings, idx, segment_ids)
143 elif self.combiner == "mean":
144 embeddings = tf.sparse.segment_mean(embeddings, idx, segment_ids)
145 elif self.combiner == "sqrtn":
146 embeddings = tf.sparse.segment_sqrt_n(embeddings, idx, segment_ids)
147 return embeddings
148
149 def compute_mask(self, inputs, mask=None):
150 if isinstance(input, tf.SparseTensor):
151 raise ValueError("SparseTensor inputs do not support mask_zero")
152 if not self.supports_masking:
153 return None
154 return tf.math.not_equal(inputs, 0)
155
156 def reset(self):
157 self.bet_ids_pair = []
158 self.tape = None
159
160 def set_tape(self, tape):
161 self.tape = tape
162
163 def set_worker(self, worker):
164 self.worker = worker
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/elasticdl/layers/embedding.py b/elasticdl/python/elasticdl/layers/embedding.py
--- a/elasticdl/python/elasticdl/layers/embedding.py
+++ b/elasticdl/python/elasticdl/layers/embedding.py
@@ -87,8 +87,8 @@
return "-".join(map(str, name_list))
def lookup_embedding(self, unique_ids):
- batch_embedding = self.worker.embedding_lookup(
- unique_ids, self._name, self.embedding_initializer
+ batch_embedding = self.worker.lookup_embedding(
+ unique_ids, self._name, self.embedding_initializer, self.output_dim
)
return batch_embedding
| {"golden_diff": "diff --git a/elasticdl/python/elasticdl/layers/embedding.py b/elasticdl/python/elasticdl/layers/embedding.py\n--- a/elasticdl/python/elasticdl/layers/embedding.py\n+++ b/elasticdl/python/elasticdl/layers/embedding.py\n@@ -87,8 +87,8 @@\n return \"-\".join(map(str, name_list))\n \n def lookup_embedding(self, unique_ids):\n- batch_embedding = self.worker.embedding_lookup(\n- unique_ids, self._name, self.embedding_initializer\n+ batch_embedding = self.worker.lookup_embedding(\n+ unique_ids, self._name, self.embedding_initializer, self.output_dim\n )\n return batch_embedding\n", "issue": "Fix lookup_embedding call in embedding layer\nelasticdl.layers.Embedding.lookup_embedding still uses old worker.lookup_embedding method.\r\nWe need to update it to the revised version.\n", "before_files": [{"content": "import tensorflow as tf\nfrom tensorflow.python.keras.utils import tf_utils\n\n\nclass Embedding(tf.keras.layers.Layer):\n \"\"\"\n Input: indexes for the embedding entries with a shape of\n (batch_size, input_length). Input can be either dense tensor\n or SparseTensor.\n Output:\n corresponding (combined) embeddings with a shape of\n (batch_size, input_length, output_dim) if combiner is None\n (batch_size, output_dim) if combiner is not None\n Arguments:\n output_dim: the dimension of the embedding vector\n embedding_initializer: Initializer for embedding table\n mask_zero: Whether or not the input value 0 is a special \"padding\"\n value that should be masked out.\n If input is SparseTensor, mask_zero must be False.\n input_length: Length of input sequences, when it is constant.\n This argument is required if you are going to connect\n `Flatten` then `Dense` layers upstream\n (without it, the shape of the dense outputs cannot be computed).\n combiner: A string specifying the reduction op or None if not used.\n \"mean\", \"sqrtn\" and \"sum\" are supported for the reduction op.\n If input is SparseTensor, combiner must set as a reduction op.\n \"\"\"\n\n def __init__(\n self,\n output_dim,\n embedding_initializer=\"uniform\",\n mask_zero=False,\n input_length=None,\n combiner=None,\n **kwargs\n ):\n if \"input_shape\" not in kwargs and input_length:\n kwargs[\"input_shape\"] = (input_length,)\n super(Embedding, self).__init__(**kwargs)\n\n self.output_dim = output_dim\n self.embedding_initializer = embedding_initializer\n self.supports_masking = mask_zero\n self.input_length = input_length\n self.combiner = combiner\n self.tape = None\n self.worker = None\n self.bet_ids_pair = []\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n # this function is taken from\n # tf.keras.layers.Embedding.compute_output_shape\n # https://github.com/tensorflow/tensorflow/blob/3f3c728bf80e0fd6653744318cbbfe1454c6ddca/tensorflow/python/keras/layers/embeddings.py#L156\n if self.input_length is None:\n return input_shape + (self.output_dim,)\n else:\n if isinstance(self.input_length, (list, tuple)):\n in_lens = list(self.input_length)\n else:\n in_lens = [self.input_length]\n if len(in_lens) != len(input_shape) - 1:\n raise ValueError(\n '\"input_length\" is %s, '\n \"but received input has shape %s\"\n % (str(self.input_length), str(input_shape))\n )\n else:\n for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):\n if s1 is not None and s2 is not None and s1 != s2:\n raise ValueError(\n '\"input_length\" is %s, '\n \"but received input has shape %s\"\n % (str(self.input_length), str(input_shape))\n )\n elif s1 is None:\n in_lens[i] = s2\n return 
(input_shape[0],) + tuple(in_lens) + (self.output_dim,)\n\n @property\n def name(self):\n return self._name\n\n @staticmethod\n def get_key(name_list):\n return \"-\".join(map(str, name_list))\n\n def lookup_embedding(self, unique_ids):\n batch_embedding = self.worker.embedding_lookup(\n unique_ids, self._name, self.embedding_initializer\n )\n return batch_embedding\n\n def call(self, input):\n if isinstance(input, tf.SparseTensor):\n return self._sparse_input_call(input)\n\n ids = tf.convert_to_tensor(input, name=\"embedding_ids\")\n flat_ids = tf.reshape(ids, [-1])\n unique_ids, idx = tf.unique(flat_ids)\n batch_embedding_tensor = tf.py_function(\n self.lookup_embedding, inp=[unique_ids], Tout=tf.float32\n )\n if self.tape:\n # tape.watch works with eager mode only.\n # Gradient for embeddings is SparseTensor here due to tf.gather op.\n # tf.gather accesses tensor slices, resulting in sparse tensor\n # gradient.\n if not tf.executing_eagerly():\n raise RuntimeError(\"tape.watch only works with eager mode\")\n self.tape.watch(batch_embedding_tensor)\n self.bet_ids_pair.append((batch_embedding_tensor, unique_ids))\n outputs = tf.gather(batch_embedding_tensor, idx)\n outputs = tf.reshape(\n outputs, ids.get_shape().concatenate(self.output_dim)\n )\n # TODO: support combiner for dense input\n return outputs\n\n def _sparse_input_call(self, sparse_input):\n if self.combiner not in [\"sum\", \"mean\", \"sqrtn\"]:\n raise ValueError(\n \"combiner must set sum, mean or sqrtn for sparse input\"\n )\n unique_ids, idx = tf.unique(sparse_input.values)\n embeddings = tf.py_function(\n self.lookup_embedding, inp=[unique_ids], Tout=tf.float32\n )\n if self.tape:\n # tape.watch works with eager mode only\n # gradient for embeddings is dense tensor for sparse_input_call\n if not tf.executing_eagerly():\n raise RuntimeError(\"tape.watch only works with eager mode\")\n self.tape.watch(embeddings)\n self.bet_ids_pair.append((embeddings, unique_ids))\n segment_ids = sparse_input.indices[:, 0]\n if segment_ids.dtype != tf.int32:\n segment_ids = tf.cast(segment_ids, tf.int32)\n\n if self.combiner == \"sum\":\n embeddings = tf.sparse.segment_sum(embeddings, idx, segment_ids)\n elif self.combiner == \"mean\":\n embeddings = tf.sparse.segment_mean(embeddings, idx, segment_ids)\n elif self.combiner == \"sqrtn\":\n embeddings = tf.sparse.segment_sqrt_n(embeddings, idx, segment_ids)\n return embeddings\n\n def compute_mask(self, inputs, mask=None):\n if isinstance(input, tf.SparseTensor):\n raise ValueError(\"SparseTensor inputs do not support mask_zero\")\n if not self.supports_masking:\n return None\n return tf.math.not_equal(inputs, 0)\n\n def reset(self):\n self.bet_ids_pair = []\n self.tape = None\n\n def set_tape(self, tape):\n self.tape = tape\n\n def set_worker(self, worker):\n self.worker = worker\n", "path": "elasticdl/python/elasticdl/layers/embedding.py"}], "after_files": [{"content": "import tensorflow as tf\nfrom tensorflow.python.keras.utils import tf_utils\n\n\nclass Embedding(tf.keras.layers.Layer):\n \"\"\"\n Input: indexes for the embedding entries with a shape of\n (batch_size, input_length). 
Input can be either dense tensor\n or SparseTensor.\n Output:\n corresponding (combined) embeddings with a shape of\n (batch_size, input_length, output_dim) if combiner is None\n (batch_size, output_dim) if combiner is not None\n Arguments:\n output_dim: the dimension of the embedding vector\n embedding_initializer: Initializer for embedding table\n mask_zero: Whether or not the input value 0 is a special \"padding\"\n value that should be masked out.\n If input is SparseTensor, mask_zero must be False.\n input_length: Length of input sequences, when it is constant.\n This argument is required if you are going to connect\n `Flatten` then `Dense` layers upstream\n (without it, the shape of the dense outputs cannot be computed).\n combiner: A string specifying the reduction op or None if not used.\n \"mean\", \"sqrtn\" and \"sum\" are supported for the reduction op.\n If input is SparseTensor, combiner must set as a reduction op.\n \"\"\"\n\n def __init__(\n self,\n output_dim,\n embedding_initializer=\"uniform\",\n mask_zero=False,\n input_length=None,\n combiner=None,\n **kwargs\n ):\n if \"input_shape\" not in kwargs and input_length:\n kwargs[\"input_shape\"] = (input_length,)\n super(Embedding, self).__init__(**kwargs)\n\n self.output_dim = output_dim\n self.embedding_initializer = embedding_initializer\n self.supports_masking = mask_zero\n self.input_length = input_length\n self.combiner = combiner\n self.tape = None\n self.worker = None\n self.bet_ids_pair = []\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n # this function is taken from\n # tf.keras.layers.Embedding.compute_output_shape\n # https://github.com/tensorflow/tensorflow/blob/3f3c728bf80e0fd6653744318cbbfe1454c6ddca/tensorflow/python/keras/layers/embeddings.py#L156\n if self.input_length is None:\n return input_shape + (self.output_dim,)\n else:\n if isinstance(self.input_length, (list, tuple)):\n in_lens = list(self.input_length)\n else:\n in_lens = [self.input_length]\n if len(in_lens) != len(input_shape) - 1:\n raise ValueError(\n '\"input_length\" is %s, '\n \"but received input has shape %s\"\n % (str(self.input_length), str(input_shape))\n )\n else:\n for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):\n if s1 is not None and s2 is not None and s1 != s2:\n raise ValueError(\n '\"input_length\" is %s, '\n \"but received input has shape %s\"\n % (str(self.input_length), str(input_shape))\n )\n elif s1 is None:\n in_lens[i] = s2\n return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)\n\n @property\n def name(self):\n return self._name\n\n @staticmethod\n def get_key(name_list):\n return \"-\".join(map(str, name_list))\n\n def lookup_embedding(self, unique_ids):\n batch_embedding = self.worker.lookup_embedding(\n unique_ids, self._name, self.embedding_initializer, self.output_dim\n )\n return batch_embedding\n\n def call(self, input):\n if isinstance(input, tf.SparseTensor):\n return self._sparse_input_call(input)\n\n ids = tf.convert_to_tensor(input, name=\"embedding_ids\")\n flat_ids = tf.reshape(ids, [-1])\n unique_ids, idx = tf.unique(flat_ids)\n batch_embedding_tensor = tf.py_function(\n self.lookup_embedding, inp=[unique_ids], Tout=tf.float32\n )\n if self.tape:\n # tape.watch works with eager mode only.\n # Gradient for embeddings is SparseTensor here due to tf.gather op.\n # tf.gather accesses tensor slices, resulting in sparse tensor\n # gradient.\n if not tf.executing_eagerly():\n raise RuntimeError(\"tape.watch only works with eager mode\")\n 
self.tape.watch(batch_embedding_tensor)\n self.bet_ids_pair.append((batch_embedding_tensor, unique_ids))\n outputs = tf.gather(batch_embedding_tensor, idx)\n outputs = tf.reshape(\n outputs, ids.get_shape().concatenate(self.output_dim)\n )\n # TODO: support combiner for dense input\n return outputs\n\n def _sparse_input_call(self, sparse_input):\n if self.combiner not in [\"sum\", \"mean\", \"sqrtn\"]:\n raise ValueError(\n \"combiner must set sum, mean or sqrtn for sparse input\"\n )\n unique_ids, idx = tf.unique(sparse_input.values)\n embeddings = tf.py_function(\n self.lookup_embedding, inp=[unique_ids], Tout=tf.float32\n )\n if self.tape:\n # tape.watch works with eager mode only\n # gradient for embeddings is dense tensor for sparse_input_call\n if not tf.executing_eagerly():\n raise RuntimeError(\"tape.watch only works with eager mode\")\n self.tape.watch(embeddings)\n self.bet_ids_pair.append((embeddings, unique_ids))\n segment_ids = sparse_input.indices[:, 0]\n if segment_ids.dtype != tf.int32:\n segment_ids = tf.cast(segment_ids, tf.int32)\n\n if self.combiner == \"sum\":\n embeddings = tf.sparse.segment_sum(embeddings, idx, segment_ids)\n elif self.combiner == \"mean\":\n embeddings = tf.sparse.segment_mean(embeddings, idx, segment_ids)\n elif self.combiner == \"sqrtn\":\n embeddings = tf.sparse.segment_sqrt_n(embeddings, idx, segment_ids)\n return embeddings\n\n def compute_mask(self, inputs, mask=None):\n if isinstance(input, tf.SparseTensor):\n raise ValueError(\"SparseTensor inputs do not support mask_zero\")\n if not self.supports_masking:\n return None\n return tf.math.not_equal(inputs, 0)\n\n def reset(self):\n self.bet_ids_pair = []\n self.tape = None\n\n def set_tape(self, tape):\n self.tape = tape\n\n def set_worker(self, worker):\n self.worker = worker\n", "path": "elasticdl/python/elasticdl/layers/embedding.py"}]} | 2,143 | 150 |
gh_patches_debug_22252 | rasdani/github-patches | git_diff | getsentry__sentry-python-123 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
asyncio concurrency issues
@mitsuhiko discovered that passing around hubs does not work at all and we likely leak state between requests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/sanic.py`
Content:
```
1 import sys
2 import weakref
3 from inspect import isawaitable
4
5 from sentry_sdk._compat import urlparse, reraise
6 from sentry_sdk.hub import Hub
7 from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
8 from sentry_sdk.integrations import Integration
9 from sentry_sdk.integrations._wsgi import RequestExtractor, _filter_headers
10 from sentry_sdk.integrations.logging import ignore_logger
11
12 from sanic import Sanic
13 from sanic.router import Router
14 from sanic.handlers import ErrorHandler
15
16
17 class SanicIntegration(Integration):
18 identifier = "sanic"
19
20 @staticmethod
21 def setup_once():
22 if sys.version_info < (3, 7):
23 # Sanic is async. We better have contextvars or we're going to leak
24 # state between requests.
25 raise RuntimeError("The sanic integration for Sentry requires Python 3.7+")
26
27 # Sanic 0.8 and older creates a logger named "root" and puts a
28 # stringified version of every exception in there (without exc_info),
29 # which our error deduplication can't detect.
30 #
31 # https://github.com/huge-success/sanic/issues/1332
32 ignore_logger("root")
33
34 old_handle_request = Sanic.handle_request
35
36 async def sentry_handle_request(self, request, *args, **kwargs):
37 hub = Hub.current
38 if hub.get_integration(SanicIntegration) is None:
39 response = old_handle_request(self, request, *args, **kwargs)
40 else:
41 weak_request = weakref.ref(request)
42
43 with hub.push_scope() as scope:
44 scope.add_event_processor(_make_request_processor(weak_request))
45 response = old_handle_request(self, request, *args, **kwargs)
46 if isawaitable(response):
47 response = await response
48
49 return response
50
51 Sanic.handle_request = sentry_handle_request
52
53 old_router_get = Router.get
54
55 def sentry_router_get(self, request):
56 rv = old_router_get(self, request)
57 hub = Hub.current
58 if hub.get_integration(SanicIntegration) is not None:
59 with capture_internal_exceptions():
60 with hub.configure_scope() as scope:
61 scope.transaction = rv[0].__name__
62 return rv
63
64 Router.get = sentry_router_get
65
66 old_error_handler_lookup = ErrorHandler.lookup
67
68 def sentry_error_handler_lookup(self, exception):
69 _capture_exception(exception)
70 old_error_handler = old_error_handler_lookup(self, exception)
71
72 if old_error_handler is None:
73 return None
74
75 if Hub.current.get_integration(SanicIntegration) is None:
76 return old_error_handler
77
78 async def sentry_wrapped_error_handler(request, exception):
79 try:
80 response = old_error_handler(request, exception)
81 if isawaitable(response):
82 response = await response
83 return response
84 except Exception:
85 exc_info = sys.exc_info()
86 _capture_exception(exc_info)
87 reraise(*exc_info)
88
89 return sentry_wrapped_error_handler
90
91 ErrorHandler.lookup = sentry_error_handler_lookup
92
93
94 def _capture_exception(exception):
95 hub = Hub.current
96 integration = hub.get_integration(SanicIntegration)
97 if integration is None:
98 return
99
100 with capture_internal_exceptions():
101 event, hint = event_from_exception(
102 exception,
103 client_options=hub.client.options,
104 mechanism={"type": "sanic", "handled": False},
105 )
106 hub.capture_event(event, hint=hint)
107
108
109 def _make_request_processor(weak_request):
110 def sanic_processor(event, hint):
111 request = weak_request()
112 if request is None:
113 return event
114
115 with capture_internal_exceptions():
116 extractor = SanicRequestExtractor(request)
117 extractor.extract_into_event(event)
118
119 request_info = event["request"]
120 if "query_string" not in request_info:
121 request_info["query_string"] = extractor.urlparts.query
122
123 if "method" not in request_info:
124 request_info["method"] = request.method
125
126 if "env" not in request_info:
127 request_info["env"] = {"REMOTE_ADDR": request.remote_addr}
128
129 if "headers" not in request_info:
130 request_info["headers"] = _filter_headers(dict(request.headers))
131
132 return event
133
134 return sanic_processor
135
136
137 class SanicRequestExtractor(RequestExtractor):
138 def __init__(self, request):
139 RequestExtractor.__init__(self, request)
140 self.urlparts = urlparse.urlsplit(self.request.url)
141
142 def content_length(self):
143 if self.request.body is None:
144 return 0
145 return len(self.request.body)
146
147 def url(self):
148 return "%s://%s%s" % (
149 self.urlparts.scheme,
150 self.urlparts.netloc,
151 self.urlparts.path,
152 )
153
154 def cookies(self):
155 return dict(self.request.cookies)
156
157 def raw_data(self):
158 return self.request.body
159
160 def form(self):
161 return self.request.form
162
163 def is_json(self):
164 raise NotImplementedError()
165
166 def json(self):
167 return self.request.json
168
169 def files(self):
170 return self.request.files
171
172 def size_of_file(self, file):
173 return len(file.body or ())
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sentry_sdk/integrations/sanic.py b/sentry_sdk/integrations/sanic.py
--- a/sentry_sdk/integrations/sanic.py
+++ b/sentry_sdk/integrations/sanic.py
@@ -36,17 +36,19 @@
async def sentry_handle_request(self, request, *args, **kwargs):
hub = Hub.current
if hub.get_integration(SanicIntegration) is None:
- response = old_handle_request(self, request, *args, **kwargs)
- else:
- weak_request = weakref.ref(request)
+ return old_handle_request(self, request, *args, **kwargs)
+
+ weak_request = weakref.ref(request)
- with hub.push_scope() as scope:
+ with Hub(hub) as hub:
+ with hub.configure_scope() as scope:
scope.add_event_processor(_make_request_processor(weak_request))
- response = old_handle_request(self, request, *args, **kwargs)
- if isawaitable(response):
- response = await response
- return response
+ response = old_handle_request(self, request, *args, **kwargs)
+ if isawaitable(response):
+ response = await response
+
+ return response
Sanic.handle_request = sentry_handle_request
| {"golden_diff": "diff --git a/sentry_sdk/integrations/sanic.py b/sentry_sdk/integrations/sanic.py\n--- a/sentry_sdk/integrations/sanic.py\n+++ b/sentry_sdk/integrations/sanic.py\n@@ -36,17 +36,19 @@\n async def sentry_handle_request(self, request, *args, **kwargs):\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is None:\n- response = old_handle_request(self, request, *args, **kwargs)\n- else:\n- weak_request = weakref.ref(request)\n+ return old_handle_request(self, request, *args, **kwargs)\n+\n+ weak_request = weakref.ref(request)\n \n- with hub.push_scope() as scope:\n+ with Hub(hub) as hub:\n+ with hub.configure_scope() as scope:\n scope.add_event_processor(_make_request_processor(weak_request))\n- response = old_handle_request(self, request, *args, **kwargs)\n- if isawaitable(response):\n- response = await response\n \n- return response\n+ response = old_handle_request(self, request, *args, **kwargs)\n+ if isawaitable(response):\n+ response = await response\n+\n+ return response\n \n Sanic.handle_request = sentry_handle_request\n", "issue": "asyncio concurrency issues\n@mitsuhiko discovered that passing around hubs does not work at all and we likely leak state between requests\n", "before_files": [{"content": "import sys\nimport weakref\nfrom inspect import isawaitable\n\nfrom sentry_sdk._compat import urlparse, reraise\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations._wsgi import RequestExtractor, _filter_headers\nfrom sentry_sdk.integrations.logging import ignore_logger\n\nfrom sanic import Sanic\nfrom sanic.router import Router\nfrom sanic.handlers import ErrorHandler\n\n\nclass SanicIntegration(Integration):\n identifier = \"sanic\"\n\n @staticmethod\n def setup_once():\n if sys.version_info < (3, 7):\n # Sanic is async. 
We better have contextvars or we're going to leak\n # state between requests.\n raise RuntimeError(\"The sanic integration for Sentry requires Python 3.7+\")\n\n # Sanic 0.8 and older creates a logger named \"root\" and puts a\n # stringified version of every exception in there (without exc_info),\n # which our error deduplication can't detect.\n #\n # https://github.com/huge-success/sanic/issues/1332\n ignore_logger(\"root\")\n\n old_handle_request = Sanic.handle_request\n\n async def sentry_handle_request(self, request, *args, **kwargs):\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is None:\n response = old_handle_request(self, request, *args, **kwargs)\n else:\n weak_request = weakref.ref(request)\n\n with hub.push_scope() as scope:\n scope.add_event_processor(_make_request_processor(weak_request))\n response = old_handle_request(self, request, *args, **kwargs)\n if isawaitable(response):\n response = await response\n\n return response\n\n Sanic.handle_request = sentry_handle_request\n\n old_router_get = Router.get\n\n def sentry_router_get(self, request):\n rv = old_router_get(self, request)\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is not None:\n with capture_internal_exceptions():\n with hub.configure_scope() as scope:\n scope.transaction = rv[0].__name__\n return rv\n\n Router.get = sentry_router_get\n\n old_error_handler_lookup = ErrorHandler.lookup\n\n def sentry_error_handler_lookup(self, exception):\n _capture_exception(exception)\n old_error_handler = old_error_handler_lookup(self, exception)\n\n if old_error_handler is None:\n return None\n\n if Hub.current.get_integration(SanicIntegration) is None:\n return old_error_handler\n\n async def sentry_wrapped_error_handler(request, exception):\n try:\n response = old_error_handler(request, exception)\n if isawaitable(response):\n response = await response\n return response\n except Exception:\n exc_info = sys.exc_info()\n _capture_exception(exc_info)\n reraise(*exc_info)\n\n return sentry_wrapped_error_handler\n\n ErrorHandler.lookup = sentry_error_handler_lookup\n\n\ndef _capture_exception(exception):\n hub = Hub.current\n integration = hub.get_integration(SanicIntegration)\n if integration is None:\n return\n\n with capture_internal_exceptions():\n event, hint = event_from_exception(\n exception,\n client_options=hub.client.options,\n mechanism={\"type\": \"sanic\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n\ndef _make_request_processor(weak_request):\n def sanic_processor(event, hint):\n request = weak_request()\n if request is None:\n return event\n\n with capture_internal_exceptions():\n extractor = SanicRequestExtractor(request)\n extractor.extract_into_event(event)\n\n request_info = event[\"request\"]\n if \"query_string\" not in request_info:\n request_info[\"query_string\"] = extractor.urlparts.query\n\n if \"method\" not in request_info:\n request_info[\"method\"] = request.method\n\n if \"env\" not in request_info:\n request_info[\"env\"] = {\"REMOTE_ADDR\": request.remote_addr}\n\n if \"headers\" not in request_info:\n request_info[\"headers\"] = _filter_headers(dict(request.headers))\n\n return event\n\n return sanic_processor\n\n\nclass SanicRequestExtractor(RequestExtractor):\n def __init__(self, request):\n RequestExtractor.__init__(self, request)\n self.urlparts = urlparse.urlsplit(self.request.url)\n\n def content_length(self):\n if self.request.body is None:\n return 0\n return len(self.request.body)\n\n def url(self):\n return \"%s://%s%s\" % (\n 
self.urlparts.scheme,\n self.urlparts.netloc,\n self.urlparts.path,\n )\n\n def cookies(self):\n return dict(self.request.cookies)\n\n def raw_data(self):\n return self.request.body\n\n def form(self):\n return self.request.form\n\n def is_json(self):\n raise NotImplementedError()\n\n def json(self):\n return self.request.json\n\n def files(self):\n return self.request.files\n\n def size_of_file(self, file):\n return len(file.body or ())\n", "path": "sentry_sdk/integrations/sanic.py"}], "after_files": [{"content": "import sys\nimport weakref\nfrom inspect import isawaitable\n\nfrom sentry_sdk._compat import urlparse, reraise\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations._wsgi import RequestExtractor, _filter_headers\nfrom sentry_sdk.integrations.logging import ignore_logger\n\nfrom sanic import Sanic\nfrom sanic.router import Router\nfrom sanic.handlers import ErrorHandler\n\n\nclass SanicIntegration(Integration):\n identifier = \"sanic\"\n\n @staticmethod\n def setup_once():\n if sys.version_info < (3, 7):\n # Sanic is async. We better have contextvars or we're going to leak\n # state between requests.\n raise RuntimeError(\"The sanic integration for Sentry requires Python 3.7+\")\n\n # Sanic 0.8 and older creates a logger named \"root\" and puts a\n # stringified version of every exception in there (without exc_info),\n # which our error deduplication can't detect.\n #\n # https://github.com/huge-success/sanic/issues/1332\n ignore_logger(\"root\")\n\n old_handle_request = Sanic.handle_request\n\n async def sentry_handle_request(self, request, *args, **kwargs):\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is None:\n return old_handle_request(self, request, *args, **kwargs)\n\n weak_request = weakref.ref(request)\n\n with Hub(hub) as hub:\n with hub.configure_scope() as scope:\n scope.add_event_processor(_make_request_processor(weak_request))\n\n response = old_handle_request(self, request, *args, **kwargs)\n if isawaitable(response):\n response = await response\n\n return response\n\n Sanic.handle_request = sentry_handle_request\n\n old_router_get = Router.get\n\n def sentry_router_get(self, request):\n rv = old_router_get(self, request)\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is not None:\n with capture_internal_exceptions():\n with hub.configure_scope() as scope:\n scope.transaction = rv[0].__name__\n return rv\n\n Router.get = sentry_router_get\n\n old_error_handler_lookup = ErrorHandler.lookup\n\n def sentry_error_handler_lookup(self, exception):\n _capture_exception(exception)\n old_error_handler = old_error_handler_lookup(self, exception)\n\n if old_error_handler is None:\n return None\n\n if Hub.current.get_integration(SanicIntegration) is None:\n return old_error_handler\n\n async def sentry_wrapped_error_handler(request, exception):\n try:\n response = old_error_handler(request, exception)\n if isawaitable(response):\n response = await response\n return response\n except Exception:\n exc_info = sys.exc_info()\n _capture_exception(exc_info)\n reraise(*exc_info)\n\n return sentry_wrapped_error_handler\n\n ErrorHandler.lookup = sentry_error_handler_lookup\n\n\ndef _capture_exception(exception):\n hub = Hub.current\n integration = hub.get_integration(SanicIntegration)\n if integration is None:\n return\n\n with capture_internal_exceptions():\n event, hint = event_from_exception(\n exception,\n 
client_options=hub.client.options,\n mechanism={\"type\": \"sanic\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n\ndef _make_request_processor(weak_request):\n def sanic_processor(event, hint):\n request = weak_request()\n if request is None:\n return event\n\n with capture_internal_exceptions():\n extractor = SanicRequestExtractor(request)\n extractor.extract_into_event(event)\n\n request_info = event[\"request\"]\n if \"query_string\" not in request_info:\n request_info[\"query_string\"] = extractor.urlparts.query\n\n if \"method\" not in request_info:\n request_info[\"method\"] = request.method\n\n if \"env\" not in request_info:\n request_info[\"env\"] = {\"REMOTE_ADDR\": request.remote_addr}\n\n if \"headers\" not in request_info:\n request_info[\"headers\"] = _filter_headers(dict(request.headers))\n\n return event\n\n return sanic_processor\n\n\nclass SanicRequestExtractor(RequestExtractor):\n def __init__(self, request):\n RequestExtractor.__init__(self, request)\n self.urlparts = urlparse.urlsplit(self.request.url)\n\n def content_length(self):\n if self.request.body is None:\n return 0\n return len(self.request.body)\n\n def url(self):\n return \"%s://%s%s\" % (\n self.urlparts.scheme,\n self.urlparts.netloc,\n self.urlparts.path,\n )\n\n def cookies(self):\n return dict(self.request.cookies)\n\n def raw_data(self):\n return self.request.body\n\n def form(self):\n return self.request.form\n\n def is_json(self):\n raise NotImplementedError()\n\n def json(self):\n return self.request.json\n\n def files(self):\n return self.request.files\n\n def size_of_file(self, file):\n return len(file.body or ())\n", "path": "sentry_sdk/integrations/sanic.py"}]} | 1,830 | 285 |
gh_patches_debug_14733 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3271 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add OpenAPI specification for the /database_roles/ endpoint
Generate the OpenAPI spec for the `database_roles` endpoint.
--- END ISSUE ---
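As background, drf-spectacular hands its preprocessing hooks the full endpoint list and keeps whatever the hook returns, where each endpoint is a `(path, path_regex, method, callback)` tuple. A small standalone sketch (hypothetical paths, independent of Mathesar's actual settings) of prefix-based whitelisting:

```python
def filter_endpoints(endpoints, prefixes):
    """Keep only endpoints whose path starts with one of the given prefixes."""
    return [
        (path, path_regex, method, callback)
        for (path, path_regex, method, callback) in endpoints
        if any(path.startswith(prefix) for prefix in prefixes)
    ]


# Hypothetical endpoint tuples in the shape a preprocessing hook receives.
endpoints = [
    ("/api/ui/v0/database_roles/", r"^api/ui/v0/database_roles/$", "GET", None),
    ("/internal/health/", r"^internal/health/$", "GET", None),
]

print(filter_endpoints(endpoints, ["/api/ui/v0/database_roles/"]))
# Only the database_roles entry survives, so only it appears in the spec.
```

The fix in this entry's patch follows the same pattern: it adds `"/api/ui/v0/database_roles/"` (along with several other prefixes) to the hook's whitelist.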
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `config/settings/openapi.py`
Content:
```
1 def custom_preprocessing_hook(endpoints):
2 filtered = []
3 for (path, path_regex, method, callback) in endpoints:
4 # Remove all but DRF API endpoints
5 if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/"):
6 filtered.append((path, path_regex, method, callback))
7 return filtered
8
9
10 def remove_url_prefix_hook(result, **kwargs):
11 # Remove namespace and version URL prefix from the operation Id of the generated API schema
12 for path, path_info in result['paths'].items():
13 for method, operation in path_info.items():
14 operation_id = operation.get('operationId')
15 if operation_id:
16 if path.startswith('/api/db/v0/'):
17 operation['operationId'] = operation_id.replace('db_v0_', '')
18 elif path.startswith('/api/ui/v0/'):
19 operation['operationId'] = operation_id.replace('ui_v0_', '')
20
21 return result
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/config/settings/openapi.py b/config/settings/openapi.py
--- a/config/settings/openapi.py
+++ b/config/settings/openapi.py
@@ -1,9 +1,16 @@
def custom_preprocessing_hook(endpoints):
- filtered = []
- for (path, path_regex, method, callback) in endpoints:
- # Remove all but DRF API endpoints
- if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/"):
- filtered.append((path, path_regex, method, callback))
+ prefixes = [
+ "/api/db/v0/databases/",
+ "/api/db/v0/data_files/",
+ "/api/db/v0/schemas/",
+ "/api/db/v0/tables/",
+ "/api/db/v0/links/",
+ "/api/db/v0/queries/",
+ "/api/ui/v0/databases/",
+ "/api/ui/v0/users/",
+ "/api/ui/v0/database_roles/"
+ ]
+ filtered = [(path, path_regex, method, callback) for path, path_regex, method, callback in endpoints if any(path.startswith(prefix) for prefix in prefixes)]
return filtered
| {"golden_diff": "diff --git a/config/settings/openapi.py b/config/settings/openapi.py\n--- a/config/settings/openapi.py\n+++ b/config/settings/openapi.py\n@@ -1,9 +1,16 @@\n def custom_preprocessing_hook(endpoints):\n- filtered = []\n- for (path, path_regex, method, callback) in endpoints:\n- # Remove all but DRF API endpoints\n- if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\"):\n- filtered.append((path, path_regex, method, callback))\n+ prefixes = [\n+ \"/api/db/v0/databases/\",\n+ \"/api/db/v0/data_files/\",\n+ \"/api/db/v0/schemas/\",\n+ \"/api/db/v0/tables/\",\n+ \"/api/db/v0/links/\",\n+ \"/api/db/v0/queries/\",\n+ \"/api/ui/v0/databases/\",\n+ \"/api/ui/v0/users/\",\n+ \"/api/ui/v0/database_roles/\"\n+ ]\n+ filtered = [(path, path_regex, method, callback) for path, path_regex, method, callback in endpoints if any(path.startswith(prefix) for prefix in prefixes)]\n return filtered\n", "issue": "Add openAPI Specification for /database_roles/ endpoint\nGenerate spec for `database_roles` endpoint\n", "before_files": [{"content": "def custom_preprocessing_hook(endpoints):\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}], "after_files": [{"content": "def custom_preprocessing_hook(endpoints):\n prefixes = [\n \"/api/db/v0/databases/\",\n \"/api/db/v0/data_files/\",\n \"/api/db/v0/schemas/\",\n \"/api/db/v0/tables/\",\n \"/api/db/v0/links/\",\n \"/api/db/v0/queries/\",\n \"/api/ui/v0/databases/\",\n \"/api/ui/v0/users/\",\n \"/api/ui/v0/database_roles/\"\n ]\n filtered = [(path, path_regex, method, callback) for path, path_regex, method, callback in endpoints if any(path.startswith(prefix) for prefix in prefixes)]\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}]} | 541 | 281 |
gh_patches_debug_1950 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1810 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Worker occasionally crashes when reporting an evaluation task result.
The error log:
```
status = StatusCode.UNKNOWN
details = "Exception calling application: 'NoneType' object has no attribute 'complete_task'"
debug_error_string = "{"created":"@1582833503.778925101","description":"Error received from peer ipv4:11.253.195.11:50001","file":"src/core/lib/surface/call.cc","file_line":1056,"grpc_message":"Exception calling application: 'NoneType' object has no attribute 'complete_task'","grpc_status":2}"
```
--- END ISSUE ---
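To make the race concrete, here is a minimal sketch (toy classes, not ElasticDL's actual service) of the situation the traceback describes: a worker's completion report can arrive after the master has already torn down the evaluation job, so the handler must tolerate `_eval_job` being `None`:

```python
class EvaluationJob:
    def __init__(self, total_tasks):
        self.completed = 0
        self.total = total_tasks

    def complete_task(self):
        self.completed += 1


class EvaluationService:
    def __init__(self):
        self._eval_job = None  # set while a job runs, cleared when it finishes

    def start_job(self, total_tasks):
        self._eval_job = EvaluationJob(total_tasks)

    def finish_job(self):
        self._eval_job = None

    def complete_task(self):
        # Guard against a late or duplicate report arriving after the job was
        # cleared; without it, this is exactly the AttributeError in the issue.
        if self._eval_job is None:
            return
        self._eval_job.complete_task()


service = EvaluationService()
service.start_job(total_tasks=2)
service.finish_job()       # job torn down (all tasks accounted for)
service.complete_task()    # straggler report is ignored instead of crashing
```

This is the same guard the golden diff adds at the top of `EvaluationService.complete_task`.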
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/master/evaluation_service.py`
Content:
```
1 import threading
2 import time
3 from threading import Thread
4
5 from elasticdl.proto import elasticdl_pb2
6 from elasticdl.python.common.evaluation_utils import EvaluationMetrics
7 from elasticdl.python.common.log_utils import default_logger as logger
8 from elasticdl.python.common.tensor_utils import pb_to_ndarray
9
10
11 class EvaluationJob(object):
12 """Representation of an evaluation job"""
13
14 def __init__(self, metrics_dict, model_version, total_tasks=-1):
15 """
16 Args:
17 metrics_dict: A python dictionary. If model has only one output,
18 `metrics_dict` is a dictionary of `{metric_name: metric}`,
19 i.e. `{"acc": tf.keras.metrics.Accuracy()}`.
20 If model has multiple outputs, `metric_dict` is a dictionary of
21 `{output_name: {metric_name: metric}}`,
22 i.e. `{
23 "output_a": {"acc": tf.keras.metrics.Accuracy()},
24 "output_b": {"auc": tf.keras.metrics.AUC()},
25 }`. Note that for model with multiple outputs, each metric
26 only uses one output.
27 model_version: The version of the model to be evaluated.
28 total_tasks: The number of evaluation tasks.
29 """
30
31 self.model_version = model_version
32 self._total_tasks = total_tasks
33 self._completed_tasks = 0
34 self.evaluation_metrics = EvaluationMetrics(metrics_dict)
35
36 def complete_task(self):
37 self._completed_tasks += 1
38
39 def finished(self):
40 return self._completed_tasks >= self._total_tasks
41
42 def report_evaluation_metrics(self, model_outputs_pb, labels):
43 labels = pb_to_ndarray(labels)
44 model_outputs = {}
45 for name, tensor_pb in model_outputs_pb.items():
46 model_outputs[name] = pb_to_ndarray(tensor_pb)
47 self.evaluation_metrics.update_evaluation_metrics(
48 model_outputs, labels
49 )
50
51
52 class _EvaluationTrigger(Thread):
53 """A trigger which generates evaluation tasks periodically"""
54
55 def __init__(self, eval_service, start_delay_secs, throttle_secs):
56 Thread.__init__(self)
57 self._eval_service = eval_service
58 self._stopper = threading.Event()
59 self._throttle_secs = throttle_secs
60 self._eval_min_time = time.time() + start_delay_secs
61
62 def stop(self):
63 self._stopper.set()
64
65 def _wait_enough_time(self, cur_time_secs, previous_round_start_secs):
66 if cur_time_secs < self._eval_min_time:
67 return False
68 if (
69 previous_round_start_secs != -1
70 and cur_time_secs - previous_round_start_secs < self._throttle_secs
71 ):
72 return False
73 return True
74
75 def run(self):
76 previous_round_start_secs = -1
77
78 while not self._stopper.is_set():
79 time_now = time.time()
80 if self._wait_enough_time(time_now, previous_round_start_secs):
81 # Time is up, add an evaluation task
82 self._eval_service.add_evaluation_task(is_time_based_eval=True)
83 previous_round_start_secs = time_now
84 time.sleep(5)
85
86
87 class EvaluationService(object):
88 """Evaluation service"""
89
90 def __init__(
91 self,
92 tensorboard_service,
93 task_d,
94 start_delay_secs,
95 throttle_secs,
96 eval_steps,
97 eval_only,
98 eval_metrics_fn,
99 ):
100 self._tensorboard_service = tensorboard_service
101 self._task_d = task_d
102 self._lock = threading.Lock()
103 self._eval_job = None
104 self.trigger = _EvaluationTrigger(
105 self, start_delay_secs, throttle_secs
106 )
107 self._time_based_eval = throttle_secs > 0
108 self._eval_steps = eval_steps
109 self._eval_checkpoint_versions = []
110 self._last_eval_checkpoint_version = -1
111 self._eval_only = eval_only
112 self._eval_metrics_fn = eval_metrics_fn
113
114 def start(self):
115 if self._time_based_eval and not self._eval_only:
116 self.trigger.start()
117
118 def stop(self):
119 if self._time_based_eval and not self._eval_only:
120 self.trigger.stop()
121
122 def set_master_servicer(self, master_servicer):
123 self._master_servicer = master_servicer
124
125 def init_eval_only_job(self, num_task):
126 self._eval_job = EvaluationJob(self._eval_metrics_fn(), -1, num_task)
127
128 def add_evaluation_task(
129 self, is_time_based_eval, master_locking=True, model_version=None
130 ):
131 """
132 Add evaluation task with current model_version.
133 """
134 # Do not create time-based eval after all tasks are done
135 if is_time_based_eval and self._task_d.finished():
136 return
137 if not model_version:
138 model_version = self._master_servicer.get_model_version()
139 if model_version == self._last_eval_checkpoint_version:
140 return
141
142 checkpoint_version = model_version
143 with self._lock:
144 self._eval_checkpoint_versions.append(checkpoint_version)
145 self._last_eval_checkpoint_version = checkpoint_version
146 self.try_to_create_new_job()
147
148 def try_to_create_new_job(self):
149 """
150 Add eval task into task dispatcher if current eval_job is done
151 and there are pending eval tasks
152 """
153 with self._lock:
154 if self._eval_job is None and self._eval_checkpoint_versions:
155 checkpoint_version = self._eval_checkpoint_versions.pop(0)
156 self._task_d.create_tasks(
157 elasticdl_pb2.EVALUATION, checkpoint_version
158 )
159 task_count = len(self._task_d._eval_todo)
160 if self._eval_job is None:
161 self._eval_job = EvaluationJob(
162 self._eval_metrics_fn(), checkpoint_version, task_count
163 )
164 else:
165 self._eval_job.model_version = checkpoint_version
166 self._eval_job._total_tasks = task_count
167 self._eval_job.reset_metric_states()
168 return True
169 return False
170
171 def add_evaluation_task_if_needed(self, master_locking, model_version):
172 """
173 Add step-based evaluation task
174 """
175 if not model_version:
176 model_version = self._master_servicer.get_model_version()
177 if (
178 self._eval_steps
179 and model_version % self._eval_steps == 0
180 and model_version > self._last_eval_checkpoint_version
181 ):
182 self.add_evaluation_task(
183 is_time_based_eval=False,
184 master_locking=master_locking,
185 model_version=model_version,
186 )
187
188 def report_evaluation_metrics(self, model_outputs, labels):
189 if self._eval_job is None:
190 return False
191 with self._lock:
192 return self._eval_job.report_evaluation_metrics(
193 model_outputs, labels
194 )
195
196 def complete_task(self):
197 self._eval_job.complete_task()
198 if self._eval_job.finished():
199 evaluation_metrics = (
200 self._eval_job.evaluation_metrics.get_evaluation_summary()
201 )
202 if self._tensorboard_service and evaluation_metrics:
203 self._tensorboard_service.write_dict_to_summary(
204 evaluation_metrics, version=self._eval_job.model_version
205 )
206 logger.info(
207 "Evaluation metrics[v=%d]: %s"
208 % (
209 self._eval_job.model_version
210 if self._eval_job.model_version >= 0
211 else self._master_servicer.get_model_version(),
212 str(evaluation_metrics),
213 )
214 )
215 if not self._eval_only:
216 # delete checkpoint file
217 self._eval_job = None
218 # create new eval job if possible
219 self.try_to_create_new_job()
220 return evaluation_metrics
221
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/master/evaluation_service.py b/elasticdl/python/master/evaluation_service.py
--- a/elasticdl/python/master/evaluation_service.py
+++ b/elasticdl/python/master/evaluation_service.py
@@ -194,6 +194,8 @@
)
def complete_task(self):
+ if self._eval_job is None:
+ return
self._eval_job.complete_task()
if self._eval_job.finished():
evaluation_metrics = (
| {"golden_diff": "diff --git a/elasticdl/python/master/evaluation_service.py b/elasticdl/python/master/evaluation_service.py\n--- a/elasticdl/python/master/evaluation_service.py\n+++ b/elasticdl/python/master/evaluation_service.py\n@@ -194,6 +194,8 @@\n )\n \n def complete_task(self):\n+ if self._eval_job is None:\n+ return\n self._eval_job.complete_task()\n if self._eval_job.finished():\n evaluation_metrics = (\n", "issue": "Worker occasionally crashes when reports evaluation task result.\nThe error log:\r\n```\r\nstatus = StatusCode.UNKNOWN\r\ndetails = \"Exception calling application: 'NoneType' object has no attribute 'complete_task'\"\r\ndebug_error_string = \"{\"created\":\"@1582833503.778925101\",\"description\":\"Error received from peer ipv4:11.253.195.11:50001\",\"file\":\"src/core/lib/surface/call.cc\",\"file_line\":1056,\"grpc_message\":\"Exception calling application: 'NoneType' object has no attribute 'complete_task'\",\"grpc_status\":2}\"\r\n```\r\n\n", "before_files": [{"content": "import threading\nimport time\nfrom threading import Thread\n\nfrom elasticdl.proto import elasticdl_pb2\nfrom elasticdl.python.common.evaluation_utils import EvaluationMetrics\nfrom elasticdl.python.common.log_utils import default_logger as logger\nfrom elasticdl.python.common.tensor_utils import pb_to_ndarray\n\n\nclass EvaluationJob(object):\n \"\"\"Representation of an evaluation job\"\"\"\n\n def __init__(self, metrics_dict, model_version, total_tasks=-1):\n \"\"\"\n Args:\n metrics_dict: A python dictionary. If model has only one output,\n `metrics_dict` is a dictionary of `{metric_name: metric}`,\n i.e. `{\"acc\": tf.keras.metrics.Accuracy()}`.\n If model has multiple outputs, `metric_dict` is a dictionary of\n `{output_name: {metric_name: metric}}`,\n i.e. `{\n \"output_a\": {\"acc\": tf.keras.metrics.Accuracy()},\n \"output_b\": {\"auc\": tf.keras.metrics.AUC()},\n }`. 
Note that for model with multiple outputs, each metric\n only uses one output.\n model_version: The version of the model to be evaluated.\n total_tasks: The number of evaluation tasks.\n \"\"\"\n\n self.model_version = model_version\n self._total_tasks = total_tasks\n self._completed_tasks = 0\n self.evaluation_metrics = EvaluationMetrics(metrics_dict)\n\n def complete_task(self):\n self._completed_tasks += 1\n\n def finished(self):\n return self._completed_tasks >= self._total_tasks\n\n def report_evaluation_metrics(self, model_outputs_pb, labels):\n labels = pb_to_ndarray(labels)\n model_outputs = {}\n for name, tensor_pb in model_outputs_pb.items():\n model_outputs[name] = pb_to_ndarray(tensor_pb)\n self.evaluation_metrics.update_evaluation_metrics(\n model_outputs, labels\n )\n\n\nclass _EvaluationTrigger(Thread):\n \"\"\"A trigger which generates evaluation tasks periodically\"\"\"\n\n def __init__(self, eval_service, start_delay_secs, throttle_secs):\n Thread.__init__(self)\n self._eval_service = eval_service\n self._stopper = threading.Event()\n self._throttle_secs = throttle_secs\n self._eval_min_time = time.time() + start_delay_secs\n\n def stop(self):\n self._stopper.set()\n\n def _wait_enough_time(self, cur_time_secs, previous_round_start_secs):\n if cur_time_secs < self._eval_min_time:\n return False\n if (\n previous_round_start_secs != -1\n and cur_time_secs - previous_round_start_secs < self._throttle_secs\n ):\n return False\n return True\n\n def run(self):\n previous_round_start_secs = -1\n\n while not self._stopper.is_set():\n time_now = time.time()\n if self._wait_enough_time(time_now, previous_round_start_secs):\n # Time is up, add an evaluation task\n self._eval_service.add_evaluation_task(is_time_based_eval=True)\n previous_round_start_secs = time_now\n time.sleep(5)\n\n\nclass EvaluationService(object):\n \"\"\"Evaluation service\"\"\"\n\n def __init__(\n self,\n tensorboard_service,\n task_d,\n start_delay_secs,\n throttle_secs,\n eval_steps,\n eval_only,\n eval_metrics_fn,\n ):\n self._tensorboard_service = tensorboard_service\n self._task_d = task_d\n self._lock = threading.Lock()\n self._eval_job = None\n self.trigger = _EvaluationTrigger(\n self, start_delay_secs, throttle_secs\n )\n self._time_based_eval = throttle_secs > 0\n self._eval_steps = eval_steps\n self._eval_checkpoint_versions = []\n self._last_eval_checkpoint_version = -1\n self._eval_only = eval_only\n self._eval_metrics_fn = eval_metrics_fn\n\n def start(self):\n if self._time_based_eval and not self._eval_only:\n self.trigger.start()\n\n def stop(self):\n if self._time_based_eval and not self._eval_only:\n self.trigger.stop()\n\n def set_master_servicer(self, master_servicer):\n self._master_servicer = master_servicer\n\n def init_eval_only_job(self, num_task):\n self._eval_job = EvaluationJob(self._eval_metrics_fn(), -1, num_task)\n\n def add_evaluation_task(\n self, is_time_based_eval, master_locking=True, model_version=None\n ):\n \"\"\"\n Add evaluation task with current model_version.\n \"\"\"\n # Do not create time-based eval after all tasks are done\n if is_time_based_eval and self._task_d.finished():\n return\n if not model_version:\n model_version = self._master_servicer.get_model_version()\n if model_version == self._last_eval_checkpoint_version:\n return\n\n checkpoint_version = model_version\n with self._lock:\n self._eval_checkpoint_versions.append(checkpoint_version)\n self._last_eval_checkpoint_version = checkpoint_version\n self.try_to_create_new_job()\n\n def 
try_to_create_new_job(self):\n \"\"\"\n Add eval task into task dispatcher if current eval_job is done\n and there are pending eval tasks\n \"\"\"\n with self._lock:\n if self._eval_job is None and self._eval_checkpoint_versions:\n checkpoint_version = self._eval_checkpoint_versions.pop(0)\n self._task_d.create_tasks(\n elasticdl_pb2.EVALUATION, checkpoint_version\n )\n task_count = len(self._task_d._eval_todo)\n if self._eval_job is None:\n self._eval_job = EvaluationJob(\n self._eval_metrics_fn(), checkpoint_version, task_count\n )\n else:\n self._eval_job.model_version = checkpoint_version\n self._eval_job._total_tasks = task_count\n self._eval_job.reset_metric_states()\n return True\n return False\n\n def add_evaluation_task_if_needed(self, master_locking, model_version):\n \"\"\"\n Add step-based evaluation task\n \"\"\"\n if not model_version:\n model_version = self._master_servicer.get_model_version()\n if (\n self._eval_steps\n and model_version % self._eval_steps == 0\n and model_version > self._last_eval_checkpoint_version\n ):\n self.add_evaluation_task(\n is_time_based_eval=False,\n master_locking=master_locking,\n model_version=model_version,\n )\n\n def report_evaluation_metrics(self, model_outputs, labels):\n if self._eval_job is None:\n return False\n with self._lock:\n return self._eval_job.report_evaluation_metrics(\n model_outputs, labels\n )\n\n def complete_task(self):\n self._eval_job.complete_task()\n if self._eval_job.finished():\n evaluation_metrics = (\n self._eval_job.evaluation_metrics.get_evaluation_summary()\n )\n if self._tensorboard_service and evaluation_metrics:\n self._tensorboard_service.write_dict_to_summary(\n evaluation_metrics, version=self._eval_job.model_version\n )\n logger.info(\n \"Evaluation metrics[v=%d]: %s\"\n % (\n self._eval_job.model_version\n if self._eval_job.model_version >= 0\n else self._master_servicer.get_model_version(),\n str(evaluation_metrics),\n )\n )\n if not self._eval_only:\n # delete checkpoint file\n self._eval_job = None\n # create new eval job if possible\n self.try_to_create_new_job()\n return evaluation_metrics\n", "path": "elasticdl/python/master/evaluation_service.py"}], "after_files": [{"content": "import threading\nimport time\nfrom threading import Thread\n\nfrom elasticdl.proto import elasticdl_pb2\nfrom elasticdl.python.common.evaluation_utils import EvaluationMetrics\nfrom elasticdl.python.common.log_utils import default_logger as logger\nfrom elasticdl.python.common.tensor_utils import pb_to_ndarray\n\n\nclass EvaluationJob(object):\n \"\"\"Representation of an evaluation job\"\"\"\n\n def __init__(self, metrics_dict, model_version, total_tasks=-1):\n \"\"\"\n Args:\n metrics_dict: A python dictionary. If model has only one output,\n `metrics_dict` is a dictionary of `{metric_name: metric}`,\n i.e. `{\"acc\": tf.keras.metrics.Accuracy()}`.\n If model has multiple outputs, `metric_dict` is a dictionary of\n `{output_name: {metric_name: metric}}`,\n i.e. `{\n \"output_a\": {\"acc\": tf.keras.metrics.Accuracy()},\n \"output_b\": {\"auc\": tf.keras.metrics.AUC()},\n }`. 
Note that for model with multiple outputs, each metric\n only uses one output.\n model_version: The version of the model to be evaluated.\n total_tasks: The number of evaluation tasks.\n \"\"\"\n\n self.model_version = model_version\n self._total_tasks = total_tasks\n self._completed_tasks = 0\n self.evaluation_metrics = EvaluationMetrics(metrics_dict)\n\n def complete_task(self):\n self._completed_tasks += 1\n\n def finished(self):\n return self._completed_tasks >= self._total_tasks\n\n def report_evaluation_metrics(self, model_outputs_pb, labels):\n labels = pb_to_ndarray(labels)\n model_outputs = {}\n for name, tensor_pb in model_outputs_pb.items():\n model_outputs[name] = pb_to_ndarray(tensor_pb)\n self.evaluation_metrics.update_evaluation_metrics(\n model_outputs, labels\n )\n\n\nclass _EvaluationTrigger(Thread):\n \"\"\"A trigger which generates evaluation tasks periodically\"\"\"\n\n def __init__(self, eval_service, start_delay_secs, throttle_secs):\n Thread.__init__(self)\n self._eval_service = eval_service\n self._stopper = threading.Event()\n self._throttle_secs = throttle_secs\n self._eval_min_time = time.time() + start_delay_secs\n\n def stop(self):\n self._stopper.set()\n\n def _wait_enough_time(self, cur_time_secs, previous_round_start_secs):\n if cur_time_secs < self._eval_min_time:\n return False\n if (\n previous_round_start_secs != -1\n and cur_time_secs - previous_round_start_secs < self._throttle_secs\n ):\n return False\n return True\n\n def run(self):\n previous_round_start_secs = -1\n\n while not self._stopper.is_set():\n time_now = time.time()\n if self._wait_enough_time(time_now, previous_round_start_secs):\n # Time is up, add an evaluation task\n self._eval_service.add_evaluation_task(is_time_based_eval=True)\n previous_round_start_secs = time_now\n time.sleep(5)\n\n\nclass EvaluationService(object):\n \"\"\"Evaluation service\"\"\"\n\n def __init__(\n self,\n tensorboard_service,\n task_d,\n start_delay_secs,\n throttle_secs,\n eval_steps,\n eval_only,\n eval_metrics_fn,\n ):\n self._tensorboard_service = tensorboard_service\n self._task_d = task_d\n self._lock = threading.Lock()\n self._eval_job = None\n self.trigger = _EvaluationTrigger(\n self, start_delay_secs, throttle_secs\n )\n self._time_based_eval = throttle_secs > 0\n self._eval_steps = eval_steps\n self._eval_checkpoint_versions = []\n self._last_eval_checkpoint_version = -1\n self._eval_only = eval_only\n self._eval_metrics_fn = eval_metrics_fn\n\n def start(self):\n if self._time_based_eval and not self._eval_only:\n self.trigger.start()\n\n def stop(self):\n if self._time_based_eval and not self._eval_only:\n self.trigger.stop()\n\n def set_master_servicer(self, master_servicer):\n self._master_servicer = master_servicer\n\n def init_eval_only_job(self, num_task):\n self._eval_job = EvaluationJob(self._eval_metrics_fn(), -1, num_task)\n\n def add_evaluation_task(\n self, is_time_based_eval, master_locking=True, model_version=None\n ):\n \"\"\"\n Add evaluation task with current model_version.\n \"\"\"\n # Do not create time-based eval after all tasks are done\n if is_time_based_eval and self._task_d.finished():\n return\n if not model_version:\n model_version = self._master_servicer.get_model_version()\n if model_version == self._last_eval_checkpoint_version:\n return\n\n checkpoint_version = model_version\n with self._lock:\n self._eval_checkpoint_versions.append(checkpoint_version)\n self._last_eval_checkpoint_version = checkpoint_version\n self.try_to_create_new_job()\n\n def 
try_to_create_new_job(self):\n \"\"\"\n Add eval task into task dispatcher if current eval_job is done\n and there are pending eval tasks\n \"\"\"\n with self._lock:\n if self._eval_job is None and self._eval_checkpoint_versions:\n checkpoint_version = self._eval_checkpoint_versions.pop(0)\n self._task_d.create_tasks(\n elasticdl_pb2.EVALUATION, checkpoint_version\n )\n task_count = len(self._task_d._eval_todo)\n if self._eval_job is None:\n self._eval_job = EvaluationJob(\n self._eval_metrics_fn(), checkpoint_version, task_count\n )\n else:\n self._eval_job.model_version = checkpoint_version\n self._eval_job._total_tasks = task_count\n self._eval_job.reset_metric_states()\n return True\n return False\n\n def add_evaluation_task_if_needed(self, master_locking, model_version):\n \"\"\"\n Add step-based evaluation task\n \"\"\"\n if not model_version:\n model_version = self._master_servicer.get_model_version()\n if (\n self._eval_steps\n and model_version % self._eval_steps == 0\n and model_version > self._last_eval_checkpoint_version\n ):\n self.add_evaluation_task(\n is_time_based_eval=False,\n master_locking=master_locking,\n model_version=model_version,\n )\n\n def report_evaluation_metrics(self, model_outputs, labels):\n if self._eval_job is None:\n return False\n with self._lock:\n return self._eval_job.report_evaluation_metrics(\n model_outputs, labels\n )\n\n def complete_task(self):\n if self._eval_job is None:\n return\n self._eval_job.complete_task()\n if self._eval_job.finished():\n evaluation_metrics = (\n self._eval_job.evaluation_metrics.get_evaluation_summary()\n )\n if self._tensorboard_service and evaluation_metrics:\n self._tensorboard_service.write_dict_to_summary(\n evaluation_metrics, version=self._eval_job.model_version\n )\n logger.info(\n \"Evaluation metrics[v=%d]: %s\"\n % (\n self._eval_job.model_version\n if self._eval_job.model_version >= 0\n else self._master_servicer.get_model_version(),\n str(evaluation_metrics),\n )\n )\n if not self._eval_only:\n # delete checkpoint file\n self._eval_job = None\n # create new eval job if possible\n self.try_to_create_new_job()\n return evaluation_metrics\n", "path": "elasticdl/python/master/evaluation_service.py"}]} | 2,570 | 106 |
gh_patches_debug_35341 | rasdani/github-patches | git_diff | google__mobly-170 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Snippets are not compatible with UIAutomator
'am instrument' only sets up a UiAutomationConnection in -w mode (I don't know why). Snippets are not run in wait mode, so UiAutomationConnection is null. This crashes uiautomator, which needs that proxy object for privileged operations back to the shell.
We need to start the snippets in -w mode, using something like `start_standing_subprocess`.
--- END ISSUE ---
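As a rough illustration (plain `subprocess`, with placeholder serial, package, and port; Mobly's own `utils.start_standing_subprocess` wraps a similar idea), keeping `am instrument` in `-w` mode means launching it as a long-lived child process rather than a fire-and-forget shell call, so the instrumentation retains its UiAutomationConnection for the life of the snippet:

```python
import subprocess

SERIAL = "emulator-5554"          # placeholder device serial
PACKAGE = "com.example.snippets"  # placeholder snippet package
RUNNER = "com.google.android.mobly.snippet.SnippetRunner"
PORT = "8080"                     # placeholder device-side port

cmd = [
    "adb", "-s", SERIAL, "shell",
    # -w keeps the instrumentation (and its UiAutomationConnection) attached
    # for as long as this child process stays alive.
    "am", "instrument", "-w",
    "-e", "action", "start",
    "-e", "port", PORT,
    "%s/%s" % (PACKAGE, RUNNER),
]

proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# ... interact with the snippet server here while `proc` keeps running ...
proc.terminate()
proc.wait()
```

The golden diff does essentially this, but via `utils.start_standing_subprocess`, which additionally health-checks the process and reaps it in `stop_app`.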
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mobly/controllers/android_device_lib/snippet_client.py`
Content:
```
1 #/usr/bin/env python3.4
2 #
3 # Copyright 2016 Google Inc.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 """JSON RPC interface to Mobly Snippet Lib."""
17 import logging
18 import re
19
20 from mobly.controllers.android_device_lib import adb
21 from mobly.controllers.android_device_lib import jsonrpc_client_base
22
23 _INSTRUMENTATION_RUNNER_PACKAGE = 'com.google.android.mobly.snippet.SnippetRunner'
24
25 _LAUNCH_CMD = 'am instrument -e action start -e port %s %s/' + _INSTRUMENTATION_RUNNER_PACKAGE
26
27 _STOP_CMD = 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE
28
29
30 class Error(Exception):
31 pass
32
33
34 class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
35 """A client for interacting with snippet APKs using Mobly Snippet Lib.
36
37 See superclass documentation for a list of public attributes.
38 """
39
40 def __init__(self, package, host_port, adb_proxy, log=logging.getLogger()):
41 """Initializes a SnippetClient.
42
43 Args:
44 package: (str) The package name of the apk where the snippets are
45 defined.
46 host_port: (int) The port at which to start the snippet client. Note
47 that the same port will currently be used for both the
48 device and host side of the connection.
49 adb_proxy: (adb.AdbProxy) The adb proxy to use to start the app.
50 """
51 # TODO(adorokhine): Don't assume that a free host-side port is free on
52 # the device as well. Both sides should allocate a unique port.
53 super(SnippetClient, self).__init__(
54 host_port=host_port,
55 device_port=host_port,
56 app_name=package,
57 adb_proxy=adb_proxy,
58 log=log)
59 self.package = package
60 self.log = log
61 self._serial = self._adb.serial
62
63 def _do_start_app(self):
64 """Overrides superclass."""
65 cmd = _LAUNCH_CMD % (self.device_port, self.package)
66 # Use info here so people know exactly what's happening here, which is
67 # helpful since they need to create their own instrumentations and
68 # manifest.
69 self.log.info('Launching snippet apk %s', self.package)
70 self._adb.shell(cmd)
71
72 def stop_app(self):
73 """Overrides superclass."""
74 cmd = _STOP_CMD % self.package
75 self.log.debug('Stopping snippet apk %s', self.package)
76 out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8')
77 if 'OK (0 tests)' not in out:
78 raise Error('Failed to stop existing apk. Unexpected output: %s' %
79 out)
80
81 def check_app_installed(self):
82 """Overrides superclass."""
83 # Check that the Mobly Snippet app is installed.
84 if not self._adb_grep_wrapper(
85 r'pm list package | tr -d "\r" | grep "^package:%s$"' %
86 self.package):
87 raise jsonrpc_client_base.AppStartError(
88 '%s is not installed on %s' % (self.package, self._serial))
89 # Check that the app is instrumented.
90 out = self._adb_grep_wrapper(
91 r'pm list instrumentation | tr -d "\r" | grep ^instrumentation:%s/%s'
92 % (self.package, _INSTRUMENTATION_RUNNER_PACKAGE))
93 if not out:
94 raise jsonrpc_client_base.AppStartError(
95 '%s is installed on %s, but it is not instrumented.' %
96 (self.package, self._serial))
97 match = re.search(r'^instrumentation:(.*)\/(.*) \(target=(.*)\)$', out)
98 target_name = match.group(3)
99 # Check that the instrumentation target is installed if it's not the
100 # same as the snippet package.
101 if target_name != self.package:
102 out = self._adb_grep_wrapper(
103 r'pm list package | tr -d "\r" | grep ^package:%s$' %
104 target_name)
105 if not out:
106 raise jsonrpc_client_base.AppStartError(
107 'Instrumentation target %s is not installed on %s' %
108 (target_name, self._serial))
109
110 def _start_event_client(self):
111 event_client = SnippetClient(
112 package=self.package,
113 host_port=self.host_port,
114 adb_proxy=self._adb,
115 log=self.log)
116 event_client.connect(self.uid,
117 jsonrpc_client_base.JsonRpcCommand.CONTINUE)
118 return event_client
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py
--- a/mobly/controllers/android_device_lib/snippet_client.py
+++ b/mobly/controllers/android_device_lib/snippet_client.py
@@ -17,14 +17,18 @@
import logging
import re
-from mobly.controllers.android_device_lib import adb
+from mobly import utils
from mobly.controllers.android_device_lib import jsonrpc_client_base
-_INSTRUMENTATION_RUNNER_PACKAGE = 'com.google.android.mobly.snippet.SnippetRunner'
+_INSTRUMENTATION_RUNNER_PACKAGE = (
+ 'com.google.android.mobly.snippet.SnippetRunner')
-_LAUNCH_CMD = 'am instrument -e action start -e port %s %s/' + _INSTRUMENTATION_RUNNER_PACKAGE
+_LAUNCH_CMD = (
+ 'am instrument -w -e action start -e port %s %s/' +
+ _INSTRUMENTATION_RUNNER_PACKAGE)
-_STOP_CMD = 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE
+_STOP_CMD = (
+ 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
class Error(Exception):
@@ -59,6 +63,7 @@
self.package = package
self.log = log
self._serial = self._adb.serial
+ self._proc = None
def _do_start_app(self):
"""Overrides superclass."""
@@ -67,11 +72,18 @@
# helpful since they need to create their own instrumentations and
# manifest.
self.log.info('Launching snippet apk %s', self.package)
- self._adb.shell(cmd)
+ adb_cmd = ['adb', '-s', self._adb.serial, 'shell', cmd]
+ self._proc = utils.start_standing_subprocess(adb_cmd, shell=False)
def stop_app(self):
"""Overrides superclass."""
- cmd = _STOP_CMD % self.package
+ # Kill the pending 'adb shell am instrument -w' process if there is one.
+ # Although killing the snippet apk would abort this process anyway, we
+ # want to call stop_standing_subprocess() to perform a health check,
+ # print the failure stack trace if there was any, and reap it from the
+ # process table.
+ if self._proc:
+ utils.stop_standing_subprocess(self._proc)
self.log.debug('Stopping snippet apk %s', self.package)
out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8')
if 'OK (0 tests)' not in out:
| {"golden_diff": "diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py\n--- a/mobly/controllers/android_device_lib/snippet_client.py\n+++ b/mobly/controllers/android_device_lib/snippet_client.py\n@@ -17,14 +17,18 @@\n import logging\n import re\n \n-from mobly.controllers.android_device_lib import adb\n+from mobly import utils\n from mobly.controllers.android_device_lib import jsonrpc_client_base\n \n-_INSTRUMENTATION_RUNNER_PACKAGE = 'com.google.android.mobly.snippet.SnippetRunner'\n+_INSTRUMENTATION_RUNNER_PACKAGE = (\n+ 'com.google.android.mobly.snippet.SnippetRunner')\n \n-_LAUNCH_CMD = 'am instrument -e action start -e port %s %s/' + _INSTRUMENTATION_RUNNER_PACKAGE\n+_LAUNCH_CMD = (\n+ 'am instrument -w -e action start -e port %s %s/' +\n+ _INSTRUMENTATION_RUNNER_PACKAGE)\n \n-_STOP_CMD = 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE\n+_STOP_CMD = (\n+ 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)\n \n \n class Error(Exception):\n@@ -59,6 +63,7 @@\n self.package = package\n self.log = log\n self._serial = self._adb.serial\n+ self._proc = None\n \n def _do_start_app(self):\n \"\"\"Overrides superclass.\"\"\"\n@@ -67,11 +72,18 @@\n # helpful since they need to create their own instrumentations and\n # manifest.\n self.log.info('Launching snippet apk %s', self.package)\n- self._adb.shell(cmd)\n+ adb_cmd = ['adb', '-s', self._adb.serial, 'shell', cmd]\n+ self._proc = utils.start_standing_subprocess(adb_cmd, shell=False)\n \n def stop_app(self):\n \"\"\"Overrides superclass.\"\"\"\n- cmd = _STOP_CMD % self.package\n+ # Kill the pending 'adb shell am instrument -w' process if there is one.\n+ # Although killing the snippet apk would abort this process anyway, we\n+ # want to call stop_standing_subprocess() to perform a health check,\n+ # print the failure stack trace if there was any, and reap it from the\n+ # process table.\n+ if self._proc:\n+ utils.stop_standing_subprocess(self._proc)\n self.log.debug('Stopping snippet apk %s', self.package)\n out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8')\n if 'OK (0 tests)' not in out:\n", "issue": "Snippets are not compatible with UIAutomator\n'am instrument' only sets up a UiAutomationConnection in -w mode (I don't know why). Snippets are not run in wait mode, so UiAutomationConnection is null. 
This crashes uiautomator, which needs that proxy object for privileged operations back to the shell.\r\n\r\nWe need to start the snippets in -w mode, using something like `start_standing_subprocess`.\n", "before_files": [{"content": "#/usr/bin/env python3.4\n#\n# Copyright 2016 Google Inc.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"JSON RPC interface to Mobly Snippet Lib.\"\"\"\nimport logging\nimport re\n\nfrom mobly.controllers.android_device_lib import adb\nfrom mobly.controllers.android_device_lib import jsonrpc_client_base\n\n_INSTRUMENTATION_RUNNER_PACKAGE = 'com.google.android.mobly.snippet.SnippetRunner'\n\n_LAUNCH_CMD = 'am instrument -e action start -e port %s %s/' + _INSTRUMENTATION_RUNNER_PACKAGE\n\n_STOP_CMD = 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE\n\n\nclass Error(Exception):\n pass\n\n\nclass SnippetClient(jsonrpc_client_base.JsonRpcClientBase):\n \"\"\"A client for interacting with snippet APKs using Mobly Snippet Lib.\n\n See superclass documentation for a list of public attributes.\n \"\"\"\n\n def __init__(self, package, host_port, adb_proxy, log=logging.getLogger()):\n \"\"\"Initializes a SnippetClient.\n \n Args:\n package: (str) The package name of the apk where the snippets are\n defined.\n host_port: (int) The port at which to start the snippet client. Note\n that the same port will currently be used for both the\n device and host side of the connection.\n adb_proxy: (adb.AdbProxy) The adb proxy to use to start the app.\n \"\"\"\n # TODO(adorokhine): Don't assume that a free host-side port is free on\n # the device as well. Both sides should allocate a unique port.\n super(SnippetClient, self).__init__(\n host_port=host_port,\n device_port=host_port,\n app_name=package,\n adb_proxy=adb_proxy,\n log=log)\n self.package = package\n self.log = log\n self._serial = self._adb.serial\n\n def _do_start_app(self):\n \"\"\"Overrides superclass.\"\"\"\n cmd = _LAUNCH_CMD % (self.device_port, self.package)\n # Use info here so people know exactly what's happening here, which is\n # helpful since they need to create their own instrumentations and\n # manifest.\n self.log.info('Launching snippet apk %s', self.package)\n self._adb.shell(cmd)\n\n def stop_app(self):\n \"\"\"Overrides superclass.\"\"\"\n cmd = _STOP_CMD % self.package\n self.log.debug('Stopping snippet apk %s', self.package)\n out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8')\n if 'OK (0 tests)' not in out:\n raise Error('Failed to stop existing apk. 
Unexpected output: %s' %\n out)\n\n def check_app_installed(self):\n \"\"\"Overrides superclass.\"\"\"\n # Check that the Mobly Snippet app is installed.\n if not self._adb_grep_wrapper(\n r'pm list package | tr -d \"\\r\" | grep \"^package:%s$\"' %\n self.package):\n raise jsonrpc_client_base.AppStartError(\n '%s is not installed on %s' % (self.package, self._serial))\n # Check that the app is instrumented.\n out = self._adb_grep_wrapper(\n r'pm list instrumentation | tr -d \"\\r\" | grep ^instrumentation:%s/%s'\n % (self.package, _INSTRUMENTATION_RUNNER_PACKAGE))\n if not out:\n raise jsonrpc_client_base.AppStartError(\n '%s is installed on %s, but it is not instrumented.' %\n (self.package, self._serial))\n match = re.search(r'^instrumentation:(.*)\\/(.*) \\(target=(.*)\\)$', out)\n target_name = match.group(3)\n # Check that the instrumentation target is installed if it's not the\n # same as the snippet package.\n if target_name != self.package:\n out = self._adb_grep_wrapper(\n r'pm list package | tr -d \"\\r\" | grep ^package:%s$' %\n target_name)\n if not out:\n raise jsonrpc_client_base.AppStartError(\n 'Instrumentation target %s is not installed on %s' %\n (target_name, self._serial))\n\n def _start_event_client(self):\n event_client = SnippetClient(\n package=self.package,\n host_port=self.host_port,\n adb_proxy=self._adb,\n log=self.log)\n event_client.connect(self.uid,\n jsonrpc_client_base.JsonRpcCommand.CONTINUE)\n return event_client\n", "path": "mobly/controllers/android_device_lib/snippet_client.py"}], "after_files": [{"content": "#/usr/bin/env python3.4\n#\n# Copyright 2016 Google Inc.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"JSON RPC interface to Mobly Snippet Lib.\"\"\"\nimport logging\nimport re\n\nfrom mobly import utils\nfrom mobly.controllers.android_device_lib import jsonrpc_client_base\n\n_INSTRUMENTATION_RUNNER_PACKAGE = (\n 'com.google.android.mobly.snippet.SnippetRunner')\n\n_LAUNCH_CMD = (\n 'am instrument -w -e action start -e port %s %s/' +\n _INSTRUMENTATION_RUNNER_PACKAGE)\n\n_STOP_CMD = (\n 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)\n\n\nclass Error(Exception):\n pass\n\n\nclass SnippetClient(jsonrpc_client_base.JsonRpcClientBase):\n \"\"\"A client for interacting with snippet APKs using Mobly Snippet Lib.\n\n See superclass documentation for a list of public attributes.\n \"\"\"\n\n def __init__(self, package, host_port, adb_proxy, log=logging.getLogger()):\n \"\"\"Initializes a SnippetClient.\n \n Args:\n package: (str) The package name of the apk where the snippets are\n defined.\n host_port: (int) The port at which to start the snippet client. Note\n that the same port will currently be used for both the\n device and host side of the connection.\n adb_proxy: (adb.AdbProxy) The adb proxy to use to start the app.\n \"\"\"\n # TODO(adorokhine): Don't assume that a free host-side port is free on\n # the device as well. 
Both sides should allocate a unique port.\n super(SnippetClient, self).__init__(\n host_port=host_port,\n device_port=host_port,\n app_name=package,\n adb_proxy=adb_proxy,\n log=log)\n self.package = package\n self.log = log\n self._serial = self._adb.serial\n self._proc = None\n\n def _do_start_app(self):\n \"\"\"Overrides superclass.\"\"\"\n cmd = _LAUNCH_CMD % (self.device_port, self.package)\n # Use info here so people know exactly what's happening here, which is\n # helpful since they need to create their own instrumentations and\n # manifest.\n self.log.info('Launching snippet apk %s', self.package)\n adb_cmd = ['adb', '-s', self._adb.serial, 'shell', cmd]\n self._proc = utils.start_standing_subprocess(adb_cmd, shell=False)\n\n def stop_app(self):\n \"\"\"Overrides superclass.\"\"\"\n # Kill the pending 'adb shell am instrument -w' process if there is one.\n # Although killing the snippet apk would abort this process anyway, we\n # want to call stop_standing_subprocess() to perform a health check,\n # print the failure stack trace if there was any, and reap it from the\n # process table.\n if self._proc:\n utils.stop_standing_subprocess(self._proc)\n self.log.debug('Stopping snippet apk %s', self.package)\n out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8')\n if 'OK (0 tests)' not in out:\n raise Error('Failed to stop existing apk. Unexpected output: %s' %\n out)\n\n def check_app_installed(self):\n \"\"\"Overrides superclass.\"\"\"\n # Check that the Mobly Snippet app is installed.\n if not self._adb_grep_wrapper(\n r'pm list package | tr -d \"\\r\" | grep \"^package:%s$\"' %\n self.package):\n raise jsonrpc_client_base.AppStartError(\n '%s is not installed on %s' % (self.package, self._serial))\n # Check that the app is instrumented.\n out = self._adb_grep_wrapper(\n r'pm list instrumentation | tr -d \"\\r\" | grep ^instrumentation:%s/%s'\n % (self.package, _INSTRUMENTATION_RUNNER_PACKAGE))\n if not out:\n raise jsonrpc_client_base.AppStartError(\n '%s is installed on %s, but it is not instrumented.' %\n (self.package, self._serial))\n match = re.search(r'^instrumentation:(.*)\\/(.*) \\(target=(.*)\\)$', out)\n target_name = match.group(3)\n # Check that the instrumentation target is installed if it's not the\n # same as the snippet package.\n if target_name != self.package:\n out = self._adb_grep_wrapper(\n r'pm list package | tr -d \"\\r\" | grep ^package:%s$' %\n target_name)\n if not out:\n raise jsonrpc_client_base.AppStartError(\n 'Instrumentation target %s is not installed on %s' %\n (target_name, self._serial))\n\n def _start_event_client(self):\n event_client = SnippetClient(\n package=self.package,\n host_port=self.host_port,\n adb_proxy=self._adb,\n log=self.log)\n event_client.connect(self.uid,\n jsonrpc_client_base.JsonRpcCommand.CONTINUE)\n return event_client\n", "path": "mobly/controllers/android_device_lib/snippet_client.py"}]} | 1,704 | 596 |
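For reference, the core of the mobly fix above is to keep `am instrument -w` running in a long-lived subprocess (the record's diff uses `utils.start_standing_subprocess`) instead of issuing a blocking `adb.shell()` call, so the instrumentation keeps its UiAutomationConnection. A minimal standalone sketch of that pattern, using plain `subprocess` and hypothetical helper names rather than mobly's actual API, might look like:

```python
import subprocess

# Same launch command as in the record, with the added -w (wait) flag.
_LAUNCH_CMD = ('am instrument -w -e action start -e port %s %s/'
               'com.google.android.mobly.snippet.SnippetRunner')


def start_snippet(serial, device_port, package):
    """Start the snippet apk in wait mode and keep the adb process alive."""
    cmd = ['adb', '-s', serial, 'shell', _LAUNCH_CMD % (device_port, package)]
    # Popen keeps 'am instrument -w' running in the background; mobly's
    # utils.start_standing_subprocess wraps the same idea with health checks.
    return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)


def stop_snippet(proc):
    """Reap the pending 'am instrument -w' process after stopping the apk."""
    if proc is not None and proc.poll() is None:
        proc.terminate()
        proc.wait()
```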
gh_patches_debug_31019 | rasdani/github-patches | git_diff | modin-project__modin-2774 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Modin read_parquet api throws 'NoneType' object is not subscriptable
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:Ubuntu 18.04
- **Modin version** (`modin.__version__`): latest
- **Python version**: 3.6.9
- **Code we can use to reproduce**:
```python
import ray
import modin.pandas as pd
if __name__ == '__main__':
path= "path/to/parquet/part-0001-2020-05-16-064533-0990-r0101.snappy.parquet"
modin_df = pd.read_parquet(path)
modin_df.to_parquet("path/to/parquet/test.parquet")
print(f'Modin DF len = {len(modin_df)}')
```
### Describe the problem
I have generated the parquet data through Spark, and I'm trying to read a single file partition from the parquet folder; it throws "'NoneType' object is not subscriptable".
### Source code / logs
Log trace:
```
Traceback (most recent call last):
File "modinTest.py", line 6, in <module>
modin_df = pd.read_parquet(path)
File "/home/srds/virtual_env/airflow_venv/lib/python3.6/site-packages/modin/pandas/io.py", line 42, in read_parquet
path=path, columns=columns, engine=engine, **kwargs
File "/home/srds/virtual_env/airflow_venv/lib/python3.6/site-packages/modin/data_management/factories.py", line 57, in read_parquet
return cls._determine_engine()._read_parquet(**kwargs)
File "/home/srds/virtual_env/airflow_venv/lib/python3.6/site-packages/modin/data_management/factories.py", line 61, in _read_parquet
return cls.io_cls.read_parquet(**kwargs)
File "/home/srds/virtual_env/airflow_venv/lib/python3.6/site-packages/modin/engines/base/io/column_stores/parquet_reader.py", line 79, in read
meta.metadata[b"pandas"].replace(b"null", b"None")
TypeError: 'NoneType' object is not subscriptable
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modin/engines/base/io/column_stores/parquet_dispatcher.py`
Content:
```
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 import os
15
16 from modin.engines.base.io.column_stores.column_store_dispatcher import (
17 ColumnStoreDispatcher,
18 )
19 from modin.error_message import ErrorMessage
20
21
22 class ParquetDispatcher(ColumnStoreDispatcher):
23 @classmethod
24 def _read(cls, path, engine, columns, **kwargs):
25 """Load a parquet object from the file path, returning a Modin DataFrame.
26
27 Modin only supports pyarrow engine for now.
28
29 Parameters
30 ----------
31 path: str
32 The filepath of the parquet file in local filesystem or hdfs.
33 engine: 'pyarrow'
34 Parquet library to use
35 columns: list or None
36 If not None, only these columns will be read from the file.
37 kwargs: dict
38 Keyword arguments.
39
40 Returns
41 -------
42 PandasQueryCompiler
43 A new Query Compiler.
44
45 Notes
46 -----
47 ParquetFile API is used. Please refer to the documentation here
48 https://arrow.apache.org/docs/python/parquet.html
49 """
50 from pyarrow.parquet import ParquetFile, ParquetDataset
51 from modin.pandas.io import PQ_INDEX_REGEX
52
53 if isinstance(path, str) and os.path.isdir(path):
54 partitioned_columns = set()
55 directory = True
56 # We do a tree walk of the path directory because partitioned
57 # parquet directories have a unique column at each directory level.
58 # Thus, we can use os.walk(), which does a dfs search, to walk
59 # through the different columns that the data is partitioned on
60 for (root, dir_names, files) in os.walk(path):
61 if dir_names:
62 partitioned_columns.add(dir_names[0].split("=")[0])
63 if files:
64 # Metadata files, git files, .DSStore
65 if files[0][0] == ".":
66 continue
67 break
68 partitioned_columns = list(partitioned_columns)
69 if len(partitioned_columns):
70 ErrorMessage.default_to_pandas("Mixed Partitioning Columns in Parquet")
71 return cls.single_worker_read(
72 path, engine=engine, columns=columns, **kwargs
73 )
74 else:
75 directory = False
76 if not columns:
77 import s3fs
78
79 if directory:
80 # Path of the sample file that we will read to get the remaining columns
81 pd = ParquetDataset(path)
82 meta = pd.metadata
83 column_names = pd.schema.names
84 elif isinstance(path, str) and path.startswith("hdfs://"):
85 import fsspec.core
86
87 fs, path = fsspec.core.url_to_fs(path)
88 pd = ParquetDataset(path, filesystem=fs)
89 meta = pd.metadata
90 column_names = pd.schema.names
91 elif isinstance(path, s3fs.S3File) or (
92 isinstance(path, str) and path.startswith("s3://")
93 ):
94 from botocore.exceptions import NoCredentialsError
95
96 if isinstance(path, s3fs.S3File):
97 bucket_path = path.url().split(".s3.amazonaws.com")
98 path = "s3://" + bucket_path[0].split("://")[1] + bucket_path[1]
99 try:
100 fs = s3fs.S3FileSystem()
101 pd = ParquetDataset(path, filesystem=fs)
102 except NoCredentialsError:
103 fs = s3fs.S3FileSystem(anon=True)
104 pd = ParquetDataset(path, filesystem=fs)
105 meta = pd.metadata
106 column_names = pd.schema.names
107 else:
108 meta = ParquetFile(path).metadata
109 column_names = meta.schema.names
110 if meta is not None:
111 # This is how we convert the metadata from pyarrow to a python
112 # dictionary, from which we then get the index columns.
113 # We use these to filter out from the columns in the metadata since
114 # the pyarrow storage has no concept of row labels/index.
115 # This ensures that our metadata lines up with the partitions without
116 # extra communication steps once we `have done all the remote
117 # computation.
118 index_columns = eval(
119 meta.metadata[b"pandas"].replace(b"null", b"None")
120 ).get("index_columns", [])
121 column_names = [c for c in column_names if c not in index_columns]
122 columns = [name for name in column_names if not PQ_INDEX_REGEX.match(name)]
123 return cls.build_query_compiler(path, columns, **kwargs)
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modin/engines/base/io/column_stores/parquet_dispatcher.py b/modin/engines/base/io/column_stores/parquet_dispatcher.py
--- a/modin/engines/base/io/column_stores/parquet_dispatcher.py
+++ b/modin/engines/base/io/column_stores/parquet_dispatcher.py
@@ -107,17 +107,22 @@
else:
meta = ParquetFile(path).metadata
column_names = meta.schema.names
- if meta is not None:
- # This is how we convert the metadata from pyarrow to a python
- # dictionary, from which we then get the index columns.
- # We use these to filter out from the columns in the metadata since
- # the pyarrow storage has no concept of row labels/index.
- # This ensures that our metadata lines up with the partitions without
- # extra communication steps once we `have done all the remote
- # computation.
- index_columns = eval(
- meta.metadata[b"pandas"].replace(b"null", b"None")
- ).get("index_columns", [])
- column_names = [c for c in column_names if c not in index_columns]
+
+ if meta is not None and meta.metadata is not None:
+ pandas_metadata = meta.metadata.get(b"pandas", None)
+ if pandas_metadata is not None:
+ import json
+
+ # This is how we convert the metadata from pyarrow to a python
+ # dictionary, from which we then get the index columns.
+ # We use these to filter out from the columns in the metadata since
+ # the pyarrow storage has no concept of row labels/index.
+ # This ensures that our metadata lines up with the partitions without
+ # extra communication steps once we have done all the remote
+ # computation.
+ index_columns = json.loads(pandas_metadata.decode("utf8")).get(
+ "index_columns", []
+ )
+ column_names = [c for c in column_names if c not in index_columns]
columns = [name for name in column_names if not PQ_INDEX_REGEX.match(name)]
return cls.build_query_compiler(path, columns, **kwargs)
| {"golden_diff": "diff --git a/modin/engines/base/io/column_stores/parquet_dispatcher.py b/modin/engines/base/io/column_stores/parquet_dispatcher.py\n--- a/modin/engines/base/io/column_stores/parquet_dispatcher.py\n+++ b/modin/engines/base/io/column_stores/parquet_dispatcher.py\n@@ -107,17 +107,22 @@\n else:\n meta = ParquetFile(path).metadata\n column_names = meta.schema.names\n- if meta is not None:\n- # This is how we convert the metadata from pyarrow to a python\n- # dictionary, from which we then get the index columns.\n- # We use these to filter out from the columns in the metadata since\n- # the pyarrow storage has no concept of row labels/index.\n- # This ensures that our metadata lines up with the partitions without\n- # extra communication steps once we `have done all the remote\n- # computation.\n- index_columns = eval(\n- meta.metadata[b\"pandas\"].replace(b\"null\", b\"None\")\n- ).get(\"index_columns\", [])\n- column_names = [c for c in column_names if c not in index_columns]\n+\n+ if meta is not None and meta.metadata is not None:\n+ pandas_metadata = meta.metadata.get(b\"pandas\", None)\n+ if pandas_metadata is not None:\n+ import json\n+\n+ # This is how we convert the metadata from pyarrow to a python\n+ # dictionary, from which we then get the index columns.\n+ # We use these to filter out from the columns in the metadata since\n+ # the pyarrow storage has no concept of row labels/index.\n+ # This ensures that our metadata lines up with the partitions without\n+ # extra communication steps once we have done all the remote\n+ # computation.\n+ index_columns = json.loads(pandas_metadata.decode(\"utf8\")).get(\n+ \"index_columns\", []\n+ )\n+ column_names = [c for c in column_names if c not in index_columns]\n columns = [name for name in column_names if not PQ_INDEX_REGEX.match(name)]\n return cls.build_query_compiler(path, columns, **kwargs)\n", "issue": "Modin read_parquet api throws 'NoneType' object is not subscriptable\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:Ubuntu 18.04\r\n- **Modin version** (`modin.__version__`): latest\r\n- **Python version**: 3.6.9\r\n- **Code we can use to reproduce**:\r\n\r\n```python\r\nimport ray\r\nimport modin.pandas as pd\r\n\r\nif __name__ == '__main__':\r\n path= \"path/to/parquet/part-0001-2020-05-16-064533-0990-r0101.snappy.parquet\"\r\n modin_df = pd.read_parquet(path)\r\n modin_df.to_parquet(\"path/to/parquet/test.parquet\")\r\n print(f'Modin DF len = {len(modin_df)}')\r\n```\r\n\r\n### Describe the problem\r\nI have generated the parquet data through spark and I'm trying to read single file partition from parquet folder and it throws \"NoneType' object is not subscriptable\" \r\n\r\n### Source code / logs\r\n\r\nLog trace:\r\n```\r\nTraceback (most recent call last):\r\n File \"modinTest.py\", line 6, in <module>\r\n modin_df = pd.read_parquet(path)\r\n File \"/home/srds/virtual_env/airflow_venv/lib/python3.6/site-packages/modin/pandas/io.py\", line 42, in read_parquet\r\n path=path, columns=columns, engine=engine, **kwargs\r\n File \"/home/srds/virtual_env/airflow_venv/lib/python3.6/site-packages/modin/data_management/factories.py\", line 57, in read_parquet\r\n return cls._determine_engine()._read_parquet(**kwargs)\r\n File \"/home/srds/virtual_env/airflow_venv/lib/python3.6/site-packages/modin/data_management/factories.py\", line 61, in _read_parquet\r\n return cls.io_cls.read_parquet(**kwargs)\r\n File 
\"/home/srds/virtual_env/airflow_venv/lib/python3.6/site-packages/modin/engines/base/io/column_stores/parquet_reader.py\", line 79, in read\r\n meta.metadata[b\"pandas\"].replace(b\"null\", b\"None\")\r\nTypeError: 'NoneType' object is not subscriptable\r\n```\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport os\n\nfrom modin.engines.base.io.column_stores.column_store_dispatcher import (\n ColumnStoreDispatcher,\n)\nfrom modin.error_message import ErrorMessage\n\n\nclass ParquetDispatcher(ColumnStoreDispatcher):\n @classmethod\n def _read(cls, path, engine, columns, **kwargs):\n \"\"\"Load a parquet object from the file path, returning a Modin DataFrame.\n\n Modin only supports pyarrow engine for now.\n\n Parameters\n ----------\n path: str\n The filepath of the parquet file in local filesystem or hdfs.\n engine: 'pyarrow'\n Parquet library to use\n columns: list or None\n If not None, only these columns will be read from the file.\n kwargs: dict\n Keyword arguments.\n\n Returns\n -------\n PandasQueryCompiler\n A new Query Compiler.\n\n Notes\n -----\n ParquetFile API is used. 
Please refer to the documentation here\n https://arrow.apache.org/docs/python/parquet.html\n \"\"\"\n from pyarrow.parquet import ParquetFile, ParquetDataset\n from modin.pandas.io import PQ_INDEX_REGEX\n\n if isinstance(path, str) and os.path.isdir(path):\n partitioned_columns = set()\n directory = True\n # We do a tree walk of the path directory because partitioned\n # parquet directories have a unique column at each directory level.\n # Thus, we can use os.walk(), which does a dfs search, to walk\n # through the different columns that the data is partitioned on\n for (root, dir_names, files) in os.walk(path):\n if dir_names:\n partitioned_columns.add(dir_names[0].split(\"=\")[0])\n if files:\n # Metadata files, git files, .DSStore\n if files[0][0] == \".\":\n continue\n break\n partitioned_columns = list(partitioned_columns)\n if len(partitioned_columns):\n ErrorMessage.default_to_pandas(\"Mixed Partitioning Columns in Parquet\")\n return cls.single_worker_read(\n path, engine=engine, columns=columns, **kwargs\n )\n else:\n directory = False\n if not columns:\n import s3fs\n\n if directory:\n # Path of the sample file that we will read to get the remaining columns\n pd = ParquetDataset(path)\n meta = pd.metadata\n column_names = pd.schema.names\n elif isinstance(path, str) and path.startswith(\"hdfs://\"):\n import fsspec.core\n\n fs, path = fsspec.core.url_to_fs(path)\n pd = ParquetDataset(path, filesystem=fs)\n meta = pd.metadata\n column_names = pd.schema.names\n elif isinstance(path, s3fs.S3File) or (\n isinstance(path, str) and path.startswith(\"s3://\")\n ):\n from botocore.exceptions import NoCredentialsError\n\n if isinstance(path, s3fs.S3File):\n bucket_path = path.url().split(\".s3.amazonaws.com\")\n path = \"s3://\" + bucket_path[0].split(\"://\")[1] + bucket_path[1]\n try:\n fs = s3fs.S3FileSystem()\n pd = ParquetDataset(path, filesystem=fs)\n except NoCredentialsError:\n fs = s3fs.S3FileSystem(anon=True)\n pd = ParquetDataset(path, filesystem=fs)\n meta = pd.metadata\n column_names = pd.schema.names\n else:\n meta = ParquetFile(path).metadata\n column_names = meta.schema.names\n if meta is not None:\n # This is how we convert the metadata from pyarrow to a python\n # dictionary, from which we then get the index columns.\n # We use these to filter out from the columns in the metadata since\n # the pyarrow storage has no concept of row labels/index.\n # This ensures that our metadata lines up with the partitions without\n # extra communication steps once we `have done all the remote\n # computation.\n index_columns = eval(\n meta.metadata[b\"pandas\"].replace(b\"null\", b\"None\")\n ).get(\"index_columns\", [])\n column_names = [c for c in column_names if c not in index_columns]\n columns = [name for name in column_names if not PQ_INDEX_REGEX.match(name)]\n return cls.build_query_compiler(path, columns, **kwargs)\n", "path": "modin/engines/base/io/column_stores/parquet_dispatcher.py"}], "after_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport os\n\nfrom modin.engines.base.io.column_stores.column_store_dispatcher import (\n ColumnStoreDispatcher,\n)\nfrom modin.error_message import ErrorMessage\n\n\nclass ParquetDispatcher(ColumnStoreDispatcher):\n @classmethod\n def _read(cls, path, engine, columns, **kwargs):\n \"\"\"Load a parquet object from the file path, returning a Modin DataFrame.\n\n Modin only supports pyarrow engine for now.\n\n Parameters\n ----------\n path: str\n The filepath of the parquet file in local filesystem or hdfs.\n engine: 'pyarrow'\n Parquet library to use\n columns: list or None\n If not None, only these columns will be read from the file.\n kwargs: dict\n Keyword arguments.\n\n Returns\n -------\n PandasQueryCompiler\n A new Query Compiler.\n\n Notes\n -----\n ParquetFile API is used. Please refer to the documentation here\n https://arrow.apache.org/docs/python/parquet.html\n \"\"\"\n from pyarrow.parquet import ParquetFile, ParquetDataset\n from modin.pandas.io import PQ_INDEX_REGEX\n\n if isinstance(path, str) and os.path.isdir(path):\n partitioned_columns = set()\n directory = True\n # We do a tree walk of the path directory because partitioned\n # parquet directories have a unique column at each directory level.\n # Thus, we can use os.walk(), which does a dfs search, to walk\n # through the different columns that the data is partitioned on\n for (root, dir_names, files) in os.walk(path):\n if dir_names:\n partitioned_columns.add(dir_names[0].split(\"=\")[0])\n if files:\n # Metadata files, git files, .DSStore\n if files[0][0] == \".\":\n continue\n break\n partitioned_columns = list(partitioned_columns)\n if len(partitioned_columns):\n ErrorMessage.default_to_pandas(\"Mixed Partitioning Columns in Parquet\")\n return cls.single_worker_read(\n path, engine=engine, columns=columns, **kwargs\n )\n else:\n directory = False\n if not columns:\n import s3fs\n\n if directory:\n # Path of the sample file that we will read to get the remaining columns\n pd = ParquetDataset(path)\n meta = pd.metadata\n column_names = pd.schema.names\n elif isinstance(path, str) and path.startswith(\"hdfs://\"):\n import fsspec.core\n\n fs, path = fsspec.core.url_to_fs(path)\n pd = ParquetDataset(path, filesystem=fs)\n meta = pd.metadata\n column_names = pd.schema.names\n elif isinstance(path, s3fs.S3File) or (\n isinstance(path, str) and path.startswith(\"s3://\")\n ):\n from botocore.exceptions import NoCredentialsError\n\n if isinstance(path, s3fs.S3File):\n bucket_path = path.url().split(\".s3.amazonaws.com\")\n path = \"s3://\" + bucket_path[0].split(\"://\")[1] + bucket_path[1]\n try:\n fs = s3fs.S3FileSystem()\n pd = ParquetDataset(path, filesystem=fs)\n except NoCredentialsError:\n fs = s3fs.S3FileSystem(anon=True)\n pd = ParquetDataset(path, filesystem=fs)\n meta = pd.metadata\n column_names = pd.schema.names\n else:\n meta = ParquetFile(path).metadata\n column_names = meta.schema.names\n\n if meta is not None and meta.metadata is not None:\n pandas_metadata = meta.metadata.get(b\"pandas\", None)\n if pandas_metadata is not None:\n import json\n\n # This is how we convert the metadata from pyarrow to 
a python\n # dictionary, from which we then get the index columns.\n # We use these to filter out from the columns in the metadata since\n # the pyarrow storage has no concept of row labels/index.\n # This ensures that our metadata lines up with the partitions without\n # extra communication steps once we have done all the remote\n # computation.\n index_columns = json.loads(pandas_metadata.decode(\"utf8\")).get(\n \"index_columns\", []\n )\n column_names = [c for c in column_names if c not in index_columns]\n columns = [name for name in column_names if not PQ_INDEX_REGEX.match(name)]\n return cls.build_query_compiler(path, columns, **kwargs)\n", "path": "modin/engines/base/io/column_stores/parquet_dispatcher.py"}]} | 2,174 | 487 |
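The modin patch above boils down to a defensive read of the parquet "pandas" metadata block: files written without it (for example by Spark) leave `meta.metadata` as None, so every lookup is guarded and `json.loads` replaces `eval`. A minimal standalone sketch of just that read (hypothetical helper name, local file path assumed) could look like:

```python
import json
from pyarrow.parquet import ParquetFile


def parquet_index_columns(path):
    """Return the pandas index columns recorded in a parquet file, if any."""
    meta = ParquetFile(path).metadata
    if meta is None or meta.metadata is None:
        return []
    pandas_metadata = meta.metadata.get(b"pandas")
    if pandas_metadata is None:
        return []
    return json.loads(pandas_metadata.decode("utf8")).get("index_columns", [])
```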
gh_patches_debug_57176 | rasdani/github-patches | git_diff | celery__celery-4037 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
celery.contrib.sphinx fails with Sphinx 1.6.1
When using the `celery.contrib.sphinx` extension with Sphinx 1.6.1 and Celery 4.0.2, the following occurs:
```
Exception occurred:
File "/home/ubuntu/virtualenvs/venv-system/lib/python2.7/site-packages/celery/contrib/sphinx.py", line 72, in setup
app.domains['py'].directives['task'] = TaskDirective
AttributeError: 'Sphinx' object has no attribute 'domains'
The full traceback has been saved in /tmp/sphinx-err-oOWabx.log, if you want to report the issue to the developers.
Please also report this if it was a user error, so that a better error message can be provided next time.
A bug report can be filed in the tracker at <https://github.com/sphinx-doc/sphinx/issues>. Thanks!
make: *** [html] Error 1
```
The `domains` property seems to have been removed in sphinx-doc/sphinx#3656 and I think this line needs to be replaced with the [`add_directive` method](http://www.sphinx-doc.org/en/stable/extdev/appapi.html#sphinx.application.Sphinx.add_directive) (or more likely the [`add_directive_to_domain` method](http://www.sphinx-doc.org/en/stable/extdev/appapi.html#sphinx.application.Sphinx.add_directive_to_domain)).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `celery/contrib/sphinx.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Sphinx documentation plugin used to document tasks.
3
4 Introduction
5 ============
6
7 Usage
8 -----
9
10 Add the extension to your :file:`docs/conf.py` configuration module:
11
12 .. code-block:: python
13
14 extensions = (...,
15 'celery.contrib.sphinx')
16
17 If you'd like to change the prefix for tasks in reference documentation
18 then you can change the ``celery_task_prefix`` configuration value:
19
20 .. code-block:: python
21
22 celery_task_prefix = '(task)' # < default
23
24 With the extension installed `autodoc` will automatically find
25 task decorated objects and generate the correct (as well as
26 add a ``(task)`` prefix), and you can also refer to the tasks
27 using `:task:proj.tasks.add` syntax.
28
29 Use ``.. autotask::`` to manually document a task.
30 """
31 from __future__ import absolute_import, unicode_literals
32 from inspect import formatargspec
33 from sphinx.domains.python import PyModulelevel
34 from sphinx.ext.autodoc import FunctionDocumenter
35 from celery.app.task import BaseTask
36 from celery.five import getfullargspec
37
38
39 class TaskDocumenter(FunctionDocumenter):
40 """Document task definitions."""
41
42 objtype = 'task'
43 member_order = 11
44
45 @classmethod
46 def can_document_member(cls, member, membername, isattr, parent):
47 return isinstance(member, BaseTask) and getattr(member, '__wrapped__')
48
49 def format_args(self):
50 wrapped = getattr(self.object, '__wrapped__', None)
51 if wrapped is not None:
52 argspec = getfullargspec(wrapped)
53 fmt = formatargspec(*argspec)
54 fmt = fmt.replace('\\', '\\\\')
55 return fmt
56 return ''
57
58 def document_members(self, all_members=False):
59 pass
60
61
62 class TaskDirective(PyModulelevel):
63 """Sphinx task directive."""
64
65 def get_signature_prefix(self, sig):
66 return self.env.config.celery_task_prefix
67
68
69 def setup(app):
70 """Setup Sphinx extension."""
71 app.add_autodocumenter(TaskDocumenter)
72 app.domains['py'].directives['task'] = TaskDirective
73 app.add_config_value('celery_task_prefix', '(task)', True)
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/celery/contrib/sphinx.py b/celery/contrib/sphinx.py
--- a/celery/contrib/sphinx.py
+++ b/celery/contrib/sphinx.py
@@ -69,5 +69,5 @@
def setup(app):
"""Setup Sphinx extension."""
app.add_autodocumenter(TaskDocumenter)
- app.domains['py'].directives['task'] = TaskDirective
+ app.add_directive_to_domain('py', 'task', TaskDirective)
app.add_config_value('celery_task_prefix', '(task)', True)
| {"golden_diff": "diff --git a/celery/contrib/sphinx.py b/celery/contrib/sphinx.py\n--- a/celery/contrib/sphinx.py\n+++ b/celery/contrib/sphinx.py\n@@ -69,5 +69,5 @@\n def setup(app):\n \"\"\"Setup Sphinx extension.\"\"\"\n app.add_autodocumenter(TaskDocumenter)\n- app.domains['py'].directives['task'] = TaskDirective\n+ app.add_directive_to_domain('py', 'task', TaskDirective)\n app.add_config_value('celery_task_prefix', '(task)', True)\n", "issue": "celery.contrib.sphinx fails with Sphinx 1.6.1\nWhen using the `celery.contrib.sphinx` extension with Sphinx 1.6.1 with Celery 4.0.2 the following occurs:\r\n\r\n```\r\nException occurred:\r\n File \"/home/ubuntu/virtualenvs/venv-system/lib/python2.7/site-packages/celery/contrib/sphinx.py\", line 72, in setup\r\n app.domains['py'].directives['task'] = TaskDirective\r\nAttributeError: 'Sphinx' object has no attribute 'domains'\r\nThe full traceback has been saved in /tmp/sphinx-err-oOWabx.log, if you want to report the issue to the developers.\r\nPlease also report this if it was a user error, so that a better error message can be provided next time.\r\nA bug report can be filed in the tracker at <https://github.com/sphinx-doc/sphinx/issues>. Thanks!\r\nmake: *** [html] Error 1\r\n```\r\n\r\nThe `domains` property seems to have been removed in sphinx-doc/sphinx#3656 and I think this line needs to be replaced with the [`add_directive` method](http://www.sphinx-doc.org/en/stable/extdev/appapi.html#sphinx.application.Sphinx.add_directive) (or more likely the [`add_directive_to_domain` method](http://www.sphinx-doc.org/en/stable/extdev/appapi.html#sphinx.application.Sphinx.add_directive_to_domain)).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Sphinx documentation plugin used to document tasks.\n\nIntroduction\n============\n\nUsage\n-----\n\nAdd the extension to your :file:`docs/conf.py` configuration module:\n\n.. code-block:: python\n\n extensions = (...,\n 'celery.contrib.sphinx')\n\nIf you'd like to change the prefix for tasks in reference documentation\nthen you can change the ``celery_task_prefix`` configuration value:\n\n.. code-block:: python\n\n celery_task_prefix = '(task)' # < default\n\nWith the extension installed `autodoc` will automatically find\ntask decorated objects and generate the correct (as well as\nadd a ``(task)`` prefix), and you can also refer to the tasks\nusing `:task:proj.tasks.add` syntax.\n\nUse ``.. 
autotask::`` to manually document a task.\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nfrom inspect import formatargspec\nfrom sphinx.domains.python import PyModulelevel\nfrom sphinx.ext.autodoc import FunctionDocumenter\nfrom celery.app.task import BaseTask\nfrom celery.five import getfullargspec\n\n\nclass TaskDocumenter(FunctionDocumenter):\n \"\"\"Document task definitions.\"\"\"\n\n objtype = 'task'\n member_order = 11\n\n @classmethod\n def can_document_member(cls, member, membername, isattr, parent):\n return isinstance(member, BaseTask) and getattr(member, '__wrapped__')\n\n def format_args(self):\n wrapped = getattr(self.object, '__wrapped__', None)\n if wrapped is not None:\n argspec = getfullargspec(wrapped)\n fmt = formatargspec(*argspec)\n fmt = fmt.replace('\\\\', '\\\\\\\\')\n return fmt\n return ''\n\n def document_members(self, all_members=False):\n pass\n\n\nclass TaskDirective(PyModulelevel):\n \"\"\"Sphinx task directive.\"\"\"\n\n def get_signature_prefix(self, sig):\n return self.env.config.celery_task_prefix\n\n\ndef setup(app):\n \"\"\"Setup Sphinx extension.\"\"\"\n app.add_autodocumenter(TaskDocumenter)\n app.domains['py'].directives['task'] = TaskDirective\n app.add_config_value('celery_task_prefix', '(task)', True)\n", "path": "celery/contrib/sphinx.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Sphinx documentation plugin used to document tasks.\n\nIntroduction\n============\n\nUsage\n-----\n\nAdd the extension to your :file:`docs/conf.py` configuration module:\n\n.. code-block:: python\n\n extensions = (...,\n 'celery.contrib.sphinx')\n\nIf you'd like to change the prefix for tasks in reference documentation\nthen you can change the ``celery_task_prefix`` configuration value:\n\n.. code-block:: python\n\n celery_task_prefix = '(task)' # < default\n\nWith the extension installed `autodoc` will automatically find\ntask decorated objects and generate the correct (as well as\nadd a ``(task)`` prefix), and you can also refer to the tasks\nusing `:task:proj.tasks.add` syntax.\n\nUse ``.. autotask::`` to manually document a task.\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nfrom inspect import formatargspec\nfrom sphinx.domains.python import PyModulelevel\nfrom sphinx.ext.autodoc import FunctionDocumenter\nfrom celery.app.task import BaseTask\nfrom celery.five import getfullargspec\n\n\nclass TaskDocumenter(FunctionDocumenter):\n \"\"\"Document task definitions.\"\"\"\n\n objtype = 'task'\n member_order = 11\n\n @classmethod\n def can_document_member(cls, member, membername, isattr, parent):\n return isinstance(member, BaseTask) and getattr(member, '__wrapped__')\n\n def format_args(self):\n wrapped = getattr(self.object, '__wrapped__', None)\n if wrapped is not None:\n argspec = getfullargspec(wrapped)\n fmt = formatargspec(*argspec)\n fmt = fmt.replace('\\\\', '\\\\\\\\')\n return fmt\n return ''\n\n def document_members(self, all_members=False):\n pass\n\n\nclass TaskDirective(PyModulelevel):\n \"\"\"Sphinx task directive.\"\"\"\n\n def get_signature_prefix(self, sig):\n return self.env.config.celery_task_prefix\n\n\ndef setup(app):\n \"\"\"Setup Sphinx extension.\"\"\"\n app.add_autodocumenter(TaskDocumenter)\n app.add_directive_to_domain('py', 'task', TaskDirective)\n app.add_config_value('celery_task_prefix', '(task)', True)\n", "path": "celery/contrib/sphinx.py"}]} | 1,197 | 127 |
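The celery fix above is a one-line swap from the removed `Sphinx.domains` attribute to the public `add_directive_to_domain()` hook. A compressed sketch of the resulting extension setup, mirroring the patched code with the directive trimmed to its essentials (it follows the Sphinx 1.x-era API used in the record; later Sphinx versions have since changed these classes), might look like:

```python
from sphinx.domains.python import PyModulelevel


class TaskDirective(PyModulelevel):
    """Directive rendering tasks with a configurable signature prefix."""

    def get_signature_prefix(self, sig):
        return self.env.config.celery_task_prefix


def setup(app):
    # Register the directive on the 'py' domain via the supported API.
    app.add_directive_to_domain('py', 'task', TaskDirective)
    app.add_config_value('celery_task_prefix', '(task)', True)
```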
gh_patches_debug_5248 | rasdani/github-patches | git_diff | mindsdb__mindsdb-855 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add update argument to executable function
Add an optional boolean argument `update` to the [make_executable](https://github.com/mindsdb/mindsdb/blob/staging/mindsdb/utilities/wizards.py#L230)() function, so that when it is set to true the executable runs
```
py -m pip install mindsdb --upgrade
```
before
```
py -m mindsdb
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/utilities/wizards.py`
Content:
```
1 import os
2 import json
3 from datetime import datetime, timedelta
4
5
6 def _in(ask, default, use_default):
7 if use_default:
8 return default
9
10 user_input = input(f'{ask} (Default: {default})')
11 if user_input is None or user_input == '':
12 user_input = default
13
14 if type(default) == int:
15 user_input = int(user_input)
16
17 if type(default) == bool:
18 user_input = int(user_input)
19
20 return user_input
21
22
23 def auto_config(python_path, pip_path, storage_dir):
24 config = {
25 "debug": False,
26 "config_version": "1.3",
27 "api": {
28 },
29 "integrations": {
30 "default_clickhouse": {
31 "enabled": False,
32 "type": 'clickhouse'
33 },
34 "default_mariadb": {
35 "enabled": False,
36 "type": 'mariadb'
37 },
38 "default_mysql": {
39 "enabled": False,
40 "type": 'mysql'
41 },
42 "default_postgres": {
43 "enabled": False,
44 "type": 'postgres',
45 "database": 'postgres'
46 },
47 "default_mssql": {
48 "enabled": False,
49 "type": 'mssql'
50 },
51 "default_mongodb": {
52 "enabled": False,
53 "type": 'mongodb'
54 }
55 },
56 'storage_dir': storage_dir
57 }
58
59 if isinstance(python_path, str):
60 config['python_interpreter'] = python_path
61
62 if isinstance(pip_path, str):
63 config['pip_path'] = python_path
64
65 return config
66
67
68 def make_ssl_cert(file_path):
69 from cryptography import x509
70 from cryptography.x509.oid import NameOID
71 from cryptography.hazmat.backends import default_backend
72 from cryptography.hazmat.primitives import hashes
73 from cryptography.hazmat.primitives import serialization
74 from cryptography.hazmat.primitives.asymmetric import rsa
75
76 key = rsa.generate_private_key(
77 public_exponent=65537,
78 key_size=2048,
79 backend=default_backend(),
80 )
81
82 name = x509.Name([
83 x509.NameAttribute(NameOID.COMMON_NAME, 'mdb_autogen'),
84 x509.NameAttribute(NameOID.COUNTRY_NAME, 'US'),
85 x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, 'California'),
86 x509.NameAttribute(NameOID.LOCALITY_NAME, 'Berkeley'),
87 x509.NameAttribute(NameOID.ORGANIZATION_NAME, 'MindsDB')
88 ])
89
90 now = datetime.utcnow()
91 cert = (
92 x509.CertificateBuilder()
93 .subject_name(name)
94 .issuer_name(name)
95 .public_key(key.public_key())
96 .serial_number(1)
97 .not_valid_before(now - timedelta(days=10 * 365))
98 .not_valid_after(now + timedelta(days=10 * 365))
99 .add_extension(
100 x509.BasicConstraints(ca=True, path_length=0),
101 False
102 )
103 .sign(key, hashes.SHA256(), default_backend())
104 )
105 cert_pem = cert.public_bytes(encoding=serialization.Encoding.PEM)
106 key_pem = key.private_bytes(
107 encoding=serialization.Encoding.PEM,
108 format=serialization.PrivateFormat.TraditionalOpenSSL,
109 encryption_algorithm=serialization.NoEncryption(),
110 )
111
112 with open(file_path, 'wb') as f:
113 f.write(key_pem + cert_pem)
114
115
116 def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False):
117 config = auto_config(python_path, pip_path, storage_dir)
118
119 http = _in('Enable HTTP API ? [Y/N]', 'Y', use_default)
120 if http in ['Y', 'y']:
121 config['api']['http'] = {}
122 config['api']['http']['host'] = _in('HTTP interface host: ', '127.0.0.1', use_default)
123 config['api']['http']['port'] = _in('HTTP interface port: ', '47334', use_default)
124
125 mysql = _in('Enable MYSQL API ? [Y/N]', 'Y', use_default)
126 if mysql in ['Y', 'y']:
127 config['api']['mysql'] = {}
128 config['api']['mysql']['host'] = _in('MYSQL interface host', '127.0.0.1', use_default)
129 config['api']['mysql']['port'] = _in('MYSQL interface port', '47335', use_default)
130 config['api']['mysql']['user'] = _in('MYSQL interface user', 'mindsdb', use_default)
131 config['api']['mysql']['password'] = _in('MYSQL interface password', '', use_default)
132
133 mongodb = _in('Enable Mongo API ? [Y/N]', 'Y', use_default)
134 if mongodb in ['Y', 'y']:
135 config['api']['mongodb'] = {}
136 config['api']['mongodb']['host'] = _in('Mongo interface host: ', '127.0.0.1', use_default)
137 config['api']['mongodb']['port'] = _in('Mongo interface port: ', '47336', use_default)
138
139 clickhouse = _in('Connect to clickhouse ? [Y/N]', 'Y', use_default)
140 if clickhouse in ['Y', 'y']:
141 config['integrations']['default_clickhouse']['enabled'] = _in('Enable Clickhouse integration?: ', False, use_default)
142 config['integrations']['default_clickhouse']['host'] = _in('Clickhouse host: ', '127.0.0.1', use_default)
143 config['integrations']['default_clickhouse']['port'] = _in('Clickhouse port: ', 8123, use_default)
144 config['integrations']['default_clickhouse']['user'] = _in('Clickhouse user: ', 'default', use_default)
145 config['integrations']['default_clickhouse']['password'] = _in('Clickhouse password: ', '', use_default)
146 config['integrations']['default_clickhouse']['type'] = 'clickhouse'
147
148 mariadb = _in('Connect to Mariadb ? [Y/N]', 'Y', use_default)
149 if mariadb in ['Y', 'y']:
150 config['integrations']['default_mariadb']['enabled'] = _in('Enable Mariadb integration?: ', False, use_default)
151 config['integrations']['default_mariadb']['host'] = _in('Mariadb host: ', '127.0.0.1', use_default)
152 config['integrations']['default_mariadb']['port'] = _in('Mariadb port: ', 3306, use_default)
153 config['integrations']['default_mariadb']['user'] = _in('Mariadb user: ', 'root', use_default)
154 config['integrations']['default_mariadb']['password'] = _in('Mariadb password: ', '', use_default)
155 config['integrations']['default_mariadb']['type'] = 'mariadb'
156
157 mysql = _in('Connect to MySQL ? [Y/N]', 'Y', use_default)
158 if mysql in ['Y', 'y']:
159 config['integrations']['default_mysql']['enabled'] = _in('Enable MySQL integration?: ', False, use_default)
160 config['integrations']['default_mysql']['host'] = _in('MySQL host: ', '127.0.0.1', use_default)
161 config['integrations']['default_mysql']['port'] = _in('MySQL port: ', 3306, use_default)
162 config['integrations']['default_mysql']['user'] = _in('MySQL user: ', 'root', use_default)
163 config['integrations']['default_mysql']['password'] = _in('MySQL password: ', '', use_default)
164 config['integrations']['default_mysql']['type'] = 'mysql'
165
166 mysql = _in('Connect to PostgreSQL ? [Y/N]', 'Y', use_default)
167 if mysql in ['Y', 'y']:
168 config['integrations']['default_postgres']['enabled'] = _in('Enable PostgreSQL integration?: ', False, use_default)
169 config['integrations']['default_postgres']['host'] = _in('PostgreSQL host: ', '127.0.0.1', use_default)
170 config['integrations']['default_postgres']['port'] = _in('PostgreSQL port: ', 5432, use_default)
171 config['integrations']['default_postgres']['user'] = _in('PostgreSQL user: ', 'postgres', use_default)
172 config['integrations']['default_postgres']['password'] = _in('PostgreSQL password: ', '', use_default)
173 config['integrations']['default_postgres']['database'] = _in('PostgreSQL database: ', 'postgres', use_default)
174 config['integrations']['default_postgres']['type'] = 'postgres'
175
176 mssql = _in('Connect to MSSQL ? [Y/N]', 'Y', use_default)
177 if mssql in ['Y', 'y']:
178 config['integrations']['default_mssql']['enabled'] = _in('Enable MSSQL integration?: ', False, use_default)
179 config['integrations']['default_mssql']['host'] = _in('MSSQL host: ', '127.0.0.1', use_default)
180 config['integrations']['default_mssql']['port'] = _in('MSSQL port: ', 1433, use_default)
181 config['integrations']['default_mssql']['user'] = _in('MSSQL user: ', 'sa', use_default)
182 config['integrations']['default_mssql']['password'] = _in('MSSQL password: ', '', use_default)
183 config['integrations']['default_mssql']['odbc_driver_name'] = _in('MySQL ODBC driver name: ', 'MySQL ODBC 8.0 Unicode Driver', use_default)
184 config['integrations']['default_mssql']['type'] = 'mssql'
185
186 mongodb = _in('Connect to MongoDB ? [Y/N]', 'Y', use_default)
187 if mongodb in ['Y', 'y']:
188 config['integrations']['default_mongodb']['enabled'] = _in('Enable MongoDB integration?: ', False, use_default)
189 config['integrations']['default_mongodb']['host'] = _in('MongoDB host: ', '127.0.0.1', use_default)
190 config['integrations']['default_mongodb']['port'] = _in('MongoDB port: ', 27017, use_default)
191 config['integrations']['default_mongodb']['user'] = _in('MongoDB user: ', '', use_default)
192 config['integrations']['default_mongodb']['password'] = _in('MongoDB password: ', '', use_default)
193 config['integrations']['default_mongodb']['type'] = 'mongodb'
194
195 for db_name in list(config['integrations'].keys()):
196 if not config['integrations'][db_name]['enabled']:
197 del config['integrations'][db_name]
198
199
200 config_path = os.path.join(config_dir, 'config.json')
201 with open(config_path, 'w') as fp:
202 json.dump(config, fp, indent=4, sort_keys=True)
203
204 return config_path
205
206
207 def daemon_creator(python_path, config_path=None):
208 daemon_path = '/etc/systemd/system/mindsdb.service'
209 service_txt = f"""[Unit]
210 Description=Mindsdb
211 [Service]
212 ExecStart={python_path} -m mindsdb { "--config="+config_path if config_path else ""}
213 [Install]
214 WantedBy=multi-user.target
215 """.strip(' ')
216
217 try:
218 with open(daemon_path, 'w') as fp:
219 fp.write(service_txt)
220 except Exception as e:
221 print(f'Failed to create daemon, error: {e}')
222
223 try:
224 os.system('systemctl daemon-reload')
225 except Exception as e:
226 print(f'Failed to load daemon, error: {e}')
227 return daemon_path
228
229
230 def make_executable(python_path, exec_path, config_path=None):
231 text = f"""#!/bin/bash
232 {python_path} -m mindsdb { "--config="+config_path if config_path else ""}
233 """
234
235 with open(exec_path, 'w') as fp:
236 fp.write(text)
237
238 os.system(f'chmod +x {exec_path}')
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mindsdb/utilities/wizards.py b/mindsdb/utilities/wizards.py
--- a/mindsdb/utilities/wizards.py
+++ b/mindsdb/utilities/wizards.py
@@ -227,8 +227,14 @@
return daemon_path
-def make_executable(python_path, exec_path, config_path=None):
+def make_executable(python_path, exec_path, config_path=None, update=False):
text = f"""#!/bin/bash
+update={str(update).lower()}
+
+if [ "$update" = true ]
+ then
+ {python_path} -m pip install mindsdb --upgrade
+fi
{python_path} -m mindsdb { "--config="+config_path if config_path else ""}
"""
| {"golden_diff": "diff --git a/mindsdb/utilities/wizards.py b/mindsdb/utilities/wizards.py\n--- a/mindsdb/utilities/wizards.py\n+++ b/mindsdb/utilities/wizards.py\n@@ -227,8 +227,14 @@\n return daemon_path\n \n \n-def make_executable(python_path, exec_path, config_path=None):\n+def make_executable(python_path, exec_path, config_path=None, update=False):\n text = f\"\"\"#!/bin/bash\n+update={str(update).lower()}\n+\n+if [ \"$update\" = true ] \n+ then\n+ {python_path} -m pip install mindsdb --upgrade \n+fi\n {python_path} -m mindsdb { \"--config=\"+config_path if config_path else \"\"}\n \"\"\"\n", "issue": "Add update argument to executable function\nAdd optional boolean argument update to [make_executable](https://github.com/mindsdb/mindsdb/blob/staging/mindsdb/utilities/wizards.py#L230)() function, so if it's set to true then the executable should run \r\n```\r\npy -m pip install mindsdb --upgrade \r\n```\r\nbefore \r\n```\r\npy -m mindsdb\r\n```\r\n\n", "before_files": [{"content": "import os\nimport json\nfrom datetime import datetime, timedelta\n\n\ndef _in(ask, default, use_default):\n if use_default:\n return default\n\n user_input = input(f'{ask} (Default: {default})')\n if user_input is None or user_input == '':\n user_input = default\n\n if type(default) == int:\n user_input = int(user_input)\n\n if type(default) == bool:\n user_input = int(user_input)\n\n return user_input\n\n\ndef auto_config(python_path, pip_path, storage_dir):\n config = {\n \"debug\": False,\n \"config_version\": \"1.3\",\n \"api\": {\n },\n \"integrations\": {\n \"default_clickhouse\": {\n \"enabled\": False,\n \"type\": 'clickhouse'\n },\n \"default_mariadb\": {\n \"enabled\": False,\n \"type\": 'mariadb'\n },\n \"default_mysql\": {\n \"enabled\": False,\n \"type\": 'mysql'\n },\n \"default_postgres\": {\n \"enabled\": False,\n \"type\": 'postgres',\n \"database\": 'postgres'\n },\n \"default_mssql\": {\n \"enabled\": False,\n \"type\": 'mssql'\n },\n \"default_mongodb\": {\n \"enabled\": False,\n \"type\": 'mongodb'\n }\n },\n 'storage_dir': storage_dir\n }\n\n if isinstance(python_path, str):\n config['python_interpreter'] = python_path\n\n if isinstance(pip_path, str):\n config['pip_path'] = python_path\n\n return config\n\n\ndef make_ssl_cert(file_path):\n from cryptography import x509\n from cryptography.x509.oid import NameOID\n from cryptography.hazmat.backends import default_backend\n from cryptography.hazmat.primitives import hashes\n from cryptography.hazmat.primitives import serialization\n from cryptography.hazmat.primitives.asymmetric import rsa\n\n key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=default_backend(),\n )\n\n name = x509.Name([\n x509.NameAttribute(NameOID.COMMON_NAME, 'mdb_autogen'),\n x509.NameAttribute(NameOID.COUNTRY_NAME, 'US'),\n x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, 'California'),\n x509.NameAttribute(NameOID.LOCALITY_NAME, 'Berkeley'),\n x509.NameAttribute(NameOID.ORGANIZATION_NAME, 'MindsDB')\n ])\n\n now = datetime.utcnow()\n cert = (\n x509.CertificateBuilder()\n .subject_name(name)\n .issuer_name(name)\n .public_key(key.public_key())\n .serial_number(1)\n .not_valid_before(now - timedelta(days=10 * 365))\n .not_valid_after(now + timedelta(days=10 * 365))\n .add_extension(\n x509.BasicConstraints(ca=True, path_length=0),\n False\n )\n .sign(key, hashes.SHA256(), default_backend())\n )\n cert_pem = cert.public_bytes(encoding=serialization.Encoding.PEM)\n key_pem = key.private_bytes(\n 
encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption(),\n )\n\n with open(file_path, 'wb') as f:\n f.write(key_pem + cert_pem)\n\n\ndef cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False):\n config = auto_config(python_path, pip_path, storage_dir)\n\n http = _in('Enable HTTP API ? [Y/N]', 'Y', use_default)\n if http in ['Y', 'y']:\n config['api']['http'] = {}\n config['api']['http']['host'] = _in('HTTP interface host: ', '127.0.0.1', use_default)\n config['api']['http']['port'] = _in('HTTP interface port: ', '47334', use_default)\n\n mysql = _in('Enable MYSQL API ? [Y/N]', 'Y', use_default)\n if mysql in ['Y', 'y']:\n config['api']['mysql'] = {}\n config['api']['mysql']['host'] = _in('MYSQL interface host', '127.0.0.1', use_default)\n config['api']['mysql']['port'] = _in('MYSQL interface port', '47335', use_default)\n config['api']['mysql']['user'] = _in('MYSQL interface user', 'mindsdb', use_default)\n config['api']['mysql']['password'] = _in('MYSQL interface password', '', use_default)\n\n mongodb = _in('Enable Mongo API ? [Y/N]', 'Y', use_default)\n if mongodb in ['Y', 'y']:\n config['api']['mongodb'] = {}\n config['api']['mongodb']['host'] = _in('Mongo interface host: ', '127.0.0.1', use_default)\n config['api']['mongodb']['port'] = _in('Mongo interface port: ', '47336', use_default)\n\n clickhouse = _in('Connect to clickhouse ? [Y/N]', 'Y', use_default)\n if clickhouse in ['Y', 'y']:\n config['integrations']['default_clickhouse']['enabled'] = _in('Enable Clickhouse integration?: ', False, use_default)\n config['integrations']['default_clickhouse']['host'] = _in('Clickhouse host: ', '127.0.0.1', use_default)\n config['integrations']['default_clickhouse']['port'] = _in('Clickhouse port: ', 8123, use_default)\n config['integrations']['default_clickhouse']['user'] = _in('Clickhouse user: ', 'default', use_default)\n config['integrations']['default_clickhouse']['password'] = _in('Clickhouse password: ', '', use_default)\n config['integrations']['default_clickhouse']['type'] = 'clickhouse'\n\n mariadb = _in('Connect to Mariadb ? [Y/N]', 'Y', use_default)\n if mariadb in ['Y', 'y']:\n config['integrations']['default_mariadb']['enabled'] = _in('Enable Mariadb integration?: ', False, use_default)\n config['integrations']['default_mariadb']['host'] = _in('Mariadb host: ', '127.0.0.1', use_default)\n config['integrations']['default_mariadb']['port'] = _in('Mariadb port: ', 3306, use_default)\n config['integrations']['default_mariadb']['user'] = _in('Mariadb user: ', 'root', use_default)\n config['integrations']['default_mariadb']['password'] = _in('Mariadb password: ', '', use_default)\n config['integrations']['default_mariadb']['type'] = 'mariadb'\n\n mysql = _in('Connect to MySQL ? [Y/N]', 'Y', use_default)\n if mysql in ['Y', 'y']:\n config['integrations']['default_mysql']['enabled'] = _in('Enable MySQL integration?: ', False, use_default)\n config['integrations']['default_mysql']['host'] = _in('MySQL host: ', '127.0.0.1', use_default)\n config['integrations']['default_mysql']['port'] = _in('MySQL port: ', 3306, use_default)\n config['integrations']['default_mysql']['user'] = _in('MySQL user: ', 'root', use_default)\n config['integrations']['default_mysql']['password'] = _in('MySQL password: ', '', use_default)\n config['integrations']['default_mysql']['type'] = 'mysql'\n\n mysql = _in('Connect to PostgreSQL ? 
[Y/N]', 'Y', use_default)\n if mysql in ['Y', 'y']:\n config['integrations']['default_postgres']['enabled'] = _in('Enable PostgreSQL integration?: ', False, use_default)\n config['integrations']['default_postgres']['host'] = _in('PostgreSQL host: ', '127.0.0.1', use_default)\n config['integrations']['default_postgres']['port'] = _in('PostgreSQL port: ', 5432, use_default)\n config['integrations']['default_postgres']['user'] = _in('PostgreSQL user: ', 'postgres', use_default)\n config['integrations']['default_postgres']['password'] = _in('PostgreSQL password: ', '', use_default)\n config['integrations']['default_postgres']['database'] = _in('PostgreSQL database: ', 'postgres', use_default)\n config['integrations']['default_postgres']['type'] = 'postgres'\n\n mssql = _in('Connect to MSSQL ? [Y/N]', 'Y', use_default)\n if mssql in ['Y', 'y']:\n config['integrations']['default_mssql']['enabled'] = _in('Enable MSSQL integration?: ', False, use_default)\n config['integrations']['default_mssql']['host'] = _in('MSSQL host: ', '127.0.0.1', use_default)\n config['integrations']['default_mssql']['port'] = _in('MSSQL port: ', 1433, use_default)\n config['integrations']['default_mssql']['user'] = _in('MSSQL user: ', 'sa', use_default)\n config['integrations']['default_mssql']['password'] = _in('MSSQL password: ', '', use_default)\n config['integrations']['default_mssql']['odbc_driver_name'] = _in('MySQL ODBC driver name: ', 'MySQL ODBC 8.0 Unicode Driver', use_default)\n config['integrations']['default_mssql']['type'] = 'mssql'\n\n mongodb = _in('Connect to MongoDB ? [Y/N]', 'Y', use_default)\n if mongodb in ['Y', 'y']:\n config['integrations']['default_mongodb']['enabled'] = _in('Enable MongoDB integration?: ', False, use_default)\n config['integrations']['default_mongodb']['host'] = _in('MongoDB host: ', '127.0.0.1', use_default)\n config['integrations']['default_mongodb']['port'] = _in('MongoDB port: ', 27017, use_default)\n config['integrations']['default_mongodb']['user'] = _in('MongoDB user: ', '', use_default)\n config['integrations']['default_mongodb']['password'] = _in('MongoDB password: ', '', use_default)\n config['integrations']['default_mongodb']['type'] = 'mongodb'\n\n for db_name in list(config['integrations'].keys()):\n if not config['integrations'][db_name]['enabled']:\n del config['integrations'][db_name]\n\n\n config_path = os.path.join(config_dir, 'config.json')\n with open(config_path, 'w') as fp:\n json.dump(config, fp, indent=4, sort_keys=True)\n\n return config_path\n\n\ndef daemon_creator(python_path, config_path=None):\n daemon_path = '/etc/systemd/system/mindsdb.service'\n service_txt = f\"\"\"[Unit]\n Description=Mindsdb\n [Service]\n ExecStart={python_path} -m mindsdb { \"--config=\"+config_path if config_path else \"\"}\n [Install]\n WantedBy=multi-user.target\n \"\"\".strip(' ')\n\n try:\n with open(daemon_path, 'w') as fp:\n fp.write(service_txt)\n except Exception as e:\n print(f'Failed to create daemon, error: {e}')\n\n try:\n os.system('systemctl daemon-reload')\n except Exception as e:\n print(f'Failed to load daemon, error: {e}')\n return daemon_path\n\n\ndef make_executable(python_path, exec_path, config_path=None):\n text = f\"\"\"#!/bin/bash\n{python_path} -m mindsdb { \"--config=\"+config_path if config_path else \"\"}\n\"\"\"\n\n with open(exec_path, 'w') as fp:\n fp.write(text)\n\n os.system(f'chmod +x {exec_path}')\n", "path": "mindsdb/utilities/wizards.py"}], "after_files": [{"content": "import os\nimport json\nfrom datetime import datetime, 
timedelta\n\n\ndef _in(ask, default, use_default):\n if use_default:\n return default\n\n user_input = input(f'{ask} (Default: {default})')\n if user_input is None or user_input == '':\n user_input = default\n\n if type(default) == int:\n user_input = int(user_input)\n\n if type(default) == bool:\n user_input = int(user_input)\n\n return user_input\n\n\ndef auto_config(python_path, pip_path, storage_dir):\n config = {\n \"debug\": False,\n \"config_version\": \"1.3\",\n \"api\": {\n },\n \"integrations\": {\n \"default_clickhouse\": {\n \"enabled\": False,\n \"type\": 'clickhouse'\n },\n \"default_mariadb\": {\n \"enabled\": False,\n \"type\": 'mariadb'\n },\n \"default_mysql\": {\n \"enabled\": False,\n \"type\": 'mysql'\n },\n \"default_postgres\": {\n \"enabled\": False,\n \"type\": 'postgres',\n \"database\": 'postgres'\n },\n \"default_mssql\": {\n \"enabled\": False,\n \"type\": 'mssql'\n },\n \"default_mongodb\": {\n \"enabled\": False,\n \"type\": 'mongodb'\n }\n },\n 'storage_dir': storage_dir\n }\n\n if isinstance(python_path, str):\n config['python_interpreter'] = python_path\n\n if isinstance(pip_path, str):\n config['pip_path'] = python_path\n\n return config\n\n\ndef make_ssl_cert(file_path):\n from cryptography import x509\n from cryptography.x509.oid import NameOID\n from cryptography.hazmat.backends import default_backend\n from cryptography.hazmat.primitives import hashes\n from cryptography.hazmat.primitives import serialization\n from cryptography.hazmat.primitives.asymmetric import rsa\n\n key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=default_backend(),\n )\n\n name = x509.Name([\n x509.NameAttribute(NameOID.COMMON_NAME, 'mdb_autogen'),\n x509.NameAttribute(NameOID.COUNTRY_NAME, 'US'),\n x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, 'California'),\n x509.NameAttribute(NameOID.LOCALITY_NAME, 'Berkeley'),\n x509.NameAttribute(NameOID.ORGANIZATION_NAME, 'MindsDB')\n ])\n\n now = datetime.utcnow()\n cert = (\n x509.CertificateBuilder()\n .subject_name(name)\n .issuer_name(name)\n .public_key(key.public_key())\n .serial_number(1)\n .not_valid_before(now - timedelta(days=10 * 365))\n .not_valid_after(now + timedelta(days=10 * 365))\n .add_extension(\n x509.BasicConstraints(ca=True, path_length=0),\n False\n )\n .sign(key, hashes.SHA256(), default_backend())\n )\n cert_pem = cert.public_bytes(encoding=serialization.Encoding.PEM)\n key_pem = key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption(),\n )\n\n with open(file_path, 'wb') as f:\n f.write(key_pem + cert_pem)\n\n\ndef cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False):\n config = auto_config(python_path, pip_path, storage_dir)\n\n http = _in('Enable HTTP API ? [Y/N]', 'Y', use_default)\n if http in ['Y', 'y']:\n config['api']['http'] = {}\n config['api']['http']['host'] = _in('HTTP interface host: ', '127.0.0.1', use_default)\n config['api']['http']['port'] = _in('HTTP interface port: ', '47334', use_default)\n\n mysql = _in('Enable MYSQL API ? 
[Y/N]', 'Y', use_default)\n if mysql in ['Y', 'y']:\n config['api']['mysql'] = {}\n config['api']['mysql']['host'] = _in('MYSQL interface host', '127.0.0.1', use_default)\n config['api']['mysql']['port'] = _in('MYSQL interface port', '47335', use_default)\n config['api']['mysql']['user'] = _in('MYSQL interface user', 'mindsdb', use_default)\n config['api']['mysql']['password'] = _in('MYSQL interface password', '', use_default)\n\n mongodb = _in('Enable Mongo API ? [Y/N]', 'Y', use_default)\n if mongodb in ['Y', 'y']:\n config['api']['mongodb'] = {}\n config['api']['mongodb']['host'] = _in('Mongo interface host: ', '127.0.0.1', use_default)\n config['api']['mongodb']['port'] = _in('Mongo interface port: ', '47336', use_default)\n\n clickhouse = _in('Connect to clickhouse ? [Y/N]', 'Y', use_default)\n if clickhouse in ['Y', 'y']:\n config['integrations']['default_clickhouse']['enabled'] = _in('Enable Clickhouse integration?: ', False, use_default)\n config['integrations']['default_clickhouse']['host'] = _in('Clickhouse host: ', '127.0.0.1', use_default)\n config['integrations']['default_clickhouse']['port'] = _in('Clickhouse port: ', 8123, use_default)\n config['integrations']['default_clickhouse']['user'] = _in('Clickhouse user: ', 'default', use_default)\n config['integrations']['default_clickhouse']['password'] = _in('Clickhouse password: ', '', use_default)\n config['integrations']['default_clickhouse']['type'] = 'clickhouse'\n\n mariadb = _in('Connect to Mariadb ? [Y/N]', 'Y', use_default)\n if mariadb in ['Y', 'y']:\n config['integrations']['default_mariadb']['enabled'] = _in('Enable Mariadb integration?: ', False, use_default)\n config['integrations']['default_mariadb']['host'] = _in('Mariadb host: ', '127.0.0.1', use_default)\n config['integrations']['default_mariadb']['port'] = _in('Mariadb port: ', 3306, use_default)\n config['integrations']['default_mariadb']['user'] = _in('Mariadb user: ', 'root', use_default)\n config['integrations']['default_mariadb']['password'] = _in('Mariadb password: ', '', use_default)\n config['integrations']['default_mariadb']['type'] = 'mariadb'\n\n mysql = _in('Connect to MySQL ? [Y/N]', 'Y', use_default)\n if mysql in ['Y', 'y']:\n config['integrations']['default_mysql']['enabled'] = _in('Enable MySQL integration?: ', False, use_default)\n config['integrations']['default_mysql']['host'] = _in('MySQL host: ', '127.0.0.1', use_default)\n config['integrations']['default_mysql']['port'] = _in('MySQL port: ', 3306, use_default)\n config['integrations']['default_mysql']['user'] = _in('MySQL user: ', 'root', use_default)\n config['integrations']['default_mysql']['password'] = _in('MySQL password: ', '', use_default)\n config['integrations']['default_mysql']['type'] = 'mysql'\n\n mysql = _in('Connect to PostgreSQL ? 
[Y/N]', 'Y', use_default)\n if mysql in ['Y', 'y']:\n config['integrations']['default_postgres']['enabled'] = _in('Enable PostgreSQL integration?: ', False, use_default)\n config['integrations']['default_postgres']['host'] = _in('PostgreSQL host: ', '127.0.0.1', use_default)\n config['integrations']['default_postgres']['port'] = _in('PostgreSQL port: ', 5432, use_default)\n config['integrations']['default_postgres']['user'] = _in('PostgreSQL user: ', 'postgres', use_default)\n config['integrations']['default_postgres']['password'] = _in('PostgreSQL password: ', '', use_default)\n config['integrations']['default_postgres']['database'] = _in('PostgreSQL database: ', 'postgres', use_default)\n config['integrations']['default_postgres']['type'] = 'postgres'\n\n mssql = _in('Connect to MSSQL ? [Y/N]', 'Y', use_default)\n if mssql in ['Y', 'y']:\n config['integrations']['default_mssql']['enabled'] = _in('Enable MSSQL integration?: ', False, use_default)\n config['integrations']['default_mssql']['host'] = _in('MSSQL host: ', '127.0.0.1', use_default)\n config['integrations']['default_mssql']['port'] = _in('MSSQL port: ', 1433, use_default)\n config['integrations']['default_mssql']['user'] = _in('MSSQL user: ', 'sa', use_default)\n config['integrations']['default_mssql']['password'] = _in('MSSQL password: ', '', use_default)\n config['integrations']['default_mssql']['odbc_driver_name'] = _in('MySQL ODBC driver name: ', 'MySQL ODBC 8.0 Unicode Driver', use_default)\n config['integrations']['default_mssql']['type'] = 'mssql'\n\n mongodb = _in('Connect to MongoDB ? [Y/N]', 'Y', use_default)\n if mongodb in ['Y', 'y']:\n config['integrations']['default_mongodb']['enabled'] = _in('Enable MongoDB integration?: ', False, use_default)\n config['integrations']['default_mongodb']['host'] = _in('MongoDB host: ', '127.0.0.1', use_default)\n config['integrations']['default_mongodb']['port'] = _in('MongoDB port: ', 27017, use_default)\n config['integrations']['default_mongodb']['user'] = _in('MongoDB user: ', '', use_default)\n config['integrations']['default_mongodb']['password'] = _in('MongoDB password: ', '', use_default)\n config['integrations']['default_mongodb']['type'] = 'mongodb'\n\n for db_name in list(config['integrations'].keys()):\n if not config['integrations'][db_name]['enabled']:\n del config['integrations'][db_name]\n\n\n config_path = os.path.join(config_dir, 'config.json')\n with open(config_path, 'w') as fp:\n json.dump(config, fp, indent=4, sort_keys=True)\n\n return config_path\n\n\ndef daemon_creator(python_path, config_path=None):\n daemon_path = '/etc/systemd/system/mindsdb.service'\n service_txt = f\"\"\"[Unit]\n Description=Mindsdb\n [Service]\n ExecStart={python_path} -m mindsdb { \"--config=\"+config_path if config_path else \"\"}\n [Install]\n WantedBy=multi-user.target\n \"\"\".strip(' ')\n\n try:\n with open(daemon_path, 'w') as fp:\n fp.write(service_txt)\n except Exception as e:\n print(f'Failed to create daemon, error: {e}')\n\n try:\n os.system('systemctl daemon-reload')\n except Exception as e:\n print(f'Failed to load daemon, error: {e}')\n return daemon_path\n\n\ndef make_executable(python_path, exec_path, config_path=None, update=False):\n text = f\"\"\"#!/bin/bash\nupdate={str(update).lower()}\n\nif [ \"$update\" = true ] \n then\n {python_path} -m pip install mindsdb --upgrade \nfi\n{python_path} -m mindsdb { \"--config=\"+config_path if config_path else \"\"}\n\"\"\"\n\n with open(exec_path, 'w') as fp:\n fp.write(text)\n\n os.system(f'chmod +x {exec_path}')\n", "path": 
"mindsdb/utilities/wizards.py"}]} | 3,673 | 170 |
gh_patches_debug_21604 | rasdani/github-patches | git_diff | tough-dev-school__education-backend-68 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Install django-axes
https://github.com/jazzband/django-axes
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/app/settings.py`
Content:
```
1 import os
2
3 import environ
4 from celery.schedules import crontab
5
6 root = environ.Path(__file__) - 2 # three folder back (/a/b/c/ - 3 = /)
7 env = environ.Env(DEBUG=(bool, False)) # set default values and casting
8 environ.Env.read_env() # reading .env file
9 SITE_ROOT = root()
10
11 USE_L10N = True
12 USE_i18N = True
13
14 LANGUAGE_CODE = 'ru'
15 LOCALE_PATHS = ['locale']
16
17 INTERNAL_IPS = [
18 '127.0.0.1',
19 ]
20 FRONTEND_URL = 'https://education.borshev.com'
21
22 USE_TZ = False
23 TIME_ZONE = env('TIME_ZONE', cast=str, default='Europe/Moscow')
24
25 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
26 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
27 TEST_RUNNER = 'app.test.disable_test_command_runner.DisableTestCommandRunner'
28
29
30 # Quick-start development settings - unsuitable for production
31 # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
32
33 # SECURITY WARNING: keep the secret key used in production secret!
34 SECRET_KEY = env('SECRET_KEY', cast=str, default='s3cr3t')
35
36 # SECURITY WARNING: don't run with debug turned on in production!
37 DEBUG = env('DEBUG', cast=bool, default=False)
38 CI = env('CI', cast=bool, default=False)
39
40 ABSOLUTE_HOST = env('ABSOLUTE_HOST', cast=str, default='https://edu-app.borshev.com')
41 ALLOWED_HOSTS = [
42 'edu-app.borshev.com',
43 'localhost',
44 'localhost:8000',
45 'education.borshev.com',
46 ABSOLUTE_HOST.replace('https://', ''),
47 ]
48
49 CORS_ORIGIN_WHITELIST = [
50 'https://pmdaily.ru',
51 'https://education.borshev.com',
52 ]
53
54 CSRF_TRUSTED_ORIGINS = [
55 'pmdaily.ru',
56 'education.borshev.com',
57 'borshev.com',
58 ]
59
60
61 # Application definition
62
63 INSTALLED_APPS = [
64 'app',
65 'users',
66 'orders',
67 'courses',
68 'onetime',
69 'shipping',
70 'tinkoff',
71 'triggers',
72 'magnets',
73
74 'corsheaders',
75 'anymail',
76 'rest_framework',
77 'rest_framework.authtoken',
78 'django_filters',
79
80 'django.contrib.admin',
81 'django.contrib.auth',
82 'django.contrib.contenttypes',
83 'django.contrib.sessions',
84 'django.contrib.messages',
85 'django.contrib.staticfiles',
86
87 'debug_toolbar',
88 ]
89
90 MIDDLEWARE = [
91 'django.middleware.security.SecurityMiddleware',
92
93 'django.contrib.sessions.middleware.SessionMiddleware',
94 'corsheaders.middleware.CorsMiddleware',
95 'django.middleware.common.CommonMiddleware',
96 # 'django.middleware.csrf.CsrfViewMiddleware',
97 'django.contrib.auth.middleware.AuthenticationMiddleware',
98 'django.contrib.auth.middleware.RemoteUserMiddleware',
99 'django.contrib.messages.middleware.MessageMiddleware',
100 'django.middleware.clickjacking.XFrameOptionsMiddleware',
101 'debug_toolbar.middleware.DebugToolbarMiddleware',
102 ]
103
104 if not DEBUG and not CI:
105 MIDDLEWARE.insert(1, 'whitenoise.middleware.WhiteNoiseMiddleware')
106
107
108 REST_FRAMEWORK = {
109 'DEFAULT_PERMISSION_CLASSES': (
110 'rest_framework.permissions.IsAuthenticated',
111 ),
112 'DEFAULT_AUTHENTICATION_CLASSES': (
113 'rest_framework.authentication.TokenAuthentication',
114 ),
115 'DEFAULT_RENDERER_CLASSES': [
116 'app.renderers.AppJSONRenderer',
117 ],
118 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',
119 'DEFAULT_PAGINATION_CLASS': 'app.pagination.AppPagination',
120 'PAGE_SIZE': 20,
121 }
122
123 ROOT_URLCONF = 'app.urls'
124
125 TEMPLATES = [
126 {
127 'BACKEND': 'django.template.backends.django.DjangoTemplates',
128 'DIRS': [],
129 'APP_DIRS': True,
130 'OPTIONS': {
131 'context_processors': [
132 'django.template.context_processors.debug',
133 'django.template.context_processors.request',
134 'django.contrib.auth.context_processors.auth',
135 'django.contrib.messages.context_processors.messages',
136 ],
137 },
138 },
139 ]
140
141 WSGI_APPLICATION = 'app.wsgi.application'
142
143
144 # Database
145 # https://docs.djangoproject.com/en/2.2/ref/settings/#databases
146 DATABASES = {
147 'default': env.db(), # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
148 }
149 AUTH_USER_MODEL = 'users.User'
150 AUTHENTICATION_BACKENDS = [
151 'django.contrib.auth.backends.ModelBackend',
152 'django.contrib.auth.backends.RemoteUserBackend',
153 ]
154 HEALTH_CHECKS_ERROR_CODE = 503
155 HEALTH_CHECKS = {
156 'db': 'django_healthchecks.contrib.check_database',
157 }
158
159 MEDIA_URL = env('MEDIA_URL', default='/media/')
160
161 STATIC_URL = env('STATIC_URL', default='/static/')
162 STATIC_ROOT = env('STATIC_ROOT')
163
164 SENTRY_DSN = env('SENTRY_DSN', cast=str, default='')
165
166 if not DEBUG and len(SENTRY_DSN):
167 import sentry_sdk
168 from sentry_sdk.integrations.django import DjangoIntegration
169 from sentry_sdk.integrations.celery import CeleryIntegration
170 from sentry_sdk.integrations.redis import RedisIntegration
171
172 sentry_sdk.init(
173 dsn=SENTRY_DSN,
174 integrations=[DjangoIntegration(), CeleryIntegration(), RedisIntegration()],
175 )
176
177 BROKER_URL = env('CELERY_BACKEND')
178 CELERY_ALWAYS_EAGER = env('CELERY_ALWAYS_EAGER', cast=bool, default=DEBUG) # by default in debug mode we run all celery tasks in foregroud.
179 CELERY_TIMEZONE = TIME_ZONE
180 CELERY_ENABLE_UTC = False
181 CELERYBEAT_SCHEDULE = {
182 'run_started_purchase_trigger': {
183 'task': 'triggers.tasks.check_for_started_purchase_triggers',
184 'schedule': crontab(hour='*', minute=15),
185 },
186 'run_record_feedback_trigger': {
187 'task': 'triggers.tasks.check_for_record_feedback_triggers',
188 'schedule': crontab(hour='*', minute=15),
189 },
190 }
191
192
193 AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID', default=None)
194 AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY', default=None)
195 AWS_STORAGE_BUCKET_NAME = env('AWS_STORAGE_BUCKET_NAME', default=None)
196 AWS_S3_REGION_NAME = env('AWS_S3_REGION_NAME', default=None)
197 AWS_S3_ENDPOINT_URL = env('AWS_S3_ENDPOINT_URL', default=None)
198
199 EMAIL_ENABLED = env('EMAIL_ENABLED', cast=bool, default=False)
200
201 EMAIL_BACKEND = env('EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
202
203 MAILJET_API_KEY = env('MAILJET_API_KEY', default='')
204 MAILJET_SECRET_KEY = env('MAILJET_SECRET_KEY', default='')
205 MAILJET_CONTACT_LIST_ID = env('MAILJET_CONTACT_LIST_ID', cast=int, default=None)
206
207 MAILCHIMP_API_KEY = env('MAILCHIMP_API_KEY', default='')
208 MAILCHIMP_CONTACT_LIST_ID = env('MAILCHIMP_CONTACT_LIST_ID', cast=str, default=None)
209
210 DEFAULT_FROM_EMAIL = env('EMAIL_FROM', cast=str, default='')
211 ANYMAIL = {
212 'POSTMARK_SERVER_TOKEN': env('POSTMARK_SERVER_TOKEN', cast=str, default=''),
213 'DEBUG_API_REQUESTS': env('DEBUG'),
214 }
215
216 CLICKMEETING_API_KEY = env('CLICKMEETING_API_KEY', default=None, cast=str)
217
218 ZOOMUS_API_KEY = env('ZOOMUS_API_KEY', default=None, cast=str)
219 ZOOMUS_API_SECRET = env('ZOOMUS_API_SECRET', default=None, cast=str)
220
221 TINKOFF_TERMINAL_KEY = env('TINKOFF_TERMINAL_KEY', default=None)
222 TINKOFF_TERMINAL_PASSWORD = env('TINKOFF_TERMINAL_PASSWORD', default=None)
223
224 SEND_HAPPINESS_MESSAGES = env('SEND_HAPPINESS_MESSAGES', cast=bool, default=False)
225
226 # Uncomment this lines to catch all runtime warnings as errors
227
228 # import warnings # noqa
229 # warnings.filterwarnings(
230 # 'error', r".*",
231 # RuntimeWarning, r".*"
232 # )
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/app/settings.py b/src/app/settings.py
--- a/src/app/settings.py
+++ b/src/app/settings.py
@@ -77,6 +77,7 @@
'rest_framework.authtoken',
'django_filters',
+ 'axes',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
@@ -98,6 +99,7 @@
'django.contrib.auth.middleware.RemoteUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
+ 'axes.middleware.AxesMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
@@ -148,6 +150,7 @@
}
AUTH_USER_MODEL = 'users.User'
AUTHENTICATION_BACKENDS = [
+ 'axes.backends.AxesBackend',
'django.contrib.auth.backends.ModelBackend',
'django.contrib.auth.backends.RemoteUserBackend',
]
| {"golden_diff": "diff --git a/src/app/settings.py b/src/app/settings.py\n--- a/src/app/settings.py\n+++ b/src/app/settings.py\n@@ -77,6 +77,7 @@\n 'rest_framework.authtoken',\n 'django_filters',\n \n+ 'axes',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n@@ -98,6 +99,7 @@\n 'django.contrib.auth.middleware.RemoteUserMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n+ 'axes.middleware.AxesMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n ]\n \n@@ -148,6 +150,7 @@\n }\n AUTH_USER_MODEL = 'users.User'\n AUTHENTICATION_BACKENDS = [\n+ 'axes.backends.AxesBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'django.contrib.auth.backends.RemoteUserBackend',\n ]\n", "issue": "\u0423\u0441\u0442\u0430\u043d\u043e\u0432\u0438\u0442\u044c django-axes\nhttps://github.com/jazzband/django-axes\n", "before_files": [{"content": "import os\n\nimport environ\nfrom celery.schedules import crontab\n\nroot = environ.Path(__file__) - 2 # three folder back (/a/b/c/ - 3 = /)\nenv = environ.Env(DEBUG=(bool, False)) # set default values and casting\nenviron.Env.read_env() # reading .env file\nSITE_ROOT = root()\n\nUSE_L10N = True\nUSE_i18N = True\n\nLANGUAGE_CODE = 'ru'\nLOCALE_PATHS = ['locale']\n\nINTERNAL_IPS = [\n '127.0.0.1',\n]\nFRONTEND_URL = 'https://education.borshev.com'\n\nUSE_TZ = False\nTIME_ZONE = env('TIME_ZONE', cast=str, default='Europe/Moscow')\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nTEST_RUNNER = 'app.test.disable_test_command_runner.DisableTestCommandRunner'\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env('SECRET_KEY', cast=str, default='s3cr3t')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env('DEBUG', cast=bool, default=False)\nCI = env('CI', cast=bool, default=False)\n\nABSOLUTE_HOST = env('ABSOLUTE_HOST', cast=str, default='https://edu-app.borshev.com')\nALLOWED_HOSTS = [\n 'edu-app.borshev.com',\n 'localhost',\n 'localhost:8000',\n 'education.borshev.com',\n ABSOLUTE_HOST.replace('https://', ''),\n]\n\nCORS_ORIGIN_WHITELIST = [\n 'https://pmdaily.ru',\n 'https://education.borshev.com',\n]\n\nCSRF_TRUSTED_ORIGINS = [\n 'pmdaily.ru',\n 'education.borshev.com',\n 'borshev.com',\n]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'app',\n 'users',\n 'orders',\n 'courses',\n 'onetime',\n 'shipping',\n 'tinkoff',\n 'triggers',\n 'magnets',\n\n 'corsheaders',\n 'anymail',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_filters',\n\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'debug_toolbar',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n # 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.RemoteUserMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 
'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nif not DEBUG and not CI:\n MIDDLEWARE.insert(1, 'whitenoise.middleware.WhiteNoiseMiddleware')\n\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticated',\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n ),\n 'DEFAULT_RENDERER_CLASSES': [\n 'app.renderers.AppJSONRenderer',\n ],\n 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',\n 'DEFAULT_PAGINATION_CLASS': 'app.pagination.AppPagination',\n 'PAGE_SIZE': 20,\n}\n\nROOT_URLCONF = 'app.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'app.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.2/ref/settings/#databases\nDATABASES = {\n 'default': env.db(), # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ\n}\nAUTH_USER_MODEL = 'users.User'\nAUTHENTICATION_BACKENDS = [\n 'django.contrib.auth.backends.ModelBackend',\n 'django.contrib.auth.backends.RemoteUserBackend',\n]\nHEALTH_CHECKS_ERROR_CODE = 503\nHEALTH_CHECKS = {\n 'db': 'django_healthchecks.contrib.check_database',\n}\n\nMEDIA_URL = env('MEDIA_URL', default='/media/')\n\nSTATIC_URL = env('STATIC_URL', default='/static/')\nSTATIC_ROOT = env('STATIC_ROOT')\n\nSENTRY_DSN = env('SENTRY_DSN', cast=str, default='')\n\nif not DEBUG and len(SENTRY_DSN):\n import sentry_sdk\n from sentry_sdk.integrations.django import DjangoIntegration\n from sentry_sdk.integrations.celery import CeleryIntegration\n from sentry_sdk.integrations.redis import RedisIntegration\n\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[DjangoIntegration(), CeleryIntegration(), RedisIntegration()],\n )\n\nBROKER_URL = env('CELERY_BACKEND')\nCELERY_ALWAYS_EAGER = env('CELERY_ALWAYS_EAGER', cast=bool, default=DEBUG) # by default in debug mode we run all celery tasks in foregroud.\nCELERY_TIMEZONE = TIME_ZONE\nCELERY_ENABLE_UTC = False\nCELERYBEAT_SCHEDULE = {\n 'run_started_purchase_trigger': {\n 'task': 'triggers.tasks.check_for_started_purchase_triggers',\n 'schedule': crontab(hour='*', minute=15),\n },\n 'run_record_feedback_trigger': {\n 'task': 'triggers.tasks.check_for_record_feedback_triggers',\n 'schedule': crontab(hour='*', minute=15),\n },\n}\n\n\nAWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID', default=None)\nAWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY', default=None)\nAWS_STORAGE_BUCKET_NAME = env('AWS_STORAGE_BUCKET_NAME', default=None)\nAWS_S3_REGION_NAME = env('AWS_S3_REGION_NAME', default=None)\nAWS_S3_ENDPOINT_URL = env('AWS_S3_ENDPOINT_URL', default=None)\n\nEMAIL_ENABLED = env('EMAIL_ENABLED', cast=bool, default=False)\n\nEMAIL_BACKEND = env('EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')\n\nMAILJET_API_KEY = env('MAILJET_API_KEY', default='')\nMAILJET_SECRET_KEY = env('MAILJET_SECRET_KEY', default='')\nMAILJET_CONTACT_LIST_ID = env('MAILJET_CONTACT_LIST_ID', cast=int, default=None)\n\nMAILCHIMP_API_KEY = env('MAILCHIMP_API_KEY', default='')\nMAILCHIMP_CONTACT_LIST_ID = env('MAILCHIMP_CONTACT_LIST_ID', cast=str, default=None)\n\nDEFAULT_FROM_EMAIL = env('EMAIL_FROM', cast=str, default='')\nANYMAIL = {\n 
'POSTMARK_SERVER_TOKEN': env('POSTMARK_SERVER_TOKEN', cast=str, default=''),\n 'DEBUG_API_REQUESTS': env('DEBUG'),\n}\n\nCLICKMEETING_API_KEY = env('CLICKMEETING_API_KEY', default=None, cast=str)\n\nZOOMUS_API_KEY = env('ZOOMUS_API_KEY', default=None, cast=str)\nZOOMUS_API_SECRET = env('ZOOMUS_API_SECRET', default=None, cast=str)\n\nTINKOFF_TERMINAL_KEY = env('TINKOFF_TERMINAL_KEY', default=None)\nTINKOFF_TERMINAL_PASSWORD = env('TINKOFF_TERMINAL_PASSWORD', default=None)\n\nSEND_HAPPINESS_MESSAGES = env('SEND_HAPPINESS_MESSAGES', cast=bool, default=False)\n\n# Uncomment this lines to catch all runtime warnings as errors\n\n# import warnings # noqa\n# warnings.filterwarnings(\n# 'error', r\".*\",\n# RuntimeWarning, r\".*\"\n# )\n", "path": "src/app/settings.py"}], "after_files": [{"content": "import os\n\nimport environ\nfrom celery.schedules import crontab\n\nroot = environ.Path(__file__) - 2 # three folder back (/a/b/c/ - 3 = /)\nenv = environ.Env(DEBUG=(bool, False)) # set default values and casting\nenviron.Env.read_env() # reading .env file\nSITE_ROOT = root()\n\nUSE_L10N = True\nUSE_i18N = True\n\nLANGUAGE_CODE = 'ru'\nLOCALE_PATHS = ['locale']\n\nINTERNAL_IPS = [\n '127.0.0.1',\n]\nFRONTEND_URL = 'https://education.borshev.com'\n\nUSE_TZ = False\nTIME_ZONE = env('TIME_ZONE', cast=str, default='Europe/Moscow')\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nTEST_RUNNER = 'app.test.disable_test_command_runner.DisableTestCommandRunner'\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env('SECRET_KEY', cast=str, default='s3cr3t')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env('DEBUG', cast=bool, default=False)\nCI = env('CI', cast=bool, default=False)\n\nABSOLUTE_HOST = env('ABSOLUTE_HOST', cast=str, default='https://edu-app.borshev.com')\nALLOWED_HOSTS = [\n 'edu-app.borshev.com',\n 'localhost',\n 'localhost:8000',\n 'education.borshev.com',\n ABSOLUTE_HOST.replace('https://', ''),\n]\n\nCORS_ORIGIN_WHITELIST = [\n 'https://pmdaily.ru',\n 'https://education.borshev.com',\n]\n\nCSRF_TRUSTED_ORIGINS = [\n 'pmdaily.ru',\n 'education.borshev.com',\n 'borshev.com',\n]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'app',\n 'users',\n 'orders',\n 'courses',\n 'onetime',\n 'shipping',\n 'tinkoff',\n 'triggers',\n 'magnets',\n\n 'corsheaders',\n 'anymail',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_filters',\n\n 'axes',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'debug_toolbar',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n # 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.RemoteUserMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'axes.middleware.AxesMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nif not DEBUG and not CI:\n MIDDLEWARE.insert(1, 
'whitenoise.middleware.WhiteNoiseMiddleware')\n\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticated',\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n ),\n 'DEFAULT_RENDERER_CLASSES': [\n 'app.renderers.AppJSONRenderer',\n ],\n 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',\n 'DEFAULT_PAGINATION_CLASS': 'app.pagination.AppPagination',\n 'PAGE_SIZE': 20,\n}\n\nROOT_URLCONF = 'app.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'app.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.2/ref/settings/#databases\nDATABASES = {\n 'default': env.db(), # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ\n}\nAUTH_USER_MODEL = 'users.User'\nAUTHENTICATION_BACKENDS = [\n 'axes.backends.AxesBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'django.contrib.auth.backends.RemoteUserBackend',\n]\nHEALTH_CHECKS_ERROR_CODE = 503\nHEALTH_CHECKS = {\n 'db': 'django_healthchecks.contrib.check_database',\n}\n\nMEDIA_URL = env('MEDIA_URL', default='/media/')\n\nSTATIC_URL = env('STATIC_URL', default='/static/')\nSTATIC_ROOT = env('STATIC_ROOT')\n\nSENTRY_DSN = env('SENTRY_DSN', cast=str, default='')\n\nif not DEBUG and len(SENTRY_DSN):\n import sentry_sdk\n from sentry_sdk.integrations.django import DjangoIntegration\n from sentry_sdk.integrations.celery import CeleryIntegration\n from sentry_sdk.integrations.redis import RedisIntegration\n\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[DjangoIntegration(), CeleryIntegration(), RedisIntegration()],\n )\n\nBROKER_URL = env('CELERY_BACKEND')\nCELERY_ALWAYS_EAGER = env('CELERY_ALWAYS_EAGER', cast=bool, default=DEBUG) # by default in debug mode we run all celery tasks in foregroud.\nCELERY_TIMEZONE = TIME_ZONE\nCELERY_ENABLE_UTC = False\nCELERYBEAT_SCHEDULE = {\n 'run_started_purchase_trigger': {\n 'task': 'triggers.tasks.check_for_started_purchase_triggers',\n 'schedule': crontab(hour='*', minute=15),\n },\n 'run_record_feedback_trigger': {\n 'task': 'triggers.tasks.check_for_record_feedback_triggers',\n 'schedule': crontab(hour='*', minute=15),\n },\n}\n\n\nAWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID', default=None)\nAWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY', default=None)\nAWS_STORAGE_BUCKET_NAME = env('AWS_STORAGE_BUCKET_NAME', default=None)\nAWS_S3_REGION_NAME = env('AWS_S3_REGION_NAME', default=None)\nAWS_S3_ENDPOINT_URL = env('AWS_S3_ENDPOINT_URL', default=None)\n\nEMAIL_ENABLED = env('EMAIL_ENABLED', cast=bool, default=False)\n\nEMAIL_BACKEND = env('EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')\n\nMAILJET_API_KEY = env('MAILJET_API_KEY', default='')\nMAILJET_SECRET_KEY = env('MAILJET_SECRET_KEY', default='')\nMAILJET_CONTACT_LIST_ID = env('MAILJET_CONTACT_LIST_ID', cast=int, default=None)\n\nMAILCHIMP_API_KEY = env('MAILCHIMP_API_KEY', default='')\nMAILCHIMP_CONTACT_LIST_ID = env('MAILCHIMP_CONTACT_LIST_ID', cast=str, default=None)\n\nDEFAULT_FROM_EMAIL = env('EMAIL_FROM', cast=str, default='')\nANYMAIL = {\n 'POSTMARK_SERVER_TOKEN': env('POSTMARK_SERVER_TOKEN', cast=str, default=''),\n 
'DEBUG_API_REQUESTS': env('DEBUG'),\n}\n\nCLICKMEETING_API_KEY = env('CLICKMEETING_API_KEY', default=None, cast=str)\n\nZOOMUS_API_KEY = env('ZOOMUS_API_KEY', default=None, cast=str)\nZOOMUS_API_SECRET = env('ZOOMUS_API_SECRET', default=None, cast=str)\n\nTINKOFF_TERMINAL_KEY = env('TINKOFF_TERMINAL_KEY', default=None)\nTINKOFF_TERMINAL_PASSWORD = env('TINKOFF_TERMINAL_PASSWORD', default=None)\n\nSEND_HAPPINESS_MESSAGES = env('SEND_HAPPINESS_MESSAGES', cast=bool, default=False)\n\n# Uncomment this lines to catch all runtime warnings as errors\n\n# import warnings # noqa\n# warnings.filterwarnings(\n# 'error', r\".*\",\n# RuntimeWarning, r\".*\"\n# )\n", "path": "src/app/settings.py"}]} | 2,633 | 199 |
gh_patches_debug_30866 | rasdani/github-patches | git_diff | sktime__sktime-3723 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[MNT] PyPi publish action for 0.14 failed
Summary by @fkiraly:
The attempted 0.14.0 release has failed at pypi upload of successfully built wheels.
Help on diagnosing and solving this is appreciated.
Original post:
---
Just to let you know: 0.14 has not been published on PyPI yet:
https://github.com/sktime/sktime/actions/runs/3402037795
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """Install script for sktime."""
4
5 __author__ = ["mloning", "lmmentel"]
6
7 import codecs
8
9 import toml
10 from setuptools import find_packages, setup
11
12 pyproject = toml.load("pyproject.toml")
13
14
15 def long_description():
16 """Read and return README as long description."""
17 with codecs.open("README.md", encoding="utf-8-sig") as f:
18 return f.read()
19
20
21 # ground truth package metadata is loaded from pyproject.toml
22 # for context see:
23 # - [PEP 621 -- Storing project metadata in pyproject.toml]
24 # (https://www.python.org/dev/peps/pep-0621)
25 pyproject = toml.load("pyproject.toml")
26
27
28 def setup_package():
29 """Set up package."""
30 projectname = pyproject["project"]["name"]
31 setup(
32 author_email="[email protected]",
33 author=f"{projectname} developers",
34 classifiers=pyproject["project"]["classifiers"],
35 description=pyproject["project"]["description"],
36 download_url=pyproject["project"]["urls"]["download"],
37 extras_require=pyproject["project"]["optional-dependencies"],
38 include_package_data=True,
39 install_requires=pyproject["project"]["dependencies"],
40 keywords=pyproject["project"]["keywords"],
41 license=pyproject["project"]["license"],
42 long_description=long_description(),
43 maintainer_email="[email protected]",
44 maintainer=f"{projectname} developers",
45 name=projectname,
46 package_data={
47 "sktime": [
48 "*.csv",
49 "*.csv.gz",
50 "*.arff",
51 "*.arff.gz",
52 "*.txt",
53 "*.ts",
54 "*.tsv",
55 ]
56 },
57 packages=find_packages(
58 where=".",
59 exclude=["tests", "tests.*"],
60 ),
61 project_urls=pyproject["project"]["urls"],
62 python_requires=pyproject["project"]["requires-python"],
63 setup_requires=pyproject["build-system"]["requires"],
64 url=pyproject["project"]["urls"]["repository"],
65 version=pyproject["project"]["version"],
66 zip_safe=False,
67 )
68
69
70 if __name__ == "__main__":
71 setup_package()
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
deleted file mode 100644
--- a/setup.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#! /usr/bin/env python
-# -*- coding: utf-8 -*-
-"""Install script for sktime."""
-
-__author__ = ["mloning", "lmmentel"]
-
-import codecs
-
-import toml
-from setuptools import find_packages, setup
-
-pyproject = toml.load("pyproject.toml")
-
-
-def long_description():
- """Read and return README as long description."""
- with codecs.open("README.md", encoding="utf-8-sig") as f:
- return f.read()
-
-
-# ground truth package metadata is loaded from pyproject.toml
-# for context see:
-# - [PEP 621 -- Storing project metadata in pyproject.toml]
-# (https://www.python.org/dev/peps/pep-0621)
-pyproject = toml.load("pyproject.toml")
-
-
-def setup_package():
- """Set up package."""
- projectname = pyproject["project"]["name"]
- setup(
- author_email="[email protected]",
- author=f"{projectname} developers",
- classifiers=pyproject["project"]["classifiers"],
- description=pyproject["project"]["description"],
- download_url=pyproject["project"]["urls"]["download"],
- extras_require=pyproject["project"]["optional-dependencies"],
- include_package_data=True,
- install_requires=pyproject["project"]["dependencies"],
- keywords=pyproject["project"]["keywords"],
- license=pyproject["project"]["license"],
- long_description=long_description(),
- maintainer_email="[email protected]",
- maintainer=f"{projectname} developers",
- name=projectname,
- package_data={
- "sktime": [
- "*.csv",
- "*.csv.gz",
- "*.arff",
- "*.arff.gz",
- "*.txt",
- "*.ts",
- "*.tsv",
- ]
- },
- packages=find_packages(
- where=".",
- exclude=["tests", "tests.*"],
- ),
- project_urls=pyproject["project"]["urls"],
- python_requires=pyproject["project"]["requires-python"],
- setup_requires=pyproject["build-system"]["requires"],
- url=pyproject["project"]["urls"]["repository"],
- version=pyproject["project"]["version"],
- zip_safe=False,
- )
-
-
-if __name__ == "__main__":
- setup_package()
| {"golden_diff": "diff --git a/setup.py b/setup.py\ndeleted file mode 100644\n--- a/setup.py\n+++ /dev/null\n@@ -1,71 +0,0 @@\n-#! /usr/bin/env python\n-# -*- coding: utf-8 -*-\n-\"\"\"Install script for sktime.\"\"\"\n-\n-__author__ = [\"mloning\", \"lmmentel\"]\n-\n-import codecs\n-\n-import toml\n-from setuptools import find_packages, setup\n-\n-pyproject = toml.load(\"pyproject.toml\")\n-\n-\n-def long_description():\n- \"\"\"Read and return README as long description.\"\"\"\n- with codecs.open(\"README.md\", encoding=\"utf-8-sig\") as f:\n- return f.read()\n-\n-\n-# ground truth package metadata is loaded from pyproject.toml\n-# for context see:\n-# - [PEP 621 -- Storing project metadata in pyproject.toml]\n-# (https://www.python.org/dev/peps/pep-0621)\n-pyproject = toml.load(\"pyproject.toml\")\n-\n-\n-def setup_package():\n- \"\"\"Set up package.\"\"\"\n- projectname = pyproject[\"project\"][\"name\"]\n- setup(\n- author_email=\"[email protected]\",\n- author=f\"{projectname} developers\",\n- classifiers=pyproject[\"project\"][\"classifiers\"],\n- description=pyproject[\"project\"][\"description\"],\n- download_url=pyproject[\"project\"][\"urls\"][\"download\"],\n- extras_require=pyproject[\"project\"][\"optional-dependencies\"],\n- include_package_data=True,\n- install_requires=pyproject[\"project\"][\"dependencies\"],\n- keywords=pyproject[\"project\"][\"keywords\"],\n- license=pyproject[\"project\"][\"license\"],\n- long_description=long_description(),\n- maintainer_email=\"[email protected]\",\n- maintainer=f\"{projectname} developers\",\n- name=projectname,\n- package_data={\n- \"sktime\": [\n- \"*.csv\",\n- \"*.csv.gz\",\n- \"*.arff\",\n- \"*.arff.gz\",\n- \"*.txt\",\n- \"*.ts\",\n- \"*.tsv\",\n- ]\n- },\n- packages=find_packages(\n- where=\".\",\n- exclude=[\"tests\", \"tests.*\"],\n- ),\n- project_urls=pyproject[\"project\"][\"urls\"],\n- python_requires=pyproject[\"project\"][\"requires-python\"],\n- setup_requires=pyproject[\"build-system\"][\"requires\"],\n- url=pyproject[\"project\"][\"urls\"][\"repository\"],\n- version=pyproject[\"project\"][\"version\"],\n- zip_safe=False,\n- )\n-\n-\n-if __name__ == \"__main__\":\n- setup_package()\n", "issue": "[MNT] PyPi publish action for 0.14 failed\nSummary by @fkiraly:\r\nThe attempted 0.14.0 release has failed at pypi upload of successfully built wheels.\r\n\r\nHelp on diagnosing and solving this is appreciated.\r\n\r\nOriginal post:\r\n---\r\nJust to let you know: 0.14 has not been published on PyPI yet:\r\n\r\nhttps://github.com/sktime/sktime/actions/runs/3402037795\n", "before_files": [{"content": "#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Install script for sktime.\"\"\"\n\n__author__ = [\"mloning\", \"lmmentel\"]\n\nimport codecs\n\nimport toml\nfrom setuptools import find_packages, setup\n\npyproject = toml.load(\"pyproject.toml\")\n\n\ndef long_description():\n \"\"\"Read and return README as long description.\"\"\"\n with codecs.open(\"README.md\", encoding=\"utf-8-sig\") as f:\n return f.read()\n\n\n# ground truth package metadata is loaded from pyproject.toml\n# for context see:\n# - [PEP 621 -- Storing project metadata in pyproject.toml]\n# (https://www.python.org/dev/peps/pep-0621)\npyproject = toml.load(\"pyproject.toml\")\n\n\ndef setup_package():\n \"\"\"Set up package.\"\"\"\n projectname = pyproject[\"project\"][\"name\"]\n setup(\n author_email=\"[email protected]\",\n author=f\"{projectname} developers\",\n classifiers=pyproject[\"project\"][\"classifiers\"],\n description=pyproject[\"project\"][\"description\"],\n download_url=pyproject[\"project\"][\"urls\"][\"download\"],\n extras_require=pyproject[\"project\"][\"optional-dependencies\"],\n include_package_data=True,\n install_requires=pyproject[\"project\"][\"dependencies\"],\n keywords=pyproject[\"project\"][\"keywords\"],\n license=pyproject[\"project\"][\"license\"],\n long_description=long_description(),\n maintainer_email=\"[email protected]\",\n maintainer=f\"{projectname} developers\",\n name=projectname,\n package_data={\n \"sktime\": [\n \"*.csv\",\n \"*.csv.gz\",\n \"*.arff\",\n \"*.arff.gz\",\n \"*.txt\",\n \"*.ts\",\n \"*.tsv\",\n ]\n },\n packages=find_packages(\n where=\".\",\n exclude=[\"tests\", \"tests.*\"],\n ),\n project_urls=pyproject[\"project\"][\"urls\"],\n python_requires=pyproject[\"project\"][\"requires-python\"],\n setup_requires=pyproject[\"build-system\"][\"requires\"],\n url=pyproject[\"project\"][\"urls\"][\"repository\"],\n version=pyproject[\"project\"][\"version\"],\n zip_safe=False,\n )\n\n\nif __name__ == \"__main__\":\n setup_package()\n", "path": "setup.py"}], "after_files": [{"content": null, "path": "setup.py"}]} | 982 | 583 |
gh_patches_debug_37703 | rasdani/github-patches | git_diff | keras-team__keras-16755 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GlobalMax{Avg}Pooling outputs infinity or NaN when the input shape is 0
**System information**.
- Have I written custom code (as opposed to using a stock example script provided in Keras): Yes
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Ubuntu 20.04
- TensorFlow installed from (source or binary): binary
- TensorFlow version (use command below): latest
- Python version: 2.8.0
- Bazel version (if compiling from source): N/A
- GPU model and memory: N/A
- Exact command to reproduce:
```
import keras
from keras.layers import *
x = keras.layers.Input((5, 0, 16, 16))
layer1 = keras.layers.GlobalMaxPooling3D()
layer2 = keras.layers.GlobalAveragePooling3D()
y1 = layer1(x)
y2 = layer2(x)
model1 = keras.models.Model(x,y1)
model2 = keras.models.Model(x,y2)
import numpy as np
input = np.random.rand(10, 5, 0, 16, 16)
res1 = model1.predict(input)
res2 = model2.predict(input)
print(res1, res2)
```
**Describe the problem**.
The behavior of GlobalMax{Average}PoolingND is undefined when the feature dimension of the input is zero. I compared the result with another library, ONNXRuntime, which directly raises an exception as follows:
```
Status Message: /onnxruntime_src/onnxruntime/core/providers/cpu/nn/pool_attributes.h:101 std::vector<long int> onnxruntime::PoolAttributes::SetOutputSize(const onnxruntime::TensorShape&, int64_t, std::vector<long int>*) const input_shape.Size() > 0 || input_shape[0] == 0 was false. Invalid input shape. Only N can be zero. Got:{100,16,5,0,5}
```
**Describe the current behavior**.
TensorFlow will either output nan or infinity when the feature dimension of the tensor is zero
**Describe the expected behavior**.
I guess an exception would be better. The empty tensor shape should be reported as an error instead of outputting nan or inf.
**[Contributing](https://github.com/keras-team/keras/blob/master/CONTRIBUTING.md)**.
- Do you want to contribute a PR? (yes/no): no
- If yes, please read [this page](https://github.com/keras-team/keras/blob/master/CONTRIBUTING.md) for instructions
- Briefly describe your candidate solution(if contributing):
**Standalone code to reproduce the issue**.
Please refer to the above code for reproduction.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras/layers/pooling/base_global_pooling3d.py`
Content:
```
1 # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Private base class for global pooling 3D layers."""
16
17
18 import tensorflow.compat.v2 as tf
19
20 from keras.engine.base_layer import Layer
21 from keras.engine.input_spec import InputSpec
22 from keras.utils import conv_utils
23
24
25 class GlobalPooling3D(Layer):
26 """Abstract class for different global pooling 3D layers."""
27
28 def __init__(self, data_format=None, keepdims=False, **kwargs):
29 super().__init__(**kwargs)
30 self.data_format = conv_utils.normalize_data_format(data_format)
31 self.input_spec = InputSpec(ndim=5)
32 self.keepdims = keepdims
33
34 def compute_output_shape(self, input_shape):
35 input_shape = tf.TensorShape(input_shape).as_list()
36 if self.data_format == "channels_last":
37 if self.keepdims:
38 return tf.TensorShape([input_shape[0], 1, 1, 1, input_shape[4]])
39 else:
40 return tf.TensorShape([input_shape[0], input_shape[4]])
41 else:
42 if self.keepdims:
43 return tf.TensorShape([input_shape[0], input_shape[1], 1, 1, 1])
44 else:
45 return tf.TensorShape([input_shape[0], input_shape[1]])
46
47 def call(self, inputs):
48 raise NotImplementedError
49
50 def get_config(self):
51 config = {"data_format": self.data_format, "keepdims": self.keepdims}
52 base_config = super().get_config()
53 return dict(list(base_config.items()) + list(config.items()))
54
```
Path: `keras/layers/pooling/base_global_pooling1d.py`
Content:
```
1 # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Private base class for global pooling 1D layers."""
16
17
18 import tensorflow.compat.v2 as tf
19
20 from keras.engine.base_layer import Layer
21 from keras.engine.input_spec import InputSpec
22 from keras.utils import conv_utils
23
24
25 class GlobalPooling1D(Layer):
26 """Abstract class for different global pooling 1D layers."""
27
28 def __init__(self, data_format="channels_last", keepdims=False, **kwargs):
29 super().__init__(**kwargs)
30 self.input_spec = InputSpec(ndim=3)
31 self.data_format = conv_utils.normalize_data_format(data_format)
32 self.keepdims = keepdims
33
34 def compute_output_shape(self, input_shape):
35 input_shape = tf.TensorShape(input_shape).as_list()
36 if self.data_format == "channels_first":
37 if self.keepdims:
38 return tf.TensorShape([input_shape[0], input_shape[1], 1])
39 else:
40 return tf.TensorShape([input_shape[0], input_shape[1]])
41 else:
42 if self.keepdims:
43 return tf.TensorShape([input_shape[0], 1, input_shape[2]])
44 else:
45 return tf.TensorShape([input_shape[0], input_shape[2]])
46
47 def call(self, inputs):
48 raise NotImplementedError
49
50 def get_config(self):
51 config = {"data_format": self.data_format, "keepdims": self.keepdims}
52 base_config = super().get_config()
53 return dict(list(base_config.items()) + list(config.items()))
54
```
Path: `keras/layers/pooling/base_global_pooling2d.py`
Content:
```
1 # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Private base class for global pooling 2D layers."""
16
17
18 import tensorflow.compat.v2 as tf
19
20 from keras.engine.base_layer import Layer
21 from keras.engine.input_spec import InputSpec
22 from keras.utils import conv_utils
23
24
25 class GlobalPooling2D(Layer):
26 """Abstract class for different global pooling 2D layers."""
27
28 def __init__(self, data_format=None, keepdims=False, **kwargs):
29 super().__init__(**kwargs)
30 self.data_format = conv_utils.normalize_data_format(data_format)
31 self.input_spec = InputSpec(ndim=4)
32 self.keepdims = keepdims
33
34 def compute_output_shape(self, input_shape):
35 input_shape = tf.TensorShape(input_shape).as_list()
36 if self.data_format == "channels_last":
37 if self.keepdims:
38 return tf.TensorShape([input_shape[0], 1, 1, input_shape[3]])
39 else:
40 return tf.TensorShape([input_shape[0], input_shape[3]])
41 else:
42 if self.keepdims:
43 return tf.TensorShape([input_shape[0], input_shape[1], 1, 1])
44 else:
45 return tf.TensorShape([input_shape[0], input_shape[1]])
46
47 def call(self, inputs):
48 raise NotImplementedError
49
50 def get_config(self):
51 config = {"data_format": self.data_format, "keepdims": self.keepdims}
52 base_config = super().get_config()
53 return dict(list(base_config.items()) + list(config.items()))
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/keras/layers/pooling/base_global_pooling1d.py b/keras/layers/pooling/base_global_pooling1d.py
--- a/keras/layers/pooling/base_global_pooling1d.py
+++ b/keras/layers/pooling/base_global_pooling1d.py
@@ -31,6 +31,21 @@
self.data_format = conv_utils.normalize_data_format(data_format)
self.keepdims = keepdims
+ def _validate_reduction_axis(self, input_shape, axes):
+ for axis in axes:
+ if input_shape[axis] == 0:
+ raise ValueError(
+ f"Incorrect input shape {input_shape} "
+ f"with dimension 0 at reduction axis {axis}."
+ )
+
+ def build(self, input_shape):
+ input_shape = tf.TensorShape(input_shape).as_list()
+ if self.data_format == "channels_last":
+ self._validate_reduction_axis(input_shape, [1])
+ else:
+ self._validate_reduction_axis(input_shape, [2])
+
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
if self.data_format == "channels_first":
diff --git a/keras/layers/pooling/base_global_pooling2d.py b/keras/layers/pooling/base_global_pooling2d.py
--- a/keras/layers/pooling/base_global_pooling2d.py
+++ b/keras/layers/pooling/base_global_pooling2d.py
@@ -31,6 +31,21 @@
self.input_spec = InputSpec(ndim=4)
self.keepdims = keepdims
+ def _validate_reduction_axis(self, input_shape, axes):
+ for axis in axes:
+ if input_shape[axis] == 0:
+ raise ValueError(
+ f"Incorrect input shape {input_shape} "
+ f"with dimension 0 at reduction axis {axis}."
+ )
+
+ def build(self, input_shape):
+ input_shape = tf.TensorShape(input_shape).as_list()
+ if self.data_format == "channels_last":
+ self._validate_reduction_axis(input_shape, [1, 2])
+ else:
+ self._validate_reduction_axis(input_shape, [2, 3])
+
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
if self.data_format == "channels_last":
diff --git a/keras/layers/pooling/base_global_pooling3d.py b/keras/layers/pooling/base_global_pooling3d.py
--- a/keras/layers/pooling/base_global_pooling3d.py
+++ b/keras/layers/pooling/base_global_pooling3d.py
@@ -31,6 +31,21 @@
self.input_spec = InputSpec(ndim=5)
self.keepdims = keepdims
+ def _validate_reduction_axis(self, input_shape, axes):
+ for axis in axes:
+ if input_shape[axis] == 0:
+ raise ValueError(
+ f"Incorrect input shape {input_shape} "
+ f"with dimension 0 at reduction axis {axis}."
+ )
+
+ def build(self, input_shape):
+ input_shape = tf.TensorShape(input_shape).as_list()
+ if self.data_format == "channels_last":
+ self._validate_reduction_axis(input_shape, [1, 2, 3])
+ else:
+ self._validate_reduction_axis(input_shape, [2, 3, 4])
+
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
if self.data_format == "channels_last":
| {"golden_diff": "diff --git a/keras/layers/pooling/base_global_pooling1d.py b/keras/layers/pooling/base_global_pooling1d.py\n--- a/keras/layers/pooling/base_global_pooling1d.py\n+++ b/keras/layers/pooling/base_global_pooling1d.py\n@@ -31,6 +31,21 @@\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.keepdims = keepdims\n \n+ def _validate_reduction_axis(self, input_shape, axes):\n+ for axis in axes:\n+ if input_shape[axis] == 0:\n+ raise ValueError(\n+ f\"Incorrect input shape {input_shape} \"\n+ f\"with dimension 0 at reduction axis {axis}.\"\n+ )\n+\n+ def build(self, input_shape):\n+ input_shape = tf.TensorShape(input_shape).as_list()\n+ if self.data_format == \"channels_last\":\n+ self._validate_reduction_axis(input_shape, [1])\n+ else:\n+ self._validate_reduction_axis(input_shape, [2])\n+\n def compute_output_shape(self, input_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if self.data_format == \"channels_first\":\ndiff --git a/keras/layers/pooling/base_global_pooling2d.py b/keras/layers/pooling/base_global_pooling2d.py\n--- a/keras/layers/pooling/base_global_pooling2d.py\n+++ b/keras/layers/pooling/base_global_pooling2d.py\n@@ -31,6 +31,21 @@\n self.input_spec = InputSpec(ndim=4)\n self.keepdims = keepdims\n \n+ def _validate_reduction_axis(self, input_shape, axes):\n+ for axis in axes:\n+ if input_shape[axis] == 0:\n+ raise ValueError(\n+ f\"Incorrect input shape {input_shape} \"\n+ f\"with dimension 0 at reduction axis {axis}.\"\n+ )\n+\n+ def build(self, input_shape):\n+ input_shape = tf.TensorShape(input_shape).as_list()\n+ if self.data_format == \"channels_last\":\n+ self._validate_reduction_axis(input_shape, [1, 2])\n+ else:\n+ self._validate_reduction_axis(input_shape, [2, 3])\n+\n def compute_output_shape(self, input_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if self.data_format == \"channels_last\":\ndiff --git a/keras/layers/pooling/base_global_pooling3d.py b/keras/layers/pooling/base_global_pooling3d.py\n--- a/keras/layers/pooling/base_global_pooling3d.py\n+++ b/keras/layers/pooling/base_global_pooling3d.py\n@@ -31,6 +31,21 @@\n self.input_spec = InputSpec(ndim=5)\n self.keepdims = keepdims\n \n+ def _validate_reduction_axis(self, input_shape, axes):\n+ for axis in axes:\n+ if input_shape[axis] == 0:\n+ raise ValueError(\n+ f\"Incorrect input shape {input_shape} \"\n+ f\"with dimension 0 at reduction axis {axis}.\"\n+ )\n+\n+ def build(self, input_shape):\n+ input_shape = tf.TensorShape(input_shape).as_list()\n+ if self.data_format == \"channels_last\":\n+ self._validate_reduction_axis(input_shape, [1, 2, 3])\n+ else:\n+ self._validate_reduction_axis(input_shape, [2, 3, 4])\n+\n def compute_output_shape(self, input_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if self.data_format == \"channels_last\":\n", "issue": "GlobalMax{Avg}Pooling output infinity or NaN when the input shape is 0\n\r\n**System information**.\r\n- Have I written custom code (as opposed to using a stock example script provided in Keras): Yes\r\n- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Ubuntu 20.04\r\n- TensorFlow installed from (source or binary): binary\r\n- TensorFlow version (use command below): latest\r\n- Python version: 2.8.0\r\n- Bazel version (if compiling from source): N/A\r\n- GPU model and memory: N/A\r\n- Exact command to reproduce:\r\n\r\n```\r\nimport keras\r\nfrom keras.layers import *\r\nx = keras.layers.Input((5, 0, 16, 16))\r\nlayer1 = keras.layers.GlobalMaxPooling3D()\r\nlayer2 = 
keras.layers.GlobalAveragePooling3D()\r\ny1 = layer1(x)\r\ny2 = layer2(x)\r\nmodel1 = keras.models.Model(x,y1)\r\nmodel2 = keras.models.Model(x,y2)\r\nimport numpy as np\r\ninput = np.random.rand(10, 5, 0, 16, 16)\r\nres1 = model1.predict(input)\r\nres2 = model2.predict(input)\r\nprint(res1, res2)\r\n```\r\n\r\n**Describe the problem**.\r\nThe behavior of GlobalMax{Average}PoolingND is undefined when the feature dimension of input is zero. I compare the result with another library ONNXRuntime, it will directly raise an exception as follows:\r\n```\r\nStatus Message: /onnxruntime_src/onnxruntime/core/providers/cpu/nn/pool_attributes.h:101 std::vector<long int> onnxruntime::PoolAttributes::SetOutputSize(const onnxruntime::TensorShape&, int64_t, std::vector<long int>*) const input_shape.Size() > 0 || input_shape[0] == 0 was false. Invalid input shape. Only N can be zero. Got:{100,16,5,0,5}\r\n```\r\n\r\n\r\n**Describe the current behavior**.\r\nTensorFlow will either output nan or infinity when the feature dimension of tensor is zero\r\n\r\n**Describe the expected behavior**.\r\nI guess an exception would be better. The tensor with empty shape should be exposed instead of outputting nan or inf.\r\n\r\n**[Contributing](https://github.com/keras-team/keras/blob/master/CONTRIBUTING.md)**.\r\n\r\n- Do you want to contribute a PR? (yes/no): no\r\n- If yes, please read [this page](https://github.com/keras-team/keras/blob/master/CONTRIBUTING.md) for instructions\r\n- Briefly describe your candidate solution(if contributing):\r\n\r\n**Standalone code to reproduce the issue**.\r\nPlease refer to the above code for reproduction.\r\n\n", "before_files": [{"content": "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Private base class for global pooling 3D layers.\"\"\"\n\n\nimport tensorflow.compat.v2 as tf\n\nfrom keras.engine.base_layer import Layer\nfrom keras.engine.input_spec import InputSpec\nfrom keras.utils import conv_utils\n\n\nclass GlobalPooling3D(Layer):\n \"\"\"Abstract class for different global pooling 3D layers.\"\"\"\n\n def __init__(self, data_format=None, keepdims=False, **kwargs):\n super().__init__(**kwargs)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.input_spec = InputSpec(ndim=5)\n self.keepdims = keepdims\n\n def compute_output_shape(self, input_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if self.data_format == \"channels_last\":\n if self.keepdims:\n return tf.TensorShape([input_shape[0], 1, 1, 1, input_shape[4]])\n else:\n return tf.TensorShape([input_shape[0], input_shape[4]])\n else:\n if self.keepdims:\n return tf.TensorShape([input_shape[0], input_shape[1], 1, 1, 1])\n else:\n return tf.TensorShape([input_shape[0], input_shape[1]])\n\n def call(self, inputs):\n raise NotImplementedError\n\n def get_config(self):\n config = {\"data_format\": self.data_format, \"keepdims\": 
self.keepdims}\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n", "path": "keras/layers/pooling/base_global_pooling3d.py"}, {"content": "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Private base class for global pooling 1D layers.\"\"\"\n\n\nimport tensorflow.compat.v2 as tf\n\nfrom keras.engine.base_layer import Layer\nfrom keras.engine.input_spec import InputSpec\nfrom keras.utils import conv_utils\n\n\nclass GlobalPooling1D(Layer):\n \"\"\"Abstract class for different global pooling 1D layers.\"\"\"\n\n def __init__(self, data_format=\"channels_last\", keepdims=False, **kwargs):\n super().__init__(**kwargs)\n self.input_spec = InputSpec(ndim=3)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.keepdims = keepdims\n\n def compute_output_shape(self, input_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if self.data_format == \"channels_first\":\n if self.keepdims:\n return tf.TensorShape([input_shape[0], input_shape[1], 1])\n else:\n return tf.TensorShape([input_shape[0], input_shape[1]])\n else:\n if self.keepdims:\n return tf.TensorShape([input_shape[0], 1, input_shape[2]])\n else:\n return tf.TensorShape([input_shape[0], input_shape[2]])\n\n def call(self, inputs):\n raise NotImplementedError\n\n def get_config(self):\n config = {\"data_format\": self.data_format, \"keepdims\": self.keepdims}\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n", "path": "keras/layers/pooling/base_global_pooling1d.py"}, {"content": "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Private base class for global pooling 2D layers.\"\"\"\n\n\nimport tensorflow.compat.v2 as tf\n\nfrom keras.engine.base_layer import Layer\nfrom keras.engine.input_spec import InputSpec\nfrom keras.utils import conv_utils\n\n\nclass GlobalPooling2D(Layer):\n \"\"\"Abstract class for different global pooling 2D layers.\"\"\"\n\n def __init__(self, data_format=None, keepdims=False, **kwargs):\n super().__init__(**kwargs)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.input_spec = InputSpec(ndim=4)\n self.keepdims = keepdims\n\n def compute_output_shape(self, input_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if self.data_format == \"channels_last\":\n if self.keepdims:\n return tf.TensorShape([input_shape[0], 1, 1, input_shape[3]])\n else:\n return tf.TensorShape([input_shape[0], input_shape[3]])\n else:\n if self.keepdims:\n return tf.TensorShape([input_shape[0], input_shape[1], 1, 1])\n else:\n return tf.TensorShape([input_shape[0], input_shape[1]])\n\n def call(self, inputs):\n raise NotImplementedError\n\n def get_config(self):\n config = {\"data_format\": self.data_format, \"keepdims\": self.keepdims}\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n", "path": "keras/layers/pooling/base_global_pooling2d.py"}], "after_files": [{"content": "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Private base class for global pooling 3D layers.\"\"\"\n\n\nimport tensorflow.compat.v2 as tf\n\nfrom keras.engine.base_layer import Layer\nfrom keras.engine.input_spec import InputSpec\nfrom keras.utils import conv_utils\n\n\nclass GlobalPooling3D(Layer):\n \"\"\"Abstract class for different global pooling 3D layers.\"\"\"\n\n def __init__(self, data_format=None, keepdims=False, **kwargs):\n super().__init__(**kwargs)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.input_spec = InputSpec(ndim=5)\n self.keepdims = keepdims\n\n def _validate_reduction_axis(self, input_shape, axes):\n for axis in axes:\n if input_shape[axis] == 0:\n raise ValueError(\n f\"Incorrect input shape {input_shape} \"\n f\"with dimension 0 at reduction axis {axis}.\"\n )\n\n def build(self, input_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if self.data_format == \"channels_last\":\n self._validate_reduction_axis(input_shape, [1, 2, 3])\n else:\n self._validate_reduction_axis(input_shape, [2, 3, 4])\n\n def compute_output_shape(self, input_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if self.data_format == \"channels_last\":\n if self.keepdims:\n return tf.TensorShape([input_shape[0], 1, 1, 1, input_shape[4]])\n else:\n return tf.TensorShape([input_shape[0], input_shape[4]])\n else:\n if self.keepdims:\n return tf.TensorShape([input_shape[0], input_shape[1], 1, 1, 1])\n else:\n return tf.TensorShape([input_shape[0], input_shape[1]])\n\n def call(self, inputs):\n raise NotImplementedError\n\n def get_config(self):\n config = {\"data_format\": self.data_format, \"keepdims\": self.keepdims}\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n", "path": "keras/layers/pooling/base_global_pooling3d.py"}, {"content": "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Private base class for global pooling 1D layers.\"\"\"\n\n\nimport tensorflow.compat.v2 as tf\n\nfrom keras.engine.base_layer import Layer\nfrom keras.engine.input_spec import InputSpec\nfrom keras.utils import conv_utils\n\n\nclass GlobalPooling1D(Layer):\n \"\"\"Abstract class for different global pooling 1D layers.\"\"\"\n\n def __init__(self, data_format=\"channels_last\", keepdims=False, **kwargs):\n super().__init__(**kwargs)\n self.input_spec = InputSpec(ndim=3)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.keepdims = keepdims\n\n def _validate_reduction_axis(self, input_shape, axes):\n for axis in axes:\n if input_shape[axis] == 0:\n raise ValueError(\n f\"Incorrect input shape {input_shape} \"\n f\"with dimension 0 at reduction axis {axis}.\"\n )\n\n def build(self, input_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if self.data_format == \"channels_last\":\n self._validate_reduction_axis(input_shape, [1])\n else:\n self._validate_reduction_axis(input_shape, [2])\n\n def compute_output_shape(self, input_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if self.data_format == \"channels_first\":\n if self.keepdims:\n return tf.TensorShape([input_shape[0], input_shape[1], 1])\n else:\n return tf.TensorShape([input_shape[0], input_shape[1]])\n else:\n if self.keepdims:\n return tf.TensorShape([input_shape[0], 1, input_shape[2]])\n else:\n return tf.TensorShape([input_shape[0], input_shape[2]])\n\n def call(self, inputs):\n raise NotImplementedError\n\n def get_config(self):\n config = {\"data_format\": self.data_format, \"keepdims\": self.keepdims}\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n", "path": "keras/layers/pooling/base_global_pooling1d.py"}, {"content": "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Private base class for global pooling 2D layers.\"\"\"\n\n\nimport tensorflow.compat.v2 as tf\n\nfrom keras.engine.base_layer import Layer\nfrom keras.engine.input_spec import InputSpec\nfrom keras.utils import conv_utils\n\n\nclass GlobalPooling2D(Layer):\n \"\"\"Abstract class for different global pooling 2D layers.\"\"\"\n\n def __init__(self, data_format=None, keepdims=False, **kwargs):\n super().__init__(**kwargs)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.input_spec = InputSpec(ndim=4)\n self.keepdims = keepdims\n\n def _validate_reduction_axis(self, input_shape, axes):\n for axis in axes:\n if input_shape[axis] == 0:\n raise ValueError(\n f\"Incorrect input shape {input_shape} \"\n f\"with dimension 0 at reduction axis {axis}.\"\n )\n\n def build(self, input_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if self.data_format == \"channels_last\":\n self._validate_reduction_axis(input_shape, [1, 2])\n else:\n self._validate_reduction_axis(input_shape, [2, 3])\n\n def compute_output_shape(self, input_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if self.data_format == \"channels_last\":\n if self.keepdims:\n return tf.TensorShape([input_shape[0], 1, 1, input_shape[3]])\n else:\n return tf.TensorShape([input_shape[0], input_shape[3]])\n else:\n if self.keepdims:\n return tf.TensorShape([input_shape[0], input_shape[1], 1, 1])\n else:\n return tf.TensorShape([input_shape[0], input_shape[1]])\n\n def call(self, inputs):\n raise NotImplementedError\n\n def get_config(self):\n config = {\"data_format\": self.data_format, \"keepdims\": self.keepdims}\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n", "path": "keras/layers/pooling/base_global_pooling2d.py"}]} | 2,580 | 832 |
gh_patches_debug_31443 | rasdani/github-patches | git_diff | benoitc__gunicorn-1231 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError when running gunicorn over HTTPS
When I run gunicorn with my certificate and keyfile over HTTPS, sending any request to the server gives me this error.
```
Traceback (most recent call last):
File "/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/arbiter.py", line 515, in spawn_worker
worker.init_process()
File "/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/base.py", line 126, in init_process
self.run()
File "/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/sync.py", line 119, in run
self.run_for_one(timeout)
File "/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/sync.py", line 66, in run_for_one
self.accept(listener)
File "/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/sync.py", line 30, in accept
self.handle(listener, client, addr)
File "/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/sync.py", line 141, in handle
self.handle_error(req, client, addr, e)
File "/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/base.py", line 213, in handle_error
self.log.exception("Error handling request %s", req.uri)
AttributeError: 'NoneType' object has no attribute 'uri'
```
--- END ISSUE ---
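
For illustration only (this sketch is not one of the repository files listed below), the traceback pattern can be reproduced with a hypothetical error handler that assumes a parsed request object: when the failure happens before any HTTP request has been read, for example during the TLS handshake, the handler receives `req = None` and the `req.uri` access itself raises `AttributeError`. The names `handle_error_guarded` and `log` below are invented for the sketch.

```
import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("sketch")


def handle_error(req, exc):
    # Mirrors the failing pattern: req.uri is read unconditionally, so this
    # raises AttributeError whenever req is None.
    log.exception("Error handling request %s", req.uri)


def handle_error_guarded(req, exc):
    # Defensive variant: only touch req.uri when a request object exists,
    # otherwise log the original exception on its own.
    if hasattr(req, "uri"):
        log.exception("Error handling request %s", req.uri)
    else:
        log.exception("Error handling request: %s", exc)


try:
    raise OSError("TLS handshake failed before any HTTP request was read")
except OSError as exc:
    handle_error_guarded(None, exc)  # logs cleanly
    # handle_error(None, exc)        # would raise AttributeError, as in the traceback above
```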
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gunicorn/workers/base.py`
Content:
```
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 from datetime import datetime
7 import os
8 import signal
9 import sys
10 import time
11 import traceback
12 from random import randint
13
14
15 from gunicorn import util
16 from gunicorn.workers.workertmp import WorkerTmp
17 from gunicorn.reloader import Reloader
18 from gunicorn.http.errors import (
19 InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,
20 InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,
21 )
22 from gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest
23 from gunicorn.http.wsgi import default_environ, Response
24 from gunicorn.six import MAXSIZE
25
26
27 class Worker(object):
28
29 SIGNALS = [getattr(signal, "SIG%s" % x)
30 for x in "ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD".split()]
31
32 PIPE = []
33
34 def __init__(self, age, ppid, sockets, app, timeout, cfg, log):
35 """\
36 This is called pre-fork so it shouldn't do anything to the
37 current process. If there's a need to make process wide
38 changes you'll want to do that in ``self.init_process()``.
39 """
40 self.age = age
41 self.ppid = ppid
42 self.sockets = sockets
43 self.app = app
44 self.timeout = timeout
45 self.cfg = cfg
46 self.booted = False
47 self.aborted = False
48 self.reloader = None
49
50 self.nr = 0
51 jitter = randint(0, cfg.max_requests_jitter)
52 self.max_requests = cfg.max_requests + jitter or MAXSIZE
53 self.alive = True
54 self.log = log
55 self.tmp = WorkerTmp(cfg)
56
57 def __str__(self):
58 return "<Worker %s>" % self.pid
59
60 @property
61 def pid(self):
62 return os.getpid()
63
64 def notify(self):
65 """\
66 Your worker subclass must arrange to have this method called
67 once every ``self.timeout`` seconds. If you fail in accomplishing
68 this task, the master process will murder your workers.
69 """
70 self.tmp.notify()
71
72 def run(self):
73 """\
74 This is the mainloop of a worker process. You should override
75 this method in a subclass to provide the intended behaviour
76 for your particular evil schemes.
77 """
78 raise NotImplementedError()
79
80 def init_process(self):
81 """\
82 If you override this method in a subclass, the last statement
83 in the function should be to call this method with
84 super(MyWorkerClass, self).init_process() so that the ``run()``
85 loop is initiated.
86 """
87
88 # start the reloader
89 if self.cfg.reload:
90 def changed(fname):
91 self.log.info("Worker reloading: %s modified", fname)
92 os.kill(self.pid, signal.SIGQUIT)
93 self.reloader = Reloader(callback=changed)
94 self.reloader.start()
95
96 # set environment' variables
97 if self.cfg.env:
98 for k, v in self.cfg.env.items():
99 os.environ[k] = v
100
101 util.set_owner_process(self.cfg.uid, self.cfg.gid)
102
103 # Reseed the random number generator
104 util.seed()
105
106 # For waking ourselves up
107 self.PIPE = os.pipe()
108 for p in self.PIPE:
109 util.set_non_blocking(p)
110 util.close_on_exec(p)
111
112 # Prevent fd inheritance
113 [util.close_on_exec(s) for s in self.sockets]
114 util.close_on_exec(self.tmp.fileno())
115
116 self.log.close_on_exec()
117
118 self.init_signals()
119
120 self.cfg.post_worker_init(self)
121
122 self.load_wsgi()
123
124 # Enter main run loop
125 self.booted = True
126 self.run()
127
128 def load_wsgi(self):
129 try:
130 self.wsgi = self.app.wsgi()
131 except SyntaxError as e:
132 if not self.cfg.reload:
133 raise
134
135 self.log.exception(e)
136
137 # fix from PR #1228
138 # storing the traceback into exc_tb will create a circular reference.
139 # per https://docs.python.org/2/library/sys.html#sys.exc_info warning,
140 # delete the traceback after use.
141 try:
142 exc_type, exc_val, exc_tb = sys.exc_info()
143 self.reloader.add_extra_file(exc_val.filename)
144
145 tb_string = traceback.format_exc(exc_tb)
146 self.wsgi = util.make_fail_app(tb_string)
147 finally:
148 del exc_tb
149
150 def init_signals(self):
151 # reset signaling
152 [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]
153 # init new signaling
154 signal.signal(signal.SIGQUIT, self.handle_quit)
155 signal.signal(signal.SIGTERM, self.handle_exit)
156 signal.signal(signal.SIGINT, self.handle_quit)
157 signal.signal(signal.SIGWINCH, self.handle_winch)
158 signal.signal(signal.SIGUSR1, self.handle_usr1)
159 signal.signal(signal.SIGABRT, self.handle_abort)
160
161 # Don't let SIGTERM and SIGUSR1 disturb active requests
162 # by interrupting system calls
163 if hasattr(signal, 'siginterrupt'): # python >= 2.6
164 signal.siginterrupt(signal.SIGTERM, False)
165 signal.siginterrupt(signal.SIGUSR1, False)
166
167 def handle_usr1(self, sig, frame):
168 self.log.reopen_files()
169
170 def handle_exit(self, sig, frame):
171 self.alive = False
172
173 def handle_quit(self, sig, frame):
174 self.alive = False
175 # worker_int callback
176 self.cfg.worker_int(self)
177 time.sleep(0.1)
178 sys.exit(0)
179
180 def handle_abort(self, sig, frame):
181 self.alive = False
182 self.cfg.worker_abort(self)
183 sys.exit(1)
184
185 def handle_error(self, req, client, addr, exc):
186 request_start = datetime.now()
187 addr = addr or ('', -1) # unix socket case
188 if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,
189 InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,
190 LimitRequestLine, LimitRequestHeaders,
191 InvalidProxyLine, ForbiddenProxyRequest)):
192
193 status_int = 400
194 reason = "Bad Request"
195
196 if isinstance(exc, InvalidRequestLine):
197 mesg = "Invalid Request Line '%s'" % str(exc)
198 elif isinstance(exc, InvalidRequestMethod):
199 mesg = "Invalid Method '%s'" % str(exc)
200 elif isinstance(exc, InvalidHTTPVersion):
201 mesg = "Invalid HTTP Version '%s'" % str(exc)
202 elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):
203 mesg = "%s" % str(exc)
204 if not req and hasattr(exc, "req"):
205 req = exc.req # for access log
206 elif isinstance(exc, LimitRequestLine):
207 mesg = "%s" % str(exc)
208 elif isinstance(exc, LimitRequestHeaders):
209 mesg = "Error parsing headers: '%s'" % str(exc)
210 elif isinstance(exc, InvalidProxyLine):
211 mesg = "'%s'" % str(exc)
212 elif isinstance(exc, ForbiddenProxyRequest):
213 reason = "Forbidden"
214 mesg = "Request forbidden"
215 status_int = 403
216
217 msg = "Invalid request from ip={ip}: {error}"
218 self.log.debug(msg.format(ip=addr[0], error=str(exc)))
219 else:
220 self.log.exception("Error handling request %s", req.uri)
221
222 status_int = 500
223 reason = "Internal Server Error"
224 mesg = ""
225
226 if req is not None:
227 request_time = datetime.now() - request_start
228 environ = default_environ(req, client, self.cfg)
229 environ['REMOTE_ADDR'] = addr[0]
230 environ['REMOTE_PORT'] = str(addr[1])
231 resp = Response(req, client, self.cfg)
232 resp.status = "%s %s" % (status_int, reason)
233 resp.response_length = len(mesg)
234 self.log.access(resp, req, environ, request_time)
235
236 try:
237 util.write_error(client, status_int, reason, mesg)
238 except:
239 self.log.debug("Failed to send error message.")
240
241 def handle_winch(self, sig, fname):
242 # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.
243 return
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gunicorn/workers/base.py b/gunicorn/workers/base.py
--- a/gunicorn/workers/base.py
+++ b/gunicorn/workers/base.py
@@ -5,12 +5,12 @@
from datetime import datetime
import os
+from random import randint
import signal
+from ssl import SSLError
import sys
import time
import traceback
-from random import randint
-
from gunicorn import util
from gunicorn.workers.workertmp import WorkerTmp
@@ -181,7 +181,8 @@
if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,
InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,
LimitRequestLine, LimitRequestHeaders,
- InvalidProxyLine, ForbiddenProxyRequest)):
+ InvalidProxyLine, ForbiddenProxyRequest,
+ SSLError)):
status_int = 400
reason = "Bad Request"
@@ -206,12 +207,16 @@
reason = "Forbidden"
mesg = "Request forbidden"
status_int = 403
+ elif isinstance(exc, SSLError):
+ reason = "Forbidden"
+ mesg = "'%s'" % str(exc)
+ status_int = 403
msg = "Invalid request from ip={ip}: {error}"
self.log.debug(msg.format(ip=addr[0], error=str(exc)))
else:
- self.log.exception("Error handling request %s", req.uri)
-
+ if hasattr(req, "uri"):
+ self.log.exception("Error handling request %s", req.uri)
status_int = 500
reason = "Internal Server Error"
mesg = ""
| {"golden_diff": "diff --git a/gunicorn/workers/base.py b/gunicorn/workers/base.py\n--- a/gunicorn/workers/base.py\n+++ b/gunicorn/workers/base.py\n@@ -5,12 +5,12 @@\n \n from datetime import datetime\n import os\n+from random import randint\n import signal\n+from ssl import SSLError\n import sys\n import time\n import traceback\n-from random import randint\n-\n \n from gunicorn import util\n from gunicorn.workers.workertmp import WorkerTmp\n@@ -181,7 +181,8 @@\n if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,\n LimitRequestLine, LimitRequestHeaders,\n- InvalidProxyLine, ForbiddenProxyRequest)):\n+ InvalidProxyLine, ForbiddenProxyRequest,\n+ SSLError)):\n \n status_int = 400\n reason = \"Bad Request\"\n@@ -206,12 +207,16 @@\n reason = \"Forbidden\"\n mesg = \"Request forbidden\"\n status_int = 403\n+ elif isinstance(exc, SSLError):\n+ reason = \"Forbidden\"\n+ mesg = \"'%s'\" % str(exc)\n+ status_int = 403\n \n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n- self.log.exception(\"Error handling request %s\", req.uri)\n-\n+ if hasattr(req, \"uri\"):\n+ self.log.exception(\"Error handling request %s\", req.uri)\n status_int = 500\n reason = \"Internal Server Error\"\n mesg = \"\"\n", "issue": "AttributeError when running gunicorn over HTTPS\nWhen running gunicorn with my certificate and keyfile over https, sending any request to the server gives me this error. \n\n```\nTraceback (most recent call last):\n File \"/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/arbiter.py\", line 515, in spawn_worker\n worker.init_process()\n File \"/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/base.py\", line 126, in init_process\n self.run()\n File \"/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/sync.py\", line 119, in run\n self.run_for_one(timeout)\n File \"/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/sync.py\", line 66, in run_for_one\n self.accept(listener)\n File \"/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/sync.py\", line 30, in accept\n self.handle(listener, client, addr)\n File \"/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/sync.py\", line 141, in handle\n self.handle_error(req, client, addr, e)\n File \"/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/base.py\", line 213, in handle_error\n self.log.exception(\"Error handling request %s\", req.uri)\nAttributeError: 'NoneType' object has no attribute 'uri'\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport os\nimport signal\nimport sys\nimport time\nimport traceback\nfrom random import randint\n\n\nfrom gunicorn import util\nfrom gunicorn.workers.workertmp import WorkerTmp\nfrom gunicorn.reloader import Reloader\nfrom gunicorn.http.errors import (\n InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,\n)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.http.wsgi import default_environ, Response\nfrom gunicorn.six import MAXSIZE\n\n\nclass Worker(object):\n\n SIGNALS = [getattr(signal, \"SIG%s\" % x)\n for x in \"ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD\".split()]\n\n PIPE = []\n\n def __init__(self, age, ppid, 
sockets, app, timeout, cfg, log):\n \"\"\"\\\n This is called pre-fork so it shouldn't do anything to the\n current process. If there's a need to make process wide\n changes you'll want to do that in ``self.init_process()``.\n \"\"\"\n self.age = age\n self.ppid = ppid\n self.sockets = sockets\n self.app = app\n self.timeout = timeout\n self.cfg = cfg\n self.booted = False\n self.aborted = False\n self.reloader = None\n\n self.nr = 0\n jitter = randint(0, cfg.max_requests_jitter)\n self.max_requests = cfg.max_requests + jitter or MAXSIZE\n self.alive = True\n self.log = log\n self.tmp = WorkerTmp(cfg)\n\n def __str__(self):\n return \"<Worker %s>\" % self.pid\n\n @property\n def pid(self):\n return os.getpid()\n\n def notify(self):\n \"\"\"\\\n Your worker subclass must arrange to have this method called\n once every ``self.timeout`` seconds. If you fail in accomplishing\n this task, the master process will murder your workers.\n \"\"\"\n self.tmp.notify()\n\n def run(self):\n \"\"\"\\\n This is the mainloop of a worker process. You should override\n this method in a subclass to provide the intended behaviour\n for your particular evil schemes.\n \"\"\"\n raise NotImplementedError()\n\n def init_process(self):\n \"\"\"\\\n If you override this method in a subclass, the last statement\n in the function should be to call this method with\n super(MyWorkerClass, self).init_process() so that the ``run()``\n loop is initiated.\n \"\"\"\n\n # start the reloader\n if self.cfg.reload:\n def changed(fname):\n self.log.info(\"Worker reloading: %s modified\", fname)\n os.kill(self.pid, signal.SIGQUIT)\n self.reloader = Reloader(callback=changed)\n self.reloader.start()\n\n # set environment' variables\n if self.cfg.env:\n for k, v in self.cfg.env.items():\n os.environ[k] = v\n\n util.set_owner_process(self.cfg.uid, self.cfg.gid)\n\n # Reseed the random number generator\n util.seed()\n\n # For waking ourselves up\n self.PIPE = os.pipe()\n for p in self.PIPE:\n util.set_non_blocking(p)\n util.close_on_exec(p)\n\n # Prevent fd inheritance\n [util.close_on_exec(s) for s in self.sockets]\n util.close_on_exec(self.tmp.fileno())\n\n self.log.close_on_exec()\n\n self.init_signals()\n\n self.cfg.post_worker_init(self)\n\n self.load_wsgi()\n\n # Enter main run loop\n self.booted = True\n self.run()\n\n def load_wsgi(self):\n try:\n self.wsgi = self.app.wsgi()\n except SyntaxError as e:\n if not self.cfg.reload:\n raise\n\n self.log.exception(e)\n\n # fix from PR #1228\n # storing the traceback into exc_tb will create a circular reference.\n # per https://docs.python.org/2/library/sys.html#sys.exc_info warning,\n # delete the traceback after use.\n try:\n exc_type, exc_val, exc_tb = sys.exc_info()\n self.reloader.add_extra_file(exc_val.filename)\n\n tb_string = traceback.format_exc(exc_tb)\n self.wsgi = util.make_fail_app(tb_string)\n finally:\n del exc_tb\n\n def init_signals(self):\n # reset signaling\n [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]\n # init new signaling\n signal.signal(signal.SIGQUIT, self.handle_quit)\n signal.signal(signal.SIGTERM, self.handle_exit)\n signal.signal(signal.SIGINT, self.handle_quit)\n signal.signal(signal.SIGWINCH, self.handle_winch)\n signal.signal(signal.SIGUSR1, self.handle_usr1)\n signal.signal(signal.SIGABRT, self.handle_abort)\n\n # Don't let SIGTERM and SIGUSR1 disturb active requests\n # by interrupting system calls\n if hasattr(signal, 'siginterrupt'): # python >= 2.6\n signal.siginterrupt(signal.SIGTERM, False)\n signal.siginterrupt(signal.SIGUSR1, 
False)\n\n def handle_usr1(self, sig, frame):\n self.log.reopen_files()\n\n def handle_exit(self, sig, frame):\n self.alive = False\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n def handle_abort(self, sig, frame):\n self.alive = False\n self.cfg.worker_abort(self)\n sys.exit(1)\n\n def handle_error(self, req, client, addr, exc):\n request_start = datetime.now()\n addr = addr or ('', -1) # unix socket case\n if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,\n LimitRequestLine, LimitRequestHeaders,\n InvalidProxyLine, ForbiddenProxyRequest)):\n\n status_int = 400\n reason = \"Bad Request\"\n\n if isinstance(exc, InvalidRequestLine):\n mesg = \"Invalid Request Line '%s'\" % str(exc)\n elif isinstance(exc, InvalidRequestMethod):\n mesg = \"Invalid Method '%s'\" % str(exc)\n elif isinstance(exc, InvalidHTTPVersion):\n mesg = \"Invalid HTTP Version '%s'\" % str(exc)\n elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):\n mesg = \"%s\" % str(exc)\n if not req and hasattr(exc, \"req\"):\n req = exc.req # for access log\n elif isinstance(exc, LimitRequestLine):\n mesg = \"%s\" % str(exc)\n elif isinstance(exc, LimitRequestHeaders):\n mesg = \"Error parsing headers: '%s'\" % str(exc)\n elif isinstance(exc, InvalidProxyLine):\n mesg = \"'%s'\" % str(exc)\n elif isinstance(exc, ForbiddenProxyRequest):\n reason = \"Forbidden\"\n mesg = \"Request forbidden\"\n status_int = 403\n\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n self.log.exception(\"Error handling request %s\", req.uri)\n\n status_int = 500\n reason = \"Internal Server Error\"\n mesg = \"\"\n\n if req is not None:\n request_time = datetime.now() - request_start\n environ = default_environ(req, client, self.cfg)\n environ['REMOTE_ADDR'] = addr[0]\n environ['REMOTE_PORT'] = str(addr[1])\n resp = Response(req, client, self.cfg)\n resp.status = \"%s %s\" % (status_int, reason)\n resp.response_length = len(mesg)\n self.log.access(resp, req, environ, request_time)\n\n try:\n util.write_error(client, status_int, reason, mesg)\n except:\n self.log.debug(\"Failed to send error message.\")\n\n def handle_winch(self, sig, fname):\n # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.\n return\n", "path": "gunicorn/workers/base.py"}], "after_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport os\nfrom random import randint\nimport signal\nfrom ssl import SSLError\nimport sys\nimport time\nimport traceback\n\nfrom gunicorn import util\nfrom gunicorn.workers.workertmp import WorkerTmp\nfrom gunicorn.reloader import Reloader\nfrom gunicorn.http.errors import (\n InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,\n)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.http.wsgi import default_environ, Response\nfrom gunicorn.six import MAXSIZE\n\n\nclass Worker(object):\n\n SIGNALS = [getattr(signal, \"SIG%s\" % x)\n for x in \"ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD\".split()]\n\n PIPE = []\n\n def __init__(self, age, ppid, sockets, app, timeout, cfg, log):\n \"\"\"\\\n This is called pre-fork so it shouldn't do anything to the\n current process. 
If there's a need to make process wide\n changes you'll want to do that in ``self.init_process()``.\n \"\"\"\n self.age = age\n self.ppid = ppid\n self.sockets = sockets\n self.app = app\n self.timeout = timeout\n self.cfg = cfg\n self.booted = False\n self.aborted = False\n self.reloader = None\n\n self.nr = 0\n jitter = randint(0, cfg.max_requests_jitter)\n self.max_requests = cfg.max_requests + jitter or MAXSIZE\n self.alive = True\n self.log = log\n self.tmp = WorkerTmp(cfg)\n\n def __str__(self):\n return \"<Worker %s>\" % self.pid\n\n @property\n def pid(self):\n return os.getpid()\n\n def notify(self):\n \"\"\"\\\n Your worker subclass must arrange to have this method called\n once every ``self.timeout`` seconds. If you fail in accomplishing\n this task, the master process will murder your workers.\n \"\"\"\n self.tmp.notify()\n\n def run(self):\n \"\"\"\\\n This is the mainloop of a worker process. You should override\n this method in a subclass to provide the intended behaviour\n for your particular evil schemes.\n \"\"\"\n raise NotImplementedError()\n\n def init_process(self):\n \"\"\"\\\n If you override this method in a subclass, the last statement\n in the function should be to call this method with\n super(MyWorkerClass, self).init_process() so that the ``run()``\n loop is initiated.\n \"\"\"\n\n # start the reloader\n if self.cfg.reload:\n def changed(fname):\n self.log.info(\"Worker reloading: %s modified\", fname)\n os.kill(self.pid, signal.SIGQUIT)\n self.reloader = Reloader(callback=changed)\n self.reloader.start()\n\n # set environment' variables\n if self.cfg.env:\n for k, v in self.cfg.env.items():\n os.environ[k] = v\n\n util.set_owner_process(self.cfg.uid, self.cfg.gid)\n\n # Reseed the random number generator\n util.seed()\n\n # For waking ourselves up\n self.PIPE = os.pipe()\n for p in self.PIPE:\n util.set_non_blocking(p)\n util.close_on_exec(p)\n\n # Prevent fd inheritance\n [util.close_on_exec(s) for s in self.sockets]\n util.close_on_exec(self.tmp.fileno())\n\n self.log.close_on_exec()\n\n self.init_signals()\n\n self.cfg.post_worker_init(self)\n\n self.load_wsgi()\n\n # Enter main run loop\n self.booted = True\n self.run()\n\n def load_wsgi(self):\n try:\n self.wsgi = self.app.wsgi()\n except SyntaxError as e:\n if not self.cfg.reload:\n raise\n\n self.log.exception(e)\n\n exc_type, exc_val, exc_tb = sys.exc_info()\n self.reloader.add_extra_file(exc_val.filename)\n\n tb_string = traceback.format_exc(exc_tb)\n self.wsgi = util.make_fail_app(tb_string)\n\n def init_signals(self):\n # reset signaling\n [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]\n # init new signaling\n signal.signal(signal.SIGQUIT, self.handle_quit)\n signal.signal(signal.SIGTERM, self.handle_exit)\n signal.signal(signal.SIGINT, self.handle_quit)\n signal.signal(signal.SIGWINCH, self.handle_winch)\n signal.signal(signal.SIGUSR1, self.handle_usr1)\n signal.signal(signal.SIGABRT, self.handle_abort)\n\n # Don't let SIGTERM and SIGUSR1 disturb active requests\n # by interrupting system calls\n if hasattr(signal, 'siginterrupt'): # python >= 2.6\n signal.siginterrupt(signal.SIGTERM, False)\n signal.siginterrupt(signal.SIGUSR1, False)\n\n def handle_usr1(self, sig, frame):\n self.log.reopen_files()\n\n def handle_exit(self, sig, frame):\n self.alive = False\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n def handle_abort(self, sig, frame):\n self.alive = False\n 
self.cfg.worker_abort(self)\n sys.exit(1)\n\n def handle_error(self, req, client, addr, exc):\n request_start = datetime.now()\n addr = addr or ('', -1) # unix socket case\n if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,\n LimitRequestLine, LimitRequestHeaders,\n InvalidProxyLine, ForbiddenProxyRequest,\n SSLError)):\n\n status_int = 400\n reason = \"Bad Request\"\n\n if isinstance(exc, InvalidRequestLine):\n mesg = \"Invalid Request Line '%s'\" % str(exc)\n elif isinstance(exc, InvalidRequestMethod):\n mesg = \"Invalid Method '%s'\" % str(exc)\n elif isinstance(exc, InvalidHTTPVersion):\n mesg = \"Invalid HTTP Version '%s'\" % str(exc)\n elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):\n mesg = \"%s\" % str(exc)\n if not req and hasattr(exc, \"req\"):\n req = exc.req # for access log\n elif isinstance(exc, LimitRequestLine):\n mesg = \"%s\" % str(exc)\n elif isinstance(exc, LimitRequestHeaders):\n mesg = \"Error parsing headers: '%s'\" % str(exc)\n elif isinstance(exc, InvalidProxyLine):\n mesg = \"'%s'\" % str(exc)\n elif isinstance(exc, ForbiddenProxyRequest):\n reason = \"Forbidden\"\n mesg = \"Request forbidden\"\n status_int = 403\n elif isinstance(exc, SSLError):\n reason = \"Forbidden\"\n mesg = \"'%s'\" % str(exc)\n status_int = 403\n\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n if hasattr(req, \"uri\"):\n self.log.exception(\"Error handling request %s\", req.uri)\n status_int = 500\n reason = \"Internal Server Error\"\n mesg = \"\"\n\n if req is not None:\n request_time = datetime.now() - request_start\n environ = default_environ(req, client, self.cfg)\n environ['REMOTE_ADDR'] = addr[0]\n environ['REMOTE_PORT'] = str(addr[1])\n resp = Response(req, client, self.cfg)\n resp.status = \"%s %s\" % (status_int, reason)\n resp.response_length = len(mesg)\n self.log.access(resp, req, environ, request_time)\n\n try:\n util.write_error(client, status_int, reason, mesg)\n except:\n self.log.debug(\"Failed to send error message.\")\n\n def handle_winch(self, sig, fname):\n # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.\n return\n", "path": "gunicorn/workers/base.py"}]} | 3,059 | 370 |
gh_patches_debug_32720 | rasdani/github-patches | git_diff | kartoza__prj.app-164 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
We need to ensure that the pending items list shows only pending items
At the moment, after you edit or create a new site, the pending items list shows all items; it should show only items awaiting moderation.
--- END ISSUE ---
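
For context only (this sketch is not one of the repository files listed below), the intended behaviour is that a dedicated manager restricts its queryset to unapproved records. Note that Django 1.6 renamed the manager hook to `get_queryset()`, and from 1.8 onward the old `get_query_set()` spelling is no longer consulted, so an override under the old name silently returns every record. The following is a minimal models.py-style sketch with a hypothetical `Item` model; it assumes a configured Django project.

```
from django.db import models


class PendingManager(models.Manager):
    """Return only records that are still awaiting moderation."""

    def get_queryset(self):
        # Django 1.6+ calls get_queryset(); from 1.8 an override named
        # get_query_set() is never invoked, so no filtering would happen.
        return super(PendingManager, self).get_queryset().filter(approved=False)


class Item(models.Model):
    name = models.CharField(max_length=255)
    approved = models.BooleanField(default=False)

    objects = models.Manager()
    pending_objects = PendingManager()


# Item.pending_objects.all() yields only rows with approved=False,
# which is what a "pending items" listing should build on.
```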
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django_project/base/models/project.py`
Content:
```
1 # coding=utf-8
2 """Project model used by all apps."""
3 import os
4 import logging
5 from django.core.urlresolvers import reverse
6 from django.utils.text import slugify
7 from django.conf.global_settings import MEDIA_ROOT
8 from django.db import models
9 from django.utils.translation import ugettext_lazy as _
10 from changes.models.version import Version
11 from core.settings.contrib import STOP_WORDS
12 from django.contrib.auth.models import User
13 from django.conf import settings
14
15
16 logger = logging.getLogger(__name__)
17
18
19 class ApprovedProjectManager(models.Manager):
20 """Custom project manager that shows only approved records."""
21
22 def get_query_set(self):
23 """Query set generator"""
24 return super(
25 ApprovedProjectManager, self).get_query_set().filter(
26 approved=True)
27
28
29 class UnapprovedProjectManager(models.Manager):
30 """Custom project manager that shows only unapproved records."""
31
32 def get_query_set(self):
33 """Query set generator"""
34 return super(
35 UnapprovedProjectManager, self).get_query_set().filter(
36 approved=False)
37
38
39 class PublicProjectManager(models.Manager):
40 """Custom project manager that shows only public and approved projects."""
41
42 def get_query_set(self):
43 """Query set generator"""
44 return super(
45 PublicProjectManager, self).get_query_set().filter(
46 private=False).filter(approved=True)
47
48
49 class Project(models.Model):
50 """A project model e.g. QGIS, InaSAFE etc."""
51 name = models.CharField(
52 help_text=_('Name of this project.'),
53 max_length=255,
54 null=False,
55 blank=False,
56 unique=True)
57
58 description = models.CharField(
59 help_text=_('A description for the project'),
60 max_length=500,
61 blank=True,
62 null=True
63 )
64
65 image_file = models.ImageField(
66 help_text=_('A logo image for this project. '
67 'Most browsers support dragging the image directly on to '
68 'the "Choose File" button above.'),
69 upload_to=os.path.join(MEDIA_ROOT, 'images/projects'),
70 blank=True
71 )
72
73 approved = models.BooleanField(
74 help_text=_('Whether this project has been approved for use yet.'),
75 default=False
76 )
77
78 private = models.BooleanField(
79 help_text=_('Only visible to logged-in users?'),
80 default=False
81 )
82
83 owner = models.ForeignKey(User)
84 slug = models.SlugField(unique=True)
85 objects = models.Manager()
86 approved_objects = ApprovedProjectManager()
87 unapproved_objects = UnapprovedProjectManager()
88 public_objects = PublicProjectManager()
89
90 # noinspection PyClassicStyleClass
91 class Meta:
92 """Meta class for project."""
93 app_label = 'base'
94 ordering = ['name']
95
96 def save(self, *args, **kwargs):
97 """Overloaded save method.
98
99 :param args:
100 :param kwargs:
101 """
102 if not self.pk:
103 words = self.name.split()
104 filtered_words = [t for t in words if t.lower() not in STOP_WORDS]
105 new_list = unicode(' '.join(filtered_words))
106 self.slug = slugify(new_list)[:50]
107 super(Project, self).save(*args, **kwargs)
108
109 def __unicode__(self):
110 return u'%s' % self.name
111
112 def get_absolute_url(self):
113 """Return URL to project detail page
114
115 :return: URL
116 :rtype: str
117
118 """
119 return reverse('project-detail', kwargs={'slug': self.slug})
120
121 def versions(self):
122 """Get all the versions for this project."""
123 qs = Version.objects.filter(project=self).order_by('-padded_version')
124 return qs
125
126 def latest_versions(self):
127 """Get the latest version.
128
129 How many versions returned is determined by the pagination threshold.
130
131 :returns: List of versions.
132 :rtype: list"""
133 return self.versions()[:settings.PROJECT_VERSION_LIST_SIZE]
134
135 @staticmethod
136 def pagination_threshold(self):
137 """Find out how many versions to list per page.
138
139 :returns: The count of items to show per page as defined in
140 settings.PROJECT_VERSION_LIST_SIZE.
141 :rtype: int
142 """
143 return settings.PROJECT_VERSION_LIST_SIZE
144
145 def pagination_threshold_exceeded(self):
146 """Check if project version count exceeds pagination threshold.
147
148 :returns: Flag indicating if there are more versions than
149 self.threshold.
150 :rtype: bool
151 """
152 if self.versions().count() >= settings.PROJECT_VERSION_LIST_SIZE:
153 return True
154 else:
155 return False
156
```
Path: `django_project/base/views/project.py`
Content:
```
1 # coding=utf-8
2 """Views for projects."""
3 # noinspection PyUnresolvedReferences
4 import logging
5 from django.core.urlresolvers import reverse
6 from django.http import Http404
7 from django.shortcuts import get_object_or_404
8 from django.views.generic import (
9 ListView,
10 CreateView,
11 DeleteView,
12 DetailView,
13 UpdateView,
14 RedirectView,
15 )
16 from braces.views import LoginRequiredMixin, StaffuserRequiredMixin
17 from pure_pagination.mixins import PaginationMixin
18 from changes.models import Version
19 from ..models import Project
20 from ..forms import ProjectForm
21 from vota.models import Committee, Ballot
22 from django.conf import settings
23
24 logger = logging.getLogger(__name__)
25
26
27 class ProjectMixin(object):
28 model = Project
29 form_class = ProjectForm
30
31
32 class ProjectBallotListView(ProjectMixin, PaginationMixin, DetailView):
33 """List all ballots within in a project"""
34 context_object_name = 'project'
35 template_name = 'project/ballot-list.html'
36 paginate_by = 1000
37
38 def get_context_data(self, **kwargs):
39 context = super(
40 ProjectBallotListView, self).get_context_data(**kwargs)
41 committees = Committee.objects.filter(project=self.object)
42 ballots = []
43 for committee in committees:
44 if self.request.user.is_authenticated and \
45 self.request.user in committee.users.all():
46 committee_ballots = Ballot.objects.filter(
47 committee=committee)
48 else:
49 committee_ballots = Ballot.objects.filter(
50 committee=committee).filter(private=False)
51 if committee_ballots:
52 ballots.append(committee_ballots)
53 context['ballots_list'] = ballots
54 return context
55
56 def get_queryset(self):
57 if self.request.user.is_authenticated():
58 projects_qs = Project.approved_objects.all()
59 else:
60 projects_qs = Project.public_objects.all()
61 return projects_qs
62
63
64 class ProjectListView(ProjectMixin, PaginationMixin, ListView):
65 """List all approved projects"""
66 context_object_name = 'projects'
67 template_name = 'project/list.html'
68 paginate_by = 1000
69
70 def get_context_data(self, **kwargs):
71 """Add to the view's context data
72
73 :param kwargs: (django dictionary)
74 :type kwargs: dict
75
76 :return: context
77 :rtype: dict
78
79 """
80 context = super(ProjectListView, self).get_context_data(**kwargs)
81 context['num_projects'] = self.get_queryset().count()
82 context[
83 'PROJECT_VERSION_LIST_SIZE'] = settings.PROJECT_VERSION_LIST_SIZE
84 return context
85
86 def get_queryset(self):
87 """Specify the queryset
88
89 Return a specific queryset based on the requesting user's status
90
91 :return: If user.is_authenticated: All approved projects
92 If not user.is_authenticated: All public projects
93 :rtype: QuerySet
94
95 """
96 if self.request.user.is_authenticated():
97 projects_qs = Project.approved_objects.all()
98 else:
99 projects_qs = Project.public_objects.all()
100 return projects_qs
101
102
103 class ProjectDetailView(ProjectMixin, DetailView):
104 context_object_name = 'project'
105 template_name = 'project/detail.html'
106
107 def get_context_data(self, **kwargs):
108 context = super(ProjectDetailView, self).get_context_data(**kwargs)
109 context['projects'] = self.get_queryset()
110 context['committees'] = Committee.objects.filter(project=self.object)
111 page_size = settings.PROJECT_VERSION_LIST_SIZE
112 context['versions'] = Version.objects.filter(
113 project=self.object).order_by('-padded_version')[:page_size]
114 return context
115
116 def get_queryset(self):
117 projects_qs = Project.approved_objects.all()
118 return projects_qs
119
120 def get_object(self, queryset=None):
121 obj = super(ProjectDetailView, self).get_object(queryset)
122 obj.request_user = self.request.user
123 return obj
124
125
126 class ProjectDeleteView(LoginRequiredMixin, ProjectMixin, DeleteView):
127 context_object_name = 'project'
128 template_name = 'project/delete.html'
129
130 def get_success_url(self):
131 return reverse('project-list')
132
133 def get_queryset(self):
134 if not self.request.user.is_authenticated():
135 raise Http404
136
137 qs = Project.objects.all()
138 if self.request.user.is_staff:
139 return qs
140 else:
141 return qs.filter(creator=self.request.user)
142
143
144 class ProjectCreateView(LoginRequiredMixin, ProjectMixin, CreateView):
145 context_object_name = 'project'
146 template_name = 'project/create.html'
147
148 def get_success_url(self):
149 return reverse('pending-project-list')
150
151 def get_form_kwargs(self):
152 kwargs = super(ProjectCreateView, self).get_form_kwargs()
153 kwargs.update({'user': self.request.user})
154 return kwargs
155
156
157 class ProjectUpdateView(LoginRequiredMixin, ProjectMixin, UpdateView):
158 context_object_name = 'project'
159 template_name = 'project/update.html'
160
161 def get_form_kwargs(self):
162 kwargs = super(ProjectUpdateView, self).get_form_kwargs()
163 kwargs.update({'user': self.request.user})
164 return kwargs
165
166 def get_queryset(self):
167 qs = Project.objects
168 if self.request.user.is_staff:
169 return qs
170 else:
171 return qs.filter(creator=self.request.user)
172
173 def get_success_url(self):
174 return reverse('project-detail', kwargs={'slug': self.object.slug})
175
176
177 class PendingProjectListView(
178 ProjectMixin, PaginationMixin, ListView, StaffuserRequiredMixin):
179 """List all users unapproved projects - staff users see all unapproved."""
180 context_object_name = 'projects'
181 template_name = 'project/list.html'
182 paginate_by = settings.PROJECT_VERSION_LIST_SIZE
183
184 def get_queryset(self):
185 projects_qs = Project.unapproved_objects.all()
186 if self.request.user.is_staff:
187 return projects_qs
188 else:
189 return projects_qs.filter(creator=self.request.user)
190
191 def get_context_data(self, **kwargs):
192 context = super(
193 PendingProjectListView, self).get_context_data(**kwargs)
194 context['num_projects'] = self.get_queryset().count()
195 context['unapproved'] = True
196 return context
197
198
199 class ApproveProjectView(StaffuserRequiredMixin, ProjectMixin, RedirectView):
200 permanent = False
201 query_string = True
202 pattern_name = 'pending-project-list'
203
204 def get_redirect_url(self, slug):
205 projects_qs = Project.unapproved_objects.all()
206 project = get_object_or_404(projects_qs, slug=slug)
207 project.approved = True
208 project.save()
209 return reverse(self.pattern_name)
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django_project/base/models/project.py b/django_project/base/models/project.py
--- a/django_project/base/models/project.py
+++ b/django_project/base/models/project.py
@@ -19,30 +19,30 @@
class ApprovedProjectManager(models.Manager):
"""Custom project manager that shows only approved records."""
- def get_query_set(self):
+ def get_queryset(self):
"""Query set generator"""
return super(
- ApprovedProjectManager, self).get_query_set().filter(
+ ApprovedProjectManager, self).get_queryset().filter(
approved=True)
class UnapprovedProjectManager(models.Manager):
"""Custom project manager that shows only unapproved records."""
- def get_query_set(self):
+ def get_queryset(self):
"""Query set generator"""
return super(
- UnapprovedProjectManager, self).get_query_set().filter(
+ UnapprovedProjectManager, self).get_queryset().filter(
approved=False)
class PublicProjectManager(models.Manager):
"""Custom project manager that shows only public and approved projects."""
- def get_query_set(self):
+ def get_queryset(self):
"""Query set generator"""
return super(
- PublicProjectManager, self).get_query_set().filter(
+ PublicProjectManager, self).get_queryset().filter(
private=False).filter(approved=True)
diff --git a/django_project/base/views/project.py b/django_project/base/views/project.py
--- a/django_project/base/views/project.py
+++ b/django_project/base/views/project.py
@@ -199,7 +199,7 @@
class ApproveProjectView(StaffuserRequiredMixin, ProjectMixin, RedirectView):
permanent = False
query_string = True
- pattern_name = 'pending-project-list'
+ pattern_name = 'home'
def get_redirect_url(self, slug):
projects_qs = Project.unapproved_objects.all()
| {"golden_diff": "diff --git a/django_project/base/models/project.py b/django_project/base/models/project.py\n--- a/django_project/base/models/project.py\n+++ b/django_project/base/models/project.py\n@@ -19,30 +19,30 @@\n class ApprovedProjectManager(models.Manager):\n \"\"\"Custom project manager that shows only approved records.\"\"\"\n \n- def get_query_set(self):\n+ def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n- ApprovedProjectManager, self).get_query_set().filter(\n+ ApprovedProjectManager, self).get_queryset().filter(\n approved=True)\n \n \n class UnapprovedProjectManager(models.Manager):\n \"\"\"Custom project manager that shows only unapproved records.\"\"\"\n \n- def get_query_set(self):\n+ def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n- UnapprovedProjectManager, self).get_query_set().filter(\n+ UnapprovedProjectManager, self).get_queryset().filter(\n approved=False)\n \n \n class PublicProjectManager(models.Manager):\n \"\"\"Custom project manager that shows only public and approved projects.\"\"\"\n \n- def get_query_set(self):\n+ def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n- PublicProjectManager, self).get_query_set().filter(\n+ PublicProjectManager, self).get_queryset().filter(\n private=False).filter(approved=True)\n \n \ndiff --git a/django_project/base/views/project.py b/django_project/base/views/project.py\n--- a/django_project/base/views/project.py\n+++ b/django_project/base/views/project.py\n@@ -199,7 +199,7 @@\n class ApproveProjectView(StaffuserRequiredMixin, ProjectMixin, RedirectView):\n permanent = False\n query_string = True\n- pattern_name = 'pending-project-list'\n+ pattern_name = 'home'\n \n def get_redirect_url(self, slug):\n projects_qs = Project.unapproved_objects.all()\n", "issue": "We need to ensure that pending items shows only pending items\nAt the moment after you edit or create a new site, the pending items list shows all items - it should show only items awaiting moderation.\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"Project model used by all apps.\"\"\"\nimport os\nimport logging\nfrom django.core.urlresolvers import reverse\nfrom django.utils.text import slugify\nfrom django.conf.global_settings import MEDIA_ROOT\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom changes.models.version import Version\nfrom core.settings.contrib import STOP_WORDS\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApprovedProjectManager(models.Manager):\n \"\"\"Custom project manager that shows only approved records.\"\"\"\n\n def get_query_set(self):\n \"\"\"Query set generator\"\"\"\n return super(\n ApprovedProjectManager, self).get_query_set().filter(\n approved=True)\n\n\nclass UnapprovedProjectManager(models.Manager):\n \"\"\"Custom project manager that shows only unapproved records.\"\"\"\n\n def get_query_set(self):\n \"\"\"Query set generator\"\"\"\n return super(\n UnapprovedProjectManager, self).get_query_set().filter(\n approved=False)\n\n\nclass PublicProjectManager(models.Manager):\n \"\"\"Custom project manager that shows only public and approved projects.\"\"\"\n\n def get_query_set(self):\n \"\"\"Query set generator\"\"\"\n return super(\n PublicProjectManager, self).get_query_set().filter(\n private=False).filter(approved=True)\n\n\nclass Project(models.Model):\n \"\"\"A project model e.g. 
QGIS, InaSAFE etc.\"\"\"\n name = models.CharField(\n help_text=_('Name of this project.'),\n max_length=255,\n null=False,\n blank=False,\n unique=True)\n\n description = models.CharField(\n help_text=_('A description for the project'),\n max_length=500,\n blank=True,\n null=True\n )\n\n image_file = models.ImageField(\n help_text=_('A logo image for this project. '\n 'Most browsers support dragging the image directly on to '\n 'the \"Choose File\" button above.'),\n upload_to=os.path.join(MEDIA_ROOT, 'images/projects'),\n blank=True\n )\n\n approved = models.BooleanField(\n help_text=_('Whether this project has been approved for use yet.'),\n default=False\n )\n\n private = models.BooleanField(\n help_text=_('Only visible to logged-in users?'),\n default=False\n )\n\n owner = models.ForeignKey(User)\n slug = models.SlugField(unique=True)\n objects = models.Manager()\n approved_objects = ApprovedProjectManager()\n unapproved_objects = UnapprovedProjectManager()\n public_objects = PublicProjectManager()\n\n # noinspection PyClassicStyleClass\n class Meta:\n \"\"\"Meta class for project.\"\"\"\n app_label = 'base'\n ordering = ['name']\n\n def save(self, *args, **kwargs):\n \"\"\"Overloaded save method.\n\n :param args:\n :param kwargs:\n \"\"\"\n if not self.pk:\n words = self.name.split()\n filtered_words = [t for t in words if t.lower() not in STOP_WORDS]\n new_list = unicode(' '.join(filtered_words))\n self.slug = slugify(new_list)[:50]\n super(Project, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s' % self.name\n\n def get_absolute_url(self):\n \"\"\"Return URL to project detail page\n\n :return: URL\n :rtype: str\n\n \"\"\"\n return reverse('project-detail', kwargs={'slug': self.slug})\n\n def versions(self):\n \"\"\"Get all the versions for this project.\"\"\"\n qs = Version.objects.filter(project=self).order_by('-padded_version')\n return qs\n\n def latest_versions(self):\n \"\"\"Get the latest version.\n\n How many versions returned is determined by the pagination threshold.\n\n :returns: List of versions.\n :rtype: list\"\"\"\n return self.versions()[:settings.PROJECT_VERSION_LIST_SIZE]\n\n @staticmethod\n def pagination_threshold(self):\n \"\"\"Find out how many versions to list per page.\n\n :returns: The count of items to show per page as defined in\n settings.PROJECT_VERSION_LIST_SIZE.\n :rtype: int\n \"\"\"\n return settings.PROJECT_VERSION_LIST_SIZE\n\n def pagination_threshold_exceeded(self):\n \"\"\"Check if project version count exceeds pagination threshold.\n\n :returns: Flag indicating if there are more versions than\n self.threshold.\n :rtype: bool\n \"\"\"\n if self.versions().count() >= settings.PROJECT_VERSION_LIST_SIZE:\n return True\n else:\n return False\n", "path": "django_project/base/models/project.py"}, {"content": "# coding=utf-8\n\"\"\"Views for projects.\"\"\"\n# noinspection PyUnresolvedReferences\nimport logging\nfrom django.core.urlresolvers import reverse\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic import (\n ListView,\n CreateView,\n DeleteView,\n DetailView,\n UpdateView,\n RedirectView,\n)\nfrom braces.views import LoginRequiredMixin, StaffuserRequiredMixin\nfrom pure_pagination.mixins import PaginationMixin\nfrom changes.models import Version\nfrom ..models import Project\nfrom ..forms import ProjectForm\nfrom vota.models import Committee, Ballot\nfrom django.conf import settings\n\nlogger = logging.getLogger(__name__)\n\n\nclass ProjectMixin(object):\n model = 
Project\n form_class = ProjectForm\n\n\nclass ProjectBallotListView(ProjectMixin, PaginationMixin, DetailView):\n \"\"\"List all ballots within in a project\"\"\"\n context_object_name = 'project'\n template_name = 'project/ballot-list.html'\n paginate_by = 1000\n\n def get_context_data(self, **kwargs):\n context = super(\n ProjectBallotListView, self).get_context_data(**kwargs)\n committees = Committee.objects.filter(project=self.object)\n ballots = []\n for committee in committees:\n if self.request.user.is_authenticated and \\\n self.request.user in committee.users.all():\n committee_ballots = Ballot.objects.filter(\n committee=committee)\n else:\n committee_ballots = Ballot.objects.filter(\n committee=committee).filter(private=False)\n if committee_ballots:\n ballots.append(committee_ballots)\n context['ballots_list'] = ballots\n return context\n\n def get_queryset(self):\n if self.request.user.is_authenticated():\n projects_qs = Project.approved_objects.all()\n else:\n projects_qs = Project.public_objects.all()\n return projects_qs\n\n\nclass ProjectListView(ProjectMixin, PaginationMixin, ListView):\n \"\"\"List all approved projects\"\"\"\n context_object_name = 'projects'\n template_name = 'project/list.html'\n paginate_by = 1000\n\n def get_context_data(self, **kwargs):\n \"\"\"Add to the view's context data\n\n :param kwargs: (django dictionary)\n :type kwargs: dict\n\n :return: context\n :rtype: dict\n\n \"\"\"\n context = super(ProjectListView, self).get_context_data(**kwargs)\n context['num_projects'] = self.get_queryset().count()\n context[\n 'PROJECT_VERSION_LIST_SIZE'] = settings.PROJECT_VERSION_LIST_SIZE\n return context\n\n def get_queryset(self):\n \"\"\"Specify the queryset\n\n Return a specific queryset based on the requesting user's status\n\n :return: If user.is_authenticated: All approved projects\n If not user.is_authenticated: All public projects\n :rtype: QuerySet\n\n \"\"\"\n if self.request.user.is_authenticated():\n projects_qs = Project.approved_objects.all()\n else:\n projects_qs = Project.public_objects.all()\n return projects_qs\n\n\nclass ProjectDetailView(ProjectMixin, DetailView):\n context_object_name = 'project'\n template_name = 'project/detail.html'\n\n def get_context_data(self, **kwargs):\n context = super(ProjectDetailView, self).get_context_data(**kwargs)\n context['projects'] = self.get_queryset()\n context['committees'] = Committee.objects.filter(project=self.object)\n page_size = settings.PROJECT_VERSION_LIST_SIZE\n context['versions'] = Version.objects.filter(\n project=self.object).order_by('-padded_version')[:page_size]\n return context\n\n def get_queryset(self):\n projects_qs = Project.approved_objects.all()\n return projects_qs\n\n def get_object(self, queryset=None):\n obj = super(ProjectDetailView, self).get_object(queryset)\n obj.request_user = self.request.user\n return obj\n\n\nclass ProjectDeleteView(LoginRequiredMixin, ProjectMixin, DeleteView):\n context_object_name = 'project'\n template_name = 'project/delete.html'\n\n def get_success_url(self):\n return reverse('project-list')\n\n def get_queryset(self):\n if not self.request.user.is_authenticated():\n raise Http404\n\n qs = Project.objects.all()\n if self.request.user.is_staff:\n return qs\n else:\n return qs.filter(creator=self.request.user)\n\n\nclass ProjectCreateView(LoginRequiredMixin, ProjectMixin, CreateView):\n context_object_name = 'project'\n template_name = 'project/create.html'\n\n def get_success_url(self):\n return reverse('pending-project-list')\n\n def 
get_form_kwargs(self):\n kwargs = super(ProjectCreateView, self).get_form_kwargs()\n kwargs.update({'user': self.request.user})\n return kwargs\n\n\nclass ProjectUpdateView(LoginRequiredMixin, ProjectMixin, UpdateView):\n context_object_name = 'project'\n template_name = 'project/update.html'\n\n def get_form_kwargs(self):\n kwargs = super(ProjectUpdateView, self).get_form_kwargs()\n kwargs.update({'user': self.request.user})\n return kwargs\n\n def get_queryset(self):\n qs = Project.objects\n if self.request.user.is_staff:\n return qs\n else:\n return qs.filter(creator=self.request.user)\n\n def get_success_url(self):\n return reverse('project-detail', kwargs={'slug': self.object.slug})\n\n\nclass PendingProjectListView(\n ProjectMixin, PaginationMixin, ListView, StaffuserRequiredMixin):\n \"\"\"List all users unapproved projects - staff users see all unapproved.\"\"\"\n context_object_name = 'projects'\n template_name = 'project/list.html'\n paginate_by = settings.PROJECT_VERSION_LIST_SIZE\n\n def get_queryset(self):\n projects_qs = Project.unapproved_objects.all()\n if self.request.user.is_staff:\n return projects_qs\n else:\n return projects_qs.filter(creator=self.request.user)\n\n def get_context_data(self, **kwargs):\n context = super(\n PendingProjectListView, self).get_context_data(**kwargs)\n context['num_projects'] = self.get_queryset().count()\n context['unapproved'] = True\n return context\n\n\nclass ApproveProjectView(StaffuserRequiredMixin, ProjectMixin, RedirectView):\n permanent = False\n query_string = True\n pattern_name = 'pending-project-list'\n\n def get_redirect_url(self, slug):\n projects_qs = Project.unapproved_objects.all()\n project = get_object_or_404(projects_qs, slug=slug)\n project.approved = True\n project.save()\n return reverse(self.pattern_name)\n", "path": "django_project/base/views/project.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"Project model used by all apps.\"\"\"\nimport os\nimport logging\nfrom django.core.urlresolvers import reverse\nfrom django.utils.text import slugify\nfrom django.conf.global_settings import MEDIA_ROOT\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom changes.models.version import Version\nfrom core.settings.contrib import STOP_WORDS\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApprovedProjectManager(models.Manager):\n \"\"\"Custom project manager that shows only approved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n ApprovedProjectManager, self).get_queryset().filter(\n approved=True)\n\n\nclass UnapprovedProjectManager(models.Manager):\n \"\"\"Custom project manager that shows only unapproved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n UnapprovedProjectManager, self).get_queryset().filter(\n approved=False)\n\n\nclass PublicProjectManager(models.Manager):\n \"\"\"Custom project manager that shows only public and approved projects.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n PublicProjectManager, self).get_queryset().filter(\n private=False).filter(approved=True)\n\n\nclass Project(models.Model):\n \"\"\"A project model e.g. 
QGIS, InaSAFE etc.\"\"\"\n name = models.CharField(\n help_text=_('Name of this project.'),\n max_length=255,\n null=False,\n blank=False,\n unique=True)\n\n description = models.CharField(\n help_text=_('A description for the project'),\n max_length=500,\n blank=True,\n null=True\n )\n\n image_file = models.ImageField(\n help_text=_('A logo image for this project. '\n 'Most browsers support dragging the image directly on to '\n 'the \"Choose File\" button above.'),\n upload_to=os.path.join(MEDIA_ROOT, 'images/projects'),\n blank=True\n )\n\n approved = models.BooleanField(\n help_text=_('Whether this project has been approved for use yet.'),\n default=False\n )\n\n private = models.BooleanField(\n help_text=_('Only visible to logged-in users?'),\n default=False\n )\n\n owner = models.ForeignKey(User)\n slug = models.SlugField(unique=True)\n objects = models.Manager()\n approved_objects = ApprovedProjectManager()\n unapproved_objects = UnapprovedProjectManager()\n public_objects = PublicProjectManager()\n\n # noinspection PyClassicStyleClass\n class Meta:\n \"\"\"Meta class for project.\"\"\"\n app_label = 'base'\n ordering = ['name']\n\n def save(self, *args, **kwargs):\n \"\"\"Overloaded save method.\n\n :param args:\n :param kwargs:\n \"\"\"\n if not self.pk:\n words = self.name.split()\n filtered_words = [t for t in words if t.lower() not in STOP_WORDS]\n new_list = unicode(' '.join(filtered_words))\n self.slug = slugify(new_list)[:50]\n super(Project, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s' % self.name\n\n def get_absolute_url(self):\n \"\"\"Return URL to project detail page\n\n :return: URL\n :rtype: str\n\n \"\"\"\n return reverse('project-detail', kwargs={'slug': self.slug})\n\n def versions(self):\n \"\"\"Get all the versions for this project.\"\"\"\n qs = Version.objects.filter(project=self).order_by('-padded_version')\n return qs\n\n def latest_versions(self):\n \"\"\"Get the latest version.\n\n How many versions returned is determined by the pagination threshold.\n\n :returns: List of versions.\n :rtype: list\"\"\"\n return self.versions()[:settings.PROJECT_VERSION_LIST_SIZE]\n\n @staticmethod\n def pagination_threshold(self):\n \"\"\"Find out how many versions to list per page.\n\n :returns: The count of items to show per page as defined in\n settings.PROJECT_VERSION_LIST_SIZE.\n :rtype: int\n \"\"\"\n return settings.PROJECT_VERSION_LIST_SIZE\n\n def pagination_threshold_exceeded(self):\n \"\"\"Check if project version count exceeds pagination threshold.\n\n :returns: Flag indicating if there are more versions than\n self.threshold.\n :rtype: bool\n \"\"\"\n if self.versions().count() >= settings.PROJECT_VERSION_LIST_SIZE:\n return True\n else:\n return False\n", "path": "django_project/base/models/project.py"}, {"content": "# coding=utf-8\n\"\"\"Views for projects.\"\"\"\n# noinspection PyUnresolvedReferences\nimport logging\nfrom django.core.urlresolvers import reverse\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic import (\n ListView,\n CreateView,\n DeleteView,\n DetailView,\n UpdateView,\n RedirectView,\n)\nfrom braces.views import LoginRequiredMixin, StaffuserRequiredMixin\nfrom pure_pagination.mixins import PaginationMixin\nfrom changes.models import Version\nfrom ..models import Project\nfrom ..forms import ProjectForm\nfrom vota.models import Committee, Ballot\nfrom django.conf import settings\n\nlogger = logging.getLogger(__name__)\n\n\nclass ProjectMixin(object):\n model = 
Project\n form_class = ProjectForm\n\n\nclass ProjectBallotListView(ProjectMixin, PaginationMixin, DetailView):\n \"\"\"List all ballots within in a project\"\"\"\n context_object_name = 'project'\n template_name = 'project/ballot-list.html'\n paginate_by = 1000\n\n def get_context_data(self, **kwargs):\n context = super(\n ProjectBallotListView, self).get_context_data(**kwargs)\n committees = Committee.objects.filter(project=self.object)\n ballots = []\n for committee in committees:\n if self.request.user.is_authenticated and \\\n self.request.user in committee.users.all():\n committee_ballots = Ballot.objects.filter(\n committee=committee)\n else:\n committee_ballots = Ballot.objects.filter(\n committee=committee).filter(private=False)\n if committee_ballots:\n ballots.append(committee_ballots)\n context['ballots_list'] = ballots\n return context\n\n def get_queryset(self):\n if self.request.user.is_authenticated():\n projects_qs = Project.approved_objects.all()\n else:\n projects_qs = Project.public_objects.all()\n return projects_qs\n\n\nclass ProjectListView(ProjectMixin, PaginationMixin, ListView):\n \"\"\"List all approved projects\"\"\"\n context_object_name = 'projects'\n template_name = 'project/list.html'\n paginate_by = 1000\n\n def get_context_data(self, **kwargs):\n \"\"\"Add to the view's context data\n\n :param kwargs: (django dictionary)\n :type kwargs: dict\n\n :return: context\n :rtype: dict\n\n \"\"\"\n context = super(ProjectListView, self).get_context_data(**kwargs)\n context['num_projects'] = self.get_queryset().count()\n context[\n 'PROJECT_VERSION_LIST_SIZE'] = settings.PROJECT_VERSION_LIST_SIZE\n return context\n\n def get_queryset(self):\n \"\"\"Specify the queryset\n\n Return a specific queryset based on the requesting user's status\n\n :return: If user.is_authenticated: All approved projects\n If not user.is_authenticated: All public projects\n :rtype: QuerySet\n\n \"\"\"\n if self.request.user.is_authenticated():\n projects_qs = Project.approved_objects.all()\n else:\n projects_qs = Project.public_objects.all()\n return projects_qs\n\n\nclass ProjectDetailView(ProjectMixin, DetailView):\n context_object_name = 'project'\n template_name = 'project/detail.html'\n\n def get_context_data(self, **kwargs):\n context = super(ProjectDetailView, self).get_context_data(**kwargs)\n context['projects'] = self.get_queryset()\n context['committees'] = Committee.objects.filter(project=self.object)\n page_size = settings.PROJECT_VERSION_LIST_SIZE\n context['versions'] = Version.objects.filter(\n project=self.object).order_by('-padded_version')[:page_size]\n return context\n\n def get_queryset(self):\n projects_qs = Project.approved_objects.all()\n return projects_qs\n\n def get_object(self, queryset=None):\n obj = super(ProjectDetailView, self).get_object(queryset)\n obj.request_user = self.request.user\n return obj\n\n\nclass ProjectDeleteView(LoginRequiredMixin, ProjectMixin, DeleteView):\n context_object_name = 'project'\n template_name = 'project/delete.html'\n\n def get_success_url(self):\n return reverse('project-list')\n\n def get_queryset(self):\n if not self.request.user.is_authenticated():\n raise Http404\n\n qs = Project.objects.all()\n if self.request.user.is_staff:\n return qs\n else:\n return qs.filter(creator=self.request.user)\n\n\nclass ProjectCreateView(LoginRequiredMixin, ProjectMixin, CreateView):\n context_object_name = 'project'\n template_name = 'project/create.html'\n\n def get_success_url(self):\n return reverse('pending-project-list')\n\n def 
get_form_kwargs(self):\n kwargs = super(ProjectCreateView, self).get_form_kwargs()\n kwargs.update({'user': self.request.user})\n return kwargs\n\n\nclass ProjectUpdateView(LoginRequiredMixin, ProjectMixin, UpdateView):\n context_object_name = 'project'\n template_name = 'project/update.html'\n\n def get_form_kwargs(self):\n kwargs = super(ProjectUpdateView, self).get_form_kwargs()\n kwargs.update({'user': self.request.user})\n return kwargs\n\n def get_queryset(self):\n qs = Project.objects\n if self.request.user.is_staff:\n return qs\n else:\n return qs.filter(creator=self.request.user)\n\n def get_success_url(self):\n return reverse('project-detail', kwargs={'slug': self.object.slug})\n\n\nclass PendingProjectListView(\n ProjectMixin, PaginationMixin, ListView, StaffuserRequiredMixin):\n \"\"\"List all users unapproved projects - staff users see all unapproved.\"\"\"\n context_object_name = 'projects'\n template_name = 'project/list.html'\n paginate_by = settings.PROJECT_VERSION_LIST_SIZE\n\n def get_queryset(self):\n projects_qs = Project.unapproved_objects.all()\n if self.request.user.is_staff:\n return projects_qs\n else:\n return projects_qs.filter(creator=self.request.user)\n\n def get_context_data(self, **kwargs):\n context = super(\n PendingProjectListView, self).get_context_data(**kwargs)\n context['num_projects'] = self.get_queryset().count()\n context['unapproved'] = True\n return context\n\n\nclass ApproveProjectView(StaffuserRequiredMixin, ProjectMixin, RedirectView):\n permanent = False\n query_string = True\n pattern_name = 'home'\n\n def get_redirect_url(self, slug):\n projects_qs = Project.unapproved_objects.all()\n project = get_object_or_404(projects_qs, slug=slug)\n project.approved = True\n project.save()\n return reverse(self.pattern_name)\n", "path": "django_project/base/views/project.py"}]} | 3,559 | 413 |
gh_patches_debug_2677 | rasdani/github-patches | git_diff | yt-project__yt-4463 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: setting a boolean parameter via the command line break runtime
### Bug report
**Bug summary**
**Code for reproduction**
```shell
$ yt config set --local yt colored_logs true && python -c "import yt"
```
**Actual outcome**
<!--The output produced by the above code, which may be a screenshot, console
output, etc.-->
```python-traceback
Traceback (most recent call last):
File "/Users/robcleme/.pyenv/versions/yt-dev/bin/yt", line 8, in <module>
sys.exit(run_main())
^^^^^^^^^^
File "/Users/robcleme/dev/yt-project/yt/yt/utilities/command_line.py", line 1615, in run_main
args.func(args)
File "/Users/robcleme/dev/yt-project/yt/yt/utilities/command_line.py", line 228, in run
self(args)
File "/Users/robcleme/dev/yt-project/yt/yt/utilities/command_line.py", line 1402, in __call__
set_config(args.section, args.option, args.value, self.config_file)
File "/Users/robcleme/dev/yt-project/yt/yt/utilities/configure.py", line 195, in set_config
CONFIG.set(section, *option_path, _cast_value_helper(value))
File "/Users/robcleme/dev/yt-project/yt/yt/utilities/configure.py", line 79, in set
self.config_root.upsert_from_list(
File "/Users/robcleme/dev/yt-project/yt/yt/utilities/configuration_tree.py", line 54, in upsert_from_list
next_node.upsert_from_list(next_keys, value, extra_data)
File "/Users/robcleme/dev/yt-project/yt/yt/utilities/configuration_tree.py", line 46, in upsert_from_list
leaf.value = value
^^^^^^^^^^
File "/Users/robcleme/dev/yt-project/yt/yt/utilities/configuration_tree.py", line 187, in value
raise TypeError(msg)
TypeError: Error when setting yt.colored_logs.
Tried to assign a value of type <class 'str'>, expected type <class 'bool'>.
This entry was last modified in file: /Users/robcleme/dev/yt-project/yt/yt.toml.
```
One way to patch this would be to special-case `true` and `false` to be interpreted as booleans when received from the command line.
--- END ISSUE ---
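As an editorial aside: the special-casing suggested above is essentially what the accepted fix does inside yt's `_cast_bool_helper` (see the patch later in this entry). A minimal standalone sketch of the idea, using a hypothetical helper name rather than yt's actual API, might look like this:

```python
def cast_cli_value(value):
    """Interpret a raw command-line string, treating common boolean spellings specially."""
    if value in ("true", "True"):
        return True
    if value in ("false", "False"):
        return False
    # Fall back to numeric casts; otherwise keep the original string.
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            pass
    return value
```

With this, `cast_cli_value("true")` returns `True` while `cast_cli_value("64")` returns the integer `64`, mirroring how `_cast_value_helper` behaves once `_cast_bool_helper` also accepts the lowercase spellings.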
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt/utilities/configure.py`
Content:
```
1 import os
2 import sys
3 import warnings
4 from pathlib import Path
5 from typing import Callable, List
6
7 import tomli_w
8 from more_itertools import always_iterable
9
10 from yt.utilities.configuration_tree import ConfigLeaf, ConfigNode
11
12 if sys.version_info >= (3, 11):
13 import tomllib
14 else:
15 import tomli as tomllib
16
17 configuration_callbacks: List[Callable[["YTConfig"], None]] = []
18
19
20 def config_dir():
21 config_root = os.environ.get(
22 "XDG_CONFIG_HOME", os.path.join(os.path.expanduser("~"), ".config")
23 )
24 conf_dir = os.path.join(config_root, "yt")
25 return conf_dir
26
27
28 class YTConfig:
29 def __init__(self, defaults=None):
30 if defaults is None:
31 defaults = {}
32 self.config_root = ConfigNode(None)
33
34 def get(self, section, *keys, callback=None):
35 node_or_leaf = self.config_root.get(section, *keys)
36 if isinstance(node_or_leaf, ConfigLeaf):
37 if callback is not None:
38 return callback(node_or_leaf)
39 return node_or_leaf.value
40 return node_or_leaf
41
42 def get_most_specific(self, section, *keys, **kwargs):
43 use_fallback = "fallback" in kwargs
44 fallback = kwargs.pop("fallback", None)
45 try:
46 return self.config_root.get_deepest_leaf(section, *keys)
47 except KeyError as err:
48 if use_fallback:
49 return fallback
50 else:
51 raise err
52
53 def update(self, new_values, metadata=None):
54 if metadata is None:
55 metadata = {}
56 self.config_root.update(new_values, metadata)
57
58 def has_section(self, section):
59 try:
60 self.config_root.get_child(section)
61 return True
62 except KeyError:
63 return False
64
65 def add_section(self, section):
66 self.config_root.add_child(section)
67
68 def remove_section(self, section):
69 if self.has_section(section):
70 self.config_root.remove_child(section)
71 return True
72 else:
73 return False
74
75 def set(self, *args, metadata=None):
76 section, *keys, value = args
77 if metadata is None:
78 metadata = {"source": "runtime"}
79 self.config_root.upsert_from_list(
80 [section] + list(keys), value, extra_data=metadata
81 )
82
83 def remove(self, *args):
84 self.config_root.pop_leaf(args)
85
86 def read(self, file_names):
87 file_names_read = []
88 for fname in always_iterable(file_names):
89 if not os.path.exists(fname):
90 continue
91 metadata = {"source": f"file: {fname}"}
92 try:
93 with open(fname, "rb") as fh:
94 data = tomllib.load(fh)
95 except tomllib.TOMLDecodeError as exc:
96 warnings.warn(
97 f"Could not load configuration file {fname} (invalid TOML: {exc})",
98 stacklevel=2,
99 )
100 else:
101 self.update(data, metadata=metadata)
102 file_names_read.append(fname)
103
104 return file_names_read
105
106 def write(self, file_handler):
107 value = self.config_root.as_dict()
108 config_as_str = tomli_w.dumps(value)
109
110 try:
111 file_path = Path(file_handler)
112 except TypeError:
113 if not hasattr(file_handler, "write"):
114 raise TypeError(
115 f"Expected a path to a file, or a writable object, got {file_handler}"
116 ) from None
117 file_handler.write(config_as_str)
118 else:
119 pdir = file_path.parent
120 if not pdir.exists():
121 warnings.warn(
122 f"{pdir!s} does not exist, creating it (recursively)", stacklevel=2
123 )
124 os.makedirs(pdir)
125 file_path.write_text(config_as_str)
126
127 @staticmethod
128 def get_global_config_file():
129 return os.path.join(config_dir(), "yt.toml")
130
131 @staticmethod
132 def get_local_config_file():
133 path = Path.cwd()
134 while path.parent is not path:
135 candidate = path.joinpath("yt.toml")
136 if candidate.is_file():
137 return os.path.abspath(candidate)
138 else:
139 path = path.parent
140
141 return os.path.join(os.path.abspath(os.curdir), "yt.toml")
142
143 def __setitem__(self, args, value):
144 section, *keys = always_iterable(args)
145 self.set(section, *keys, value, metadata=None)
146
147 def __getitem__(self, key):
148 section, *keys = always_iterable(key)
149 return self.get(section, *keys)
150
151 def __contains__(self, item):
152 return item in self.config_root
153
154 # Add support for IPython rich display
155 # see https://ipython.readthedocs.io/en/stable/config/integrating.html
156 def _repr_json_(self):
157 return self.config_root._repr_json_()
158
159
160 CONFIG = YTConfig()
161
162
163 def _cast_bool_helper(value):
164 if value == "True":
165 return True
166 elif value == "False":
167 return False
168 else:
169 raise ValueError("Cannot safely cast to bool")
170
171
172 def _expand_all(s):
173 return os.path.expandvars(os.path.expanduser(s))
174
175
176 def _cast_value_helper(value, types=(_cast_bool_helper, int, float, _expand_all)):
177 for t in types:
178 try:
179 retval = t(value)
180 return retval
181 except ValueError:
182 pass
183
184
185 def get_config(section, option):
186 *option_path, option_name = option.split(".")
187 return CONFIG.get(section, *option_path, option_name)
188
189
190 def set_config(section, option, value, config_file):
191 if not CONFIG.has_section(section):
192 CONFIG.add_section(section)
193
194 option_path = option.split(".")
195 CONFIG.set(section, *option_path, _cast_value_helper(value))
196 write_config(config_file)
197
198
199 def write_config(config_file):
200 CONFIG.write(config_file)
201
202
203 def rm_config(section, option, config_file):
204 option_path = option.split(".")
205 CONFIG.remove(section, *option_path)
206 write_config(config_file)
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yt/utilities/configure.py b/yt/utilities/configure.py
--- a/yt/utilities/configure.py
+++ b/yt/utilities/configure.py
@@ -161,9 +161,9 @@
def _cast_bool_helper(value):
- if value == "True":
+ if value in ("true", "True", True):
return True
- elif value == "False":
+ elif value in ("false", "False", False):
return False
else:
raise ValueError("Cannot safely cast to bool")
| {"golden_diff": "diff --git a/yt/utilities/configure.py b/yt/utilities/configure.py\n--- a/yt/utilities/configure.py\n+++ b/yt/utilities/configure.py\n@@ -161,9 +161,9 @@\n \n \n def _cast_bool_helper(value):\n- if value == \"True\":\n+ if value in (\"true\", \"True\", True):\n return True\n- elif value == \"False\":\n+ elif value in (\"false\", \"False\", False):\n return False\n else:\n raise ValueError(\"Cannot safely cast to bool\")\n", "issue": "BUG: setting a boolean parameter via the command line break runtime\n\r\n### Bug report\r\n\r\n**Bug summary**\r\n\r\n**Code for reproduction**\r\n\r\n```shell\r\n$ yt config set --local yt colored_logs true && python -c \"import yt\"\r\n```\r\n\r\n**Actual outcome**\r\n\r\n<!--The output produced by the above code, which may be a screenshot, console\r\noutput, etc.-->\r\n\r\n```python-traceback\r\nTraceback (most recent call last):\r\n File \"/Users/robcleme/.pyenv/versions/yt-dev/bin/yt\", line 8, in <module>\r\n sys.exit(run_main())\r\n ^^^^^^^^^^\r\n File \"/Users/robcleme/dev/yt-project/yt/yt/utilities/command_line.py\", line 1615, in run_main\r\n args.func(args)\r\n File \"/Users/robcleme/dev/yt-project/yt/yt/utilities/command_line.py\", line 228, in run\r\n self(args)\r\n File \"/Users/robcleme/dev/yt-project/yt/yt/utilities/command_line.py\", line 1402, in __call__\r\n set_config(args.section, args.option, args.value, self.config_file)\r\n File \"/Users/robcleme/dev/yt-project/yt/yt/utilities/configure.py\", line 195, in set_config\r\n CONFIG.set(section, *option_path, _cast_value_helper(value))\r\n File \"/Users/robcleme/dev/yt-project/yt/yt/utilities/configure.py\", line 79, in set\r\n self.config_root.upsert_from_list(\r\n File \"/Users/robcleme/dev/yt-project/yt/yt/utilities/configuration_tree.py\", line 54, in upsert_from_list\r\n next_node.upsert_from_list(next_keys, value, extra_data)\r\n File \"/Users/robcleme/dev/yt-project/yt/yt/utilities/configuration_tree.py\", line 46, in upsert_from_list\r\n leaf.value = value\r\n ^^^^^^^^^^\r\n File \"/Users/robcleme/dev/yt-project/yt/yt/utilities/configuration_tree.py\", line 187, in value\r\n raise TypeError(msg)\r\nTypeError: Error when setting yt.colored_logs.\r\nTried to assign a value of type <class 'str'>, expected type <class 'bool'>.\r\nThis entry was last modified in file: /Users/robcleme/dev/yt-project/yt/yt.toml.\r\n```\r\n\r\n\r\nOne way to patch this would be to special-case `true` and `false` to be interpreted as booleans when received from the command line.\n", "before_files": [{"content": "import os\nimport sys\nimport warnings\nfrom pathlib import Path\nfrom typing import Callable, List\n\nimport tomli_w\nfrom more_itertools import always_iterable\n\nfrom yt.utilities.configuration_tree import ConfigLeaf, ConfigNode\n\nif sys.version_info >= (3, 11):\n import tomllib\nelse:\n import tomli as tomllib\n\nconfiguration_callbacks: List[Callable[[\"YTConfig\"], None]] = []\n\n\ndef config_dir():\n config_root = os.environ.get(\n \"XDG_CONFIG_HOME\", os.path.join(os.path.expanduser(\"~\"), \".config\")\n )\n conf_dir = os.path.join(config_root, \"yt\")\n return conf_dir\n\n\nclass YTConfig:\n def __init__(self, defaults=None):\n if defaults is None:\n defaults = {}\n self.config_root = ConfigNode(None)\n\n def get(self, section, *keys, callback=None):\n node_or_leaf = self.config_root.get(section, *keys)\n if isinstance(node_or_leaf, ConfigLeaf):\n if callback is not None:\n return callback(node_or_leaf)\n return node_or_leaf.value\n return node_or_leaf\n\n def 
get_most_specific(self, section, *keys, **kwargs):\n use_fallback = \"fallback\" in kwargs\n fallback = kwargs.pop(\"fallback\", None)\n try:\n return self.config_root.get_deepest_leaf(section, *keys)\n except KeyError as err:\n if use_fallback:\n return fallback\n else:\n raise err\n\n def update(self, new_values, metadata=None):\n if metadata is None:\n metadata = {}\n self.config_root.update(new_values, metadata)\n\n def has_section(self, section):\n try:\n self.config_root.get_child(section)\n return True\n except KeyError:\n return False\n\n def add_section(self, section):\n self.config_root.add_child(section)\n\n def remove_section(self, section):\n if self.has_section(section):\n self.config_root.remove_child(section)\n return True\n else:\n return False\n\n def set(self, *args, metadata=None):\n section, *keys, value = args\n if metadata is None:\n metadata = {\"source\": \"runtime\"}\n self.config_root.upsert_from_list(\n [section] + list(keys), value, extra_data=metadata\n )\n\n def remove(self, *args):\n self.config_root.pop_leaf(args)\n\n def read(self, file_names):\n file_names_read = []\n for fname in always_iterable(file_names):\n if not os.path.exists(fname):\n continue\n metadata = {\"source\": f\"file: {fname}\"}\n try:\n with open(fname, \"rb\") as fh:\n data = tomllib.load(fh)\n except tomllib.TOMLDecodeError as exc:\n warnings.warn(\n f\"Could not load configuration file {fname} (invalid TOML: {exc})\",\n stacklevel=2,\n )\n else:\n self.update(data, metadata=metadata)\n file_names_read.append(fname)\n\n return file_names_read\n\n def write(self, file_handler):\n value = self.config_root.as_dict()\n config_as_str = tomli_w.dumps(value)\n\n try:\n file_path = Path(file_handler)\n except TypeError:\n if not hasattr(file_handler, \"write\"):\n raise TypeError(\n f\"Expected a path to a file, or a writable object, got {file_handler}\"\n ) from None\n file_handler.write(config_as_str)\n else:\n pdir = file_path.parent\n if not pdir.exists():\n warnings.warn(\n f\"{pdir!s} does not exist, creating it (recursively)\", stacklevel=2\n )\n os.makedirs(pdir)\n file_path.write_text(config_as_str)\n\n @staticmethod\n def get_global_config_file():\n return os.path.join(config_dir(), \"yt.toml\")\n\n @staticmethod\n def get_local_config_file():\n path = Path.cwd()\n while path.parent is not path:\n candidate = path.joinpath(\"yt.toml\")\n if candidate.is_file():\n return os.path.abspath(candidate)\n else:\n path = path.parent\n\n return os.path.join(os.path.abspath(os.curdir), \"yt.toml\")\n\n def __setitem__(self, args, value):\n section, *keys = always_iterable(args)\n self.set(section, *keys, value, metadata=None)\n\n def __getitem__(self, key):\n section, *keys = always_iterable(key)\n return self.get(section, *keys)\n\n def __contains__(self, item):\n return item in self.config_root\n\n # Add support for IPython rich display\n # see https://ipython.readthedocs.io/en/stable/config/integrating.html\n def _repr_json_(self):\n return self.config_root._repr_json_()\n\n\nCONFIG = YTConfig()\n\n\ndef _cast_bool_helper(value):\n if value == \"True\":\n return True\n elif value == \"False\":\n return False\n else:\n raise ValueError(\"Cannot safely cast to bool\")\n\n\ndef _expand_all(s):\n return os.path.expandvars(os.path.expanduser(s))\n\n\ndef _cast_value_helper(value, types=(_cast_bool_helper, int, float, _expand_all)):\n for t in types:\n try:\n retval = t(value)\n return retval\n except ValueError:\n pass\n\n\ndef get_config(section, option):\n *option_path, option_name = 
option.split(\".\")\n return CONFIG.get(section, *option_path, option_name)\n\n\ndef set_config(section, option, value, config_file):\n if not CONFIG.has_section(section):\n CONFIG.add_section(section)\n\n option_path = option.split(\".\")\n CONFIG.set(section, *option_path, _cast_value_helper(value))\n write_config(config_file)\n\n\ndef write_config(config_file):\n CONFIG.write(config_file)\n\n\ndef rm_config(section, option, config_file):\n option_path = option.split(\".\")\n CONFIG.remove(section, *option_path)\n write_config(config_file)\n", "path": "yt/utilities/configure.py"}], "after_files": [{"content": "import os\nimport sys\nimport warnings\nfrom pathlib import Path\nfrom typing import Callable, List\n\nimport tomli_w\nfrom more_itertools import always_iterable\n\nfrom yt.utilities.configuration_tree import ConfigLeaf, ConfigNode\n\nif sys.version_info >= (3, 11):\n import tomllib\nelse:\n import tomli as tomllib\n\nconfiguration_callbacks: List[Callable[[\"YTConfig\"], None]] = []\n\n\ndef config_dir():\n config_root = os.environ.get(\n \"XDG_CONFIG_HOME\", os.path.join(os.path.expanduser(\"~\"), \".config\")\n )\n conf_dir = os.path.join(config_root, \"yt\")\n return conf_dir\n\n\nclass YTConfig:\n def __init__(self, defaults=None):\n if defaults is None:\n defaults = {}\n self.config_root = ConfigNode(None)\n\n def get(self, section, *keys, callback=None):\n node_or_leaf = self.config_root.get(section, *keys)\n if isinstance(node_or_leaf, ConfigLeaf):\n if callback is not None:\n return callback(node_or_leaf)\n return node_or_leaf.value\n return node_or_leaf\n\n def get_most_specific(self, section, *keys, **kwargs):\n use_fallback = \"fallback\" in kwargs\n fallback = kwargs.pop(\"fallback\", None)\n try:\n return self.config_root.get_deepest_leaf(section, *keys)\n except KeyError as err:\n if use_fallback:\n return fallback\n else:\n raise err\n\n def update(self, new_values, metadata=None):\n if metadata is None:\n metadata = {}\n self.config_root.update(new_values, metadata)\n\n def has_section(self, section):\n try:\n self.config_root.get_child(section)\n return True\n except KeyError:\n return False\n\n def add_section(self, section):\n self.config_root.add_child(section)\n\n def remove_section(self, section):\n if self.has_section(section):\n self.config_root.remove_child(section)\n return True\n else:\n return False\n\n def set(self, *args, metadata=None):\n section, *keys, value = args\n if metadata is None:\n metadata = {\"source\": \"runtime\"}\n self.config_root.upsert_from_list(\n [section] + list(keys), value, extra_data=metadata\n )\n\n def remove(self, *args):\n self.config_root.pop_leaf(args)\n\n def read(self, file_names):\n file_names_read = []\n for fname in always_iterable(file_names):\n if not os.path.exists(fname):\n continue\n metadata = {\"source\": f\"file: {fname}\"}\n try:\n with open(fname, \"rb\") as fh:\n data = tomllib.load(fh)\n except tomllib.TOMLDecodeError as exc:\n warnings.warn(\n f\"Could not load configuration file {fname} (invalid TOML: {exc})\",\n stacklevel=2,\n )\n else:\n self.update(data, metadata=metadata)\n file_names_read.append(fname)\n\n return file_names_read\n\n def write(self, file_handler):\n value = self.config_root.as_dict()\n config_as_str = tomli_w.dumps(value)\n\n try:\n file_path = Path(file_handler)\n except TypeError:\n if not hasattr(file_handler, \"write\"):\n raise TypeError(\n f\"Expected a path to a file, or a writable object, got {file_handler}\"\n ) from None\n file_handler.write(config_as_str)\n else:\n pdir 
= file_path.parent\n if not pdir.exists():\n warnings.warn(\n f\"{pdir!s} does not exist, creating it (recursively)\", stacklevel=2\n )\n os.makedirs(pdir)\n file_path.write_text(config_as_str)\n\n @staticmethod\n def get_global_config_file():\n return os.path.join(config_dir(), \"yt.toml\")\n\n @staticmethod\n def get_local_config_file():\n path = Path.cwd()\n while path.parent is not path:\n candidate = path.joinpath(\"yt.toml\")\n if candidate.is_file():\n return os.path.abspath(candidate)\n else:\n path = path.parent\n\n return os.path.join(os.path.abspath(os.curdir), \"yt.toml\")\n\n def __setitem__(self, args, value):\n section, *keys = always_iterable(args)\n self.set(section, *keys, value, metadata=None)\n\n def __getitem__(self, key):\n section, *keys = always_iterable(key)\n return self.get(section, *keys)\n\n def __contains__(self, item):\n return item in self.config_root\n\n # Add support for IPython rich display\n # see https://ipython.readthedocs.io/en/stable/config/integrating.html\n def _repr_json_(self):\n return self.config_root._repr_json_()\n\n\nCONFIG = YTConfig()\n\n\ndef _cast_bool_helper(value):\n if value in (\"true\", \"True\", True):\n return True\n elif value in (\"false\", \"False\", False):\n return False\n else:\n raise ValueError(\"Cannot safely cast to bool\")\n\n\ndef _expand_all(s):\n return os.path.expandvars(os.path.expanduser(s))\n\n\ndef _cast_value_helper(value, types=(_cast_bool_helper, int, float, _expand_all)):\n for t in types:\n try:\n retval = t(value)\n return retval\n except ValueError:\n pass\n\n\ndef get_config(section, option):\n *option_path, option_name = option.split(\".\")\n return CONFIG.get(section, *option_path, option_name)\n\n\ndef set_config(section, option, value, config_file):\n if not CONFIG.has_section(section):\n CONFIG.add_section(section)\n\n option_path = option.split(\".\")\n CONFIG.set(section, *option_path, _cast_value_helper(value))\n write_config(config_file)\n\n\ndef write_config(config_file):\n CONFIG.write(config_file)\n\n\ndef rm_config(section, option, config_file):\n option_path = option.split(\".\")\n CONFIG.remove(section, *option_path)\n write_config(config_file)\n", "path": "yt/utilities/configure.py"}]} | 2,655 | 125 |
gh_patches_debug_15204 | rasdani/github-patches | git_diff | certbot__certbot-4496 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MisconfigurationError: nginx restart failed ... build server_names_hash
```
...
include servers-enabled/*;
}
2017-01-28 14:14:20,669:DEBUG:certbot.error_handler:Encountered exception:
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/certbot/auth_handler.py", line 112, in _solve_challenges
resp = self.auth.perform(self.achalls)
File "/usr/lib/python2.7/site-packages/certbot_nginx/configurator.py", line 818, in perform
self.restart()
File "/usr/lib/python2.7/site-packages/certbot_nginx/configurator.py", line 603, in restart
nginx_restart(self.conf('ctl'), self.nginx_conf)
File "/usr/lib/python2.7/site-packages/certbot_nginx/configurator.py", line 865, in nginx_restart
"nginx restart failed:\n%s\n%s" % (stdout, stderr))
MisconfigurationError: nginx restart failed:
2017/01/28 08:14:20 [emerg] 4619#4619: could not build server_names_hash, you should increase server_names_hash_bucket_
size: 64
2017-01-28 14:14:20,671:DEBUG:certbot.error_handler:Calling registered functions
2017-01-28 14:14:20,671:INFO:certbot.auth_handler:Cleaning up challenges
2017-01-28 14:14:25,210:DEBUG:certbot.main:Exiting abnormally:
Traceback (most recent call last):
File "/usr/bin/certbot", line 11, in <module>
load_entry_point('certbot==0.10.1', 'console_scripts', 'certbot')()
File "/usr/lib/python2.7/site-packages/certbot/main.py", line 849, in main
return config.func(config, plugins)
File "/usr/lib/python2.7/site-packages/certbot/main.py", line 626, in obtain_cert
action, _ = _auth_from_available(le_client, config, domains, certname, lineage)
File "/usr/lib/python2.7/site-packages/certbot/main.py", line 107, in _auth_from_available
lineage = le_client.obtain_and_enroll_certificate(domains, certname)
File "/usr/lib/python2.7/site-packages/certbot/client.py", line 291, in obtain_and_enroll_certificate
certr, chain, key, _ = self.obtain_certificate(domains)
File "/usr/lib/python2.7/site-packages/certbot/client.py", line 262, in obtain_certificate
self.config.allow_subset_of_names)
File "/usr/lib/python2.7/site-packages/certbot/auth_handler.py", line 74, in get_authorizations
resp = self._solve_challenges()
File "/usr/lib/python2.7/site-packages/certbot/auth_handler.py", line 112, in _solve_challenges
resp = self.auth.perform(self.achalls)
File "/usr/lib/python2.7/site-packages/certbot_nginx/configurator.py", line 818, in perform
self.restart()
File "/usr/lib/python2.7/site-packages/certbot_nginx/configurator.py", line 603, in restart
nginx_restart(self.conf('ctl'), self.nginx_conf)
File "/usr/lib/python2.7/site-packages/certbot_nginx/configurator.py", line 865, in nginx_restart
"nginx restart failed:\n%s\n%s" % (stdout, stderr))
MisconfigurationError: nginx restart failed:
2017/01/28 08:14:20 [emerg] 4619#4619: could not build server_names_hash, you should increase server_names_hash_bucket_size: 64
```
Yet both "nginx -t" and "nginx -s reload" succeed.
EDIT: I should add that I had experienced the "server_names_hash" error before and found that I missed a semi-colon at the end of my "server_name" line.
But in this case, I DID grep recursively through my nginx dir for "server_name" and made sure everything had a ';'.
--- END ISSUE ---
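A note on why `nginx -t` can pass while certbot's restart fails: the temporary challenge configuration that certbot includes adds extra `server` blocks whose `server_name` is a long generated `.acme.invalid` name (see `_make_server_block` below), which can overflow a 64-byte server-name hash bucket even though the user's own configuration is valid on its own. The patch further down handles the case where the config already sets a smaller `server_names_hash_bucket_size`, raising it instead of skipping the insert. As a rough, hypothetical illustration of that idea (not certbot's actual code), operating on an `http` block body represented as plain `(name, value)` pairs:

```python
def ensure_bucket_size(http_body, minimum=128):
    """Ensure server_names_hash_bucket_size is present and at least `minimum`."""
    for index, (name, value) in enumerate(http_body):
        if name == "server_names_hash_bucket_size":
            if int(value) < minimum:
                # Raise an existing, smaller value (e.g. 64) rather than leaving it alone.
                http_body[index] = (name, str(minimum))
            return http_body
    # No directive present at all: add one at the top of the http block.
    http_body.insert(0, ("server_names_hash_bucket_size", str(minimum)))
    return http_body
```

For example, `ensure_bucket_size([("server_names_hash_bucket_size", "64")])` returns `[("server_names_hash_bucket_size", "128")]`.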
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `certbot-nginx/certbot_nginx/tls_sni_01.py`
Content:
```
1 """A class that performs TLS-SNI-01 challenges for Nginx"""
2
3 import logging
4 import os
5
6 import six
7
8 from certbot import errors
9 from certbot.plugins import common
10
11 from certbot_nginx import obj
12 from certbot_nginx import nginxparser
13
14
15 logger = logging.getLogger(__name__)
16
17
18 class NginxTlsSni01(common.TLSSNI01):
19 """TLS-SNI-01 authenticator for Nginx
20
21 :ivar configurator: NginxConfigurator object
22 :type configurator: :class:`~nginx.configurator.NginxConfigurator`
23
24 :ivar list achalls: Annotated
25 class:`~certbot.achallenges.KeyAuthorizationAnnotatedChallenge`
26 challenges
27
28 :param list indices: Meant to hold indices of challenges in a
29 larger array. NginxTlsSni01 is capable of solving many challenges
30 at once which causes an indexing issue within NginxConfigurator
31 who must return all responses in order. Imagine NginxConfigurator
32 maintaining state about where all of the http-01 Challenges,
33 TLS-SNI-01 Challenges belong in the response array. This is an
34 optional utility.
35
36 :param str challenge_conf: location of the challenge config file
37
38 """
39
40 def perform(self):
41 """Perform a challenge on Nginx.
42
43 :returns: list of :class:`certbot.acme.challenges.TLSSNI01Response`
44 :rtype: list
45
46 """
47 if not self.achalls:
48 return []
49
50 addresses = []
51 default_addr = "{0} ssl".format(
52 self.configurator.config.tls_sni_01_port)
53
54 for achall in self.achalls:
55 vhost = self.configurator.choose_vhost(achall.domain)
56 if vhost is None:
57 logger.error(
58 "No nginx vhost exists with server_name matching: %s. "
59 "Please specify server_names in the Nginx config.",
60 achall.domain)
61 return None
62
63 if vhost.addrs:
64 addresses.append(list(vhost.addrs))
65 else:
66 addresses.append([obj.Addr.fromstring(default_addr)])
67
68 # Create challenge certs
69 responses = [self._setup_challenge_cert(x) for x in self.achalls]
70
71 # Set up the configuration
72 self._mod_config(addresses)
73
74 # Save reversible changes
75 self.configurator.save("SNI Challenge", True)
76
77 return responses
78
79 def _mod_config(self, ll_addrs):
80 """Modifies Nginx config to include challenge server blocks.
81
82 :param list ll_addrs: list of lists of
83 :class:`certbot_nginx.obj.Addr` to apply
84
85 :raises .MisconfigurationError:
86 Unable to find a suitable HTTP block in which to include
87 authenticator hosts.
88
89 """
90 # Add the 'include' statement for the challenges if it doesn't exist
91 # already in the main config
92 included = False
93 include_directive = ['\n', 'include', ' ', self.challenge_conf]
94 root = self.configurator.parser.loc["root"]
95
96 bucket_directive = ['\n', 'server_names_hash_bucket_size', ' ', '128']
97
98 main = self.configurator.parser.parsed[root]
99 for line in main:
100 if line[0] == ['http']:
101 body = line[1]
102 found_bucket = False
103 for inner_line in body:
104 if inner_line[0] == bucket_directive[1]:
105 found_bucket = True
106 if not found_bucket:
107 body.insert(0, bucket_directive)
108 if include_directive not in body:
109 body.insert(0, include_directive)
110 included = True
111 break
112 if not included:
113 raise errors.MisconfigurationError(
114 'LetsEncrypt could not find an HTTP block to include '
115 'TLS-SNI-01 challenges in %s.' % root)
116
117 config = [self._make_server_block(pair[0], pair[1])
118 for pair in six.moves.zip(self.achalls, ll_addrs)]
119 config = nginxparser.UnspacedList(config)
120
121 self.configurator.reverter.register_file_creation(
122 True, self.challenge_conf)
123
124 with open(self.challenge_conf, "w") as new_conf:
125 nginxparser.dump(config, new_conf)
126
127 def _make_server_block(self, achall, addrs):
128 """Creates a server block for a challenge.
129
130 :param achall: Annotated TLS-SNI-01 challenge
131 :type achall:
132 :class:`certbot.achallenges.KeyAuthorizationAnnotatedChallenge`
133
134 :param list addrs: addresses of challenged domain
135 :class:`list` of type :class:`~nginx.obj.Addr`
136
137 :returns: server block for the challenge host
138 :rtype: list
139
140 """
141 document_root = os.path.join(
142 self.configurator.config.work_dir, "tls_sni_01_page")
143
144 block = [['listen', ' ', addr.to_string(include_default=False)] for addr in addrs]
145
146 block.extend([['server_name', ' ',
147 achall.response(achall.account_key).z_domain.decode('ascii')],
148 # access and error logs necessary for
149 # integration testing (non-root)
150 ['access_log', ' ', os.path.join(
151 self.configurator.config.work_dir, 'access.log')],
152 ['error_log', ' ', os.path.join(
153 self.configurator.config.work_dir, 'error.log')],
154 ['ssl_certificate', ' ', self.get_cert_path(achall)],
155 ['ssl_certificate_key', ' ', self.get_key_path(achall)],
156 [['location', ' ', '/'], [['root', ' ', document_root]]]] +
157 self.configurator.parser.loc["ssl_options"])
158 return [['server'], block]
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/certbot-nginx/certbot_nginx/tls_sni_01.py b/certbot-nginx/certbot_nginx/tls_sni_01.py
--- a/certbot-nginx/certbot_nginx/tls_sni_01.py
+++ b/certbot-nginx/certbot_nginx/tls_sni_01.py
@@ -100,9 +100,13 @@
if line[0] == ['http']:
body = line[1]
found_bucket = False
+ posn = 0
for inner_line in body:
if inner_line[0] == bucket_directive[1]:
+ if int(inner_line[1]) < int(bucket_directive[3]):
+ body[posn] = bucket_directive
found_bucket = True
+ posn += 1
if not found_bucket:
body.insert(0, bucket_directive)
if include_directive not in body:
| {"golden_diff": "diff --git a/certbot-nginx/certbot_nginx/tls_sni_01.py b/certbot-nginx/certbot_nginx/tls_sni_01.py\n--- a/certbot-nginx/certbot_nginx/tls_sni_01.py\n+++ b/certbot-nginx/certbot_nginx/tls_sni_01.py\n@@ -100,9 +100,13 @@\n if line[0] == ['http']:\n body = line[1]\n found_bucket = False\n+ posn = 0\n for inner_line in body:\n if inner_line[0] == bucket_directive[1]:\n+ if int(inner_line[1]) < int(bucket_directive[3]):\n+ body[posn] = bucket_directive\n found_bucket = True\n+ posn += 1\n if not found_bucket:\n body.insert(0, bucket_directive)\n if include_directive not in body:\n", "issue": "MisconfigurationError: nginx restart failed ... build server_names_hash\n```\r\n...\r\n include servers-enabled/*;\r\n}\r\n\r\n2017-01-28 14:14:20,669:DEBUG:certbot.error_handler:Encountered exception:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python2.7/site-packages/certbot/auth_handler.py\", line 112, in _solve_challenges\r\n resp = self.auth.perform(self.achalls)\r\n File \"/usr/lib/python2.7/site-packages/certbot_nginx/configurator.py\", line 818, in perform\r\n self.restart()\r\n File \"/usr/lib/python2.7/site-packages/certbot_nginx/configurator.py\", line 603, in restart\r\n nginx_restart(self.conf('ctl'), self.nginx_conf)\r\n File \"/usr/lib/python2.7/site-packages/certbot_nginx/configurator.py\", line 865, in nginx_restart\r\n \"nginx restart failed:\\n%s\\n%s\" % (stdout, stderr))\r\nMisconfigurationError: nginx restart failed:\r\n\r\n2017/01/28 08:14:20 [emerg] 4619#4619: could not build server_names_hash, you should increase server_names_hash_bucket_\r\nsize: 64\r\n\r\n\r\n2017-01-28 14:14:20,671:DEBUG:certbot.error_handler:Calling registered functions\r\n2017-01-28 14:14:20,671:INFO:certbot.auth_handler:Cleaning up challenges\r\n2017-01-28 14:14:25,210:DEBUG:certbot.main:Exiting abnormally:\r\nTraceback (most recent call last):\r\n File \"/usr/bin/certbot\", line 11, in <module>\r\n load_entry_point('certbot==0.10.1', 'console_scripts', 'certbot')()\r\n File \"/usr/lib/python2.7/site-packages/certbot/main.py\", line 849, in main\r\n return config.func(config, plugins)\r\n File \"/usr/lib/python2.7/site-packages/certbot/main.py\", line 626, in obtain_cert\r\n action, _ = _auth_from_available(le_client, config, domains, certname, lineage)\r\n File \"/usr/lib/python2.7/site-packages/certbot/main.py\", line 107, in _auth_from_available\r\n lineage = le_client.obtain_and_enroll_certificate(domains, certname)\r\n File \"/usr/lib/python2.7/site-packages/certbot/client.py\", line 291, in obtain_and_enroll_certificate\r\n certr, chain, key, _ = self.obtain_certificate(domains)\r\n File \"/usr/lib/python2.7/site-packages/certbot/client.py\", line 262, in obtain_certificate\r\n self.config.allow_subset_of_names)\r\n File \"/usr/lib/python2.7/site-packages/certbot/auth_handler.py\", line 74, in get_authorizations\r\n resp = self._solve_challenges()\r\n File \"/usr/lib/python2.7/site-packages/certbot/auth_handler.py\", line 112, in _solve_challenges\r\n resp = self.auth.perform(self.achalls)\r\n File \"/usr/lib/python2.7/site-packages/certbot_nginx/configurator.py\", line 818, in perform\r\n self.restart()\r\n File \"/usr/lib/python2.7/site-packages/certbot_nginx/configurator.py\", line 603, in restart\r\n nginx_restart(self.conf('ctl'), self.nginx_conf)\r\n File \"/usr/lib/python2.7/site-packages/certbot_nginx/configurator.py\", line 865, in nginx_restart\r\n \"nginx restart failed:\\n%s\\n%s\" % (stdout, stderr))\r\nMisconfigurationError: nginx restart 
failed:\r\n\r\n2017/01/28 08:14:20 [emerg] 4619#4619: could not build server_names_hash, you should increase server_names_hash_bucket_size: 64\r\n```\r\nyet both \"nginx -t\" and \"nginx -s reload\" succeed.\r\n\r\nEDIT: I should add that I had experienced the \"server_names_hash\" before and found that I missed a semi-colon at the end of my \"server_name\" line.\r\nBut in this case, I DID grep recursively through my nginx dir for \"server_name\" and made sure everything had a ';'\n", "before_files": [{"content": "\"\"\"A class that performs TLS-SNI-01 challenges for Nginx\"\"\"\n\nimport logging\nimport os\n\nimport six\n\nfrom certbot import errors\nfrom certbot.plugins import common\n\nfrom certbot_nginx import obj\nfrom certbot_nginx import nginxparser\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass NginxTlsSni01(common.TLSSNI01):\n \"\"\"TLS-SNI-01 authenticator for Nginx\n\n :ivar configurator: NginxConfigurator object\n :type configurator: :class:`~nginx.configurator.NginxConfigurator`\n\n :ivar list achalls: Annotated\n class:`~certbot.achallenges.KeyAuthorizationAnnotatedChallenge`\n challenges\n\n :param list indices: Meant to hold indices of challenges in a\n larger array. NginxTlsSni01 is capable of solving many challenges\n at once which causes an indexing issue within NginxConfigurator\n who must return all responses in order. Imagine NginxConfigurator\n maintaining state about where all of the http-01 Challenges,\n TLS-SNI-01 Challenges belong in the response array. This is an\n optional utility.\n\n :param str challenge_conf: location of the challenge config file\n\n \"\"\"\n\n def perform(self):\n \"\"\"Perform a challenge on Nginx.\n\n :returns: list of :class:`certbot.acme.challenges.TLSSNI01Response`\n :rtype: list\n\n \"\"\"\n if not self.achalls:\n return []\n\n addresses = []\n default_addr = \"{0} ssl\".format(\n self.configurator.config.tls_sni_01_port)\n\n for achall in self.achalls:\n vhost = self.configurator.choose_vhost(achall.domain)\n if vhost is None:\n logger.error(\n \"No nginx vhost exists with server_name matching: %s. 
\"\n \"Please specify server_names in the Nginx config.\",\n achall.domain)\n return None\n\n if vhost.addrs:\n addresses.append(list(vhost.addrs))\n else:\n addresses.append([obj.Addr.fromstring(default_addr)])\n\n # Create challenge certs\n responses = [self._setup_challenge_cert(x) for x in self.achalls]\n\n # Set up the configuration\n self._mod_config(addresses)\n\n # Save reversible changes\n self.configurator.save(\"SNI Challenge\", True)\n\n return responses\n\n def _mod_config(self, ll_addrs):\n \"\"\"Modifies Nginx config to include challenge server blocks.\n\n :param list ll_addrs: list of lists of\n :class:`certbot_nginx.obj.Addr` to apply\n\n :raises .MisconfigurationError:\n Unable to find a suitable HTTP block in which to include\n authenticator hosts.\n\n \"\"\"\n # Add the 'include' statement for the challenges if it doesn't exist\n # already in the main config\n included = False\n include_directive = ['\\n', 'include', ' ', self.challenge_conf]\n root = self.configurator.parser.loc[\"root\"]\n\n bucket_directive = ['\\n', 'server_names_hash_bucket_size', ' ', '128']\n\n main = self.configurator.parser.parsed[root]\n for line in main:\n if line[0] == ['http']:\n body = line[1]\n found_bucket = False\n for inner_line in body:\n if inner_line[0] == bucket_directive[1]:\n found_bucket = True\n if not found_bucket:\n body.insert(0, bucket_directive)\n if include_directive not in body:\n body.insert(0, include_directive)\n included = True\n break\n if not included:\n raise errors.MisconfigurationError(\n 'LetsEncrypt could not find an HTTP block to include '\n 'TLS-SNI-01 challenges in %s.' % root)\n\n config = [self._make_server_block(pair[0], pair[1])\n for pair in six.moves.zip(self.achalls, ll_addrs)]\n config = nginxparser.UnspacedList(config)\n\n self.configurator.reverter.register_file_creation(\n True, self.challenge_conf)\n\n with open(self.challenge_conf, \"w\") as new_conf:\n nginxparser.dump(config, new_conf)\n\n def _make_server_block(self, achall, addrs):\n \"\"\"Creates a server block for a challenge.\n\n :param achall: Annotated TLS-SNI-01 challenge\n :type achall:\n :class:`certbot.achallenges.KeyAuthorizationAnnotatedChallenge`\n\n :param list addrs: addresses of challenged domain\n :class:`list` of type :class:`~nginx.obj.Addr`\n\n :returns: server block for the challenge host\n :rtype: list\n\n \"\"\"\n document_root = os.path.join(\n self.configurator.config.work_dir, \"tls_sni_01_page\")\n\n block = [['listen', ' ', addr.to_string(include_default=False)] for addr in addrs]\n\n block.extend([['server_name', ' ',\n achall.response(achall.account_key).z_domain.decode('ascii')],\n # access and error logs necessary for\n # integration testing (non-root)\n ['access_log', ' ', os.path.join(\n self.configurator.config.work_dir, 'access.log')],\n ['error_log', ' ', os.path.join(\n self.configurator.config.work_dir, 'error.log')],\n ['ssl_certificate', ' ', self.get_cert_path(achall)],\n ['ssl_certificate_key', ' ', self.get_key_path(achall)],\n [['location', ' ', '/'], [['root', ' ', document_root]]]] +\n self.configurator.parser.loc[\"ssl_options\"])\n return [['server'], block]\n", "path": "certbot-nginx/certbot_nginx/tls_sni_01.py"}], "after_files": [{"content": "\"\"\"A class that performs TLS-SNI-01 challenges for Nginx\"\"\"\n\nimport logging\nimport os\n\nimport six\n\nfrom certbot import errors\nfrom certbot.plugins import common\n\nfrom certbot_nginx import obj\nfrom certbot_nginx import nginxparser\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass 
NginxTlsSni01(common.TLSSNI01):\n \"\"\"TLS-SNI-01 authenticator for Nginx\n\n :ivar configurator: NginxConfigurator object\n :type configurator: :class:`~nginx.configurator.NginxConfigurator`\n\n :ivar list achalls: Annotated\n class:`~certbot.achallenges.KeyAuthorizationAnnotatedChallenge`\n challenges\n\n :param list indices: Meant to hold indices of challenges in a\n larger array. NginxTlsSni01 is capable of solving many challenges\n at once which causes an indexing issue within NginxConfigurator\n who must return all responses in order. Imagine NginxConfigurator\n maintaining state about where all of the http-01 Challenges,\n TLS-SNI-01 Challenges belong in the response array. This is an\n optional utility.\n\n :param str challenge_conf: location of the challenge config file\n\n \"\"\"\n\n def perform(self):\n \"\"\"Perform a challenge on Nginx.\n\n :returns: list of :class:`certbot.acme.challenges.TLSSNI01Response`\n :rtype: list\n\n \"\"\"\n if not self.achalls:\n return []\n\n addresses = []\n default_addr = \"{0} ssl\".format(\n self.configurator.config.tls_sni_01_port)\n\n for achall in self.achalls:\n vhost = self.configurator.choose_vhost(achall.domain)\n if vhost is None:\n logger.error(\n \"No nginx vhost exists with server_name matching: %s. \"\n \"Please specify server_names in the Nginx config.\",\n achall.domain)\n return None\n\n if vhost.addrs:\n addresses.append(list(vhost.addrs))\n else:\n addresses.append([obj.Addr.fromstring(default_addr)])\n\n # Create challenge certs\n responses = [self._setup_challenge_cert(x) for x in self.achalls]\n\n # Set up the configuration\n self._mod_config(addresses)\n\n # Save reversible changes\n self.configurator.save(\"SNI Challenge\", True)\n\n return responses\n\n def _mod_config(self, ll_addrs):\n \"\"\"Modifies Nginx config to include challenge server blocks.\n\n :param list ll_addrs: list of lists of\n :class:`certbot_nginx.obj.Addr` to apply\n\n :raises .MisconfigurationError:\n Unable to find a suitable HTTP block in which to include\n authenticator hosts.\n\n \"\"\"\n # Add the 'include' statement for the challenges if it doesn't exist\n # already in the main config\n included = False\n include_directive = ['\\n', 'include', ' ', self.challenge_conf]\n root = self.configurator.parser.loc[\"root\"]\n\n bucket_directive = ['\\n', 'server_names_hash_bucket_size', ' ', '128']\n\n main = self.configurator.parser.parsed[root]\n for line in main:\n if line[0] == ['http']:\n body = line[1]\n found_bucket = False\n posn = 0\n for inner_line in body:\n if inner_line[0] == bucket_directive[1]:\n if int(inner_line[1]) < int(bucket_directive[3]):\n body[posn] = bucket_directive\n found_bucket = True\n posn += 1\n if not found_bucket:\n body.insert(0, bucket_directive)\n if include_directive not in body:\n body.insert(0, include_directive)\n included = True\n break\n if not included:\n raise errors.MisconfigurationError(\n 'LetsEncrypt could not find an HTTP block to include '\n 'TLS-SNI-01 challenges in %s.' 
% root)\n\n config = [self._make_server_block(pair[0], pair[1])\n for pair in six.moves.zip(self.achalls, ll_addrs)]\n config = nginxparser.UnspacedList(config)\n\n self.configurator.reverter.register_file_creation(\n True, self.challenge_conf)\n\n with open(self.challenge_conf, \"w\") as new_conf:\n nginxparser.dump(config, new_conf)\n\n def _make_server_block(self, achall, addrs):\n \"\"\"Creates a server block for a challenge.\n\n :param achall: Annotated TLS-SNI-01 challenge\n :type achall:\n :class:`certbot.achallenges.KeyAuthorizationAnnotatedChallenge`\n\n :param list addrs: addresses of challenged domain\n :class:`list` of type :class:`~nginx.obj.Addr`\n\n :returns: server block for the challenge host\n :rtype: list\n\n \"\"\"\n document_root = os.path.join(\n self.configurator.config.work_dir, \"tls_sni_01_page\")\n\n block = [['listen', ' ', addr.to_string(include_default=False)] for addr in addrs]\n\n block.extend([['server_name', ' ',\n achall.response(achall.account_key).z_domain.decode('ascii')],\n # access and error logs necessary for\n # integration testing (non-root)\n ['access_log', ' ', os.path.join(\n self.configurator.config.work_dir, 'access.log')],\n ['error_log', ' ', os.path.join(\n self.configurator.config.work_dir, 'error.log')],\n ['ssl_certificate', ' ', self.get_cert_path(achall)],\n ['ssl_certificate_key', ' ', self.get_key_path(achall)],\n [['location', ' ', '/'], [['root', ' ', document_root]]]] +\n self.configurator.parser.loc[\"ssl_options\"])\n return [['server'], block]\n", "path": "certbot-nginx/certbot_nginx/tls_sni_01.py"}]} | 2,912 | 218 |
gh_patches_debug_16842 | rasdani/github-patches | git_diff | gwastro__pycbc-2462 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
memory leak in mkl fft usage.
https://github.com/gwastro/pycbc/blob/master/pycbc/fft/mkl.py#L112
It should have been fixed by the above, but this seems not to be the case for mkl. Not sure what is causing the leak at this point, but it does not occur with FFTW.
This was observed in pycbc live testing.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pycbc/fft/mkl.py`
Content:
```
1 import ctypes, pycbc.libutils
2 from pycbc.types import zeros
3 from .core import _BaseFFT, _BaseIFFT
4 import pycbc.scheme as _scheme
5
6 lib = pycbc.libutils.get_ctypes_library('mkl_rt', [])
7 if lib is None:
8 raise ImportError
9
10 #MKL constants taken from mkl_df_defines.h
11 DFTI_FORWARD_DOMAIN = 0
12 DFTI_DIMENSION = 1
13 DFTI_LENGTHS = 2
14 DFTI_PRECISION = 3
15 DFTI_FORWARD_SCALE = 4
16 DFTI_BACKWARD_SCALE = 5
17 DFTI_NUMBER_OF_TRANSFORMS = 7
18 DFTI_COMPLEX_STORAGE = 8
19 DFTI_REAL_STORAGE = 9
20 DFTI_CONJUGATE_EVEN_STORAGE = 10
21 DFTI_PLACEMENT = 11
22 DFTI_INPUT_STRIDES = 12
23 DFTI_OUTPUT_STRIDES = 13
24 DFTI_INPUT_DISTANCE = 14
25 DFTI_OUTPUT_DISTANCE = 15
26 DFTI_WORKSPACE = 17
27 DFTI_ORDERING = 18
28 DFTI_TRANSPOSE = 19
29 DFTI_DESCRIPTOR_NAME = 20
30 DFTI_PACKED_FORMAT = 21
31 DFTI_COMMIT_STATUS = 22
32 DFTI_VERSION = 23
33 DFTI_NUMBER_OF_USER_THREADS = 26
34 DFTI_THREAD_LIMIT = 27
35 DFTI_COMMITTED = 30
36 DFTI_UNCOMMITTED = 31
37 DFTI_COMPLEX = 32
38 DFTI_REAL = 33
39 DFTI_SINGLE = 35
40 DFTI_DOUBLE = 36
41 DFTI_COMPLEX_COMPLEX = 39
42 DFTI_COMPLEX_REAL = 40
43 DFTI_REAL_COMPLEX = 41
44 DFTI_REAL_REAL = 42
45 DFTI_INPLACE = 43
46 DFTI_NOT_INPLACE = 44
47 DFTI_ORDERED = 48
48 DFTI_BACKWARD_SCRAMBLED = 49
49 DFTI_ALLOW = 51
50 DFTI_AVOID = 52
51 DFTI_NONE = 53
52 DFTI_CCS_FORMAT = 54
53 DFTI_PACK_FORMAT = 55
54 DFTI_PERM_FORMAT = 56
55 DFTI_CCE_FORMAT = 57
56
57 mkl_prec = {'single': DFTI_SINGLE,
58 'double': DFTI_DOUBLE,
59 }
60
61 mkl_domain = {'real': {'complex': DFTI_REAL},
62 'complex': {'real': DFTI_REAL,
63 'complex':DFTI_COMPLEX,
64 }
65 }
66
67 def check_status(status):
68 """ Check the status of a mkl functions and raise a python exeption if
69 there is an error.
70 """
71 if status:
72 msg = lib.DftiErrorMessage(status)
73 msg = ctypes.c_char_p(msg).value
74 raise RuntimeError(msg)
75
76 def create_descriptor(size, idtype, odtype, inplace):
77 invec = zeros(1, dtype=idtype)
78 outvec = zeros(1, dtype=odtype)
79
80 desc = ctypes.c_void_p(1)
81 f = lib.DftiCreateDescriptor
82 f.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]
83
84 prec = mkl_prec[invec.precision]
85 domain = mkl_domain[str(invec.kind)][str(outvec.kind)]
86
87 status = f(ctypes.byref(desc), prec, domain, 1, size)
88 if inplace:
89 lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_INPLACE)
90 else:
91 lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_NOT_INPLACE)
92 lib.DftiSetValue(desc, DFTI_CONJUGATE_EVEN_STORAGE, DFTI_CCS_FORMAT)
93 lib.DftiCommitDescriptor(desc)
94 check_status(status)
95 return desc
96
97 def fft(invec, outvec, prec, itype, otype):
98 descr = create_descriptor(max(len(invec), len(outvec)), invec.dtype,
99 outvec.dtype, (invec.ptr == outvec.ptr))
100 f = lib.DftiComputeForward
101 f.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
102 status = f(descr, invec.ptr, outvec.ptr)
103 lib.DftiFreeDescriptor(descr)
104 check_status(status)
105
106 def ifft(invec, outvec, prec, itype, otype):
107 descr = create_descriptor(max(len(invec), len(outvec)), invec.dtype,
108 outvec.dtype, (invec.ptr == outvec.ptr))
109 f = lib.DftiComputeBackward
110 f.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
111 status = f(descr, invec.ptr, outvec.ptr)
112 lib.DftiFreeDescriptor(descr)
113 check_status(status)
114
115 # Class based API
116
117 _create_descr = lib.DftiCreateDescriptor
118 _create_descr.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]
119
120 def _get_desc(fftobj):
121 desc = ctypes.c_void_p(1)
122 prec = mkl_prec[fftobj.invec.precision]
123 domain = mkl_domain[str(fftobj.invec.kind)][str(fftobj.outvec.kind)]
124 status = _create_descr(ctypes.byref(desc), prec, domain, 1, fftobj.size)
125 check_status(status)
126 # Now we set various things depending on exactly what kind of transform we're
127 # performing.
128
129 lib.DftiSetValue.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
130
131 # The following only matters if the transform is C2R or R2C
132 status = lib.DftiSetValue(desc, DFTI_CONJUGATE_EVEN_STORAGE,
133 DFTI_COMPLEX_COMPLEX)
134 check_status(status)
135
136 # In-place or out-of-place:
137 if fftobj.inplace:
138 status = lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_INPLACE)
139 else:
140 status = lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_NOT_INPLACE)
141 check_status(status)
142
143 # If we are performing a batched transform:
144 if fftobj.nbatch > 1:
145 status = lib.DftiSetValue(desc, DFTI_NUMBER_OF_TRANSFORMS, fftobj.nbatch)
146 check_status(status)
147 status = lib.DftiSetValue(desc, DFTI_INPUT_DISTANCE, fftobj.idist)
148 check_status(status)
149 status = lib.DftiSetValue(desc, DFTI_OUTPUT_DISTANCE, fftobj.odist)
150 check_status(status)
151
152 # Knowing how many threads will be allowed may help select a better transform
153 nthreads = _scheme.mgr.state.num_threads
154 status = lib.DftiSetValue(desc, DFTI_THREAD_LIMIT, nthreads)
155 check_status(status)
156
157 # Now everything's ready, so commit
158 status = lib.DftiCommitDescriptor(desc)
159 check_status(status)
160
161 return desc
162
163 class FFT(_BaseFFT):
164 def __init__(self, invec, outvec, nbatch=1, size=None):
165 super(FFT, self).__init__(invec, outvec, nbatch, size)
166 self.iptr = self.invec.ptr
167 self.optr = self.outvec.ptr
168 self._efunc = lib.DftiComputeForward
169 self._efunc.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
170 self.desc = _get_desc(self)
171
172 def execute(self):
173 self._efunc(self.desc, self.iptr, self.optr)
174
175 class IFFT(_BaseIFFT):
176 def __init__(self, invec, outvec, nbatch=1, size=None):
177 super(IFFT, self).__init__(invec, outvec, nbatch, size)
178 self.iptr = self.invec.ptr
179 self.optr = self.outvec.ptr
180 self._efunc = lib.DftiComputeBackward
181 self._efunc.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
182 self.desc = _get_desc(self)
183
184 def execute(self):
185 self._efunc(self.desc, self.iptr, self.optr)
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pycbc/fft/mkl.py b/pycbc/fft/mkl.py
--- a/pycbc/fft/mkl.py
+++ b/pycbc/fft/mkl.py
@@ -100,7 +100,7 @@
f = lib.DftiComputeForward
f.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
status = f(descr, invec.ptr, outvec.ptr)
- lib.DftiFreeDescriptor(descr)
+ lib.DftiFreeDescriptor(ctypes.byref(descr))
check_status(status)
def ifft(invec, outvec, prec, itype, otype):
@@ -109,7 +109,7 @@
f = lib.DftiComputeBackward
f.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
status = f(descr, invec.ptr, outvec.ptr)
- lib.DftiFreeDescriptor(descr)
+ lib.DftiFreeDescriptor(ctypes.byref(descr))
check_status(status)
# Class based API
| {"golden_diff": "diff --git a/pycbc/fft/mkl.py b/pycbc/fft/mkl.py\n--- a/pycbc/fft/mkl.py\n+++ b/pycbc/fft/mkl.py\n@@ -100,7 +100,7 @@\n f = lib.DftiComputeForward\n f.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]\n status = f(descr, invec.ptr, outvec.ptr)\n- lib.DftiFreeDescriptor(descr)\n+ lib.DftiFreeDescriptor(ctypes.byref(descr))\n check_status(status)\n \n def ifft(invec, outvec, prec, itype, otype):\n@@ -109,7 +109,7 @@\n f = lib.DftiComputeBackward\n f.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]\n status = f(descr, invec.ptr, outvec.ptr)\n- lib.DftiFreeDescriptor(descr)\n+ lib.DftiFreeDescriptor(ctypes.byref(descr))\n check_status(status)\n \n # Class based API\n", "issue": "memory leak in mkl fft useage. \nhttps://github.com/gwastro/pycbc/blob/master/pycbc/fft/mkl.py#L112\r\n\r\nIt should have been fixed by the above, but this seems not to be the case for mkl. Not sure what is causing the leak at this point, but it does not occur with FFTW.\r\n\r\nThis was observed in pycbc live testing.\n", "before_files": [{"content": "import ctypes, pycbc.libutils\nfrom pycbc.types import zeros\nfrom .core import _BaseFFT, _BaseIFFT\nimport pycbc.scheme as _scheme\n\nlib = pycbc.libutils.get_ctypes_library('mkl_rt', [])\nif lib is None:\n raise ImportError\n\n#MKL constants taken from mkl_df_defines.h\nDFTI_FORWARD_DOMAIN = 0\nDFTI_DIMENSION = 1\nDFTI_LENGTHS = 2\nDFTI_PRECISION = 3\nDFTI_FORWARD_SCALE = 4\nDFTI_BACKWARD_SCALE = 5\nDFTI_NUMBER_OF_TRANSFORMS = 7\nDFTI_COMPLEX_STORAGE = 8\nDFTI_REAL_STORAGE = 9\nDFTI_CONJUGATE_EVEN_STORAGE = 10\nDFTI_PLACEMENT = 11\nDFTI_INPUT_STRIDES = 12\nDFTI_OUTPUT_STRIDES = 13\nDFTI_INPUT_DISTANCE = 14\nDFTI_OUTPUT_DISTANCE = 15\nDFTI_WORKSPACE = 17\nDFTI_ORDERING = 18\nDFTI_TRANSPOSE = 19\nDFTI_DESCRIPTOR_NAME = 20\nDFTI_PACKED_FORMAT = 21\nDFTI_COMMIT_STATUS = 22\nDFTI_VERSION = 23\nDFTI_NUMBER_OF_USER_THREADS = 26\nDFTI_THREAD_LIMIT = 27\nDFTI_COMMITTED = 30\nDFTI_UNCOMMITTED = 31\nDFTI_COMPLEX = 32\nDFTI_REAL = 33\nDFTI_SINGLE = 35\nDFTI_DOUBLE = 36\nDFTI_COMPLEX_COMPLEX = 39\nDFTI_COMPLEX_REAL = 40\nDFTI_REAL_COMPLEX = 41\nDFTI_REAL_REAL = 42\nDFTI_INPLACE = 43\nDFTI_NOT_INPLACE = 44\nDFTI_ORDERED = 48\nDFTI_BACKWARD_SCRAMBLED = 49\nDFTI_ALLOW = 51\nDFTI_AVOID = 52\nDFTI_NONE = 53\nDFTI_CCS_FORMAT = 54\nDFTI_PACK_FORMAT = 55\nDFTI_PERM_FORMAT = 56\nDFTI_CCE_FORMAT = 57\n\nmkl_prec = {'single': DFTI_SINGLE,\n 'double': DFTI_DOUBLE,\n }\n\nmkl_domain = {'real': {'complex': DFTI_REAL},\n 'complex': {'real': DFTI_REAL,\n 'complex':DFTI_COMPLEX,\n }\n }\n\ndef check_status(status):\n \"\"\" Check the status of a mkl functions and raise a python exeption if\n there is an error.\n \"\"\"\n if status:\n msg = lib.DftiErrorMessage(status)\n msg = ctypes.c_char_p(msg).value\n raise RuntimeError(msg)\n\ndef create_descriptor(size, idtype, odtype, inplace):\n invec = zeros(1, dtype=idtype)\n outvec = zeros(1, dtype=odtype)\n\n desc = ctypes.c_void_p(1)\n f = lib.DftiCreateDescriptor\n f.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]\n\n prec = mkl_prec[invec.precision]\n domain = mkl_domain[str(invec.kind)][str(outvec.kind)]\n\n status = f(ctypes.byref(desc), prec, domain, 1, size)\n if inplace:\n lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_INPLACE)\n else:\n lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_NOT_INPLACE)\n lib.DftiSetValue(desc, DFTI_CONJUGATE_EVEN_STORAGE, DFTI_CCS_FORMAT)\n lib.DftiCommitDescriptor(desc)\n check_status(status)\n return desc\n\ndef fft(invec, outvec, prec, itype, otype):\n 
descr = create_descriptor(max(len(invec), len(outvec)), invec.dtype,\n outvec.dtype, (invec.ptr == outvec.ptr))\n f = lib.DftiComputeForward\n f.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]\n status = f(descr, invec.ptr, outvec.ptr)\n lib.DftiFreeDescriptor(descr)\n check_status(status)\n\ndef ifft(invec, outvec, prec, itype, otype):\n descr = create_descriptor(max(len(invec), len(outvec)), invec.dtype,\n outvec.dtype, (invec.ptr == outvec.ptr))\n f = lib.DftiComputeBackward\n f.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]\n status = f(descr, invec.ptr, outvec.ptr)\n lib.DftiFreeDescriptor(descr)\n check_status(status)\n\n# Class based API\n\n_create_descr = lib.DftiCreateDescriptor\n_create_descr.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]\n\ndef _get_desc(fftobj):\n desc = ctypes.c_void_p(1)\n prec = mkl_prec[fftobj.invec.precision]\n domain = mkl_domain[str(fftobj.invec.kind)][str(fftobj.outvec.kind)]\n status = _create_descr(ctypes.byref(desc), prec, domain, 1, fftobj.size)\n check_status(status)\n # Now we set various things depending on exactly what kind of transform we're\n # performing.\n\n lib.DftiSetValue.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]\n\n # The following only matters if the transform is C2R or R2C\n status = lib.DftiSetValue(desc, DFTI_CONJUGATE_EVEN_STORAGE,\n DFTI_COMPLEX_COMPLEX)\n check_status(status)\n\n # In-place or out-of-place:\n if fftobj.inplace:\n status = lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_INPLACE)\n else:\n status = lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_NOT_INPLACE)\n check_status(status)\n\n # If we are performing a batched transform:\n if fftobj.nbatch > 1:\n status = lib.DftiSetValue(desc, DFTI_NUMBER_OF_TRANSFORMS, fftobj.nbatch)\n check_status(status)\n status = lib.DftiSetValue(desc, DFTI_INPUT_DISTANCE, fftobj.idist)\n check_status(status)\n status = lib.DftiSetValue(desc, DFTI_OUTPUT_DISTANCE, fftobj.odist)\n check_status(status)\n\n # Knowing how many threads will be allowed may help select a better transform\n nthreads = _scheme.mgr.state.num_threads\n status = lib.DftiSetValue(desc, DFTI_THREAD_LIMIT, nthreads)\n check_status(status)\n\n # Now everything's ready, so commit\n status = lib.DftiCommitDescriptor(desc)\n check_status(status)\n\n return desc\n\nclass FFT(_BaseFFT):\n def __init__(self, invec, outvec, nbatch=1, size=None):\n super(FFT, self).__init__(invec, outvec, nbatch, size)\n self.iptr = self.invec.ptr\n self.optr = self.outvec.ptr\n self._efunc = lib.DftiComputeForward\n self._efunc.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]\n self.desc = _get_desc(self)\n\n def execute(self):\n self._efunc(self.desc, self.iptr, self.optr)\n\nclass IFFT(_BaseIFFT):\n def __init__(self, invec, outvec, nbatch=1, size=None):\n super(IFFT, self).__init__(invec, outvec, nbatch, size)\n self.iptr = self.invec.ptr\n self.optr = self.outvec.ptr\n self._efunc = lib.DftiComputeBackward\n self._efunc.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]\n self.desc = _get_desc(self)\n\n def execute(self):\n self._efunc(self.desc, self.iptr, self.optr)\n", "path": "pycbc/fft/mkl.py"}], "after_files": [{"content": "import ctypes, pycbc.libutils\nfrom pycbc.types import zeros\nfrom .core import _BaseFFT, _BaseIFFT\nimport pycbc.scheme as _scheme\n\nlib = pycbc.libutils.get_ctypes_library('mkl_rt', [])\nif lib is None:\n raise ImportError\n\n#MKL constants taken from mkl_df_defines.h\nDFTI_FORWARD_DOMAIN = 0\nDFTI_DIMENSION = 
1\nDFTI_LENGTHS = 2\nDFTI_PRECISION = 3\nDFTI_FORWARD_SCALE = 4\nDFTI_BACKWARD_SCALE = 5\nDFTI_NUMBER_OF_TRANSFORMS = 7\nDFTI_COMPLEX_STORAGE = 8\nDFTI_REAL_STORAGE = 9\nDFTI_CONJUGATE_EVEN_STORAGE = 10\nDFTI_PLACEMENT = 11\nDFTI_INPUT_STRIDES = 12\nDFTI_OUTPUT_STRIDES = 13\nDFTI_INPUT_DISTANCE = 14\nDFTI_OUTPUT_DISTANCE = 15\nDFTI_WORKSPACE = 17\nDFTI_ORDERING = 18\nDFTI_TRANSPOSE = 19\nDFTI_DESCRIPTOR_NAME = 20\nDFTI_PACKED_FORMAT = 21\nDFTI_COMMIT_STATUS = 22\nDFTI_VERSION = 23\nDFTI_NUMBER_OF_USER_THREADS = 26\nDFTI_THREAD_LIMIT = 27\nDFTI_COMMITTED = 30\nDFTI_UNCOMMITTED = 31\nDFTI_COMPLEX = 32\nDFTI_REAL = 33\nDFTI_SINGLE = 35\nDFTI_DOUBLE = 36\nDFTI_COMPLEX_COMPLEX = 39\nDFTI_COMPLEX_REAL = 40\nDFTI_REAL_COMPLEX = 41\nDFTI_REAL_REAL = 42\nDFTI_INPLACE = 43\nDFTI_NOT_INPLACE = 44\nDFTI_ORDERED = 48\nDFTI_BACKWARD_SCRAMBLED = 49\nDFTI_ALLOW = 51\nDFTI_AVOID = 52\nDFTI_NONE = 53\nDFTI_CCS_FORMAT = 54\nDFTI_PACK_FORMAT = 55\nDFTI_PERM_FORMAT = 56\nDFTI_CCE_FORMAT = 57\n\nmkl_prec = {'single': DFTI_SINGLE,\n 'double': DFTI_DOUBLE,\n }\n\nmkl_domain = {'real': {'complex': DFTI_REAL},\n 'complex': {'real': DFTI_REAL,\n 'complex':DFTI_COMPLEX,\n }\n }\n\ndef check_status(status):\n \"\"\" Check the status of a mkl functions and raise a python exeption if\n there is an error.\n \"\"\"\n if status:\n msg = lib.DftiErrorMessage(status)\n msg = ctypes.c_char_p(msg).value\n raise RuntimeError(msg)\n\ndef create_descriptor(size, idtype, odtype, inplace):\n invec = zeros(1, dtype=idtype)\n outvec = zeros(1, dtype=odtype)\n\n desc = ctypes.c_void_p(1)\n f = lib.DftiCreateDescriptor\n f.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]\n\n prec = mkl_prec[invec.precision]\n domain = mkl_domain[str(invec.kind)][str(outvec.kind)]\n\n status = f(ctypes.byref(desc), prec, domain, 1, size)\n if inplace:\n lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_INPLACE)\n else:\n lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_NOT_INPLACE)\n lib.DftiSetValue(desc, DFTI_CONJUGATE_EVEN_STORAGE, DFTI_CCS_FORMAT)\n lib.DftiCommitDescriptor(desc)\n check_status(status)\n return desc\n\ndef fft(invec, outvec, prec, itype, otype):\n descr = create_descriptor(max(len(invec), len(outvec)), invec.dtype,\n outvec.dtype, (invec.ptr == outvec.ptr))\n f = lib.DftiComputeForward\n f.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]\n status = f(descr, invec.ptr, outvec.ptr)\n lib.DftiFreeDescriptor(ctypes.byref(descr))\n check_status(status)\n\ndef ifft(invec, outvec, prec, itype, otype):\n descr = create_descriptor(max(len(invec), len(outvec)), invec.dtype,\n outvec.dtype, (invec.ptr == outvec.ptr))\n f = lib.DftiComputeBackward\n f.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]\n status = f(descr, invec.ptr, outvec.ptr)\n lib.DftiFreeDescriptor(ctypes.byref(descr))\n check_status(status)\n\n# Class based API\n\n_create_descr = lib.DftiCreateDescriptor\n_create_descr.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]\n\ndef _get_desc(fftobj):\n desc = ctypes.c_void_p(1)\n prec = mkl_prec[fftobj.invec.precision]\n domain = mkl_domain[str(fftobj.invec.kind)][str(fftobj.outvec.kind)]\n status = _create_descr(ctypes.byref(desc), prec, domain, 1, fftobj.size)\n check_status(status)\n # Now we set various things depending on exactly what kind of transform we're\n # performing.\n\n lib.DftiSetValue.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]\n\n # The following only matters if the transform is C2R or R2C\n status = lib.DftiSetValue(desc, 
DFTI_CONJUGATE_EVEN_STORAGE,\n DFTI_COMPLEX_COMPLEX)\n check_status(status)\n\n # In-place or out-of-place:\n if fftobj.inplace:\n status = lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_INPLACE)\n else:\n status = lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_NOT_INPLACE)\n check_status(status)\n\n # If we are performing a batched transform:\n if fftobj.nbatch > 1:\n status = lib.DftiSetValue(desc, DFTI_NUMBER_OF_TRANSFORMS, fftobj.nbatch)\n check_status(status)\n status = lib.DftiSetValue(desc, DFTI_INPUT_DISTANCE, fftobj.idist)\n check_status(status)\n status = lib.DftiSetValue(desc, DFTI_OUTPUT_DISTANCE, fftobj.odist)\n check_status(status)\n\n # Knowing how many threads will be allowed may help select a better transform\n nthreads = _scheme.mgr.state.num_threads\n status = lib.DftiSetValue(desc, DFTI_THREAD_LIMIT, nthreads)\n check_status(status)\n\n # Now everything's ready, so commit\n status = lib.DftiCommitDescriptor(desc)\n check_status(status)\n\n return desc\n\nclass FFT(_BaseFFT):\n def __init__(self, invec, outvec, nbatch=1, size=None):\n super(FFT, self).__init__(invec, outvec, nbatch, size)\n self.iptr = self.invec.ptr\n self.optr = self.outvec.ptr\n self._efunc = lib.DftiComputeForward\n self._efunc.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]\n self.desc = _get_desc(self)\n\n def execute(self):\n self._efunc(self.desc, self.iptr, self.optr)\n\nclass IFFT(_BaseIFFT):\n def __init__(self, invec, outvec, nbatch=1, size=None):\n super(IFFT, self).__init__(invec, outvec, nbatch, size)\n self.iptr = self.invec.ptr\n self.optr = self.outvec.ptr\n self._efunc = lib.DftiComputeBackward\n self._efunc.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]\n self.desc = _get_desc(self)\n\n def execute(self):\n self._efunc(self.desc, self.iptr, self.optr)\n", "path": "pycbc/fft/mkl.py"}]} | 2,664 | 249 |
gh_patches_debug_24590 | rasdani/github-patches | git_diff | apluslms__a-plus-1216 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Display personal deadline extensions on a student’s points page
A student’s points page in A+ just shows the official deadlines for each module. Personalized deadlines are not shown. This has repeatedly caused confusion when a student believes there’s something wrong with their personalized deadline. It would be better if the student was able to see the actual deadlines on their points page (and possibly elsewhere?).
This would also eliminate some confusion in communications between members of course staff, as staff members would also easily see the student’s DL extensions.
I’m not sure off the top of my head how best to display this on the page.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `course/templatetags/course.py`
Content:
```
1 from typing import Any, Dict, List, Union
2
3 from django import template
4 from django.db import models
5 from django.utils.safestring import mark_safe
6 from django.utils.translation import get_language
7
8 from exercise.cache.content import CachedContent
9 from course.models import CourseInstance, UserTagging
10 from lib.localization_syntax import pick_localized
11 from userprofile.models import UserProfile
12 from ..cache.menu import CachedTopMenu
13
14
15 register = template.Library()
16
17
18 def _prepare_topmenu(context):
19 if 'topmenu' not in context:
20 request = context.get('request', None)
21 context['topmenu'] = CachedTopMenu(request.user if request else None)
22 return context['topmenu']
23
24
25 @register.inclusion_tag("course/_course_dropdown_menu.html", takes_context=True)
26 def course_menu(context):
27 menu = _prepare_topmenu(context)
28 return { "instances": menu.courses() }
29
30
31 @register.inclusion_tag('course/_group_select.html', takes_context=True)
32 def group_select(context):
33 instance = context.get('instance', None)
34 if not instance:
35 return { 'groups': [] }
36 menu = _prepare_topmenu(context)
37 groups, selected = menu.groups(instance)
38 return {
39 'instance': instance,
40 'groups': groups,
41 'selected': selected,
42 }
43
44
45 @register.filter
46 def escape_slashes(string):
47 return str(string).replace('/', '\/') # noqa: W605
48
49 @register.filter
50 def parse_localization(entry):
51 return pick_localized(entry, get_language())
52
53
54 @register.filter
55 def list_unselected(langs):
56 listed = list(filter(lambda x: x and x != get_language(), langs.split("|")))
57 return listed
58
59
60 @register.filter
61 def is_visible(entry):
62 return CachedContent.is_visible(entry)
63
64
65 @register.filter
66 def is_visible_to(entry, user):
67 return entry.is_visible_to(user)
68
69
70 @register.filter
71 def is_listed(entry):
72 return CachedContent.is_listed(entry)
73
74
75 @register.filter
76 def len_listed(entries):
77 return len([e for e in entries if CachedContent.is_listed(e)])
78
79
80 @register.filter
81 def is_in_maintenance(entry):
82 return CachedContent.is_in_maintenance(entry)
83
84
85 @register.filter
86 def exercises_open(entry, now):
87 return entry['opening_time'] <= now <= entry['closing_time']
88
89
90 @register.filter
91 def exercises_submittable(entry, now):
92 if entry['late_allowed']:
93 return entry['opening_time'] <= now <= entry['late_time']
94 return entry['opening_time'] <= now <= entry['closing_time']
95
96
97 @register.filter
98 def has_opened(entry, now):
99 return entry['opening_time'] <= now
100
101
102 @register.filter
103 def url(model_object, name=None):
104 if name:
105 return model_object.get_url(name)
106 return model_object.get_display_url()
107
108
109 @register.filter
110 def names(profiles):
111 return ", ".join(p.user.get_full_name() for p in profiles)
112
113
114 @register.inclusion_tag('course/_avatars.html')
115 def avatars(profiles):
116 return { 'profiles': profiles }
117
118
119 @register.inclusion_tag("course/_profiles.html")
120 def profiles(
121 profiles: Union[UserProfile, List[UserProfile], models.QuerySet[UserProfile]],
122 instance: CourseInstance,
123 is_teacher: bool
124 ) -> Dict[str, Any]:
125 if isinstance(profiles, UserProfile):
126 profiles = [profiles]
127 elif isinstance(profiles, models.QuerySet):
128 # Avoid re-fetching the queryset
129 profiles = list(profiles)
130 return {
131 'instance': instance,
132 'profiles': profiles,
133 'is_teacher': is_teacher,
134 }
135
136
137 @register.simple_tag
138 def tags(profile, instance):
139 tags = UserTagging.objects.get_all(profile, instance)
140 return mark_safe(' '.join(tag.html_label for tag in tags))
141
142
143 @register.filter
144 def enrollment_audience(enrollment_audience_val):
145 # convert enrollment audience Enum value to the string description
146 return CourseInstance.ENROLLMENT_AUDIENCE[enrollment_audience_val]
147
148
149 @register.filter
150 def view_content_to(view_content_to_val):
151 # convert "view content to" Enum value to the string description
152 return CourseInstance.VIEW_ACCESS[view_content_to_val]
153
154
155 @register.filter
156 def is_banned_student(profile, course_instance):
157 return course_instance.is_banned(profile.user)
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/course/templatetags/course.py b/course/templatetags/course.py
--- a/course/templatetags/course.py
+++ b/course/templatetags/course.py
@@ -1,4 +1,5 @@
from typing import Any, Dict, List, Union
+from copy import deepcopy
from django import template
from django.db import models
@@ -22,6 +23,11 @@
return context['topmenu']
+def _deadline_extended_exercise_open(entry, now):
+ personal_deadline = entry.get('personal_deadline')
+ return personal_deadline is not None and entry['opening_time'] <= now <= personal_deadline
+
+
@register.inclusion_tag("course/_course_dropdown_menu.html", takes_context=True)
def course_menu(context):
menu = _prepare_topmenu(context)
@@ -87,6 +93,17 @@
return entry['opening_time'] <= now <= entry['closing_time']
[email protected]
+def deadline_extended_exercise_open(entry, now):
+ return _deadline_extended_exercise_open(entry, now)
+
+
[email protected]
+def deadline_extended_exercises_open(entry, now):
+ entries = deepcopy(entry['flatted'])
+ return any(_deadline_extended_exercise_open(entry, now) for entry in entries)
+
+
@register.filter
def exercises_submittable(entry, now):
if entry['late_allowed']:
| {"golden_diff": "diff --git a/course/templatetags/course.py b/course/templatetags/course.py\n--- a/course/templatetags/course.py\n+++ b/course/templatetags/course.py\n@@ -1,4 +1,5 @@\n from typing import Any, Dict, List, Union\n+from copy import deepcopy\n \n from django import template\n from django.db import models\n@@ -22,6 +23,11 @@\n return context['topmenu']\n \n \n+def _deadline_extended_exercise_open(entry, now):\n+ personal_deadline = entry.get('personal_deadline')\n+ return personal_deadline is not None and entry['opening_time'] <= now <= personal_deadline\n+\n+\n @register.inclusion_tag(\"course/_course_dropdown_menu.html\", takes_context=True)\n def course_menu(context):\n menu = _prepare_topmenu(context)\n@@ -87,6 +93,17 @@\n return entry['opening_time'] <= now <= entry['closing_time']\n \n \[email protected]\n+def deadline_extended_exercise_open(entry, now):\n+ return _deadline_extended_exercise_open(entry, now)\n+\n+\[email protected]\n+def deadline_extended_exercises_open(entry, now):\n+ entries = deepcopy(entry['flatted'])\n+ return any(_deadline_extended_exercise_open(entry, now) for entry in entries)\n+\n+\n @register.filter\n def exercises_submittable(entry, now):\n if entry['late_allowed']:\n", "issue": "Display personal deadline extensions on a student\u2019s points page\nA student\u2019s points page in A+ just shows the official deadlines for each module. Personalized deadlines are not shown. This has repeatedly caused confusion when a student believes there\u2019s something wrong with their personalized deadline. It would be better if the student was able to see the actual deadlines on their points page (and possibly elsewhere?). \r\n\r\nThis would also eliminate some confusion in communications between members of course staff, as staff members would also easily see the student\u2019s DL extensions. 
\r\n\r\nI\u2019m not sure off the top of my head how best to display this on the page.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from typing import Any, Dict, List, Union\n\nfrom django import template\nfrom django.db import models\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import get_language\n\nfrom exercise.cache.content import CachedContent\nfrom course.models import CourseInstance, UserTagging\nfrom lib.localization_syntax import pick_localized\nfrom userprofile.models import UserProfile\nfrom ..cache.menu import CachedTopMenu\n\n\nregister = template.Library()\n\n\ndef _prepare_topmenu(context):\n if 'topmenu' not in context:\n request = context.get('request', None)\n context['topmenu'] = CachedTopMenu(request.user if request else None)\n return context['topmenu']\n\n\[email protected]_tag(\"course/_course_dropdown_menu.html\", takes_context=True)\ndef course_menu(context):\n menu = _prepare_topmenu(context)\n return { \"instances\": menu.courses() }\n\n\[email protected]_tag('course/_group_select.html', takes_context=True)\ndef group_select(context):\n instance = context.get('instance', None)\n if not instance:\n return { 'groups': [] }\n menu = _prepare_topmenu(context)\n groups, selected = menu.groups(instance)\n return {\n 'instance': instance,\n 'groups': groups,\n 'selected': selected,\n }\n\n\[email protected]\ndef escape_slashes(string):\n return str(string).replace('/', '\\/') # noqa: W605\n\[email protected]\ndef parse_localization(entry):\n return pick_localized(entry, get_language())\n\n\[email protected]\ndef list_unselected(langs):\n listed = list(filter(lambda x: x and x != get_language(), langs.split(\"|\")))\n return listed\n\n\[email protected]\ndef is_visible(entry):\n return CachedContent.is_visible(entry)\n\n\[email protected]\ndef is_visible_to(entry, user):\n return entry.is_visible_to(user)\n\n\[email protected]\ndef is_listed(entry):\n return CachedContent.is_listed(entry)\n\n\[email protected]\ndef len_listed(entries):\n return len([e for e in entries if CachedContent.is_listed(e)])\n\n\[email protected]\ndef is_in_maintenance(entry):\n return CachedContent.is_in_maintenance(entry)\n\n\[email protected]\ndef exercises_open(entry, now):\n return entry['opening_time'] <= now <= entry['closing_time']\n\n\[email protected]\ndef exercises_submittable(entry, now):\n if entry['late_allowed']:\n return entry['opening_time'] <= now <= entry['late_time']\n return entry['opening_time'] <= now <= entry['closing_time']\n\n\[email protected]\ndef has_opened(entry, now):\n return entry['opening_time'] <= now\n\n\[email protected]\ndef url(model_object, name=None):\n if name:\n return model_object.get_url(name)\n return model_object.get_display_url()\n\n\[email protected]\ndef names(profiles):\n return \", \".join(p.user.get_full_name() for p in profiles)\n\n\[email protected]_tag('course/_avatars.html')\ndef avatars(profiles):\n return { 'profiles': profiles }\n\n\[email protected]_tag(\"course/_profiles.html\")\ndef profiles(\n profiles: Union[UserProfile, List[UserProfile], models.QuerySet[UserProfile]],\n instance: CourseInstance,\n is_teacher: bool\n ) -> Dict[str, Any]:\n if isinstance(profiles, UserProfile):\n profiles = [profiles]\n elif isinstance(profiles, models.QuerySet):\n # Avoid re-fetching the queryset\n profiles = list(profiles)\n return {\n 'instance': instance,\n 'profiles': profiles,\n 'is_teacher': is_teacher,\n }\n\n\[email protected]_tag\ndef tags(profile, instance):\n tags = 
UserTagging.objects.get_all(profile, instance)\n return mark_safe(' '.join(tag.html_label for tag in tags))\n\n\[email protected]\ndef enrollment_audience(enrollment_audience_val):\n # convert enrollment audience Enum value to the string description\n return CourseInstance.ENROLLMENT_AUDIENCE[enrollment_audience_val]\n\n\[email protected]\ndef view_content_to(view_content_to_val):\n # convert \"view content to\" Enum value to the string description\n return CourseInstance.VIEW_ACCESS[view_content_to_val]\n\n\[email protected]\ndef is_banned_student(profile, course_instance):\n return course_instance.is_banned(profile.user)\n", "path": "course/templatetags/course.py"}], "after_files": [{"content": "from typing import Any, Dict, List, Union\nfrom copy import deepcopy\n\nfrom django import template\nfrom django.db import models\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import get_language\n\nfrom exercise.cache.content import CachedContent\nfrom course.models import CourseInstance, UserTagging\nfrom lib.localization_syntax import pick_localized\nfrom userprofile.models import UserProfile\nfrom ..cache.menu import CachedTopMenu\n\n\nregister = template.Library()\n\n\ndef _prepare_topmenu(context):\n if 'topmenu' not in context:\n request = context.get('request', None)\n context['topmenu'] = CachedTopMenu(request.user if request else None)\n return context['topmenu']\n\n\ndef _deadline_extended_exercise_open(entry, now):\n personal_deadline = entry.get('personal_deadline')\n return personal_deadline is not None and entry['opening_time'] <= now <= personal_deadline\n\n\[email protected]_tag(\"course/_course_dropdown_menu.html\", takes_context=True)\ndef course_menu(context):\n menu = _prepare_topmenu(context)\n return { \"instances\": menu.courses() }\n\n\[email protected]_tag('course/_group_select.html', takes_context=True)\ndef group_select(context):\n instance = context.get('instance', None)\n if not instance:\n return { 'groups': [] }\n menu = _prepare_topmenu(context)\n groups, selected = menu.groups(instance)\n return {\n 'instance': instance,\n 'groups': groups,\n 'selected': selected,\n }\n\n\[email protected]\ndef escape_slashes(string):\n return str(string).replace('/', '\\/') # noqa: W605\n\[email protected]\ndef parse_localization(entry):\n return pick_localized(entry, get_language())\n\n\[email protected]\ndef list_unselected(langs):\n listed = list(filter(lambda x: x and x != get_language(), langs.split(\"|\")))\n return listed\n\n\[email protected]\ndef is_visible(entry):\n return CachedContent.is_visible(entry)\n\n\[email protected]\ndef is_visible_to(entry, user):\n return entry.is_visible_to(user)\n\n\[email protected]\ndef is_listed(entry):\n return CachedContent.is_listed(entry)\n\n\[email protected]\ndef len_listed(entries):\n return len([e for e in entries if CachedContent.is_listed(e)])\n\n\[email protected]\ndef is_in_maintenance(entry):\n return CachedContent.is_in_maintenance(entry)\n\n\[email protected]\ndef exercises_open(entry, now):\n return entry['opening_time'] <= now <= entry['closing_time']\n\n\[email protected]\ndef deadline_extended_exercise_open(entry, now):\n return _deadline_extended_exercise_open(entry, now)\n\n\[email protected]\ndef deadline_extended_exercises_open(entry, now):\n entries = deepcopy(entry['flatted'])\n return any(_deadline_extended_exercise_open(entry, now) for entry in entries)\n\n\[email protected]\ndef exercises_submittable(entry, now):\n if entry['late_allowed']:\n return entry['opening_time'] <= 
now <= entry['late_time']\n return entry['opening_time'] <= now <= entry['closing_time']\n\n\[email protected]\ndef has_opened(entry, now):\n return entry['opening_time'] <= now\n\n\[email protected]\ndef url(model_object, name=None):\n if name:\n return model_object.get_url(name)\n return model_object.get_display_url()\n\n\[email protected]\ndef names(profiles):\n return \", \".join(p.user.get_full_name() for p in profiles)\n\n\[email protected]_tag('course/_avatars.html')\ndef avatars(profiles):\n return { 'profiles': profiles }\n\n\[email protected]_tag(\"course/_profiles.html\")\ndef profiles(\n profiles: Union[UserProfile, List[UserProfile], models.QuerySet[UserProfile]],\n instance: CourseInstance,\n is_teacher: bool\n ) -> Dict[str, Any]:\n if isinstance(profiles, UserProfile):\n profiles = [profiles]\n elif isinstance(profiles, models.QuerySet):\n # Avoid re-fetching the queryset\n profiles = list(profiles)\n return {\n 'instance': instance,\n 'profiles': profiles,\n 'is_teacher': is_teacher,\n }\n\n\[email protected]_tag\ndef tags(profile, instance):\n tags = UserTagging.objects.get_all(profile, instance)\n return mark_safe(' '.join(tag.html_label for tag in tags))\n\n\[email protected]\ndef enrollment_audience(enrollment_audience_val):\n # convert enrollment audience Enum value to the string description\n return CourseInstance.ENROLLMENT_AUDIENCE[enrollment_audience_val]\n\n\[email protected]\ndef view_content_to(view_content_to_val):\n # convert \"view content to\" Enum value to the string description\n return CourseInstance.VIEW_ACCESS[view_content_to_val]\n\n\[email protected]\ndef is_banned_student(profile, course_instance):\n return course_instance.is_banned(profile.user)\n", "path": "course/templatetags/course.py"}]} | 1,700 | 310 |
gh_patches_debug_13156 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-5599 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Added retry on internal error
Added retry on internal error as suggested by the internal error message: '500 Error encountered during execution. Retrying may solve the problem.'.
Rationalised the conditions structure so it would simplify addition of other retry conditions.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bigquery/google/cloud/bigquery/retry.py`
Content:
```
1
2 # Copyright 2018 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from google.api_core import retry
17
18
19 def _should_retry(exc):
20 """Predicate for determining when to retry.
21
22 We retry if and only if the 'reason' is 'backendError'
23 or 'rateLimitExceeded'.
24 """
25 if not hasattr(exc, 'errors'):
26 return False
27 if len(exc.errors) == 0:
28 return False
29 reason = exc.errors[0]['reason']
30 return reason == 'backendError' or reason == 'rateLimitExceeded'
31
32
33 DEFAULT_RETRY = retry.Retry(predicate=_should_retry)
34 """The default retry object.
35
36 Any method with a ``retry`` parameter will be retried automatically,
37 with reasonable defaults. To disable retry, pass ``retry=None``.
38 To modify the default retry behavior, call a ``with_XXX`` method
39 on ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,
40 pass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.
41 """
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bigquery/google/cloud/bigquery/retry.py b/bigquery/google/cloud/bigquery/retry.py
--- a/bigquery/google/cloud/bigquery/retry.py
+++ b/bigquery/google/cloud/bigquery/retry.py
@@ -16,6 +16,13 @@
from google.api_core import retry
+_RETRYABLE_REASONS = frozenset([
+ 'backendError',
+ 'rateLimitExceeded',
+ 'internalError',
+])
+
+
def _should_retry(exc):
"""Predicate for determining when to retry.
@@ -27,7 +34,7 @@
if len(exc.errors) == 0:
return False
reason = exc.errors[0]['reason']
- return reason == 'backendError' or reason == 'rateLimitExceeded'
+ return reason in _RETRYABLE_REASONS
DEFAULT_RETRY = retry.Retry(predicate=_should_retry)
| {"golden_diff": "diff --git a/bigquery/google/cloud/bigquery/retry.py b/bigquery/google/cloud/bigquery/retry.py\n--- a/bigquery/google/cloud/bigquery/retry.py\n+++ b/bigquery/google/cloud/bigquery/retry.py\n@@ -16,6 +16,13 @@\n from google.api_core import retry\n \n \n+_RETRYABLE_REASONS = frozenset([\n+ 'backendError',\n+ 'rateLimitExceeded',\n+ 'internalError',\n+])\n+\n+\n def _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n \n@@ -27,7 +34,7 @@\n if len(exc.errors) == 0:\n return False\n reason = exc.errors[0]['reason']\n- return reason == 'backendError' or reason == 'rateLimitExceeded'\n+ return reason in _RETRYABLE_REASONS\n \n \n DEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n", "issue": "Added retry on internal error\nAdded retry on internal error as suggested by the internal error message: '500 Error encountered during execution. Retrying may solve the problem.'. \r\nRationalised the conditions structure so it would simplify addition of other retry conditions.\n", "before_files": [{"content": "\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import retry\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, 'errors'):\n return False\n if len(exc.errors) == 0:\n return False\n reason = exc.errors[0]['reason']\n return reason == 'backendError' or reason == 'rateLimitExceeded'\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. 
For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "bigquery/google/cloud/bigquery/retry.py"}], "after_files": [{"content": "\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import retry\n\n\n_RETRYABLE_REASONS = frozenset([\n 'backendError',\n 'rateLimitExceeded',\n 'internalError',\n])\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, 'errors'):\n return False\n if len(exc.errors) == 0:\n return False\n reason = exc.errors[0]['reason']\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "bigquery/google/cloud/bigquery/retry.py"}]} | 728 | 200 |
gh_patches_debug_20996 | rasdani/github-patches | git_diff | pytorch__torchdynamo-242 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TorchDynamo is incompatible with einops.repeat
Hi,
We are doing some functionality validation and performance benchmarking for TorchDynamo, and we found that TorchDynamo is incompatible with the `repeat` function from the `einops` pip package (and possibly other einops functions as well). All models that call `einops.repeat` in their `nn.Module` definition (e.g. the vision transformer models from https://github.com/lucidrains/vit-pytorch) run fine with regular PyTorch but fail when using TorchDynamo with simply an FX backend, like the example shown in the TorchDynamo homepage README.
Reproduction code (test.py):
```
import torch
from torch import nn
from einops import repeat
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--regular', action='store_true')
parser.add_argument('--torchdynamo', action='store_true')
args = parser.parse_args()
class Net(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
        repeat(x, 'h w -> h w c', c=3).shape # an einops.repeat() statement
return x
X = torch.randn(30, 40)
model = Net()
model.eval()
# Regular
if args.regular:
model(X)
# Torchdynamo
if args.torchdynamo:
import torchdynamo
from typing import List
def my_compiler(gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):
return gm.forward
with torchdynamo.optimize(my_compiler), torch.no_grad():
model(X)
```
For this code snippet, ```python test.py --regular``` will pass but ```python test.py --torchdynamo``` will fail.
Error Log:
```
Traceback (most recent call last):
File "/tmp/kyao/torchdynamo/test_einops.py", line 37, in <module>
model(X)
File "/mnt/local_disk/miniconda3/envs/ipexnew/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/tmp/kyao/torchdynamo/test_einops.py", line 15, in forward
def forward(self, x):
File "/mnt/local_disk/miniconda3/envs/ipexnew/lib/python3.9/site-packages/einops-0.4.1-py3.9.egg/einops/einops.py", line 490, in repeat
def repeat(tensor: Tensor, pattern: str, **axes_lengths) -> Tensor:
File "/mnt/local_disk/miniconda3/envs/ipexnew/lib/python3.9/site-packages/einops-0.4.1-py3.9.egg/einops/einops.py", line 410, in reduce
return _apply_recipe(recipe, tensor, reduction_type=reduction)
File "/mnt/local_disk/miniconda3/envs/ipexnew/lib/python3.9/site-packages/einops-0.4.1-py3.9.egg/einops/einops.py", line 229, in _apply_recipe
def _apply_recipe(recipe: TransformRecipe, tensor: Tensor, reduction_type: Reduction) -> Tensor:
File "/mnt/local_disk/miniconda3/envs/ipexnew/lib/python3.9/site-packages/einops-0.4.1-py3.9.egg/einops/_backends.py", line 22, in get_backend
def get_backend(tensor) -> 'AbstractBackend':
File "/mnt/local_disk/miniconda3/envs/ipexnew/lib/python3.9/site-packages/einops-0.4.1-py3.9.egg/einops/_backends.py", line 22, in get_backend
def get_backend(tensor) -> 'AbstractBackend':
IndexError: list index out of range
```
--- END ISSUE ---
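
For orientation, the `get_backend` frames in the traceback come from einops selecting a backend by probing the tensor type against its backend classes, roughly the pattern sketched below (a simplified illustration, not einops' exact code). The key detail is the call to `__subclasses__()`, which TorchDynamo could not trace at the time:

```python
# Simplified sketch of the backend-selection pattern that trips the tracer.
class AbstractBackend:
    framework_name = "abstract"

    def is_appropriate_type(self, tensor) -> bool:
        raise NotImplementedError


class TorchBackend(AbstractBackend):
    framework_name = "torch"

    def is_appropriate_type(self, tensor) -> bool:
        import torch
        return isinstance(tensor, torch.Tensor)


def get_backend(tensor) -> "AbstractBackend":
    # Iterating over __subclasses__() is the step TorchDynamo failed to handle.
    for backend_cls in AbstractBackend.__subclasses__():
        backend = backend_cls()
        if backend.is_appropriate_type(tensor):
            return backend
    raise RuntimeError("Tensor type unknown to any backend")
```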
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchdynamo/variables/user_defined.py`
Content:
```
1 import collections
2 import dataclasses
3 import importlib
4 import inspect
5 import types
6 from typing import Dict
7 from typing import List
8
9 import torch.nn
10
11 from .. import variables
12 from ..exc import unimplemented
13 from ..guards import Guard
14 from ..guards import GuardBuilder
15 from ..source import AttrSource
16 from ..source import ODictGetItemSource
17 from ..utils import is_namedtuple_cls
18 from ..utils import namedtuple_fields
19 from .base import MutableLocal
20 from .base import VariableTracker
21
22
23 class UserDefinedVariable(VariableTracker):
24 pass
25
26
27 class UserDefinedClassVariable(UserDefinedVariable):
28 def __init__(self, value, **kwargs):
29 super().__init__(**kwargs)
30 self.value = value
31
32 def as_python_constant(self):
33 return self.value
34
35 def var_getattr(self, tx, name: str) -> "VariableTracker":
36 options = VariableTracker.propagate(self)
37 try:
38 obj = inspect.getattr_static(self.value, name)
39 except AttributeError:
40 obj = None
41
42 if isinstance(obj, staticmethod):
43 return variables.UserFunctionVariable(obj.__get__(self.value), **options)
44 elif isinstance(obj, classmethod):
45 return variables.UserMethodVariable(obj.__func__, self, **options)
46
47 return super(UserDefinedClassVariable, self).var_getattr(tx, name)
48
49 def call_function(
50 self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]"
51 ) -> "VariableTracker":
52 from ..side_effects import SideEffects
53
54 options = VariableTracker.propagate(self, args, kwargs.values())
55
56 if is_namedtuple_cls(self.value):
57 fields = namedtuple_fields(self.value)
58 items = list(args)
59 items.extend([None] * (len(fields) - len(items)))
60 for name, value in kwargs.items():
61 assert name in fields
62 items[fields.index(name)] = value
63 assert all(x is not None for x in items)
64 return variables.NamedTupleVariable(
65 items, self.value, **VariableTracker.propagate(self, items)
66 )
67 elif (
68 inspect.getattr_static(self.value, "__new__", None) in (object.__new__,)
69 and SideEffects.cls_supports_mutation_side_effects(self.value)
70 and self.source
71 ):
72 var = tx.output.side_effects.track_object_new(
73 self.source, self.value, UserDefinedObjectVariable, options
74 )
75 return var.add_options(var.call_method(tx, "__init__", args, kwargs))
76 elif variables.DataClassVariable.is_matching_cls(self.value):
77 options["mutable_local"] = MutableLocal()
78 return variables.DataClassVariable.create(self.value, args, kwargs, options)
79
80 return super().call_function(tx, args, kwargs)
81
82 def const_getattr(self, tx, name):
83 if name == "__name__":
84 return self.value.__name__
85 return super().const_getattr(tx, name)
86
87
88 class UserDefinedObjectVariable(UserDefinedVariable):
89 """
90 Mostly objects of defined type. Catch-all for something where we only know the type.
91 """
92
93 def __init__(self, value, value_type=None, **kwargs):
94 super(UserDefinedObjectVariable, self).__init__(**kwargs)
95 self.value = value
96 self.value_type = value_type or type(value)
97 assert type(value) is self.value_type
98
99 def __str__(self):
100 inner = self.value_type.__name__
101 if inner == "builtin_function_or_method":
102 inner = str(getattr(self.value, "__name__", None))
103 return f"{self.__class__.__name__}({inner})"
104
105 def python_type(self):
106 return self.value_type
107
108 def call_method(
109 self,
110 tx,
111 name,
112 args: "List[VariableTracker]",
113 kwargs: "Dict[str, VariableTracker]",
114 ) -> "VariableTracker":
115 from . import ConstantVariable
116 from . import TupleVariable
117 from . import UserMethodVariable
118
119 options = VariableTracker.propagate(self, args, kwargs.values())
120
121 if name not in getattr(self.value, "__dict__", {}):
122 try:
123 method = inspect.getattr_static(type(self.value), name)
124 except AttributeError:
125 method = None
126
127 if method is object.__init__:
128 return ConstantVariable(None, **options)
129
130 if method is collections.OrderedDict.keys and self.source:
131 # subclass of OrderedDict
132 assert not (args or kwargs)
133 keys = list(self.value.keys())
134 assert all(map(ConstantVariable.is_literal, keys))
135 return TupleVariable(
136 [ConstantVariable(k, **options) for k in keys], **options
137 ).add_guard(
138 Guard(
139 self.source.name(),
140 self.source.guard_source(),
141 GuardBuilder.ODICT_KEYS,
142 )
143 )
144
145 if (
146 method is collections.OrderedDict.items
147 and isinstance(self.value, collections.OrderedDict)
148 and self.source
149 ):
150 assert not (args or kwargs)
151 items = []
152 keys = self.call_method(tx, "keys", [], {})
153 options = VariableTracker.propagate(self, args, kwargs.values(), keys)
154 for key in keys.unpack_var_sequence(tx):
155 items.append(
156 TupleVariable(
157 [key, self.odict_getitem(tx, key)],
158 **options,
159 )
160 )
161 return TupleVariable(items, **options)
162
163 if method is collections.OrderedDict.__getitem__ and len(args) == 1:
164 assert not kwargs
165 return self.odict_getitem(tx, args[0])
166
167 # check for methods implemented in C++
168 if isinstance(method, types.FunctionType):
169 # TODO(jansel): add a guard to check for monkey patching?
170 return UserMethodVariable(method, self, **options).call_function(
171 tx, args, kwargs
172 )
173
174 return super().call_method(tx, name, args, kwargs)
175
176 def _check_for_getattribute(self):
177 try:
178 if isinstance(
179 inspect.getattr_static(type(self.value), "__getattribute__"),
180 types.FunctionType,
181 ):
182 unimplemented("UserDefinedObjectVariable with custom __getattribute__")
183 except AttributeError:
184 pass
185
186 def _check_for_getattr(self):
187 try:
188 getattr_fn = inspect.getattr_static(type(self.value), "__getattr__")
189 except AttributeError:
190 getattr_fn = None
191 if getattr_fn is torch.nn.Module.__getattr__:
192 # ignore this case of getattr
193 getattr_fn = None
194 return getattr_fn
195
196 def _getattr_static(self, name):
197 if isinstance(self.value, (dataclasses.Field, torch.nn.Module)):
198 # getattr_static doesn't work on these
199 subobj = getattr(self.value, name)
200 else:
201 subobj = inspect.getattr_static(self.value, name)
202 return subobj
203
204 def var_getattr(self, tx, name):
205 from . import ConstantVariable
206 from .builder import VariableBuilder
207
208 options = VariableTracker.propagate(self)
209 value = self.value
210 source = AttrSource(self.source, name) if self.source else None
211 self._check_for_getattribute()
212 getattr_fn = self._check_for_getattr()
213
214 try:
215 subobj = self._getattr_static(name)
216 except AttributeError:
217 if isinstance(getattr_fn, types.FunctionType):
218 return variables.UserMethodVariable(
219 getattr_fn, self, **options
220 ).call_function(tx, [ConstantVariable(name)], {})
221 elif getattr_fn is not None:
222 unimplemented("UserDefined with non-function __getattr__")
223
224 if isinstance(subobj, property):
225 return variables.UserMethodVariable(
226 subobj.fget, self, **options
227 ).call_function(tx, [], {})
228
229 if (
230 name in getattr(value, "__dict__", {})
231 or ConstantVariable.is_literal(subobj)
232 or isinstance(
233 subobj,
234 (
235 torch.Tensor,
236 torch.nn.Module,
237 ),
238 )
239 ):
240 if source:
241 return VariableBuilder(tx, source)(subobj).add_options(options)
242 elif ConstantVariable.is_literal(subobj):
243 return ConstantVariable(subobj, **options)
244
245 if (
246 name not in getattr(value, "__dict__", {})
247 and type(value).__module__.startswith("torch.")
248 and not callable(value)
249 ):
250 if not source:
251 assert getattr(
252 importlib.import_module(type(value).__module__),
253 type(value).__name__,
254 ) is type(value)
255 source = AttrSource(
256 AttrSource(
257 tx.import_source(type(value).__module__), type(value).__name__
258 ),
259 name,
260 )
261 return VariableBuilder(tx, source)(subobj).add_options(options)
262
263 if isinstance(
264 subobj,
265 (
266 torch.distributions.constraints._Interval,
267 torch.distributions.constraints._Real,
268 torch.distributions.constraints.Constraint,
269 ),
270 ):
271 return UserDefinedObjectVariable(subobj, source=source, **options)
272
273 if name == "__class__":
274 return UserDefinedClassVariable(type(self.value), source=source, **options)
275
276 return variables.GetAttrVariable(self, name, source=source, **options)
277
278 def call_hasattr(self, tx, name: str) -> "VariableTracker":
279 if not self.source:
280 unimplemented("hasattr no source")
281 options = VariableTracker.propagate(self)
282 options["guards"].add(
283 AttrSource(self.source, name).make_guard(GuardBuilder.HASATTR)
284 )
285 if self._check_for_getattribute() or self._check_for_getattr():
286 unimplemented("hasattr with custom __getattr__")
287
288 try:
289 self._getattr_static(name)
290 return variables.ConstantVariable(True, **options)
291 except AttributeError:
292 return variables.ConstantVariable(False, **options)
293
294 def odict_getitem(self, tx, key):
295 from .builder import VariableBuilder
296
297 return VariableBuilder(
298 tx,
299 ODictGetItemSource(self.source, key.as_python_constant()),
300 )(
301 collections.OrderedDict.__getitem__(self.value, key.as_python_constant())
302 ).add_options(
303 key, self
304 )
305
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchdynamo/variables/user_defined.py b/torchdynamo/variables/user_defined.py
--- a/torchdynamo/variables/user_defined.py
+++ b/torchdynamo/variables/user_defined.py
@@ -46,6 +46,32 @@
return super(UserDefinedClassVariable, self).var_getattr(tx, name)
+ def call_method(
+ self,
+ tx,
+ name,
+ args: "List[VariableTracker]",
+ kwargs: "Dict[str, VariableTracker]",
+ ) -> "VariableTracker":
+ if (
+ name == "__subclasses__"
+ and len(args) == 0
+ and not kwargs
+ and "__subclasses__" not in self.value.__dict__
+ ):
+ options = VariableTracker.propagate(self, args, kwargs.values())
+ options["mutable_local"] = MutableLocal()
+ subs_as_vars: List[VariableTracker] = list()
+ for sub in self.value.__subclasses__():
+ source = AttrSource(tx.import_source(sub.__module__), sub.__name__)
+ subs_as_vars.append(
+ variables.UserDefinedClassVariable(sub, source=source)
+ )
+
+ return variables.ListVariable(subs_as_vars, **options)
+
+ return super().call_method(tx, args, kwargs)
+
def call_function(
self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]"
) -> "VariableTracker":
| {"golden_diff": "diff --git a/torchdynamo/variables/user_defined.py b/torchdynamo/variables/user_defined.py\n--- a/torchdynamo/variables/user_defined.py\n+++ b/torchdynamo/variables/user_defined.py\n@@ -46,6 +46,32 @@\n \n return super(UserDefinedClassVariable, self).var_getattr(tx, name)\n \n+ def call_method(\n+ self,\n+ tx,\n+ name,\n+ args: \"List[VariableTracker]\",\n+ kwargs: \"Dict[str, VariableTracker]\",\n+ ) -> \"VariableTracker\":\n+ if (\n+ name == \"__subclasses__\"\n+ and len(args) == 0\n+ and not kwargs\n+ and \"__subclasses__\" not in self.value.__dict__\n+ ):\n+ options = VariableTracker.propagate(self, args, kwargs.values())\n+ options[\"mutable_local\"] = MutableLocal()\n+ subs_as_vars: List[VariableTracker] = list()\n+ for sub in self.value.__subclasses__():\n+ source = AttrSource(tx.import_source(sub.__module__), sub.__name__)\n+ subs_as_vars.append(\n+ variables.UserDefinedClassVariable(sub, source=source)\n+ )\n+\n+ return variables.ListVariable(subs_as_vars, **options)\n+\n+ return super().call_method(tx, args, kwargs)\n+\n def call_function(\n self, tx, args: \"List[VariableTracker]\", kwargs: \"Dict[str, VariableTracker]\"\n ) -> \"VariableTracker\":\n", "issue": "TorchDynamo is incompatible with einops.repeat\nHi,\r\n\r\nWe are doing some functionality validation and performance benchmark for TorchDynamo, and we found that TorchDynamo is incompatible with \"einops\" (pip package)'s \"repeat\" function (or maybe also some other functions in einops). For all models that call einops.repeat in nn.Module model definition (e.g. vision transformers models from https://github.com/lucidrains/vit-pytorch), the models can run with regular PyTorch but failed when using TorchDynamo with simply a FX backend, like the example shown in TorchDynamo homepage README.\r\n\r\nReproduction code (test.py):\r\n```\r\nimport torch\r\nfrom torch import nn\r\nfrom einops import repeat\r\nimport argparse\r\n\r\nparser = argparse.ArgumentParser(description='')\r\nparser.add_argument('--regular', action='store_true')\r\nparser.add_argument('--torchdynamo', action='store_true')\r\nargs = parser.parse_args()\r\n\r\nclass Net(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n def forward(self, x):\r\n repeat(x, 'h w -> h w c', c=3).shape # an einops.repeat() sentence\r\n return x\r\n\r\nX = torch.randn(30, 40)\r\nmodel = Net()\r\n\r\nmodel.eval()\r\n\r\n# Regular\r\nif args.regular:\r\n model(X)\r\n\r\n# Torchdynamo\r\nif args.torchdynamo:\r\n import torchdynamo\r\n from typing import List\r\n \r\n def my_compiler(gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):\r\n return gm.forward\r\n \r\n with torchdynamo.optimize(my_compiler), torch.no_grad():\r\n model(X)\r\n```\r\nFor this code snippet, ```python test.py --regular``` will pass but ```python test.py --torchdynamo``` will fail.\r\n\r\nError Log:\r\n```\r\nTraceback (most recent call last):\r\n File \"/tmp/kyao/torchdynamo/test_einops.py\", line 37, in <module>\r\n model(X)\r\n File \"/mnt/local_disk/miniconda3/envs/ipexnew/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1110, in _call_impl\r\n return forward_call(*input, **kwargs)\r\n File \"/tmp/kyao/torchdynamo/test_einops.py\", line 15, in forward\r\n def forward(self, x):\r\n File \"/mnt/local_disk/miniconda3/envs/ipexnew/lib/python3.9/site-packages/einops-0.4.1-py3.9.egg/einops/einops.py\", line 490, in repeat\r\n def repeat(tensor: Tensor, pattern: str, **axes_lengths) -> Tensor:\r\n File 
\"/mnt/local_disk/miniconda3/envs/ipexnew/lib/python3.9/site-packages/einops-0.4.1-py3.9.egg/einops/einops.py\", line 410, in reduce\r\n return _apply_recipe(recipe, tensor, reduction_type=reduction)\r\n File \"/mnt/local_disk/miniconda3/envs/ipexnew/lib/python3.9/site-packages/einops-0.4.1-py3.9.egg/einops/einops.py\", line 229, in _apply_recipe\r\n def _apply_recipe(recipe: TransformRecipe, tensor: Tensor, reduction_type: Reduction) -> Tensor:\r\n File \"/mnt/local_disk/miniconda3/envs/ipexnew/lib/python3.9/site-packages/einops-0.4.1-py3.9.egg/einops/_backends.py\", line 22, in get_backend\r\n def get_backend(tensor) -> 'AbstractBackend':\r\n File \"/mnt/local_disk/miniconda3/envs/ipexnew/lib/python3.9/site-packages/einops-0.4.1-py3.9.egg/einops/_backends.py\", line 22, in get_backend\r\n def get_backend(tensor) -> 'AbstractBackend':\r\nIndexError: list index out of range\r\n```\n", "before_files": [{"content": "import collections\nimport dataclasses\nimport importlib\nimport inspect\nimport types\nfrom typing import Dict\nfrom typing import List\n\nimport torch.nn\n\nfrom .. import variables\nfrom ..exc import unimplemented\nfrom ..guards import Guard\nfrom ..guards import GuardBuilder\nfrom ..source import AttrSource\nfrom ..source import ODictGetItemSource\nfrom ..utils import is_namedtuple_cls\nfrom ..utils import namedtuple_fields\nfrom .base import MutableLocal\nfrom .base import VariableTracker\n\n\nclass UserDefinedVariable(VariableTracker):\n pass\n\n\nclass UserDefinedClassVariable(UserDefinedVariable):\n def __init__(self, value, **kwargs):\n super().__init__(**kwargs)\n self.value = value\n\n def as_python_constant(self):\n return self.value\n\n def var_getattr(self, tx, name: str) -> \"VariableTracker\":\n options = VariableTracker.propagate(self)\n try:\n obj = inspect.getattr_static(self.value, name)\n except AttributeError:\n obj = None\n\n if isinstance(obj, staticmethod):\n return variables.UserFunctionVariable(obj.__get__(self.value), **options)\n elif isinstance(obj, classmethod):\n return variables.UserMethodVariable(obj.__func__, self, **options)\n\n return super(UserDefinedClassVariable, self).var_getattr(tx, name)\n\n def call_function(\n self, tx, args: \"List[VariableTracker]\", kwargs: \"Dict[str, VariableTracker]\"\n ) -> \"VariableTracker\":\n from ..side_effects import SideEffects\n\n options = VariableTracker.propagate(self, args, kwargs.values())\n\n if is_namedtuple_cls(self.value):\n fields = namedtuple_fields(self.value)\n items = list(args)\n items.extend([None] * (len(fields) - len(items)))\n for name, value in kwargs.items():\n assert name in fields\n items[fields.index(name)] = value\n assert all(x is not None for x in items)\n return variables.NamedTupleVariable(\n items, self.value, **VariableTracker.propagate(self, items)\n )\n elif (\n inspect.getattr_static(self.value, \"__new__\", None) in (object.__new__,)\n and SideEffects.cls_supports_mutation_side_effects(self.value)\n and self.source\n ):\n var = tx.output.side_effects.track_object_new(\n self.source, self.value, UserDefinedObjectVariable, options\n )\n return var.add_options(var.call_method(tx, \"__init__\", args, kwargs))\n elif variables.DataClassVariable.is_matching_cls(self.value):\n options[\"mutable_local\"] = MutableLocal()\n return variables.DataClassVariable.create(self.value, args, kwargs, options)\n\n return super().call_function(tx, args, kwargs)\n\n def const_getattr(self, tx, name):\n if name == \"__name__\":\n return self.value.__name__\n return super().const_getattr(tx, 
name)\n\n\nclass UserDefinedObjectVariable(UserDefinedVariable):\n \"\"\"\n Mostly objects of defined type. Catch-all for something where we only know the type.\n \"\"\"\n\n def __init__(self, value, value_type=None, **kwargs):\n super(UserDefinedObjectVariable, self).__init__(**kwargs)\n self.value = value\n self.value_type = value_type or type(value)\n assert type(value) is self.value_type\n\n def __str__(self):\n inner = self.value_type.__name__\n if inner == \"builtin_function_or_method\":\n inner = str(getattr(self.value, \"__name__\", None))\n return f\"{self.__class__.__name__}({inner})\"\n\n def python_type(self):\n return self.value_type\n\n def call_method(\n self,\n tx,\n name,\n args: \"List[VariableTracker]\",\n kwargs: \"Dict[str, VariableTracker]\",\n ) -> \"VariableTracker\":\n from . import ConstantVariable\n from . import TupleVariable\n from . import UserMethodVariable\n\n options = VariableTracker.propagate(self, args, kwargs.values())\n\n if name not in getattr(self.value, \"__dict__\", {}):\n try:\n method = inspect.getattr_static(type(self.value), name)\n except AttributeError:\n method = None\n\n if method is object.__init__:\n return ConstantVariable(None, **options)\n\n if method is collections.OrderedDict.keys and self.source:\n # subclass of OrderedDict\n assert not (args or kwargs)\n keys = list(self.value.keys())\n assert all(map(ConstantVariable.is_literal, keys))\n return TupleVariable(\n [ConstantVariable(k, **options) for k in keys], **options\n ).add_guard(\n Guard(\n self.source.name(),\n self.source.guard_source(),\n GuardBuilder.ODICT_KEYS,\n )\n )\n\n if (\n method is collections.OrderedDict.items\n and isinstance(self.value, collections.OrderedDict)\n and self.source\n ):\n assert not (args or kwargs)\n items = []\n keys = self.call_method(tx, \"keys\", [], {})\n options = VariableTracker.propagate(self, args, kwargs.values(), keys)\n for key in keys.unpack_var_sequence(tx):\n items.append(\n TupleVariable(\n [key, self.odict_getitem(tx, key)],\n **options,\n )\n )\n return TupleVariable(items, **options)\n\n if method is collections.OrderedDict.__getitem__ and len(args) == 1:\n assert not kwargs\n return self.odict_getitem(tx, args[0])\n\n # check for methods implemented in C++\n if isinstance(method, types.FunctionType):\n # TODO(jansel): add a guard to check for monkey patching?\n return UserMethodVariable(method, self, **options).call_function(\n tx, args, kwargs\n )\n\n return super().call_method(tx, name, args, kwargs)\n\n def _check_for_getattribute(self):\n try:\n if isinstance(\n inspect.getattr_static(type(self.value), \"__getattribute__\"),\n types.FunctionType,\n ):\n unimplemented(\"UserDefinedObjectVariable with custom __getattribute__\")\n except AttributeError:\n pass\n\n def _check_for_getattr(self):\n try:\n getattr_fn = inspect.getattr_static(type(self.value), \"__getattr__\")\n except AttributeError:\n getattr_fn = None\n if getattr_fn is torch.nn.Module.__getattr__:\n # ignore this case of getattr\n getattr_fn = None\n return getattr_fn\n\n def _getattr_static(self, name):\n if isinstance(self.value, (dataclasses.Field, torch.nn.Module)):\n # getattr_static doesn't work on these\n subobj = getattr(self.value, name)\n else:\n subobj = inspect.getattr_static(self.value, name)\n return subobj\n\n def var_getattr(self, tx, name):\n from . 
import ConstantVariable\n from .builder import VariableBuilder\n\n options = VariableTracker.propagate(self)\n value = self.value\n source = AttrSource(self.source, name) if self.source else None\n self._check_for_getattribute()\n getattr_fn = self._check_for_getattr()\n\n try:\n subobj = self._getattr_static(name)\n except AttributeError:\n if isinstance(getattr_fn, types.FunctionType):\n return variables.UserMethodVariable(\n getattr_fn, self, **options\n ).call_function(tx, [ConstantVariable(name)], {})\n elif getattr_fn is not None:\n unimplemented(\"UserDefined with non-function __getattr__\")\n\n if isinstance(subobj, property):\n return variables.UserMethodVariable(\n subobj.fget, self, **options\n ).call_function(tx, [], {})\n\n if (\n name in getattr(value, \"__dict__\", {})\n or ConstantVariable.is_literal(subobj)\n or isinstance(\n subobj,\n (\n torch.Tensor,\n torch.nn.Module,\n ),\n )\n ):\n if source:\n return VariableBuilder(tx, source)(subobj).add_options(options)\n elif ConstantVariable.is_literal(subobj):\n return ConstantVariable(subobj, **options)\n\n if (\n name not in getattr(value, \"__dict__\", {})\n and type(value).__module__.startswith(\"torch.\")\n and not callable(value)\n ):\n if not source:\n assert getattr(\n importlib.import_module(type(value).__module__),\n type(value).__name__,\n ) is type(value)\n source = AttrSource(\n AttrSource(\n tx.import_source(type(value).__module__), type(value).__name__\n ),\n name,\n )\n return VariableBuilder(tx, source)(subobj).add_options(options)\n\n if isinstance(\n subobj,\n (\n torch.distributions.constraints._Interval,\n torch.distributions.constraints._Real,\n torch.distributions.constraints.Constraint,\n ),\n ):\n return UserDefinedObjectVariable(subobj, source=source, **options)\n\n if name == \"__class__\":\n return UserDefinedClassVariable(type(self.value), source=source, **options)\n\n return variables.GetAttrVariable(self, name, source=source, **options)\n\n def call_hasattr(self, tx, name: str) -> \"VariableTracker\":\n if not self.source:\n unimplemented(\"hasattr no source\")\n options = VariableTracker.propagate(self)\n options[\"guards\"].add(\n AttrSource(self.source, name).make_guard(GuardBuilder.HASATTR)\n )\n if self._check_for_getattribute() or self._check_for_getattr():\n unimplemented(\"hasattr with custom __getattr__\")\n\n try:\n self._getattr_static(name)\n return variables.ConstantVariable(True, **options)\n except AttributeError:\n return variables.ConstantVariable(False, **options)\n\n def odict_getitem(self, tx, key):\n from .builder import VariableBuilder\n\n return VariableBuilder(\n tx,\n ODictGetItemSource(self.source, key.as_python_constant()),\n )(\n collections.OrderedDict.__getitem__(self.value, key.as_python_constant())\n ).add_options(\n key, self\n )\n", "path": "torchdynamo/variables/user_defined.py"}], "after_files": [{"content": "import collections\nimport dataclasses\nimport importlib\nimport inspect\nimport types\nfrom typing import Dict\nfrom typing import List\n\nimport torch.nn\n\nfrom .. 
import variables\nfrom ..exc import unimplemented\nfrom ..guards import Guard\nfrom ..guards import GuardBuilder\nfrom ..source import AttrSource\nfrom ..source import ODictGetItemSource\nfrom ..utils import is_namedtuple_cls\nfrom ..utils import namedtuple_fields\nfrom .base import MutableLocal\nfrom .base import VariableTracker\n\n\nclass UserDefinedVariable(VariableTracker):\n pass\n\n\nclass UserDefinedClassVariable(UserDefinedVariable):\n def __init__(self, value, **kwargs):\n super().__init__(**kwargs)\n self.value = value\n\n def as_python_constant(self):\n return self.value\n\n def var_getattr(self, tx, name: str) -> \"VariableTracker\":\n options = VariableTracker.propagate(self)\n try:\n obj = inspect.getattr_static(self.value, name)\n except AttributeError:\n obj = None\n\n if isinstance(obj, staticmethod):\n return variables.UserFunctionVariable(obj.__get__(self.value), **options)\n elif isinstance(obj, classmethod):\n return variables.UserMethodVariable(obj.__func__, self, **options)\n\n return super(UserDefinedClassVariable, self).var_getattr(tx, name)\n\n def call_method(\n self,\n tx,\n name,\n args: \"List[VariableTracker]\",\n kwargs: \"Dict[str, VariableTracker]\",\n ) -> \"VariableTracker\":\n if (\n name == \"__subclasses__\"\n and len(args) == 0\n and not kwargs\n and \"__subclasses__\" not in self.value.__dict__\n ):\n options = VariableTracker.propagate(self, args, kwargs.values())\n options[\"mutable_local\"] = MutableLocal()\n subs_as_vars: List[VariableTracker] = list()\n for sub in self.value.__subclasses__():\n source = AttrSource(tx.import_source(sub.__module__), sub.__name__)\n subs_as_vars.append(\n variables.UserDefinedClassVariable(sub, source=source)\n )\n\n return variables.ListVariable(subs_as_vars, **options)\n\n return super().call_method(tx, args, kwargs)\n\n def call_function(\n self, tx, args: \"List[VariableTracker]\", kwargs: \"Dict[str, VariableTracker]\"\n ) -> \"VariableTracker\":\n from ..side_effects import SideEffects\n\n options = VariableTracker.propagate(self, args, kwargs.values())\n\n if is_namedtuple_cls(self.value):\n fields = namedtuple_fields(self.value)\n items = list(args)\n items.extend([None] * (len(fields) - len(items)))\n for name, value in kwargs.items():\n assert name in fields\n items[fields.index(name)] = value\n assert all(x is not None for x in items)\n return variables.NamedTupleVariable(\n items, self.value, **VariableTracker.propagate(self, items)\n )\n elif (\n inspect.getattr_static(self.value, \"__new__\", None) in (object.__new__,)\n and SideEffects.cls_supports_mutation_side_effects(self.value)\n and self.source\n ):\n var = tx.output.side_effects.track_object_new(\n self.source, self.value, UserDefinedObjectVariable, options\n )\n return var.add_options(var.call_method(tx, \"__init__\", args, kwargs))\n elif variables.DataClassVariable.is_matching_cls(self.value):\n options[\"mutable_local\"] = MutableLocal()\n return variables.DataClassVariable.create(self.value, args, kwargs, options)\n\n return super().call_function(tx, args, kwargs)\n\n def const_getattr(self, tx, name):\n if name == \"__name__\":\n return self.value.__name__\n return super().const_getattr(tx, name)\n\n\nclass UserDefinedObjectVariable(UserDefinedVariable):\n \"\"\"\n Mostly objects of defined type. 
Catch-all for something where we only know the type.\n \"\"\"\n\n def __init__(self, value, value_type=None, **kwargs):\n super(UserDefinedObjectVariable, self).__init__(**kwargs)\n self.value = value\n self.value_type = value_type or type(value)\n assert type(value) is self.value_type\n\n def __str__(self):\n inner = self.value_type.__name__\n if inner == \"builtin_function_or_method\":\n inner = str(getattr(self.value, \"__name__\", None))\n return f\"{self.__class__.__name__}({inner})\"\n\n def python_type(self):\n return self.value_type\n\n def call_method(\n self,\n tx,\n name,\n args: \"List[VariableTracker]\",\n kwargs: \"Dict[str, VariableTracker]\",\n ) -> \"VariableTracker\":\n from . import ConstantVariable\n from . import TupleVariable\n from . import UserMethodVariable\n\n options = VariableTracker.propagate(self, args, kwargs.values())\n\n if name not in getattr(self.value, \"__dict__\", {}):\n try:\n method = inspect.getattr_static(type(self.value), name)\n except AttributeError:\n method = None\n\n if method is object.__init__:\n return ConstantVariable(None, **options)\n\n if method is collections.OrderedDict.keys and self.source:\n # subclass of OrderedDict\n assert not (args or kwargs)\n keys = list(self.value.keys())\n assert all(map(ConstantVariable.is_literal, keys))\n return TupleVariable(\n [ConstantVariable(k, **options) for k in keys], **options\n ).add_guard(\n Guard(\n self.source.name(),\n self.source.guard_source(),\n GuardBuilder.ODICT_KEYS,\n )\n )\n\n if (\n method is collections.OrderedDict.items\n and isinstance(self.value, collections.OrderedDict)\n and self.source\n ):\n assert not (args or kwargs)\n items = []\n keys = self.call_method(tx, \"keys\", [], {})\n options = VariableTracker.propagate(self, args, kwargs.values(), keys)\n for key in keys.unpack_var_sequence(tx):\n items.append(\n TupleVariable(\n [key, self.odict_getitem(tx, key)],\n **options,\n )\n )\n return TupleVariable(items, **options)\n\n if method is collections.OrderedDict.__getitem__ and len(args) == 1:\n assert not kwargs\n return self.odict_getitem(tx, args[0])\n\n # check for methods implemented in C++\n if isinstance(method, types.FunctionType):\n # TODO(jansel): add a guard to check for monkey patching?\n return UserMethodVariable(method, self, **options).call_function(\n tx, args, kwargs\n )\n\n return super().call_method(tx, name, args, kwargs)\n\n def _check_for_getattribute(self):\n try:\n if isinstance(\n inspect.getattr_static(type(self.value), \"__getattribute__\"),\n types.FunctionType,\n ):\n unimplemented(\"UserDefinedObjectVariable with custom __getattribute__\")\n except AttributeError:\n pass\n\n def _check_for_getattr(self):\n try:\n getattr_fn = inspect.getattr_static(type(self.value), \"__getattr__\")\n except AttributeError:\n getattr_fn = None\n if getattr_fn is torch.nn.Module.__getattr__:\n # ignore this case of getattr\n getattr_fn = None\n return getattr_fn\n\n def _getattr_static(self, name):\n if isinstance(self.value, (dataclasses.Field, torch.nn.Module)):\n # getattr_static doesn't work on these\n subobj = getattr(self.value, name)\n else:\n subobj = inspect.getattr_static(self.value, name)\n return subobj\n\n def var_getattr(self, tx, name):\n from . 
import ConstantVariable\n from .builder import VariableBuilder\n\n options = VariableTracker.propagate(self)\n value = self.value\n source = AttrSource(self.source, name) if self.source else None\n self._check_for_getattribute()\n getattr_fn = self._check_for_getattr()\n\n try:\n subobj = self._getattr_static(name)\n except AttributeError:\n if isinstance(getattr_fn, types.FunctionType):\n return variables.UserMethodVariable(\n getattr_fn, self, **options\n ).call_function(tx, [ConstantVariable(name)], {})\n elif getattr_fn is not None:\n unimplemented(\"UserDefined with non-function __getattr__\")\n\n if isinstance(subobj, property):\n return variables.UserMethodVariable(\n subobj.fget, self, **options\n ).call_function(tx, [], {})\n\n if (\n name in getattr(value, \"__dict__\", {})\n or ConstantVariable.is_literal(subobj)\n or isinstance(\n subobj,\n (\n torch.Tensor,\n torch.nn.Module,\n ),\n )\n ):\n if source:\n return VariableBuilder(tx, source)(subobj).add_options(options)\n elif ConstantVariable.is_literal(subobj):\n return ConstantVariable(subobj, **options)\n\n if (\n name not in getattr(value, \"__dict__\", {})\n and type(value).__module__.startswith(\"torch.\")\n and not callable(value)\n ):\n if not source:\n assert getattr(\n importlib.import_module(type(value).__module__),\n type(value).__name__,\n ) is type(value)\n source = AttrSource(\n AttrSource(\n tx.import_source(type(value).__module__), type(value).__name__\n ),\n name,\n )\n return VariableBuilder(tx, source)(subobj).add_options(options)\n\n if isinstance(\n subobj,\n (\n torch.distributions.constraints._Interval,\n torch.distributions.constraints._Real,\n torch.distributions.constraints.Constraint,\n ),\n ):\n return UserDefinedObjectVariable(subobj, source=source, **options)\n\n if name == \"__class__\":\n return UserDefinedClassVariable(type(self.value), source=source, **options)\n\n return variables.GetAttrVariable(self, name, source=source, **options)\n\n def call_hasattr(self, tx, name: str) -> \"VariableTracker\":\n if not self.source:\n unimplemented(\"hasattr no source\")\n options = VariableTracker.propagate(self)\n options[\"guards\"].add(\n AttrSource(self.source, name).make_guard(GuardBuilder.HASATTR)\n )\n if self._check_for_getattribute() or self._check_for_getattr():\n unimplemented(\"hasattr with custom __getattr__\")\n\n try:\n self._getattr_static(name)\n return variables.ConstantVariable(True, **options)\n except AttributeError:\n return variables.ConstantVariable(False, **options)\n\n def odict_getitem(self, tx, key):\n from .builder import VariableBuilder\n\n return VariableBuilder(\n tx,\n ODictGetItemSource(self.source, key.as_python_constant()),\n )(\n collections.OrderedDict.__getitem__(self.value, key.as_python_constant())\n ).add_options(\n key, self\n )\n", "path": "torchdynamo/variables/user_defined.py"}]} | 4,079 | 330 |
gh_patches_debug_32509 | rasdani/github-patches | git_diff | carpentries__amy-2107 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make URL redirects from `?next` param safer
There are a couple of places where we redirect to the `?next` value without checking whether it's safe. There's a `django.utils.http.is_safe_url` function we could use.
--- END ISSUE ---
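
For reference, the guard takes the candidate URL plus the hosts it may point at; below is a minimal sketch of wrapping a redirect with it (the view, the `dashboard` URL name, and the parameter handling are placeholders; on Django 3.0+ the helper is renamed `url_has_allowed_host_and_scheme`):

```python
# Sketch of a guarded redirect; "dashboard" is a placeholder URL name.
from django.conf import settings
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.http import is_safe_url  # url_has_allowed_host_and_scheme on Django 3.0+


def redirect_next_or_default(request, default_name="dashboard"):
    next_url = request.POST.get("next") or request.GET.get("next")
    if next_url and is_safe_url(next_url, allowed_hosts=settings.ALLOWED_HOSTS):
        return redirect(next_url)
    return redirect(reverse(default_name))
```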
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `amy/autoemails/utils.py`
Content:
```
1 from typing import Union
2
3 import django_rq
4 import pytz
5 from rq.exceptions import NoSuchJobError
6 from rq.job import Job
7 from rq_scheduler.utils import from_unix
8
9
10 def scheduled_execution_time(job_id, scheduler=None, naive=True):
11 """Get RQ-Scheduler scheduled execution time for specific job."""
12 _scheduler = scheduler
13 if not scheduler:
14 _scheduler = django_rq.get_scheduler("default")
15
16 # Scheduler keeps jobs in a single key, they are sorted by score, which is
17 # scheduled execution time (linux epoch). We can retrieve single
18 # entry's score.
19 time = _scheduler.connection.zscore(_scheduler.scheduled_jobs_key, job_id)
20
21 # Convert linux time epoch to UTC.
22 if time:
23 time = from_unix(time)
24 if not naive:
25 # By default, RQ-Scheduler uses UTC naive (TZ-unaware) objects,
26 # which we can "convert" to TZ-aware UTC.
27 time = time.replace(tzinfo=pytz.UTC)
28 return time
29
30
31 def compare_emails(a, b):
32 """EmailMultiAlternatives doesn't implement __eq__, so we have to
33 cheat our way."""
34 if a is None and b is None:
35 return True
36 elif a is None and b or b is None and a:
37 return False
38 else:
39 try:
40 return (
41 a.to == b.to
42 and a.cc == b.cc
43 and a.bcc == b.bcc
44 and a.reply_to == b.reply_to
45 and a.subject == b.subject
46 and a.body == b.body
47 )
48 except AttributeError:
49 return False
50
51
52 def check_status(job: Union[str, Job], scheduler=None):
53 _scheduler = scheduler
54 if not scheduler:
55 _scheduler = django_rq.get_scheduler("default")
56
57 if not isinstance(job, Job):
58 try:
59 job = Job.fetch(job, connection=_scheduler.connection)
60 except NoSuchJobError:
61 return None
62
63 scheduled = scheduled_execution_time(job.get_id(), scheduler)
64
65 if scheduled:
66 return job.get_status() or "scheduled"
67 else:
68 return job.get_status() or "cancelled"
69
```
Path: `amy/autoemails/views.py`
Content:
```
1 import logging
2
3 from django.contrib import messages
4 from django.shortcuts import get_object_or_404, redirect
5 from django.urls import reverse
6 from django.utils.html import format_html
7 from django.views.decorators.http import require_POST
8 import django_rq
9
10 from workshops.models import WorkshopRequest
11 from workshops.util import admin_required
12
13 from .actions import GenericAction
14 from .forms import GenericEmailScheduleForm
15 from .models import EmailTemplate, Trigger
16 from .utils import check_status, scheduled_execution_time
17
18 logger = logging.getLogger("amy.signals")
19 scheduler = django_rq.get_scheduler("default")
20 redis_connection = django_rq.get_connection("default")
21
22
23 @require_POST
24 @admin_required
25 def generic_schedule_email(request, pk):
26 """
27 Generic view for scheduling an email to be sent.
28 """
29 template_slug = request.POST.get("slug", "")
30 original_template = get_object_or_404(EmailTemplate, slug=template_slug)
31 # Hardcoded, maybe in future respond to other requests, like
32 # SelfOrganizedSubmission or WorkshopInquiry
33 trigger = get_object_or_404(
34 Trigger,
35 action__startswith="workshop-request-response",
36 template__slug=template_slug,
37 active=True,
38 )
39 form = GenericEmailScheduleForm(request.POST, instance=original_template)
40 workshop_request = get_object_or_404(WorkshopRequest, pk=pk)
41
42 if form.is_valid():
43 template = EmailTemplate(
44 slug=form.cleaned_data["slug"],
45 subject=form.cleaned_data["subject"],
46 to_header=form.cleaned_data["to_header"],
47 from_header=form.cleaned_data["from_header"],
48 cc_header=form.cleaned_data["cc_header"],
49 bcc_header=form.cleaned_data["bcc_header"],
50 reply_to_header=form.cleaned_data["reply_to_header"],
51 body_template=form.cleaned_data["body_template"],
52 )
53
54 objects = dict(request=workshop_request)
55 if workshop_request.event:
56 objects["event"] = workshop_request.event
57 objects["workshop"] = workshop_request.event
58
59 action = GenericAction(
60 trigger=trigger,
61 objects=objects,
62 )
63 action_name = GenericAction.__name__
64 launch_at = action.get_launch_at()
65 meta = dict(
66 action=action,
67 template=template,
68 launch_at=launch_at,
69 email=None,
70 context=None,
71 )
72
73 job = scheduler.enqueue_in(launch_at, action, meta=meta)
74 logger.debug("%s: enqueueing", action_name)
75 scheduled_at = scheduled_execution_time(
76 job.get_id(), scheduler=scheduler, naive=False
77 )
78 logger.debug("%s: job created [%r]", action_name, job)
79
80 rqj = workshop_request.rq_jobs.create(
81 job_id=job.get_id(),
82 trigger=trigger,
83 scheduled_execution=scheduled_at,
84 status=check_status(job),
85 mail_status="",
86 event_slug=action.event_slug(),
87 recipients=action.all_recipients(),
88 )
89
90 messages.info(
91 request,
92 format_html(
93 "New email ({}) was scheduled to run "
94 '<relative-time datetime="{}">{}</relative-time>: '
95 '<a href="{}">{}</a>.',
96 trigger.get_action_display(),
97 scheduled_at.isoformat(),
98 "{:%Y-%m-%d %H:%M}".format(scheduled_at),
99 reverse("admin:autoemails_rqjob_preview", args=[rqj.pk]),
100 job.id,
101 ),
102 fail_silently=True,
103 )
104
105 return redirect(
106 request.POST.get("next", "") or workshop_request.get_absolute_url()
107 )
108
109 else:
110 messages.error(
111 request,
112 f"Could not send the email due to form errors: {form.errors}",
113 fail_silently=True,
114 )
115
116 return redirect(
117 request.POST.get("next", "") or workshop_request.get_absolute_url()
118 )
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/amy/autoemails/utils.py b/amy/autoemails/utils.py
--- a/amy/autoemails/utils.py
+++ b/amy/autoemails/utils.py
@@ -1,5 +1,7 @@
-from typing import Union
+from typing import Optional, Union
+from django.conf import settings
+from django.utils.http import is_safe_url
import django_rq
import pytz
from rq.exceptions import NoSuchJobError
@@ -66,3 +68,9 @@
return job.get_status() or "scheduled"
else:
return job.get_status() or "cancelled"
+
+
+def safe_next_or_default_url(next_url: Optional[str], default: str) -> str:
+ if next_url is not None and is_safe_url(next_url, settings.ALLOWED_HOSTS):
+ return next_url
+ return default
diff --git a/amy/autoemails/views.py b/amy/autoemails/views.py
--- a/amy/autoemails/views.py
+++ b/amy/autoemails/views.py
@@ -13,7 +13,7 @@
from .actions import GenericAction
from .forms import GenericEmailScheduleForm
from .models import EmailTemplate, Trigger
-from .utils import check_status, scheduled_execution_time
+from .utils import check_status, safe_next_or_default_url, scheduled_execution_time
logger = logging.getLogger("amy.signals")
scheduler = django_rq.get_scheduler("default")
@@ -102,9 +102,9 @@
fail_silently=True,
)
- return redirect(
- request.POST.get("next", "") or workshop_request.get_absolute_url()
- )
+ default_url = workshop_request.get_absolute_url()
+ next_url = request.POST.get("next", None)
+ return redirect(safe_next_or_default_url(next_url, default_url))
else:
messages.error(
@@ -113,6 +113,6 @@
fail_silently=True,
)
- return redirect(
- request.POST.get("next", "") or workshop_request.get_absolute_url()
- )
+ default_url = workshop_request.get_absolute_url()
+ next_url = request.POST.get("next", None)
+ return redirect(safe_next_or_default_url(next_url, default_url))
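
A usage sketch of the helper introduced by the patch, for any other view that accepts a `?next` parameter — the import path assumes the `autoemails` app layout used by `views.py`, and `some_object` is a placeholder for whatever model provides the fallback URL:

```python
# Sketch: delegating the safety check to the new helper.
from django.shortcuts import redirect

from autoemails.utils import safe_next_or_default_url


def finish(request, some_object):
    default_url = some_object.get_absolute_url()
    next_url = request.POST.get("next", None)
    return redirect(safe_next_or_default_url(next_url, default_url))
```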
| {"golden_diff": "diff --git a/amy/autoemails/utils.py b/amy/autoemails/utils.py\n--- a/amy/autoemails/utils.py\n+++ b/amy/autoemails/utils.py\n@@ -1,5 +1,7 @@\n-from typing import Union\n+from typing import Optional, Union\n \n+from django.conf import settings\n+from django.utils.http import is_safe_url\n import django_rq\n import pytz\n from rq.exceptions import NoSuchJobError\n@@ -66,3 +68,9 @@\n return job.get_status() or \"scheduled\"\n else:\n return job.get_status() or \"cancelled\"\n+\n+\n+def safe_next_or_default_url(next_url: Optional[str], default: str) -> str:\n+ if next_url is not None and is_safe_url(next_url, settings.ALLOWED_HOSTS):\n+ return next_url\n+ return default\ndiff --git a/amy/autoemails/views.py b/amy/autoemails/views.py\n--- a/amy/autoemails/views.py\n+++ b/amy/autoemails/views.py\n@@ -13,7 +13,7 @@\n from .actions import GenericAction\n from .forms import GenericEmailScheduleForm\n from .models import EmailTemplate, Trigger\n-from .utils import check_status, scheduled_execution_time\n+from .utils import check_status, safe_next_or_default_url, scheduled_execution_time\n \n logger = logging.getLogger(\"amy.signals\")\n scheduler = django_rq.get_scheduler(\"default\")\n@@ -102,9 +102,9 @@\n fail_silently=True,\n )\n \n- return redirect(\n- request.POST.get(\"next\", \"\") or workshop_request.get_absolute_url()\n- )\n+ default_url = workshop_request.get_absolute_url()\n+ next_url = request.POST.get(\"next\", None)\n+ return redirect(safe_next_or_default_url(next_url, default_url))\n \n else:\n messages.error(\n@@ -113,6 +113,6 @@\n fail_silently=True,\n )\n \n- return redirect(\n- request.POST.get(\"next\", \"\") or workshop_request.get_absolute_url()\n- )\n+ default_url = workshop_request.get_absolute_url()\n+ next_url = request.POST.get(\"next\", None)\n+ return redirect(safe_next_or_default_url(next_url, default_url))\n", "issue": "Make URL redirects from `?next` param safer\nThere are a couple places where we redirect to `?next` value without checking if it's safe. There's a `django.http.is_safe_url` function we could use.\n", "before_files": [{"content": "from typing import Union\n\nimport django_rq\nimport pytz\nfrom rq.exceptions import NoSuchJobError\nfrom rq.job import Job\nfrom rq_scheduler.utils import from_unix\n\n\ndef scheduled_execution_time(job_id, scheduler=None, naive=True):\n \"\"\"Get RQ-Scheduler scheduled execution time for specific job.\"\"\"\n _scheduler = scheduler\n if not scheduler:\n _scheduler = django_rq.get_scheduler(\"default\")\n\n # Scheduler keeps jobs in a single key, they are sorted by score, which is\n # scheduled execution time (linux epoch). 
We can retrieve single\n # entry's score.\n time = _scheduler.connection.zscore(_scheduler.scheduled_jobs_key, job_id)\n\n # Convert linux time epoch to UTC.\n if time:\n time = from_unix(time)\n if not naive:\n # By default, RQ-Scheduler uses UTC naive (TZ-unaware) objects,\n # which we can \"convert\" to TZ-aware UTC.\n time = time.replace(tzinfo=pytz.UTC)\n return time\n\n\ndef compare_emails(a, b):\n \"\"\"EmailMultiAlternatives doesn't implement __eq__, so we have to\n cheat our way.\"\"\"\n if a is None and b is None:\n return True\n elif a is None and b or b is None and a:\n return False\n else:\n try:\n return (\n a.to == b.to\n and a.cc == b.cc\n and a.bcc == b.bcc\n and a.reply_to == b.reply_to\n and a.subject == b.subject\n and a.body == b.body\n )\n except AttributeError:\n return False\n\n\ndef check_status(job: Union[str, Job], scheduler=None):\n _scheduler = scheduler\n if not scheduler:\n _scheduler = django_rq.get_scheduler(\"default\")\n\n if not isinstance(job, Job):\n try:\n job = Job.fetch(job, connection=_scheduler.connection)\n except NoSuchJobError:\n return None\n\n scheduled = scheduled_execution_time(job.get_id(), scheduler)\n\n if scheduled:\n return job.get_status() or \"scheduled\"\n else:\n return job.get_status() or \"cancelled\"\n", "path": "amy/autoemails/utils.py"}, {"content": "import logging\n\nfrom django.contrib import messages\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.views.decorators.http import require_POST\nimport django_rq\n\nfrom workshops.models import WorkshopRequest\nfrom workshops.util import admin_required\n\nfrom .actions import GenericAction\nfrom .forms import GenericEmailScheduleForm\nfrom .models import EmailTemplate, Trigger\nfrom .utils import check_status, scheduled_execution_time\n\nlogger = logging.getLogger(\"amy.signals\")\nscheduler = django_rq.get_scheduler(\"default\")\nredis_connection = django_rq.get_connection(\"default\")\n\n\n@require_POST\n@admin_required\ndef generic_schedule_email(request, pk):\n \"\"\"\n Generic view for scheduling an email to be sent.\n \"\"\"\n template_slug = request.POST.get(\"slug\", \"\")\n original_template = get_object_or_404(EmailTemplate, slug=template_slug)\n # Hardcoded, maybe in future respond to other requests, like\n # SelfOrganizedSubmission or WorkshopInquiry\n trigger = get_object_or_404(\n Trigger,\n action__startswith=\"workshop-request-response\",\n template__slug=template_slug,\n active=True,\n )\n form = GenericEmailScheduleForm(request.POST, instance=original_template)\n workshop_request = get_object_or_404(WorkshopRequest, pk=pk)\n\n if form.is_valid():\n template = EmailTemplate(\n slug=form.cleaned_data[\"slug\"],\n subject=form.cleaned_data[\"subject\"],\n to_header=form.cleaned_data[\"to_header\"],\n from_header=form.cleaned_data[\"from_header\"],\n cc_header=form.cleaned_data[\"cc_header\"],\n bcc_header=form.cleaned_data[\"bcc_header\"],\n reply_to_header=form.cleaned_data[\"reply_to_header\"],\n body_template=form.cleaned_data[\"body_template\"],\n )\n\n objects = dict(request=workshop_request)\n if workshop_request.event:\n objects[\"event\"] = workshop_request.event\n objects[\"workshop\"] = workshop_request.event\n\n action = GenericAction(\n trigger=trigger,\n objects=objects,\n )\n action_name = GenericAction.__name__\n launch_at = action.get_launch_at()\n meta = dict(\n action=action,\n template=template,\n launch_at=launch_at,\n email=None,\n context=None,\n 
)\n\n job = scheduler.enqueue_in(launch_at, action, meta=meta)\n logger.debug(\"%s: enqueueing\", action_name)\n scheduled_at = scheduled_execution_time(\n job.get_id(), scheduler=scheduler, naive=False\n )\n logger.debug(\"%s: job created [%r]\", action_name, job)\n\n rqj = workshop_request.rq_jobs.create(\n job_id=job.get_id(),\n trigger=trigger,\n scheduled_execution=scheduled_at,\n status=check_status(job),\n mail_status=\"\",\n event_slug=action.event_slug(),\n recipients=action.all_recipients(),\n )\n\n messages.info(\n request,\n format_html(\n \"New email ({}) was scheduled to run \"\n '<relative-time datetime=\"{}\">{}</relative-time>: '\n '<a href=\"{}\">{}</a>.',\n trigger.get_action_display(),\n scheduled_at.isoformat(),\n \"{:%Y-%m-%d %H:%M}\".format(scheduled_at),\n reverse(\"admin:autoemails_rqjob_preview\", args=[rqj.pk]),\n job.id,\n ),\n fail_silently=True,\n )\n\n return redirect(\n request.POST.get(\"next\", \"\") or workshop_request.get_absolute_url()\n )\n\n else:\n messages.error(\n request,\n f\"Could not send the email due to form errors: {form.errors}\",\n fail_silently=True,\n )\n\n return redirect(\n request.POST.get(\"next\", \"\") or workshop_request.get_absolute_url()\n )\n", "path": "amy/autoemails/views.py"}], "after_files": [{"content": "from typing import Optional, Union\n\nfrom django.conf import settings\nfrom django.utils.http import is_safe_url\nimport django_rq\nimport pytz\nfrom rq.exceptions import NoSuchJobError\nfrom rq.job import Job\nfrom rq_scheduler.utils import from_unix\n\n\ndef scheduled_execution_time(job_id, scheduler=None, naive=True):\n \"\"\"Get RQ-Scheduler scheduled execution time for specific job.\"\"\"\n _scheduler = scheduler\n if not scheduler:\n _scheduler = django_rq.get_scheduler(\"default\")\n\n # Scheduler keeps jobs in a single key, they are sorted by score, which is\n # scheduled execution time (linux epoch). 
We can retrieve single\n # entry's score.\n time = _scheduler.connection.zscore(_scheduler.scheduled_jobs_key, job_id)\n\n # Convert linux time epoch to UTC.\n if time:\n time = from_unix(time)\n if not naive:\n # By default, RQ-Scheduler uses UTC naive (TZ-unaware) objects,\n # which we can \"convert\" to TZ-aware UTC.\n time = time.replace(tzinfo=pytz.UTC)\n return time\n\n\ndef compare_emails(a, b):\n \"\"\"EmailMultiAlternatives doesn't implement __eq__, so we have to\n cheat our way.\"\"\"\n if a is None and b is None:\n return True\n elif a is None and b or b is None and a:\n return False\n else:\n try:\n return (\n a.to == b.to\n and a.cc == b.cc\n and a.bcc == b.bcc\n and a.reply_to == b.reply_to\n and a.subject == b.subject\n and a.body == b.body\n )\n except AttributeError:\n return False\n\n\ndef check_status(job: Union[str, Job], scheduler=None):\n _scheduler = scheduler\n if not scheduler:\n _scheduler = django_rq.get_scheduler(\"default\")\n\n if not isinstance(job, Job):\n try:\n job = Job.fetch(job, connection=_scheduler.connection)\n except NoSuchJobError:\n return None\n\n scheduled = scheduled_execution_time(job.get_id(), scheduler)\n\n if scheduled:\n return job.get_status() or \"scheduled\"\n else:\n return job.get_status() or \"cancelled\"\n\n\ndef safe_next_or_default_url(next_url: Optional[str], default: str) -> str:\n if next_url is not None and is_safe_url(next_url, settings.ALLOWED_HOSTS):\n return next_url\n return default\n", "path": "amy/autoemails/utils.py"}, {"content": "import logging\n\nfrom django.contrib import messages\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.views.decorators.http import require_POST\nimport django_rq\n\nfrom workshops.models import WorkshopRequest\nfrom workshops.util import admin_required\n\nfrom .actions import GenericAction\nfrom .forms import GenericEmailScheduleForm\nfrom .models import EmailTemplate, Trigger\nfrom .utils import check_status, safe_next_or_default_url, scheduled_execution_time\n\nlogger = logging.getLogger(\"amy.signals\")\nscheduler = django_rq.get_scheduler(\"default\")\nredis_connection = django_rq.get_connection(\"default\")\n\n\n@require_POST\n@admin_required\ndef generic_schedule_email(request, pk):\n \"\"\"\n Generic view for scheduling an email to be sent.\n \"\"\"\n template_slug = request.POST.get(\"slug\", \"\")\n original_template = get_object_or_404(EmailTemplate, slug=template_slug)\n # Hardcoded, maybe in future respond to other requests, like\n # SelfOrganizedSubmission or WorkshopInquiry\n trigger = get_object_or_404(\n Trigger,\n action__startswith=\"workshop-request-response\",\n template__slug=template_slug,\n active=True,\n )\n form = GenericEmailScheduleForm(request.POST, instance=original_template)\n workshop_request = get_object_or_404(WorkshopRequest, pk=pk)\n\n if form.is_valid():\n template = EmailTemplate(\n slug=form.cleaned_data[\"slug\"],\n subject=form.cleaned_data[\"subject\"],\n to_header=form.cleaned_data[\"to_header\"],\n from_header=form.cleaned_data[\"from_header\"],\n cc_header=form.cleaned_data[\"cc_header\"],\n bcc_header=form.cleaned_data[\"bcc_header\"],\n reply_to_header=form.cleaned_data[\"reply_to_header\"],\n body_template=form.cleaned_data[\"body_template\"],\n )\n\n objects = dict(request=workshop_request)\n if workshop_request.event:\n objects[\"event\"] = workshop_request.event\n objects[\"workshop\"] = workshop_request.event\n\n action = GenericAction(\n 
trigger=trigger,\n objects=objects,\n )\n action_name = GenericAction.__name__\n launch_at = action.get_launch_at()\n meta = dict(\n action=action,\n template=template,\n launch_at=launch_at,\n email=None,\n context=None,\n )\n\n job = scheduler.enqueue_in(launch_at, action, meta=meta)\n logger.debug(\"%s: enqueueing\", action_name)\n scheduled_at = scheduled_execution_time(\n job.get_id(), scheduler=scheduler, naive=False\n )\n logger.debug(\"%s: job created [%r]\", action_name, job)\n\n rqj = workshop_request.rq_jobs.create(\n job_id=job.get_id(),\n trigger=trigger,\n scheduled_execution=scheduled_at,\n status=check_status(job),\n mail_status=\"\",\n event_slug=action.event_slug(),\n recipients=action.all_recipients(),\n )\n\n messages.info(\n request,\n format_html(\n \"New email ({}) was scheduled to run \"\n '<relative-time datetime=\"{}\">{}</relative-time>: '\n '<a href=\"{}\">{}</a>.',\n trigger.get_action_display(),\n scheduled_at.isoformat(),\n \"{:%Y-%m-%d %H:%M}\".format(scheduled_at),\n reverse(\"admin:autoemails_rqjob_preview\", args=[rqj.pk]),\n job.id,\n ),\n fail_silently=True,\n )\n\n default_url = workshop_request.get_absolute_url()\n next_url = request.POST.get(\"next\", None)\n return redirect(safe_next_or_default_url(next_url, default_url))\n\n else:\n messages.error(\n request,\n f\"Could not send the email due to form errors: {form.errors}\",\n fail_silently=True,\n )\n\n default_url = workshop_request.get_absolute_url()\n next_url = request.POST.get(\"next\", None)\n return redirect(safe_next_or_default_url(next_url, default_url))\n", "path": "amy/autoemails/views.py"}]} | 1,978 | 485 |
gh_patches_debug_36566 | rasdani/github-patches | git_diff | fossasia__open-event-server-4825 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Populate db for permissions of Attendee and Registrar
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
The database does not have permissions for the Attendee and Registrar roles across the different event services.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
The db needs to be populated with permissions for the Attendee and Registrar roles.
**Stacktrace**
<!-- If applicable, add stacktrace to help explain your problem. -->
**Additional details (please complete the following information):**
- OS: UBUNTU 16.04
- Python Version : Python 3.5.2
- `HEAD` Commit hash [e.g. `4629c62`]
**Additional context**
<!-- Add any other context about the problem here. -->
**Wanna work on this issue**
--- END ISSUE ---
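In short, the issue asks for Permission rows covering the Attendee and Registrar roles for every event service. The sketch below is only an illustration of that idea, not the exact patch: it assumes the two roles get ids 5 and 6 (matching the order used in `create_roles()`) and that the code would sit inside `create_permissions()`, where `Role`, `Service`, `Permission`, `get_or_create` and `db` are already available.
```python
# Hypothetical sketch: give Attendee and Registrar view-only access to every service.
attend = Role.query.get(5)   # Attendee (assumed id, per create_roles() order)
regist = Role.query.get(6)   # Registrar (assumed id)

services = [track, session, speaker, sponsor, microlocation]
for role in (attend, regist):
    for service in services:
        perm, _ = get_or_create(Permission, role=role, service=service)
        # read-only: these roles may view, but not create/update/delete
        perm.can_create, perm.can_update, perm.can_delete = False, False, False
        db.session.add(perm)
```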
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `populate_db.py`
Content:
```
1 from app import current_app
2 from app.models import db
3 from app.api.helpers.db import get_or_create # , save_to_db
4
5 # Admin message settings
6 from app.api.helpers.system_mails import MAILS
7 from app.models.message_setting import MessageSettings
8
9 # Event Role-Service Permissions
10 from app.models.role import Role
11 from app.models.service import Service
12 from app.models.permission import Permission
13
14 from app.models.track import Track
15 from app.models.session import Session
16 from app.models.speaker import Speaker
17 from app.models.sponsor import Sponsor
18 from app.models.microlocation import Microlocation
19
20 from app.models.user import ORGANIZER, COORGANIZER, TRACK_ORGANIZER, MODERATOR, ATTENDEE, REGISTRAR
21
22 # Admin Panel Permissions
23 from app.models.panel_permission import PanelPermission
24 from app.models.custom_system_role import CustomSysRole
25
26 from app.models.setting import Setting
27 from app.models.module import Module
28
29 # User Permissions
30 from app.models.user_permission import UserPermission
31 SALES = 'sales'
32
33
34 def create_roles():
35 get_or_create(Role, name=ORGANIZER, title_name='Organizer')
36 get_or_create(Role, name=COORGANIZER, title_name='Co-organizer')
37 get_or_create(Role, name=TRACK_ORGANIZER, title_name='Track Organizer')
38 get_or_create(Role, name=MODERATOR, title_name='Moderator')
39 get_or_create(Role, name=ATTENDEE, title_name='Attendee')
40 get_or_create(Role, name=REGISTRAR, title_name='Registrar')
41
42
43 def create_services():
44 track = Track.get_service_name()
45 session = Session.get_service_name()
46 speaker = Speaker.get_service_name()
47 sponsor = Sponsor.get_service_name()
48 microlocation = Microlocation.get_service_name()
49
50 get_or_create(Service, name=track)
51 get_or_create(Service, name=session)
52 get_or_create(Service, name=speaker)
53 get_or_create(Service, name=sponsor)
54 get_or_create(Service, name=microlocation)
55
56
57 def create_settings():
58 get_or_create(Setting, app_name='Open Event')
59
60
61 def create_modules():
62 get_or_create(Module, donation_include=False)
63
64
65 def create_permissions():
66 orgr = Role.query.get(1)
67 coorgr = Role.query.get(2)
68 track_orgr = Role.query.get(3)
69 mod = Role.query.get(4)
70
71 track = Service.query.get(1)
72 session = Service.query.get(2)
73 speaker = Service.query.get(3)
74 sponsor = Service.query.get(4)
75 microlocation = Service.query.get(5)
76
77 # For ORGANIZER
78 # All four permissions set to True
79 get_or_create(Permission, role=orgr, service=track)
80 get_or_create(Permission, role=orgr, service=session)
81 get_or_create(Permission, role=orgr, service=speaker)
82 get_or_create(Permission, role=orgr, service=sponsor)
83 get_or_create(Permission, role=orgr, service=microlocation)
84
85 # For COORGANIZER
86 perm, _ = get_or_create(Permission, role=coorgr, service=track)
87 perm.can_create, perm.can_delete = False, False
88 db.session.add(perm)
89
90 perm, _ = get_or_create(Permission, role=coorgr, service=session)
91 perm.can_create, perm.can_delete = False, False
92 db.session.add(perm)
93
94 perm, _ = get_or_create(Permission, role=coorgr, service=speaker)
95 perm.can_create, perm.can_delete = False, False
96 db.session.add(perm)
97
98 perm, _ = get_or_create(Permission, role=coorgr, service=sponsor)
99 perm.can_create, perm.can_delete = False, False
100 db.session.add(perm)
101
102 perm, _ = get_or_create(Permission, role=coorgr, service=microlocation)
103 perm.can_create, perm.can_delete = False, False
104 db.session.add(perm)
105
106 # For TRACK_ORGANIZER
107 perm, _ = get_or_create(Permission, role=track_orgr, service=track)
108 db.session.add(perm)
109
110 # For MODERATOR
111 perm, _ = get_or_create(Permission, role=mod, service=track)
112 perm.can_create, perm.can_update, perm.can_delete = False, False, False
113 db.session.add(perm)
114
115
116 def create_custom_sys_roles():
117 role, _ = get_or_create(CustomSysRole, name='Sales Admin')
118 db.session.add(role)
119 role, _ = get_or_create(CustomSysRole, name='Marketer')
120 db.session.add(role)
121
122
123 def create_panel_permissions():
124 sales_admin = CustomSysRole.query.filter_by(name='Sales Admin').first()
125 perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=sales_admin)
126 db.session.add(perm)
127 marketer = CustomSysRole.query.filter_by(name='Marketer').first()
128 perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=marketer)
129 db.session.add(perm)
130
131
132 def create_user_permissions():
133 # Publish Event
134 user_perm, _ = get_or_create(UserPermission, name='publish_event',
135 description='Publish event (make event live)')
136 user_perm.verified_user = True
137 db.session.add(user_perm)
138
139 # Create Event
140 user_perm, _ = get_or_create(UserPermission, name='create_event',
141 description='Create event')
142 user_perm.verified_user, user_perm.unverified_user = True, True
143 db.session.add(user_perm)
144
145
146 def create_admin_message_settings():
147 default_mails = ["Next Event",
148 "Session Schedule Change",
149 "User email",
150 "Invitation For Papers",
151 "After Event",
152 "Ticket(s) Purchased",
153 "Session Accept or Reject",
154 "Event Published",
155 "Event Export Failed",
156 "Event Exported",
157 "Event Role Invitation",
158 "New Session Proposal"]
159 for mail in MAILS:
160 if mail in default_mails:
161 get_or_create(MessageSettings, action=mail, mail_status=1, notification_status=1, user_control_status=1)
162 else:
163 get_or_create(MessageSettings, action=mail, mail_status=0, notification_status=0, user_control_status=0)
164
165
166 def populate():
167 """
168 Create defined Roles, Services and Permissions.
169 """
170 print('Creating roles...')
171 create_roles()
172 print('Creating services...')
173 create_services()
174 print('Creating permissions...')
175 create_permissions()
176 print('Creating custom system roles...')
177 create_custom_sys_roles()
178 print('Creating admin panel permissions...')
179 create_panel_permissions()
180 print('Creating user permissions...')
181 create_user_permissions()
182 print('Creating settings...')
183 create_settings()
184 print('Creating modules...')
185 create_modules()
186 print('Creating admin message settings...')
187 create_admin_message_settings()
188
189
190 def populate_without_print():
191 """
192 Create defined Roles, Services and Permissions.
193 """
194 create_roles()
195 create_services()
196 create_permissions()
197 create_custom_sys_roles()
198 create_panel_permissions()
199 create_user_permissions()
200 create_settings()
201 create_modules()
202 create_admin_message_settings()
203
204 db.session.commit()
205
206
207 if __name__ == '__main__':
208 with current_app.app_context():
209 populate()
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/populate_db.py b/populate_db.py
--- a/populate_db.py
+++ b/populate_db.py
@@ -67,6 +67,8 @@
coorgr = Role.query.get(2)
track_orgr = Role.query.get(3)
mod = Role.query.get(4)
+ attend = Role.query.get(5)
+ regist = Role.query.get(6)
track = Service.query.get(1)
session = Service.query.get(2)
@@ -76,32 +78,17 @@
# For ORGANIZER
# All four permissions set to True
- get_or_create(Permission, role=orgr, service=track)
- get_or_create(Permission, role=orgr, service=session)
- get_or_create(Permission, role=orgr, service=speaker)
- get_or_create(Permission, role=orgr, service=sponsor)
- get_or_create(Permission, role=orgr, service=microlocation)
+ services = [track, session, speaker, sponsor, microlocation]
+ roles = [attend, regist]
+ for service in services:
+ perm, _ = get_or_create(Permission, role=orgr, service=service)
+ db.session.add(perm)
# For COORGANIZER
- perm, _ = get_or_create(Permission, role=coorgr, service=track)
- perm.can_create, perm.can_delete = False, False
- db.session.add(perm)
-
- perm, _ = get_or_create(Permission, role=coorgr, service=session)
- perm.can_create, perm.can_delete = False, False
- db.session.add(perm)
-
- perm, _ = get_or_create(Permission, role=coorgr, service=speaker)
- perm.can_create, perm.can_delete = False, False
- db.session.add(perm)
-
- perm, _ = get_or_create(Permission, role=coorgr, service=sponsor)
- perm.can_create, perm.can_delete = False, False
- db.session.add(perm)
-
- perm, _ = get_or_create(Permission, role=coorgr, service=microlocation)
- perm.can_create, perm.can_delete = False, False
- db.session.add(perm)
+ for service in services:
+ perm, _ = get_or_create(Permission, role=coorgr, service=service)
+ perm.can_create, perm.can_delete = False, False
+ db.session.add(perm)
# For TRACK_ORGANIZER
perm, _ = get_or_create(Permission, role=track_orgr, service=track)
@@ -112,6 +99,15 @@
perm.can_create, perm.can_update, perm.can_delete = False, False, False
db.session.add(perm)
+ # For ATTENDEE and REGISTRAR
+ services = [track, session, speaker, sponsor, microlocation]
+ roles = [attend, regist]
+ for role in roles:
+ for service in services:
+ perm, _ = get_or_create(Permission, role=role, service=service)
+ perm.can_create, perm.can_update, perm.can_delete = False, False, False
+ db.session.add(perm)
+
def create_custom_sys_roles():
role, _ = get_or_create(CustomSysRole, name='Sales Admin')
| {"golden_diff": "diff --git a/populate_db.py b/populate_db.py\n--- a/populate_db.py\n+++ b/populate_db.py\n@@ -67,6 +67,8 @@\n coorgr = Role.query.get(2)\n track_orgr = Role.query.get(3)\n mod = Role.query.get(4)\n+ attend = Role.query.get(5)\n+ regist = Role.query.get(6)\n \n track = Service.query.get(1)\n session = Service.query.get(2)\n@@ -76,32 +78,17 @@\n \n # For ORGANIZER\n # All four permissions set to True\n- get_or_create(Permission, role=orgr, service=track)\n- get_or_create(Permission, role=orgr, service=session)\n- get_or_create(Permission, role=orgr, service=speaker)\n- get_or_create(Permission, role=orgr, service=sponsor)\n- get_or_create(Permission, role=orgr, service=microlocation)\n+ services = [track, session, speaker, sponsor, microlocation]\n+ roles = [attend, regist]\n+ for service in services:\n+ perm, _ = get_or_create(Permission, role=orgr, service=service)\n+ db.session.add(perm)\n \n # For COORGANIZER\n- perm, _ = get_or_create(Permission, role=coorgr, service=track)\n- perm.can_create, perm.can_delete = False, False\n- db.session.add(perm)\n-\n- perm, _ = get_or_create(Permission, role=coorgr, service=session)\n- perm.can_create, perm.can_delete = False, False\n- db.session.add(perm)\n-\n- perm, _ = get_or_create(Permission, role=coorgr, service=speaker)\n- perm.can_create, perm.can_delete = False, False\n- db.session.add(perm)\n-\n- perm, _ = get_or_create(Permission, role=coorgr, service=sponsor)\n- perm.can_create, perm.can_delete = False, False\n- db.session.add(perm)\n-\n- perm, _ = get_or_create(Permission, role=coorgr, service=microlocation)\n- perm.can_create, perm.can_delete = False, False\n- db.session.add(perm)\n+ for service in services:\n+ perm, _ = get_or_create(Permission, role=coorgr, service=service)\n+ perm.can_create, perm.can_delete = False, False\n+ db.session.add(perm)\n \n # For TRACK_ORGANIZER\n perm, _ = get_or_create(Permission, role=track_orgr, service=track)\n@@ -112,6 +99,15 @@\n perm.can_create, perm.can_update, perm.can_delete = False, False, False\n db.session.add(perm)\n \n+ # For ATTENDEE and REGISTRAR\n+ services = [track, session, speaker, sponsor, microlocation]\n+ roles = [attend, regist]\n+ for role in roles:\n+ for service in services:\n+ perm, _ = get_or_create(Permission, role=role, service=service)\n+ perm.can_create, perm.can_update, perm.can_delete = False, False, False\n+ db.session.add(perm)\n+\n \n def create_custom_sys_roles():\n role, _ = get_or_create(CustomSysRole, name='Sales Admin')\n", "issue": "Populate db for permissions of Attendee and Registrar\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\nDatabase is not having permissions of Attendee and Registrar for events different services.\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to '...'\r\n2. Click on '....'\r\n3. Scroll down to '....'\r\n4. See error\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\nPopulating db for Attendee and Registrar role required. in permissions.\r\n**Stacktrace**\r\n<!-- If applicable, add stacktrace to help explain your problem. -->\r\n\r\n**Additional details (please complete the following information):**\r\n - OS: UBUNTU 16.04\r\n - Python Version : Python 3.5.2\r\n - `HEAD` Commit hash [e.g. `4629c62`]\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. 
-->\r\n\r\n**Wanna work on this issue**\n", "before_files": [{"content": "from app import current_app\nfrom app.models import db\nfrom app.api.helpers.db import get_or_create # , save_to_db\n\n# Admin message settings\nfrom app.api.helpers.system_mails import MAILS\nfrom app.models.message_setting import MessageSettings\n\n# Event Role-Service Permissions\nfrom app.models.role import Role\nfrom app.models.service import Service\nfrom app.models.permission import Permission\n\nfrom app.models.track import Track\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.sponsor import Sponsor\nfrom app.models.microlocation import Microlocation\n\nfrom app.models.user import ORGANIZER, COORGANIZER, TRACK_ORGANIZER, MODERATOR, ATTENDEE, REGISTRAR\n\n# Admin Panel Permissions\nfrom app.models.panel_permission import PanelPermission\nfrom app.models.custom_system_role import CustomSysRole\n\nfrom app.models.setting import Setting\nfrom app.models.module import Module\n\n# User Permissions\nfrom app.models.user_permission import UserPermission\nSALES = 'sales'\n\n\ndef create_roles():\n get_or_create(Role, name=ORGANIZER, title_name='Organizer')\n get_or_create(Role, name=COORGANIZER, title_name='Co-organizer')\n get_or_create(Role, name=TRACK_ORGANIZER, title_name='Track Organizer')\n get_or_create(Role, name=MODERATOR, title_name='Moderator')\n get_or_create(Role, name=ATTENDEE, title_name='Attendee')\n get_or_create(Role, name=REGISTRAR, title_name='Registrar')\n\n\ndef create_services():\n track = Track.get_service_name()\n session = Session.get_service_name()\n speaker = Speaker.get_service_name()\n sponsor = Sponsor.get_service_name()\n microlocation = Microlocation.get_service_name()\n\n get_or_create(Service, name=track)\n get_or_create(Service, name=session)\n get_or_create(Service, name=speaker)\n get_or_create(Service, name=sponsor)\n get_or_create(Service, name=microlocation)\n\n\ndef create_settings():\n get_or_create(Setting, app_name='Open Event')\n\n\ndef create_modules():\n get_or_create(Module, donation_include=False)\n\n\ndef create_permissions():\n orgr = Role.query.get(1)\n coorgr = Role.query.get(2)\n track_orgr = Role.query.get(3)\n mod = Role.query.get(4)\n\n track = Service.query.get(1)\n session = Service.query.get(2)\n speaker = Service.query.get(3)\n sponsor = Service.query.get(4)\n microlocation = Service.query.get(5)\n\n # For ORGANIZER\n # All four permissions set to True\n get_or_create(Permission, role=orgr, service=track)\n get_or_create(Permission, role=orgr, service=session)\n get_or_create(Permission, role=orgr, service=speaker)\n get_or_create(Permission, role=orgr, service=sponsor)\n get_or_create(Permission, role=orgr, service=microlocation)\n\n # For COORGANIZER\n perm, _ = get_or_create(Permission, role=coorgr, service=track)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n perm, _ = get_or_create(Permission, role=coorgr, service=session)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n perm, _ = get_or_create(Permission, role=coorgr, service=speaker)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n perm, _ = get_or_create(Permission, role=coorgr, service=sponsor)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n perm, _ = get_or_create(Permission, role=coorgr, service=microlocation)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n # For TRACK_ORGANIZER\n perm, _ = 
get_or_create(Permission, role=track_orgr, service=track)\n db.session.add(perm)\n\n # For MODERATOR\n perm, _ = get_or_create(Permission, role=mod, service=track)\n perm.can_create, perm.can_update, perm.can_delete = False, False, False\n db.session.add(perm)\n\n\ndef create_custom_sys_roles():\n role, _ = get_or_create(CustomSysRole, name='Sales Admin')\n db.session.add(role)\n role, _ = get_or_create(CustomSysRole, name='Marketer')\n db.session.add(role)\n\n\ndef create_panel_permissions():\n sales_admin = CustomSysRole.query.filter_by(name='Sales Admin').first()\n perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=sales_admin)\n db.session.add(perm)\n marketer = CustomSysRole.query.filter_by(name='Marketer').first()\n perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=marketer)\n db.session.add(perm)\n\n\ndef create_user_permissions():\n # Publish Event\n user_perm, _ = get_or_create(UserPermission, name='publish_event',\n description='Publish event (make event live)')\n user_perm.verified_user = True\n db.session.add(user_perm)\n\n # Create Event\n user_perm, _ = get_or_create(UserPermission, name='create_event',\n description='Create event')\n user_perm.verified_user, user_perm.unverified_user = True, True\n db.session.add(user_perm)\n\n\ndef create_admin_message_settings():\n default_mails = [\"Next Event\",\n \"Session Schedule Change\",\n \"User email\",\n \"Invitation For Papers\",\n \"After Event\",\n \"Ticket(s) Purchased\",\n \"Session Accept or Reject\",\n \"Event Published\",\n \"Event Export Failed\",\n \"Event Exported\",\n \"Event Role Invitation\",\n \"New Session Proposal\"]\n for mail in MAILS:\n if mail in default_mails:\n get_or_create(MessageSettings, action=mail, mail_status=1, notification_status=1, user_control_status=1)\n else:\n get_or_create(MessageSettings, action=mail, mail_status=0, notification_status=0, user_control_status=0)\n\n\ndef populate():\n \"\"\"\n Create defined Roles, Services and Permissions.\n \"\"\"\n print('Creating roles...')\n create_roles()\n print('Creating services...')\n create_services()\n print('Creating permissions...')\n create_permissions()\n print('Creating custom system roles...')\n create_custom_sys_roles()\n print('Creating admin panel permissions...')\n create_panel_permissions()\n print('Creating user permissions...')\n create_user_permissions()\n print('Creating settings...')\n create_settings()\n print('Creating modules...')\n create_modules()\n print('Creating admin message settings...')\n create_admin_message_settings()\n\n\ndef populate_without_print():\n \"\"\"\n Create defined Roles, Services and Permissions.\n \"\"\"\n create_roles()\n create_services()\n create_permissions()\n create_custom_sys_roles()\n create_panel_permissions()\n create_user_permissions()\n create_settings()\n create_modules()\n create_admin_message_settings()\n\n db.session.commit()\n\n\nif __name__ == '__main__':\n with current_app.app_context():\n populate()\n", "path": "populate_db.py"}], "after_files": [{"content": "from app import current_app\nfrom app.models import db\nfrom app.api.helpers.db import get_or_create # , save_to_db\n\n# Admin message settings\nfrom app.api.helpers.system_mails import MAILS\nfrom app.models.message_setting import MessageSettings\n\n# Event Role-Service Permissions\nfrom app.models.role import Role\nfrom app.models.service import Service\nfrom app.models.permission import Permission\n\nfrom app.models.track import Track\nfrom app.models.session import Session\nfrom app.models.speaker 
import Speaker\nfrom app.models.sponsor import Sponsor\nfrom app.models.microlocation import Microlocation\n\nfrom app.models.user import ORGANIZER, COORGANIZER, TRACK_ORGANIZER, MODERATOR, ATTENDEE, REGISTRAR\n\n# Admin Panel Permissions\nfrom app.models.panel_permission import PanelPermission\nfrom app.models.custom_system_role import CustomSysRole\n\nfrom app.models.setting import Setting\nfrom app.models.module import Module\n\n# User Permissions\nfrom app.models.user_permission import UserPermission\nSALES = 'sales'\n\n\ndef create_roles():\n get_or_create(Role, name=ORGANIZER, title_name='Organizer')\n get_or_create(Role, name=COORGANIZER, title_name='Co-organizer')\n get_or_create(Role, name=TRACK_ORGANIZER, title_name='Track Organizer')\n get_or_create(Role, name=MODERATOR, title_name='Moderator')\n get_or_create(Role, name=ATTENDEE, title_name='Attendee')\n get_or_create(Role, name=REGISTRAR, title_name='Registrar')\n\n\ndef create_services():\n track = Track.get_service_name()\n session = Session.get_service_name()\n speaker = Speaker.get_service_name()\n sponsor = Sponsor.get_service_name()\n microlocation = Microlocation.get_service_name()\n\n get_or_create(Service, name=track)\n get_or_create(Service, name=session)\n get_or_create(Service, name=speaker)\n get_or_create(Service, name=sponsor)\n get_or_create(Service, name=microlocation)\n\n\ndef create_settings():\n get_or_create(Setting, app_name='Open Event')\n\n\ndef create_modules():\n get_or_create(Module, donation_include=False)\n\n\ndef create_permissions():\n orgr = Role.query.get(1)\n coorgr = Role.query.get(2)\n track_orgr = Role.query.get(3)\n mod = Role.query.get(4)\n attend = Role.query.get(5)\n regist = Role.query.get(6)\n\n track = Service.query.get(1)\n session = Service.query.get(2)\n speaker = Service.query.get(3)\n sponsor = Service.query.get(4)\n microlocation = Service.query.get(5)\n\n # For ORGANIZER\n # All four permissions set to True\n services = [track, session, speaker, sponsor, microlocation]\n roles = [attend, regist]\n for service in services:\n perm, _ = get_or_create(Permission, role=orgr, service=service)\n db.session.add(perm)\n\n # For COORGANIZER\n for service in services:\n perm, _ = get_or_create(Permission, role=coorgr, service=service)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n # For TRACK_ORGANIZER\n perm, _ = get_or_create(Permission, role=track_orgr, service=track)\n db.session.add(perm)\n\n # For MODERATOR\n perm, _ = get_or_create(Permission, role=mod, service=track)\n perm.can_create, perm.can_update, perm.can_delete = False, False, False\n db.session.add(perm)\n\n # For ATTENDEE and REGISTRAR\n services = [track, session, speaker, sponsor, microlocation]\n roles = [attend, regist]\n for role in roles:\n for service in services:\n perm, _ = get_or_create(Permission, role=role, service=service)\n perm.can_create, perm.can_update, perm.can_delete = False, False, False\n db.session.add(perm)\n\n\ndef create_custom_sys_roles():\n role, _ = get_or_create(CustomSysRole, name='Sales Admin')\n db.session.add(role)\n role, _ = get_or_create(CustomSysRole, name='Marketer')\n db.session.add(role)\n\n\ndef create_panel_permissions():\n sales_admin = CustomSysRole.query.filter_by(name='Sales Admin').first()\n perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=sales_admin)\n db.session.add(perm)\n marketer = CustomSysRole.query.filter_by(name='Marketer').first()\n perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=marketer)\n 
db.session.add(perm)\n\n\ndef create_user_permissions():\n # Publish Event\n user_perm, _ = get_or_create(UserPermission, name='publish_event',\n description='Publish event (make event live)')\n user_perm.verified_user = True\n db.session.add(user_perm)\n\n # Create Event\n user_perm, _ = get_or_create(UserPermission, name='create_event',\n description='Create event')\n user_perm.verified_user, user_perm.unverified_user = True, True\n db.session.add(user_perm)\n\n\ndef create_admin_message_settings():\n default_mails = [\"Next Event\",\n \"Session Schedule Change\",\n \"User email\",\n \"Invitation For Papers\",\n \"After Event\",\n \"Ticket(s) Purchased\",\n \"Session Accept or Reject\",\n \"Event Published\",\n \"Event Export Failed\",\n \"Event Exported\",\n \"Event Role Invitation\",\n \"New Session Proposal\"]\n for mail in MAILS:\n if mail in default_mails:\n get_or_create(MessageSettings, action=mail, mail_status=1, notification_status=1, user_control_status=1)\n else:\n get_or_create(MessageSettings, action=mail, mail_status=0, notification_status=0, user_control_status=0)\n\n\ndef populate():\n \"\"\"\n Create defined Roles, Services and Permissions.\n \"\"\"\n print('Creating roles...')\n create_roles()\n print('Creating services...')\n create_services()\n print('Creating permissions...')\n create_permissions()\n print('Creating custom system roles...')\n create_custom_sys_roles()\n print('Creating admin panel permissions...')\n create_panel_permissions()\n print('Creating user permissions...')\n create_user_permissions()\n print('Creating settings...')\n create_settings()\n print('Creating modules...')\n create_modules()\n print('Creating admin message settings...')\n create_admin_message_settings()\n\n\ndef populate_without_print():\n \"\"\"\n Create defined Roles, Services and Permissions.\n \"\"\"\n create_roles()\n create_services()\n create_permissions()\n create_custom_sys_roles()\n create_panel_permissions()\n create_user_permissions()\n create_settings()\n create_modules()\n create_admin_message_settings()\n\n db.session.commit()\n\n\nif __name__ == '__main__':\n with current_app.app_context():\n populate()\n", "path": "populate_db.py"}]} | 2,587 | 768 |
gh_patches_debug_41337 | rasdani/github-patches | git_diff | keras-team__autokeras-388 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TextClassifier preprocessing can't run on cpu
GPU is hardcoded: [text_preprocessor.py](https://github.com/jhfjhfj1/autokeras/blob/master/autokeras/text/text_preprocessor.py#L142)
line 142: device_id_list = GPUtil.getFirstAvailable()
This raises an error when no GPU is available.
line 147: device = '/gpu:0'
please update
--- END ISSUE ---
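One direction consistent with the issue is to drop the hard GPU pinning entirely and let TensorFlow's soft placement pick whatever device exists. The snippet below is a rough sketch under that assumption, using only the TF 1.x / Keras calls the file already imports; it is not presented as the project's actual fix.
```python
# Hypothetical sketch: device-agnostic session setup, no GPUtil and no '/gpu:0' pin.
import tensorflow as tf
from keras import backend

config = tf.ConfigProto(allow_soft_placement=True)  # fall back to CPU when no GPU
config.gpu_options.allow_growth = True              # only relevant if a GPU is present
backend.set_session(tf.Session(config=config))
# ...then build the Embedding model and call model.predict(x_train) as before,
# without GPUtil.getFirstAvailable() or an explicit tf.device('/gpu:0') scope.
```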
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from distutils.core import setup
2 from setuptools import find_packages
3
4 setup(
5 name='autokeras',
6 packages=find_packages(exclude=('tests',)),
7 install_requires=['scipy==1.1.0',
8 'torch==0.4.1',
9 'torchvision==0.2.1',
10 'numpy==1.14.5',
11 'keras==2.2.2',
12 'scikit-learn==0.20.1',
13 'scikit-image==0.13.1',
14 'tqdm==4.25.0',
15 'tensorflow==1.10.0',
16 'imageio==2.4.1',
17 'requests==2.20.1',
18 'GPUtil==1.3.0',
19 'lightgbm==2.2.2',
20 'pandas==0.23.4',
21 'opencv-python==3.4.4.19'],
22 version='0.3.5',
23 description='AutoML for deep learning',
24 author='DATA Lab at Texas A&M University',
25 author_email='[email protected]',
26 url='http://autokeras.com',
27 download_url='https://github.com/jhfjhfj1/autokeras/archive/0.3.5.tar.gz',
28 keywords=['AutoML', 'keras'],
29 classifiers=[]
30 )
31
```
Path: `autokeras/text/text_preprocessor.py`
Content:
```
1 import os
2 import re
3
4 import GPUtil
5 import numpy as np
6
7 from autokeras.constant import Constant
8 from autokeras.utils import download_file_with_extract, temp_path_generator, ensure_dir
9
10
11 def download_pre_train(file_path, extract_path):
12 """Download pre train file from link in constant.py.
13
14 Args:
15 file_path: String, contains download file path + file name.
16 extract_path: String extract path name.
17 """
18 file_link = Constant.PRE_TRAIN_FILE_LINK
19 print("try downloading pre train weights from link %s" % file_link)
20 download_file_with_extract(file_link, file_path=file_path, extract_path=extract_path)
21
22
23 def clean_str(string):
24 """Tokenization/string cleaning for all string.
25
26 Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
27 """
28 string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
29 string = re.sub(r"\'s", " \'s", string)
30 string = re.sub(r"\'ve", " \'ve", string)
31 string = re.sub(r"n\'t", " n\'t", string)
32 string = re.sub(r"\'re", " \'re", string)
33 string = re.sub(r"\'d", " \'d", string)
34 string = re.sub(r"\'ll", " \'ll", string)
35 string = re.sub(r",", " , ", string)
36 string = re.sub(r"!", " ! ", string)
37 string = re.sub(r"\(", " \( ", string)
38 string = re.sub(r"\)", " \) ", string)
39 string = re.sub(r"\?", " \? ", string)
40 string = re.sub(r"\s{2,}", " ", string)
41 return string.strip().lower()
42
43
44 def tokenlize_text(max_num_words, max_seq_length, x_train):
45 """Tokenlize text.
46
47 Vectorize a text corpus by transform each text in texts to a sequence of integers.
48
49 Args:
50 max_num_words: Int, max number of words in the dictionary.
51 max_seq_length: Int, the length of each text sequence, padding if shorter, trim is longer.
52 x_train: List contains text data.
53
54 Returns:
55 x_train: Tokenlized input data.
56 word_index: Dictionary contains word with tokenlized index.
57 """
58 from keras_preprocessing.sequence import pad_sequences
59 from keras_preprocessing.text import Tokenizer
60 print("tokenlizing texts...")
61 tokenizer = Tokenizer(num_words=max_num_words)
62 tokenizer.fit_on_texts(x_train)
63 sequences = tokenizer.texts_to_sequences(x_train)
64 word_index = tokenizer.word_index
65 x_train = pad_sequences(sequences, maxlen=max_seq_length)
66 print("data readed and convert to %d length sequences" % max_seq_length)
67 return x_train, word_index
68
69
70 def read_embedding_index(extract_path):
71 """Read pre train file convert to embedding vector.
72
73 Read the pre trained file into a dictionary where key is the word and value is embedding vector.
74
75 Args:
76 extract_path: String contains pre trained file path.
77
78 Returns:
79 embedding_index: Dictionary contains word with pre trained index.
80 """
81 embedding_index = {}
82 f = open(os.path.join(extract_path, Constant.PRE_TRAIN_FILE_NAME))
83 for line in f:
84 values = line.split()
85 word = values[0]
86 coefs = np.asarray(values[1:], dtype='float32')
87 embedding_index[word] = coefs
88 f.close()
89 return embedding_index
90
91
92 def load_pretrain(path, word_index):
93 """Load the pretrain file into embedding weights.
94
95 This method will first generate the embedding index and then generate
96 embedding matrix according to the word_index.
97
98 Args:
99 path: String, path to store the pretrain files.
100 word_index: Dictionary contains word with tokenlized index.
101
102 Returns:
103 embedding_matrix: Numpy array as the pretrain model embedding layer weights.
104 """
105 print("loading pretrain weights...")
106 file_path = os.path.join(path, Constant.FILE_PATH)
107 extract_path = os.path.join(path, Constant.EXTRACT_PATH)
108 download_pre_train(file_path=file_path, extract_path=extract_path)
109 embedding_index = read_embedding_index(extract_path)
110 print('Total %s word vectors embedded.' % len(embedding_index))
111
112 # convert the pretrained embedding index to weights
113 embedding_matrix = np.random.random((len(word_index) + 1, Constant.EMBEDDING_DIM))
114 for word, i in word_index.items():
115 embedding_vector = embedding_index.get(word)
116 if embedding_vector is not None:
117 embedding_matrix[i] = embedding_vector
118 return embedding_matrix
119
120
121 def processing(path, word_index, input_length, x_train):
122 """Processing string array with pretrained vectors.
123
124 convert an n dimension string array into n * k * m dimension float numpy array. Each k * m array represents
125 a string. k is the input_length which means an upper bound of the string length, for string shorter than
126 k will be pad and longer string will be cropped. m is defined by the pretrained file.
127
128 Args:
129 path: String, path where the pre trained files stored.
130 word_index: Dictionary, contains word with tokenlized index.
131 input_length: Int, an upper bound of the string length.
132 x_train: String array.
133
134 Returns:
135 x_train: Numpy array as processed x_train.
136 """
137 import tensorflow as tf
138
139 embedding_matrix = load_pretrain(path=path, word_index=word_index)
140
141 # Get the first available GPU
142 device_id_list = GPUtil.getFirstAvailable()
143 device_id = device_id_list[0] # grab first element from list
144
145 # Set CUDA_VISIBLE_DEVICES to mask out all other GPUs than the first available device id
146 os.environ["CUDA_VISIBLE_DEVICES"] = str(device_id)
147 device = '/gpu:0'
148 with tf.device(device):
149 from keras import Input, Model
150 from keras import backend
151 from keras.layers import Embedding
152 config = tf.ConfigProto(allow_soft_placement=True)
153 config.gpu_options.allow_growth = True
154 sess = tf.Session(config=config)
155 backend.set_session(sess)
156 print("generating preprocessing model...")
157 embedding_layer = Embedding(len(word_index) + 1,
158 Constant.EMBEDDING_DIM,
159 weights=[embedding_matrix],
160 input_length=input_length,
161 trainable=False)
162
163 sequence_input = Input(shape=(input_length,), dtype='int32')
164 embedded_sequences = embedding_layer(sequence_input)
165 model = Model(sequence_input, embedded_sequences)
166 print("converting text to vector...")
167 x_train = model.predict(x_train)
168 del model
169
170 return x_train
171
172
173 def text_preprocess(x_train):
174 """This is the text preprocess main method.
175
176 It takes an raw string, clean it and processing it into tokenlized numpy array.
177 """
178 if Constant.STORE_PATH == '':
179 temp_path = temp_path_generator()
180 path = temp_path + '_store'
181 else:
182 path = Constant.STORE_PATH
183
184 ensure_dir(path)
185
186 x_train = [clean_str(x) for x in x_train]
187 x_train, word_index = tokenlize_text(max_seq_length=Constant.MAX_SEQUENCE_LENGTH,
188 max_num_words=Constant.MAX_NB_WORDS,
189 x_train=x_train)
190
191 print("generating preprocessing model...")
192 x_train = processing(path=path, word_index=word_index, input_length=Constant.MAX_SEQUENCE_LENGTH, x_train=x_train)
193 return x_train
194
```
Path: `examples/text_cnn/text.py`
Content:
```
1 import pandas as pd
2
3 from autokeras import TextClassifier
4
5
6 def read_csv(file_path):
7 """csv file read example method
8 It helps you to read the csv file into python array
9
10 Attributes:
11 file_path: csv file path
12 """
13
14 print("reading data...")
15 data_train = pd.read_csv(file_path, sep='\t')
16
17 x_train = []
18 y_train = []
19 for idx in range(data_train.review.shape[0]):
20 # Modify this according to each different dataset
21 x_train.append(data_train.review[idx])
22 y_train.append(data_train.sentiment[idx])
23 return x_train, y_train
24
25
26 if __name__ == '__main__':
27 file_path = "labeledTrainData.tsv"
28 x_train, y_train = read_csv(file_path=file_path)
29 clf = TextClassifier(verbose=True)
30 clf.fit(x=x_train, y=y_train, batch_size=10, time_limit=12 * 60 * 60)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/autokeras/text/text_preprocessor.py b/autokeras/text/text_preprocessor.py
--- a/autokeras/text/text_preprocessor.py
+++ b/autokeras/text/text_preprocessor.py
@@ -1,7 +1,6 @@
import os
import re
-import GPUtil
import numpy as np
from autokeras.constant import Constant
@@ -79,7 +78,7 @@
embedding_index: Dictionary contains word with pre trained index.
"""
embedding_index = {}
- f = open(os.path.join(extract_path, Constant.PRE_TRAIN_FILE_NAME))
+ f = open(os.path.join(extract_path, Constant.PRE_TRAIN_FILE_NAME), encoding="utf-8")
for line in f:
values = line.split()
word = values[0]
@@ -138,34 +137,26 @@
embedding_matrix = load_pretrain(path=path, word_index=word_index)
- # Get the first available GPU
- device_id_list = GPUtil.getFirstAvailable()
- device_id = device_id_list[0] # grab first element from list
-
- # Set CUDA_VISIBLE_DEVICES to mask out all other GPUs than the first available device id
- os.environ["CUDA_VISIBLE_DEVICES"] = str(device_id)
- device = '/gpu:0'
- with tf.device(device):
- from keras import Input, Model
- from keras import backend
- from keras.layers import Embedding
- config = tf.ConfigProto(allow_soft_placement=True)
- config.gpu_options.allow_growth = True
- sess = tf.Session(config=config)
- backend.set_session(sess)
- print("generating preprocessing model...")
- embedding_layer = Embedding(len(word_index) + 1,
- Constant.EMBEDDING_DIM,
- weights=[embedding_matrix],
- input_length=input_length,
- trainable=False)
-
- sequence_input = Input(shape=(input_length,), dtype='int32')
- embedded_sequences = embedding_layer(sequence_input)
- model = Model(sequence_input, embedded_sequences)
- print("converting text to vector...")
- x_train = model.predict(x_train)
- del model
+ from keras import Input, Model
+ from keras import backend
+ from keras.layers import Embedding
+ config = tf.ConfigProto(allow_soft_placement=True)
+ config.gpu_options.allow_growth = True
+ sess = tf.Session(config=config)
+ backend.set_session(sess)
+ print("generating preprocessing model...")
+ embedding_layer = Embedding(len(word_index) + 1,
+ Constant.EMBEDDING_DIM,
+ weights=[embedding_matrix],
+ input_length=input_length,
+ trainable=False)
+
+ sequence_input = Input(shape=(input_length,), dtype='int32')
+ embedded_sequences = embedding_layer(sequence_input)
+ model = Model(sequence_input, embedded_sequences)
+ print("converting text to vector...")
+ x_train = model.predict(x_train)
+ del model
return x_train
diff --git a/examples/text_cnn/text.py b/examples/text_cnn/text.py
--- a/examples/text_cnn/text.py
+++ b/examples/text_cnn/text.py
@@ -27,4 +27,4 @@
file_path = "labeledTrainData.tsv"
x_train, y_train = read_csv(file_path=file_path)
clf = TextClassifier(verbose=True)
- clf.fit(x=x_train, y=y_train, batch_size=10, time_limit=12 * 60 * 60)
+ clf.fit(x=x_train, y=y_train, time_limit=12 * 60 * 60)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,6 @@
'tensorflow==1.10.0',
'imageio==2.4.1',
'requests==2.20.1',
- 'GPUtil==1.3.0',
'lightgbm==2.2.2',
'pandas==0.23.4',
'opencv-python==3.4.4.19'],
| {"golden_diff": "diff --git a/autokeras/text/text_preprocessor.py b/autokeras/text/text_preprocessor.py\n--- a/autokeras/text/text_preprocessor.py\n+++ b/autokeras/text/text_preprocessor.py\n@@ -1,7 +1,6 @@\n import os\n import re\n \n-import GPUtil\n import numpy as np\n \n from autokeras.constant import Constant\n@@ -79,7 +78,7 @@\n embedding_index: Dictionary contains word with pre trained index.\n \"\"\"\n embedding_index = {}\n- f = open(os.path.join(extract_path, Constant.PRE_TRAIN_FILE_NAME))\n+ f = open(os.path.join(extract_path, Constant.PRE_TRAIN_FILE_NAME), encoding=\"utf-8\")\n for line in f:\n values = line.split()\n word = values[0]\n@@ -138,34 +137,26 @@\n \n embedding_matrix = load_pretrain(path=path, word_index=word_index)\n \n- # Get the first available GPU\n- device_id_list = GPUtil.getFirstAvailable()\n- device_id = device_id_list[0] # grab first element from list\n-\n- # Set CUDA_VISIBLE_DEVICES to mask out all other GPUs than the first available device id\n- os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(device_id)\n- device = '/gpu:0'\n- with tf.device(device):\n- from keras import Input, Model\n- from keras import backend\n- from keras.layers import Embedding\n- config = tf.ConfigProto(allow_soft_placement=True)\n- config.gpu_options.allow_growth = True\n- sess = tf.Session(config=config)\n- backend.set_session(sess)\n- print(\"generating preprocessing model...\")\n- embedding_layer = Embedding(len(word_index) + 1,\n- Constant.EMBEDDING_DIM,\n- weights=[embedding_matrix],\n- input_length=input_length,\n- trainable=False)\n-\n- sequence_input = Input(shape=(input_length,), dtype='int32')\n- embedded_sequences = embedding_layer(sequence_input)\n- model = Model(sequence_input, embedded_sequences)\n- print(\"converting text to vector...\")\n- x_train = model.predict(x_train)\n- del model\n+ from keras import Input, Model\n+ from keras import backend\n+ from keras.layers import Embedding\n+ config = tf.ConfigProto(allow_soft_placement=True)\n+ config.gpu_options.allow_growth = True\n+ sess = tf.Session(config=config)\n+ backend.set_session(sess)\n+ print(\"generating preprocessing model...\")\n+ embedding_layer = Embedding(len(word_index) + 1,\n+ Constant.EMBEDDING_DIM,\n+ weights=[embedding_matrix],\n+ input_length=input_length,\n+ trainable=False)\n+\n+ sequence_input = Input(shape=(input_length,), dtype='int32')\n+ embedded_sequences = embedding_layer(sequence_input)\n+ model = Model(sequence_input, embedded_sequences)\n+ print(\"converting text to vector...\")\n+ x_train = model.predict(x_train)\n+ del model\n \n return x_train\n \ndiff --git a/examples/text_cnn/text.py b/examples/text_cnn/text.py\n--- a/examples/text_cnn/text.py\n+++ b/examples/text_cnn/text.py\n@@ -27,4 +27,4 @@\n file_path = \"labeledTrainData.tsv\"\n x_train, y_train = read_csv(file_path=file_path)\n clf = TextClassifier(verbose=True)\n- clf.fit(x=x_train, y=y_train, batch_size=10, time_limit=12 * 60 * 60)\n+ clf.fit(x=x_train, y=y_train, time_limit=12 * 60 * 60)\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -15,7 +15,6 @@\n 'tensorflow==1.10.0',\n 'imageio==2.4.1',\n 'requests==2.20.1',\n- 'GPUtil==1.3.0',\n 'lightgbm==2.2.2',\n 'pandas==0.23.4',\n 'opencv-python==3.4.4.19'],\n", "issue": "TextClassifier preprocessing can't run on cpu\nGPU is hardcoded: [text_preprocessor.py](https://github.com/jhfjhfj1/autokeras/blob/master/autokeras/text/text_preprocessor.py#L142)\r\n\r\nline 142: device_id_list = GPUtil.getFirstAvailable()\r\nerror if no gpu\r\n\r\nline 147: device = 
'/gpu:0'\r\nplease update\n", "before_files": [{"content": "from distutils.core import setup\nfrom setuptools import find_packages\n\nsetup(\n name='autokeras',\n packages=find_packages(exclude=('tests',)),\n install_requires=['scipy==1.1.0',\n 'torch==0.4.1',\n 'torchvision==0.2.1',\n 'numpy==1.14.5',\n 'keras==2.2.2',\n 'scikit-learn==0.20.1',\n 'scikit-image==0.13.1',\n 'tqdm==4.25.0',\n 'tensorflow==1.10.0',\n 'imageio==2.4.1',\n 'requests==2.20.1',\n 'GPUtil==1.3.0',\n 'lightgbm==2.2.2',\n 'pandas==0.23.4',\n 'opencv-python==3.4.4.19'],\n version='0.3.5',\n description='AutoML for deep learning',\n author='DATA Lab at Texas A&M University',\n author_email='[email protected]',\n url='http://autokeras.com',\n download_url='https://github.com/jhfjhfj1/autokeras/archive/0.3.5.tar.gz',\n keywords=['AutoML', 'keras'],\n classifiers=[]\n)\n", "path": "setup.py"}, {"content": "import os\nimport re\n\nimport GPUtil\nimport numpy as np\n\nfrom autokeras.constant import Constant\nfrom autokeras.utils import download_file_with_extract, temp_path_generator, ensure_dir\n\n\ndef download_pre_train(file_path, extract_path):\n \"\"\"Download pre train file from link in constant.py.\n\n Args:\n file_path: String, contains download file path + file name.\n extract_path: String extract path name.\n \"\"\"\n file_link = Constant.PRE_TRAIN_FILE_LINK\n print(\"try downloading pre train weights from link %s\" % file_link)\n download_file_with_extract(file_link, file_path=file_path, extract_path=extract_path)\n\n\ndef clean_str(string):\n \"\"\"Tokenization/string cleaning for all string.\n\n Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\n \"\"\"\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()\n\n\ndef tokenlize_text(max_num_words, max_seq_length, x_train):\n \"\"\"Tokenlize text.\n\n Vectorize a text corpus by transform each text in texts to a sequence of integers.\n\n Args:\n max_num_words: Int, max number of words in the dictionary.\n max_seq_length: Int, the length of each text sequence, padding if shorter, trim is longer.\n x_train: List contains text data.\n\n Returns:\n x_train: Tokenlized input data.\n word_index: Dictionary contains word with tokenlized index.\n \"\"\"\n from keras_preprocessing.sequence import pad_sequences\n from keras_preprocessing.text import Tokenizer\n print(\"tokenlizing texts...\")\n tokenizer = Tokenizer(num_words=max_num_words)\n tokenizer.fit_on_texts(x_train)\n sequences = tokenizer.texts_to_sequences(x_train)\n word_index = tokenizer.word_index\n x_train = pad_sequences(sequences, maxlen=max_seq_length)\n print(\"data readed and convert to %d length sequences\" % max_seq_length)\n return x_train, word_index\n\n\ndef read_embedding_index(extract_path):\n \"\"\"Read pre train file convert to embedding vector.\n\n Read the pre trained file into a dictionary where key is the word and value is embedding vector.\n\n Args:\n extract_path: String contains pre trained file path.\n\n Returns:\n embedding_index: Dictionary contains word with pre trained index.\n \"\"\"\n embedding_index = {}\n f = open(os.path.join(extract_path, Constant.PRE_TRAIN_FILE_NAME))\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embedding_index[word] = coefs\n f.close()\n return embedding_index\n\n\ndef load_pretrain(path, word_index):\n \"\"\"Load the pretrain file into embedding weights.\n\n This method will first generate the embedding index and then generate\n embedding matrix according to the word_index.\n\n Args:\n path: String, path to store the pretrain files.\n word_index: Dictionary contains word with tokenlized index.\n\n Returns:\n embedding_matrix: Numpy array as the pretrain model embedding layer weights.\n \"\"\"\n print(\"loading pretrain weights...\")\n file_path = os.path.join(path, Constant.FILE_PATH)\n extract_path = os.path.join(path, Constant.EXTRACT_PATH)\n download_pre_train(file_path=file_path, extract_path=extract_path)\n embedding_index = read_embedding_index(extract_path)\n print('Total %s word vectors embedded.' % len(embedding_index))\n\n # convert the pretrained embedding index to weights\n embedding_matrix = np.random.random((len(word_index) + 1, Constant.EMBEDDING_DIM))\n for word, i in word_index.items():\n embedding_vector = embedding_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n return embedding_matrix\n\n\ndef processing(path, word_index, input_length, x_train):\n \"\"\"Processing string array with pretrained vectors.\n\n convert an n dimension string array into n * k * m dimension float numpy array. Each k * m array represents\n a string. k is the input_length which means an upper bound of the string length, for string shorter than\n k will be pad and longer string will be cropped. 
m is defined by the pretrained file.\n\n Args:\n path: String, path where the pre trained files stored.\n word_index: Dictionary, contains word with tokenlized index.\n input_length: Int, an upper bound of the string length.\n x_train: String array.\n\n Returns:\n x_train: Numpy array as processed x_train.\n \"\"\"\n import tensorflow as tf\n\n embedding_matrix = load_pretrain(path=path, word_index=word_index)\n\n # Get the first available GPU\n device_id_list = GPUtil.getFirstAvailable()\n device_id = device_id_list[0] # grab first element from list\n\n # Set CUDA_VISIBLE_DEVICES to mask out all other GPUs than the first available device id\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(device_id)\n device = '/gpu:0'\n with tf.device(device):\n from keras import Input, Model\n from keras import backend\n from keras.layers import Embedding\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n backend.set_session(sess)\n print(\"generating preprocessing model...\")\n embedding_layer = Embedding(len(word_index) + 1,\n Constant.EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=input_length,\n trainable=False)\n\n sequence_input = Input(shape=(input_length,), dtype='int32')\n embedded_sequences = embedding_layer(sequence_input)\n model = Model(sequence_input, embedded_sequences)\n print(\"converting text to vector...\")\n x_train = model.predict(x_train)\n del model\n\n return x_train\n\n\ndef text_preprocess(x_train):\n \"\"\"This is the text preprocess main method.\n\n It takes an raw string, clean it and processing it into tokenlized numpy array.\n \"\"\"\n if Constant.STORE_PATH == '':\n temp_path = temp_path_generator()\n path = temp_path + '_store'\n else:\n path = Constant.STORE_PATH\n\n ensure_dir(path)\n\n x_train = [clean_str(x) for x in x_train]\n x_train, word_index = tokenlize_text(max_seq_length=Constant.MAX_SEQUENCE_LENGTH,\n max_num_words=Constant.MAX_NB_WORDS,\n x_train=x_train)\n\n print(\"generating preprocessing model...\")\n x_train = processing(path=path, word_index=word_index, input_length=Constant.MAX_SEQUENCE_LENGTH, x_train=x_train)\n return x_train\n", "path": "autokeras/text/text_preprocessor.py"}, {"content": "import pandas as pd\n\nfrom autokeras import TextClassifier\n\n\ndef read_csv(file_path):\n \"\"\"csv file read example method\n It helps you to read the csv file into python array\n\n Attributes:\n file_path: csv file path\n \"\"\"\n\n print(\"reading data...\")\n data_train = pd.read_csv(file_path, sep='\\t')\n\n x_train = []\n y_train = []\n for idx in range(data_train.review.shape[0]):\n # Modify this according to each different dataset\n x_train.append(data_train.review[idx])\n y_train.append(data_train.sentiment[idx])\n return x_train, y_train\n\n\nif __name__ == '__main__':\n file_path = \"labeledTrainData.tsv\"\n x_train, y_train = read_csv(file_path=file_path)\n clf = TextClassifier(verbose=True)\n clf.fit(x=x_train, y=y_train, batch_size=10, time_limit=12 * 60 * 60)\n", "path": "examples/text_cnn/text.py"}], "after_files": [{"content": "from distutils.core import setup\nfrom setuptools import find_packages\n\nsetup(\n name='autokeras',\n packages=find_packages(exclude=('tests',)),\n install_requires=['scipy==1.1.0',\n 'torch==0.4.1',\n 'torchvision==0.2.1',\n 'numpy==1.14.5',\n 'keras==2.2.2',\n 'scikit-learn==0.20.1',\n 'scikit-image==0.13.1',\n 'tqdm==4.25.0',\n 'tensorflow==1.10.0',\n 'imageio==2.4.1',\n 'requests==2.20.1',\n 'lightgbm==2.2.2',\n 
'pandas==0.23.4',\n 'opencv-python==3.4.4.19'],\n version='0.3.5',\n description='AutoML for deep learning',\n author='DATA Lab at Texas A&M University',\n author_email='[email protected]',\n url='http://autokeras.com',\n download_url='https://github.com/jhfjhfj1/autokeras/archive/0.3.5.tar.gz',\n keywords=['AutoML', 'keras'],\n classifiers=[]\n)\n", "path": "setup.py"}, {"content": "import os\nimport re\n\nimport numpy as np\n\nfrom autokeras.constant import Constant\nfrom autokeras.utils import download_file_with_extract, temp_path_generator, ensure_dir\n\n\ndef download_pre_train(file_path, extract_path):\n \"\"\"Download pre train file from link in constant.py.\n\n Args:\n file_path: String, contains download file path + file name.\n extract_path: String extract path name.\n \"\"\"\n file_link = Constant.PRE_TRAIN_FILE_LINK\n print(\"try downloading pre train weights from link %s\" % file_link)\n download_file_with_extract(file_link, file_path=file_path, extract_path=extract_path)\n\n\ndef clean_str(string):\n \"\"\"Tokenization/string cleaning for all string.\n\n Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\n \"\"\"\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()\n\n\ndef tokenlize_text(max_num_words, max_seq_length, x_train):\n \"\"\"Tokenlize text.\n\n Vectorize a text corpus by transform each text in texts to a sequence of integers.\n\n Args:\n max_num_words: Int, max number of words in the dictionary.\n max_seq_length: Int, the length of each text sequence, padding if shorter, trim is longer.\n x_train: List contains text data.\n\n Returns:\n x_train: Tokenlized input data.\n word_index: Dictionary contains word with tokenlized index.\n \"\"\"\n from keras_preprocessing.sequence import pad_sequences\n from keras_preprocessing.text import Tokenizer\n print(\"tokenlizing texts...\")\n tokenizer = Tokenizer(num_words=max_num_words)\n tokenizer.fit_on_texts(x_train)\n sequences = tokenizer.texts_to_sequences(x_train)\n word_index = tokenizer.word_index\n x_train = pad_sequences(sequences, maxlen=max_seq_length)\n print(\"data readed and convert to %d length sequences\" % max_seq_length)\n return x_train, word_index\n\n\ndef read_embedding_index(extract_path):\n \"\"\"Read pre train file convert to embedding vector.\n\n Read the pre trained file into a dictionary where key is the word and value is embedding vector.\n\n Args:\n extract_path: String contains pre trained file path.\n\n Returns:\n embedding_index: Dictionary contains word with pre trained index.\n \"\"\"\n embedding_index = {}\n f = open(os.path.join(extract_path, Constant.PRE_TRAIN_FILE_NAME), encoding=\"utf-8\")\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embedding_index[word] = coefs\n f.close()\n return embedding_index\n\n\ndef load_pretrain(path, word_index):\n \"\"\"Load the pretrain file into embedding 
weights.\n\n This method will first generate the embedding index and then generate\n embedding matrix according to the word_index.\n\n Args:\n path: String, path to store the pretrain files.\n word_index: Dictionary contains word with tokenlized index.\n\n Returns:\n embedding_matrix: Numpy array as the pretrain model embedding layer weights.\n \"\"\"\n print(\"loading pretrain weights...\")\n file_path = os.path.join(path, Constant.FILE_PATH)\n extract_path = os.path.join(path, Constant.EXTRACT_PATH)\n download_pre_train(file_path=file_path, extract_path=extract_path)\n embedding_index = read_embedding_index(extract_path)\n print('Total %s word vectors embedded.' % len(embedding_index))\n\n # convert the pretrained embedding index to weights\n embedding_matrix = np.random.random((len(word_index) + 1, Constant.EMBEDDING_DIM))\n for word, i in word_index.items():\n embedding_vector = embedding_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n return embedding_matrix\n\n\ndef processing(path, word_index, input_length, x_train):\n \"\"\"Processing string array with pretrained vectors.\n\n convert an n dimension string array into n * k * m dimension float numpy array. Each k * m array represents\n a string. k is the input_length which means an upper bound of the string length, for string shorter than\n k will be pad and longer string will be cropped. m is defined by the pretrained file.\n\n Args:\n path: String, path where the pre trained files stored.\n word_index: Dictionary, contains word with tokenlized index.\n input_length: Int, an upper bound of the string length.\n x_train: String array.\n\n Returns:\n x_train: Numpy array as processed x_train.\n \"\"\"\n import tensorflow as tf\n\n embedding_matrix = load_pretrain(path=path, word_index=word_index)\n\n from keras import Input, Model\n from keras import backend\n from keras.layers import Embedding\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n backend.set_session(sess)\n print(\"generating preprocessing model...\")\n embedding_layer = Embedding(len(word_index) + 1,\n Constant.EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=input_length,\n trainable=False)\n\n sequence_input = Input(shape=(input_length,), dtype='int32')\n embedded_sequences = embedding_layer(sequence_input)\n model = Model(sequence_input, embedded_sequences)\n print(\"converting text to vector...\")\n x_train = model.predict(x_train)\n del model\n\n return x_train\n\n\ndef text_preprocess(x_train):\n \"\"\"This is the text preprocess main method.\n\n It takes an raw string, clean it and processing it into tokenlized numpy array.\n \"\"\"\n if Constant.STORE_PATH == '':\n temp_path = temp_path_generator()\n path = temp_path + '_store'\n else:\n path = Constant.STORE_PATH\n\n ensure_dir(path)\n\n x_train = [clean_str(x) for x in x_train]\n x_train, word_index = tokenlize_text(max_seq_length=Constant.MAX_SEQUENCE_LENGTH,\n max_num_words=Constant.MAX_NB_WORDS,\n x_train=x_train)\n\n print(\"generating preprocessing model...\")\n x_train = processing(path=path, word_index=word_index, input_length=Constant.MAX_SEQUENCE_LENGTH, x_train=x_train)\n return x_train\n", "path": "autokeras/text/text_preprocessor.py"}, {"content": "import pandas as pd\n\nfrom autokeras import TextClassifier\n\n\ndef read_csv(file_path):\n \"\"\"csv file read example method\n It helps you to read the csv file into python array\n\n Attributes:\n file_path: csv file 
path\n \"\"\"\n\n print(\"reading data...\")\n data_train = pd.read_csv(file_path, sep='\\t')\n\n x_train = []\n y_train = []\n for idx in range(data_train.review.shape[0]):\n # Modify this according to each different dataset\n x_train.append(data_train.review[idx])\n y_train.append(data_train.sentiment[idx])\n return x_train, y_train\n\n\nif __name__ == '__main__':\n file_path = \"labeledTrainData.tsv\"\n x_train, y_train = read_csv(file_path=file_path)\n clf = TextClassifier(verbose=True)\n clf.fit(x=x_train, y=y_train, time_limit=12 * 60 * 60)\n", "path": "examples/text_cnn/text.py"}]} | 3,116 | 920 |
gh_patches_debug_17764 | rasdani/github-patches | git_diff | joke2k__faker-2038 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BE Vat Numbers should have a modulo 97 check and start with 1 or 0
* Faker version: 25
* OS: OS X Monterey
VAT numbers generated in Belgium (currently set up in the provider ssn for locale nl_BE) should start with 1 or 0 and have a control number with a modulo 97 check.
Refer to https://en.wikipedia.org/wiki/VAT_identification_number
### Steps to reproduce
1. Generate vat_id with locale nl_BE
### Expected behavior
VAT numbers should start with BE0 or BE1 and have a modulo 97 check as the last two digits
--- END ISSUE ---
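For reference, the rule described in the issue boils down to a two-digit checksum equal to 97 minus the first eight digits taken modulo 97. The sketch below is illustrative only; the function name and sample number are made up and are not part of Faker:

```python
def be_vat_check_digits(first_eight: str) -> str:
    """Check digits for a Belgian VAT body such as '0#######' or '1#######'."""
    return f"{97 - (int(first_eight) % 97):0>2}"

assert be_vat_check_digits("01234567") == "49"  # so "BE0123456749" satisfies the check
```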
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/ssn/nl_BE/__init__.py`
Content:
```
1 from .. import Provider as SsnProvider
2
3 """
4 For more info on rijksregisternummer, see https://nl.wikipedia.org/wiki/Rijksregisternummer
5 Dutch/French only for now ...
6 """
7
8
9 class Provider(SsnProvider):
10 def ssn(self) -> str:
11 """
12 Returns a 11 digits Belgian SSN called "rijksregisternummer" as a string
13
14 The first 6 digits represent the birthdate with (in order) year, month and day.
15 The second group of 3 digits is represents a sequence number (order of birth).
16 It is even for women and odd for men.
17 For men the range starts at 1 and ends 997, for women 2 until 998.
18 The third group of 2 digits is a checksum based on the previous 9 digits (modulo 97).
19 Divide those 9 digits by 97, subtract the remainder from 97 and that's the result.
20 For persons born in or after 2000, the 9 digit number needs to be proceeded by a 2
21 (add 2000000000) before the division by 97.
22
23 """
24
25 # see http://nl.wikipedia.org/wiki/Burgerservicenummer (in Dutch)
26 def _checksum(digits):
27 res = 97 - (digits % 97)
28 return res
29
30 # Generate a date (random)
31 mydate = self.generator.date()
32 # Convert it to an int
33 elms = mydate.split("-")
34 # Adjust for year 2000 if necessary
35 if elms[0][0] == "2":
36 above = True
37 else:
38 above = False
39 # Only keep the last 2 digits of the year
40 elms[0] = elms[0][2:4]
41 # Simulate the gender/sequence - should be 3 digits
42 seq = self.generator.random_int(1, 998)
43 # Right justify sequence and append to list
44 seq_str = f"{seq:0>3}"
45 elms.append(seq_str)
46 # Now convert list to an integer so the checksum can be calculated
47 date_as_int = int("".join(elms))
48 if above:
49 date_as_int += 2000000000
50 # Generate checksum
51 s = _checksum(date_as_int)
52 s_rjust = f"{s:0>2}"
53 # return result as a string
54 elms.append(s_rjust)
55 return "".join(elms)
56
57 vat_id_formats = ("BE##########",)
58
59 def vat_id(self) -> str:
60 """
61 http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
62 :return: A random Belgian VAT ID
63 """
64 return self.bothify(self.random_element(self.vat_id_formats))
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/faker/providers/ssn/nl_BE/__init__.py b/faker/providers/ssn/nl_BE/__init__.py
--- a/faker/providers/ssn/nl_BE/__init__.py
+++ b/faker/providers/ssn/nl_BE/__init__.py
@@ -57,8 +57,24 @@
vat_id_formats = ("BE##########",)
def vat_id(self) -> str:
+
+ vat_id_random_section = (
+ '#######'
+ )
+
+ vat_id_possible_initial_numbers = (
+ '0',
+ '1'
+ )
"""
http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
- :return: A random Belgian VAT ID
+ https://en.wikipedia.org/wiki/VAT_identification_number
+ :return: A random Belgian VAT ID starting with 0 or 1 and has a correct checksum with a modulo 97 check
"""
- return self.bothify(self.random_element(self.vat_id_formats))
+ generated_initial_number = self.random_element(vat_id_possible_initial_numbers)
+ vat_without_check = self.bothify(generated_initial_number + vat_id_random_section)
+ vat_as_int = int(vat_without_check)
+ vat_check = 97 - (vat_as_int % 97)
+ vat_check_str = f"{vat_check:0>2}"
+
+ return "BE" + vat_without_check + vat_check_str
| {"golden_diff": "diff --git a/faker/providers/ssn/nl_BE/__init__.py b/faker/providers/ssn/nl_BE/__init__.py\n--- a/faker/providers/ssn/nl_BE/__init__.py\n+++ b/faker/providers/ssn/nl_BE/__init__.py\n@@ -57,8 +57,24 @@\n vat_id_formats = (\"BE##########\",)\n \n def vat_id(self) -> str:\n+\n+ vat_id_random_section = (\n+ '#######'\n+ )\n+\n+ vat_id_possible_initial_numbers = (\n+ '0',\n+ '1'\n+ )\n \"\"\"\n http://ec.europa.eu/taxation_customs/vies/faq.html#item_11\n- :return: A random Belgian VAT ID\n+ https://en.wikipedia.org/wiki/VAT_identification_number\n+ :return: A random Belgian VAT ID starting with 0 or 1 and has a correct checksum with a modulo 97 check\n \"\"\"\n- return self.bothify(self.random_element(self.vat_id_formats))\n+ generated_initial_number = self.random_element(vat_id_possible_initial_numbers)\n+ vat_without_check = self.bothify(generated_initial_number + vat_id_random_section)\n+ vat_as_int = int(vat_without_check)\n+ vat_check = 97 - (vat_as_int % 97)\n+ vat_check_str = f\"{vat_check:0>2}\"\n+\n+ return \"BE\" + vat_without_check + vat_check_str\n", "issue": "BE Vat Numbers should have a modulo 97 check and start wit 1 or 0\n* Faker version: 25\r\n* OS: OS. X Monterery\r\n\r\nVAT Numbers generated in Belgium (currently set-up in the provider ssn for locale nl_BE should start with 1 or 0 and have a controle number with a module 97 check.\r\n\r\nRefer to https://en.wikipedia.org/wiki/VAT_identification_number\r\n\r\n### Steps to reproduce\r\n\r\n1. Generate vat_id wit locale nl_BE\r\n\r\n\r\n### Expected behavior\r\n\r\nVat numbers should be starting with BE0 or BE1 and have a module 97 check as the last two numbers\r\n\r\n\r\n\n", "before_files": [{"content": "from .. import Provider as SsnProvider\n\n\"\"\"\nFor more info on rijksregisternummer, see https://nl.wikipedia.org/wiki/Rijksregisternummer\nDutch/French only for now ...\n\"\"\"\n\n\nclass Provider(SsnProvider):\n def ssn(self) -> str:\n \"\"\"\n Returns a 11 digits Belgian SSN called \"rijksregisternummer\" as a string\n\n The first 6 digits represent the birthdate with (in order) year, month and day.\n The second group of 3 digits is represents a sequence number (order of birth).\n It is even for women and odd for men.\n For men the range starts at 1 and ends 997, for women 2 until 998.\n The third group of 2 digits is a checksum based on the previous 9 digits (modulo 97).\n Divide those 9 digits by 97, subtract the remainder from 97 and that's the result.\n For persons born in or after 2000, the 9 digit number needs to be proceeded by a 2\n (add 2000000000) before the division by 97.\n\n \"\"\"\n\n # see http://nl.wikipedia.org/wiki/Burgerservicenummer (in Dutch)\n def _checksum(digits):\n res = 97 - (digits % 97)\n return res\n\n # Generate a date (random)\n mydate = self.generator.date()\n # Convert it to an int\n elms = mydate.split(\"-\")\n # Adjust for year 2000 if necessary\n if elms[0][0] == \"2\":\n above = True\n else:\n above = False\n # Only keep the last 2 digits of the year\n elms[0] = elms[0][2:4]\n # Simulate the gender/sequence - should be 3 digits\n seq = self.generator.random_int(1, 998)\n # Right justify sequence and append to list\n seq_str = f\"{seq:0>3}\"\n elms.append(seq_str)\n # Now convert list to an integer so the checksum can be calculated\n date_as_int = int(\"\".join(elms))\n if above:\n date_as_int += 2000000000\n # Generate checksum\n s = _checksum(date_as_int)\n s_rjust = f\"{s:0>2}\"\n # return result as a string\n elms.append(s_rjust)\n return \"\".join(elms)\n\n 
vat_id_formats = (\"BE##########\",)\n\n def vat_id(self) -> str:\n \"\"\"\n http://ec.europa.eu/taxation_customs/vies/faq.html#item_11\n :return: A random Belgian VAT ID\n \"\"\"\n return self.bothify(self.random_element(self.vat_id_formats))\n", "path": "faker/providers/ssn/nl_BE/__init__.py"}], "after_files": [{"content": "from .. import Provider as SsnProvider\n\n\"\"\"\nFor more info on rijksregisternummer, see https://nl.wikipedia.org/wiki/Rijksregisternummer\nDutch/French only for now ...\n\"\"\"\n\n\nclass Provider(SsnProvider):\n def ssn(self) -> str:\n \"\"\"\n Returns a 11 digits Belgian SSN called \"rijksregisternummer\" as a string\n\n The first 6 digits represent the birthdate with (in order) year, month and day.\n The second group of 3 digits is represents a sequence number (order of birth).\n It is even for women and odd for men.\n For men the range starts at 1 and ends 997, for women 2 until 998.\n The third group of 2 digits is a checksum based on the previous 9 digits (modulo 97).\n Divide those 9 digits by 97, subtract the remainder from 97 and that's the result.\n For persons born in or after 2000, the 9 digit number needs to be proceeded by a 2\n (add 2000000000) before the division by 97.\n\n \"\"\"\n\n # see http://nl.wikipedia.org/wiki/Burgerservicenummer (in Dutch)\n def _checksum(digits):\n res = 97 - (digits % 97)\n return res\n\n # Generate a date (random)\n mydate = self.generator.date()\n # Convert it to an int\n elms = mydate.split(\"-\")\n # Adjust for year 2000 if necessary\n if elms[0][0] == \"2\":\n above = True\n else:\n above = False\n # Only keep the last 2 digits of the year\n elms[0] = elms[0][2:4]\n # Simulate the gender/sequence - should be 3 digits\n seq = self.generator.random_int(1, 998)\n # Right justify sequence and append to list\n seq_str = f\"{seq:0>3}\"\n elms.append(seq_str)\n # Now convert list to an integer so the checksum can be calculated\n date_as_int = int(\"\".join(elms))\n if above:\n date_as_int += 2000000000\n # Generate checksum\n s = _checksum(date_as_int)\n s_rjust = f\"{s:0>2}\"\n # return result as a string\n elms.append(s_rjust)\n return \"\".join(elms)\n\n vat_id_formats = (\"BE##########\",)\n\n def vat_id(self) -> str:\n\n vat_id_random_section = (\n '#######'\n )\n\n vat_id_possible_initial_numbers = (\n '0',\n '1'\n )\n \"\"\"\n http://ec.europa.eu/taxation_customs/vies/faq.html#item_11\n https://en.wikipedia.org/wiki/VAT_identification_number\n :return: A random Belgian VAT ID starting with 0 or 1 and has a correct checksum with a modulo 97 check\n \"\"\"\n generated_initial_number = self.random_element(vat_id_possible_initial_numbers)\n vat_without_check = self.bothify(generated_initial_number + vat_id_random_section)\n vat_as_int = int(vat_without_check)\n vat_check = 97 - (vat_as_int % 97)\n vat_check_str = f\"{vat_check:0>2}\"\n\n return \"BE\" + vat_without_check + vat_check_str\n", "path": "faker/providers/ssn/nl_BE/__init__.py"}]} | 1,165 | 329 |
gh_patches_debug_8663 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-758 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError: mmap length is greater than file size
I've been using pyqtgraph for many months and it's amazing. But recently, after updating my macOS to High Sierra 10.13.6, I'm facing the following issue:
`ValueError: mmap length is greater than file size`
When I try to run the pyqtgraph remote plotting example, the same error is thrown.
Complete log is [here](https://gist.github.com/sachinsngh165/65df4e4f3dc81a283379dc032cf48220)
I'm using PyQt4 + Python 2.7.
The same error also occurs with PyQt5 + Python 3.
Please help me out; it's urgent as I have to complete my project within a week.
--- END ISSUE ---
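This ValueError typically means that the bytes just written to the temporary file have not reached the file on disk at the moment mmap() measures its size. A minimal sketch of the failure mode, assuming the same NamedTemporaryFile pattern that RemoteGraphicsView.py uses (see the Renderer code below):

```python
import mmap
import tempfile

f = tempfile.NamedTemporaryFile(prefix='pyqtgraph_shmem_')
f.write(b'\x00' * (mmap.PAGESIZE + 1))
# Without an explicit flush the data can still sit in the file object's
# buffer, so the on-disk size may be 0 and the mmap call below raises
# "ValueError: mmap length is greater than file size".
f.flush()
shm = mmap.mmap(f.fileno(), mmap.PAGESIZE, mmap.MAP_SHARED, mmap.PROT_WRITE)
```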
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/widgets/RemoteGraphicsView.py`
Content:
```
1 from ..Qt import QtGui, QtCore, QT_LIB
2 if QT_LIB in ['PyQt4', 'PyQt5']:
3 import sip
4 from .. import multiprocess as mp
5 from .GraphicsView import GraphicsView
6 from .. import CONFIG_OPTIONS
7 import numpy as np
8 import mmap, tempfile, ctypes, atexit, sys, random
9
10 __all__ = ['RemoteGraphicsView']
11
12 class RemoteGraphicsView(QtGui.QWidget):
13 """
14 Replacement for GraphicsView that does all scene management and rendering on a remote process,
15 while displaying on the local widget.
16
17 GraphicsItems must be created by proxy to the remote process.
18
19 """
20 def __init__(self, parent=None, *args, **kwds):
21 """
22 The keyword arguments 'useOpenGL' and 'backgound', if specified, are passed to the remote
23 GraphicsView.__init__(). All other keyword arguments are passed to multiprocess.QtProcess.__init__().
24 """
25 self._img = None
26 self._imgReq = None
27 self._sizeHint = (640,480) ## no clue why this is needed, but it seems to be the default sizeHint for GraphicsView.
28 ## without it, the widget will not compete for space against another GraphicsView.
29 QtGui.QWidget.__init__(self)
30
31 # separate local keyword arguments from remote.
32 remoteKwds = {}
33 for kwd in ['useOpenGL', 'background']:
34 if kwd in kwds:
35 remoteKwds[kwd] = kwds.pop(kwd)
36
37 self._proc = mp.QtProcess(**kwds)
38 self.pg = self._proc._import('pyqtgraph')
39 self.pg.setConfigOptions(**CONFIG_OPTIONS)
40 rpgRemote = self._proc._import('pyqtgraph.widgets.RemoteGraphicsView')
41 self._view = rpgRemote.Renderer(*args, **remoteKwds)
42 self._view._setProxyOptions(deferGetattr=True)
43
44 self.setFocusPolicy(QtCore.Qt.StrongFocus)
45 self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
46 self.setMouseTracking(True)
47 self.shm = None
48 shmFileName = self._view.shmFileName()
49 if sys.platform.startswith('win'):
50 self.shmtag = shmFileName
51 else:
52 self.shmFile = open(shmFileName, 'r')
53
54 self._view.sceneRendered.connect(mp.proxy(self.remoteSceneChanged)) #, callSync='off'))
55 ## Note: we need synchronous signals
56 ## even though there is no return value--
57 ## this informs the renderer that it is
58 ## safe to begin rendering again.
59
60 for method in ['scene', 'setCentralItem']:
61 setattr(self, method, getattr(self._view, method))
62
63 def resizeEvent(self, ev):
64 ret = QtGui.QWidget.resizeEvent(self, ev)
65 self._view.resize(self.size(), _callSync='off')
66 return ret
67
68 def sizeHint(self):
69 return QtCore.QSize(*self._sizeHint)
70
71 def remoteSceneChanged(self, data):
72 w, h, size, newfile = data
73 #self._sizeHint = (whint, hhint)
74 if self.shm is None or self.shm.size != size:
75 if self.shm is not None:
76 self.shm.close()
77 if sys.platform.startswith('win'):
78 self.shmtag = newfile ## on windows, we create a new tag for every resize
79 self.shm = mmap.mmap(-1, size, self.shmtag) ## can't use tmpfile on windows because the file can only be opened once.
80 elif sys.platform == 'darwin':
81 self.shmFile.close()
82 self.shmFile = open(self._view.shmFileName(), 'r')
83 self.shm = mmap.mmap(self.shmFile.fileno(), size, mmap.MAP_SHARED, mmap.PROT_READ)
84 else:
85 self.shm = mmap.mmap(self.shmFile.fileno(), size, mmap.MAP_SHARED, mmap.PROT_READ)
86 self.shm.seek(0)
87 data = self.shm.read(w*h*4)
88 self._img = QtGui.QImage(data, w, h, QtGui.QImage.Format_ARGB32)
89 self._img.data = data # data must be kept alive or PySide 1.2.1 (and probably earlier) will crash.
90 self.update()
91
92 def paintEvent(self, ev):
93 if self._img is None:
94 return
95 p = QtGui.QPainter(self)
96 p.drawImage(self.rect(), self._img, QtCore.QRect(0, 0, self._img.width(), self._img.height()))
97 p.end()
98
99 def mousePressEvent(self, ev):
100 self._view.mousePressEvent(int(ev.type()), ev.pos(), ev.globalPos(), int(ev.button()), int(ev.buttons()), int(ev.modifiers()), _callSync='off')
101 ev.accept()
102 return QtGui.QWidget.mousePressEvent(self, ev)
103
104 def mouseReleaseEvent(self, ev):
105 self._view.mouseReleaseEvent(int(ev.type()), ev.pos(), ev.globalPos(), int(ev.button()), int(ev.buttons()), int(ev.modifiers()), _callSync='off')
106 ev.accept()
107 return QtGui.QWidget.mouseReleaseEvent(self, ev)
108
109 def mouseMoveEvent(self, ev):
110 self._view.mouseMoveEvent(int(ev.type()), ev.pos(), ev.globalPos(), int(ev.button()), int(ev.buttons()), int(ev.modifiers()), _callSync='off')
111 ev.accept()
112 return QtGui.QWidget.mouseMoveEvent(self, ev)
113
114 def wheelEvent(self, ev):
115 self._view.wheelEvent(ev.pos(), ev.globalPos(), ev.delta(), int(ev.buttons()), int(ev.modifiers()), int(ev.orientation()), _callSync='off')
116 ev.accept()
117 return QtGui.QWidget.wheelEvent(self, ev)
118
119 def keyEvent(self, ev):
120 if self._view.keyEvent(int(ev.type()), int(ev.modifiers()), text, autorep, count):
121 ev.accept()
122 return QtGui.QWidget.keyEvent(self, ev)
123
124 def enterEvent(self, ev):
125 self._view.enterEvent(int(ev.type()), _callSync='off')
126 return QtGui.QWidget.enterEvent(self, ev)
127
128 def leaveEvent(self, ev):
129 self._view.leaveEvent(int(ev.type()), _callSync='off')
130 return QtGui.QWidget.leaveEvent(self, ev)
131
132 def remoteProcess(self):
133 """Return the remote process handle. (see multiprocess.remoteproxy.RemoteEventHandler)"""
134 return self._proc
135
136 def close(self):
137 """Close the remote process. After this call, the widget will no longer be updated."""
138 self._proc.close()
139
140
141 class Renderer(GraphicsView):
142 ## Created by the remote process to handle render requests
143
144 sceneRendered = QtCore.Signal(object)
145
146 def __init__(self, *args, **kwds):
147 ## Create shared memory for rendered image
148 #pg.dbg(namespace={'r': self})
149 if sys.platform.startswith('win'):
150 self.shmtag = "pyqtgraph_shmem_" + ''.join([chr((random.getrandbits(20)%25) + 97) for i in range(20)])
151 self.shm = mmap.mmap(-1, mmap.PAGESIZE, self.shmtag) # use anonymous mmap on windows
152 else:
153 self.shmFile = tempfile.NamedTemporaryFile(prefix='pyqtgraph_shmem_')
154 self.shmFile.write(b'\x00' * (mmap.PAGESIZE+1))
155 fd = self.shmFile.fileno()
156 self.shm = mmap.mmap(fd, mmap.PAGESIZE, mmap.MAP_SHARED, mmap.PROT_WRITE)
157 atexit.register(self.close)
158
159 GraphicsView.__init__(self, *args, **kwds)
160 self.scene().changed.connect(self.update)
161 self.img = None
162 self.renderTimer = QtCore.QTimer()
163 self.renderTimer.timeout.connect(self.renderView)
164 self.renderTimer.start(16)
165
166 def close(self):
167 self.shm.close()
168 if not sys.platform.startswith('win'):
169 self.shmFile.close()
170
171 def shmFileName(self):
172 if sys.platform.startswith('win'):
173 return self.shmtag
174 else:
175 return self.shmFile.name
176
177 def update(self):
178 self.img = None
179 return GraphicsView.update(self)
180
181 def resize(self, size):
182 oldSize = self.size()
183 GraphicsView.resize(self, size)
184 self.resizeEvent(QtGui.QResizeEvent(size, oldSize))
185 self.update()
186
187 def renderView(self):
188 if self.img is None:
189 ## make sure shm is large enough and get its address
190 if self.width() == 0 or self.height() == 0:
191 return
192 size = self.width() * self.height() * 4
193 if size > self.shm.size():
194 if sys.platform.startswith('win'):
195 ## windows says "WindowsError: [Error 87] the parameter is incorrect" if we try to resize the mmap
196 self.shm.close()
197 ## it also says (sometimes) 'access is denied' if we try to reuse the tag.
198 self.shmtag = "pyqtgraph_shmem_" + ''.join([chr((random.getrandbits(20)%25) + 97) for i in range(20)])
199 self.shm = mmap.mmap(-1, size, self.shmtag)
200 elif sys.platform == 'darwin':
201 self.shm.close()
202 self.shmFile.close()
203 self.shmFile = tempfile.NamedTemporaryFile(prefix='pyqtgraph_shmem_')
204 self.shmFile.write(b'\x00' * (size + 1))
205 self.shmFile.flush()
206 self.shm = mmap.mmap(self.shmFile.fileno(), size, mmap.MAP_SHARED, mmap.PROT_WRITE)
207 else:
208 self.shm.resize(size)
209
210 ## render the scene directly to shared memory
211 if QT_LIB in ['PySide', 'PySide2']:
212 ch = ctypes.c_char.from_buffer(self.shm, 0)
213 #ch = ctypes.c_char_p(address)
214 self.img = QtGui.QImage(ch, self.width(), self.height(), QtGui.QImage.Format_ARGB32)
215 else:
216 address = ctypes.addressof(ctypes.c_char.from_buffer(self.shm, 0))
217
218 # different versions of pyqt have different requirements here..
219 try:
220 self.img = QtGui.QImage(sip.voidptr(address), self.width(), self.height(), QtGui.QImage.Format_ARGB32)
221 except TypeError:
222 try:
223 self.img = QtGui.QImage(memoryview(buffer(self.shm)), self.width(), self.height(), QtGui.QImage.Format_ARGB32)
224 except TypeError:
225 # Works on PyQt 4.9.6
226 self.img = QtGui.QImage(address, self.width(), self.height(), QtGui.QImage.Format_ARGB32)
227 self.img.fill(0xffffffff)
228 p = QtGui.QPainter(self.img)
229 self.render(p, self.viewRect(), self.rect())
230 p.end()
231 self.sceneRendered.emit((self.width(), self.height(), self.shm.size(), self.shmFileName()))
232
233 def mousePressEvent(self, typ, pos, gpos, btn, btns, mods):
234 typ = QtCore.QEvent.Type(typ)
235 btn = QtCore.Qt.MouseButton(btn)
236 btns = QtCore.Qt.MouseButtons(btns)
237 mods = QtCore.Qt.KeyboardModifiers(mods)
238 return GraphicsView.mousePressEvent(self, QtGui.QMouseEvent(typ, pos, gpos, btn, btns, mods))
239
240 def mouseMoveEvent(self, typ, pos, gpos, btn, btns, mods):
241 typ = QtCore.QEvent.Type(typ)
242 btn = QtCore.Qt.MouseButton(btn)
243 btns = QtCore.Qt.MouseButtons(btns)
244 mods = QtCore.Qt.KeyboardModifiers(mods)
245 return GraphicsView.mouseMoveEvent(self, QtGui.QMouseEvent(typ, pos, gpos, btn, btns, mods))
246
247 def mouseReleaseEvent(self, typ, pos, gpos, btn, btns, mods):
248 typ = QtCore.QEvent.Type(typ)
249 btn = QtCore.Qt.MouseButton(btn)
250 btns = QtCore.Qt.MouseButtons(btns)
251 mods = QtCore.Qt.KeyboardModifiers(mods)
252 return GraphicsView.mouseReleaseEvent(self, QtGui.QMouseEvent(typ, pos, gpos, btn, btns, mods))
253
254 def wheelEvent(self, pos, gpos, d, btns, mods, ori):
255 btns = QtCore.Qt.MouseButtons(btns)
256 mods = QtCore.Qt.KeyboardModifiers(mods)
257 ori = (None, QtCore.Qt.Horizontal, QtCore.Qt.Vertical)[ori]
258 return GraphicsView.wheelEvent(self, QtGui.QWheelEvent(pos, gpos, d, btns, mods, ori))
259
260 def keyEvent(self, typ, mods, text, autorep, count):
261 typ = QtCore.QEvent.Type(typ)
262 mods = QtCore.Qt.KeyboardModifiers(mods)
263 GraphicsView.keyEvent(self, QtGui.QKeyEvent(typ, mods, text, autorep, count))
264 return ev.accepted()
265
266 def enterEvent(self, typ):
267 ev = QtCore.QEvent(QtCore.QEvent.Type(typ))
268 return GraphicsView.enterEvent(self, ev)
269
270 def leaveEvent(self, typ):
271 ev = QtCore.QEvent(QtCore.QEvent.Type(typ))
272 return GraphicsView.leaveEvent(self, ev)
273
274
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyqtgraph/widgets/RemoteGraphicsView.py b/pyqtgraph/widgets/RemoteGraphicsView.py
--- a/pyqtgraph/widgets/RemoteGraphicsView.py
+++ b/pyqtgraph/widgets/RemoteGraphicsView.py
@@ -152,6 +152,7 @@
else:
self.shmFile = tempfile.NamedTemporaryFile(prefix='pyqtgraph_shmem_')
self.shmFile.write(b'\x00' * (mmap.PAGESIZE+1))
+ self.shmFile.flush()
fd = self.shmFile.fileno()
self.shm = mmap.mmap(fd, mmap.PAGESIZE, mmap.MAP_SHARED, mmap.PROT_WRITE)
atexit.register(self.close)
| {"golden_diff": "diff --git a/pyqtgraph/widgets/RemoteGraphicsView.py b/pyqtgraph/widgets/RemoteGraphicsView.py\n--- a/pyqtgraph/widgets/RemoteGraphicsView.py\n+++ b/pyqtgraph/widgets/RemoteGraphicsView.py\n@@ -152,6 +152,7 @@\n else:\n self.shmFile = tempfile.NamedTemporaryFile(prefix='pyqtgraph_shmem_')\n self.shmFile.write(b'\\x00' * (mmap.PAGESIZE+1))\n+ self.shmFile.flush()\n fd = self.shmFile.fileno()\n self.shm = mmap.mmap(fd, mmap.PAGESIZE, mmap.MAP_SHARED, mmap.PROT_WRITE)\n atexit.register(self.close)\n", "issue": "ValueError: mmap length is greater than file size \nIt's been many months since I'm using pyqtgraph and it's amazing. But recently when I've updated my maxOS to High Sierra 10.13.6 I'm facing following issue:\r\n`ValueError: mmap length is greater than file size`\r\nWhen I'm trying to run the pyqtgraph remote plotting example, the same is being thrown\r\nComplete log is [here](https://gist.github.com/sachinsngh165/65df4e4f3dc81a283379dc032cf48220)\r\nI'm using PyQt4+ python2.7\r\nAlso same error with PyQt5 + Python3\r\nPlease help me out it's urgent as I have to complete my project within a week.\n", "before_files": [{"content": "from ..Qt import QtGui, QtCore, QT_LIB\nif QT_LIB in ['PyQt4', 'PyQt5']:\n import sip\nfrom .. import multiprocess as mp\nfrom .GraphicsView import GraphicsView\nfrom .. import CONFIG_OPTIONS\nimport numpy as np\nimport mmap, tempfile, ctypes, atexit, sys, random\n\n__all__ = ['RemoteGraphicsView']\n\nclass RemoteGraphicsView(QtGui.QWidget):\n \"\"\"\n Replacement for GraphicsView that does all scene management and rendering on a remote process,\n while displaying on the local widget.\n \n GraphicsItems must be created by proxy to the remote process.\n \n \"\"\"\n def __init__(self, parent=None, *args, **kwds):\n \"\"\"\n The keyword arguments 'useOpenGL' and 'backgound', if specified, are passed to the remote\n GraphicsView.__init__(). All other keyword arguments are passed to multiprocess.QtProcess.__init__().\n \"\"\"\n self._img = None\n self._imgReq = None\n self._sizeHint = (640,480) ## no clue why this is needed, but it seems to be the default sizeHint for GraphicsView.\n ## without it, the widget will not compete for space against another GraphicsView.\n QtGui.QWidget.__init__(self)\n\n # separate local keyword arguments from remote.\n remoteKwds = {}\n for kwd in ['useOpenGL', 'background']:\n if kwd in kwds:\n remoteKwds[kwd] = kwds.pop(kwd)\n\n self._proc = mp.QtProcess(**kwds)\n self.pg = self._proc._import('pyqtgraph')\n self.pg.setConfigOptions(**CONFIG_OPTIONS)\n rpgRemote = self._proc._import('pyqtgraph.widgets.RemoteGraphicsView')\n self._view = rpgRemote.Renderer(*args, **remoteKwds)\n self._view._setProxyOptions(deferGetattr=True)\n \n self.setFocusPolicy(QtCore.Qt.StrongFocus)\n self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)\n self.setMouseTracking(True)\n self.shm = None\n shmFileName = self._view.shmFileName()\n if sys.platform.startswith('win'):\n self.shmtag = shmFileName\n else:\n self.shmFile = open(shmFileName, 'r')\n \n self._view.sceneRendered.connect(mp.proxy(self.remoteSceneChanged)) #, callSync='off'))\n ## Note: we need synchronous signals\n ## even though there is no return value--\n ## this informs the renderer that it is \n ## safe to begin rendering again. 
\n \n for method in ['scene', 'setCentralItem']:\n setattr(self, method, getattr(self._view, method))\n \n def resizeEvent(self, ev):\n ret = QtGui.QWidget.resizeEvent(self, ev)\n self._view.resize(self.size(), _callSync='off')\n return ret\n \n def sizeHint(self):\n return QtCore.QSize(*self._sizeHint)\n \n def remoteSceneChanged(self, data):\n w, h, size, newfile = data\n #self._sizeHint = (whint, hhint)\n if self.shm is None or self.shm.size != size:\n if self.shm is not None:\n self.shm.close()\n if sys.platform.startswith('win'):\n self.shmtag = newfile ## on windows, we create a new tag for every resize\n self.shm = mmap.mmap(-1, size, self.shmtag) ## can't use tmpfile on windows because the file can only be opened once.\n elif sys.platform == 'darwin':\n self.shmFile.close()\n self.shmFile = open(self._view.shmFileName(), 'r')\n self.shm = mmap.mmap(self.shmFile.fileno(), size, mmap.MAP_SHARED, mmap.PROT_READ)\n else:\n self.shm = mmap.mmap(self.shmFile.fileno(), size, mmap.MAP_SHARED, mmap.PROT_READ)\n self.shm.seek(0)\n data = self.shm.read(w*h*4)\n self._img = QtGui.QImage(data, w, h, QtGui.QImage.Format_ARGB32)\n self._img.data = data # data must be kept alive or PySide 1.2.1 (and probably earlier) will crash.\n self.update()\n \n def paintEvent(self, ev):\n if self._img is None:\n return\n p = QtGui.QPainter(self)\n p.drawImage(self.rect(), self._img, QtCore.QRect(0, 0, self._img.width(), self._img.height()))\n p.end()\n \n def mousePressEvent(self, ev):\n self._view.mousePressEvent(int(ev.type()), ev.pos(), ev.globalPos(), int(ev.button()), int(ev.buttons()), int(ev.modifiers()), _callSync='off')\n ev.accept()\n return QtGui.QWidget.mousePressEvent(self, ev)\n\n def mouseReleaseEvent(self, ev):\n self._view.mouseReleaseEvent(int(ev.type()), ev.pos(), ev.globalPos(), int(ev.button()), int(ev.buttons()), int(ev.modifiers()), _callSync='off')\n ev.accept()\n return QtGui.QWidget.mouseReleaseEvent(self, ev)\n\n def mouseMoveEvent(self, ev):\n self._view.mouseMoveEvent(int(ev.type()), ev.pos(), ev.globalPos(), int(ev.button()), int(ev.buttons()), int(ev.modifiers()), _callSync='off')\n ev.accept()\n return QtGui.QWidget.mouseMoveEvent(self, ev)\n \n def wheelEvent(self, ev):\n self._view.wheelEvent(ev.pos(), ev.globalPos(), ev.delta(), int(ev.buttons()), int(ev.modifiers()), int(ev.orientation()), _callSync='off')\n ev.accept()\n return QtGui.QWidget.wheelEvent(self, ev)\n \n def keyEvent(self, ev):\n if self._view.keyEvent(int(ev.type()), int(ev.modifiers()), text, autorep, count):\n ev.accept()\n return QtGui.QWidget.keyEvent(self, ev)\n \n def enterEvent(self, ev):\n self._view.enterEvent(int(ev.type()), _callSync='off')\n return QtGui.QWidget.enterEvent(self, ev)\n \n def leaveEvent(self, ev):\n self._view.leaveEvent(int(ev.type()), _callSync='off')\n return QtGui.QWidget.leaveEvent(self, ev)\n \n def remoteProcess(self):\n \"\"\"Return the remote process handle. (see multiprocess.remoteproxy.RemoteEventHandler)\"\"\"\n return self._proc\n\n def close(self):\n \"\"\"Close the remote process. 
After this call, the widget will no longer be updated.\"\"\"\n self._proc.close()\n\n\nclass Renderer(GraphicsView):\n ## Created by the remote process to handle render requests\n \n sceneRendered = QtCore.Signal(object)\n \n def __init__(self, *args, **kwds):\n ## Create shared memory for rendered image\n #pg.dbg(namespace={'r': self})\n if sys.platform.startswith('win'):\n self.shmtag = \"pyqtgraph_shmem_\" + ''.join([chr((random.getrandbits(20)%25) + 97) for i in range(20)])\n self.shm = mmap.mmap(-1, mmap.PAGESIZE, self.shmtag) # use anonymous mmap on windows\n else:\n self.shmFile = tempfile.NamedTemporaryFile(prefix='pyqtgraph_shmem_')\n self.shmFile.write(b'\\x00' * (mmap.PAGESIZE+1))\n fd = self.shmFile.fileno()\n self.shm = mmap.mmap(fd, mmap.PAGESIZE, mmap.MAP_SHARED, mmap.PROT_WRITE)\n atexit.register(self.close)\n \n GraphicsView.__init__(self, *args, **kwds)\n self.scene().changed.connect(self.update)\n self.img = None\n self.renderTimer = QtCore.QTimer()\n self.renderTimer.timeout.connect(self.renderView)\n self.renderTimer.start(16)\n \n def close(self):\n self.shm.close()\n if not sys.platform.startswith('win'):\n self.shmFile.close()\n\n def shmFileName(self):\n if sys.platform.startswith('win'):\n return self.shmtag\n else:\n return self.shmFile.name\n \n def update(self):\n self.img = None\n return GraphicsView.update(self)\n \n def resize(self, size):\n oldSize = self.size()\n GraphicsView.resize(self, size)\n self.resizeEvent(QtGui.QResizeEvent(size, oldSize))\n self.update()\n \n def renderView(self):\n if self.img is None:\n ## make sure shm is large enough and get its address\n if self.width() == 0 or self.height() == 0:\n return\n size = self.width() * self.height() * 4\n if size > self.shm.size():\n if sys.platform.startswith('win'):\n ## windows says \"WindowsError: [Error 87] the parameter is incorrect\" if we try to resize the mmap\n self.shm.close()\n ## it also says (sometimes) 'access is denied' if we try to reuse the tag.\n self.shmtag = \"pyqtgraph_shmem_\" + ''.join([chr((random.getrandbits(20)%25) + 97) for i in range(20)])\n self.shm = mmap.mmap(-1, size, self.shmtag)\n elif sys.platform == 'darwin':\n self.shm.close()\n self.shmFile.close()\n self.shmFile = tempfile.NamedTemporaryFile(prefix='pyqtgraph_shmem_')\n self.shmFile.write(b'\\x00' * (size + 1))\n self.shmFile.flush()\n self.shm = mmap.mmap(self.shmFile.fileno(), size, mmap.MAP_SHARED, mmap.PROT_WRITE)\n else:\n self.shm.resize(size)\n \n ## render the scene directly to shared memory\n if QT_LIB in ['PySide', 'PySide2']:\n ch = ctypes.c_char.from_buffer(self.shm, 0)\n #ch = ctypes.c_char_p(address)\n self.img = QtGui.QImage(ch, self.width(), self.height(), QtGui.QImage.Format_ARGB32)\n else:\n address = ctypes.addressof(ctypes.c_char.from_buffer(self.shm, 0))\n\n # different versions of pyqt have different requirements here..\n try:\n self.img = QtGui.QImage(sip.voidptr(address), self.width(), self.height(), QtGui.QImage.Format_ARGB32)\n except TypeError:\n try:\n self.img = QtGui.QImage(memoryview(buffer(self.shm)), self.width(), self.height(), QtGui.QImage.Format_ARGB32)\n except TypeError:\n # Works on PyQt 4.9.6\n self.img = QtGui.QImage(address, self.width(), self.height(), QtGui.QImage.Format_ARGB32)\n self.img.fill(0xffffffff)\n p = QtGui.QPainter(self.img)\n self.render(p, self.viewRect(), self.rect())\n p.end()\n self.sceneRendered.emit((self.width(), self.height(), self.shm.size(), self.shmFileName()))\n\n def mousePressEvent(self, typ, pos, gpos, btn, btns, mods):\n typ = 
QtCore.QEvent.Type(typ)\n btn = QtCore.Qt.MouseButton(btn)\n btns = QtCore.Qt.MouseButtons(btns)\n mods = QtCore.Qt.KeyboardModifiers(mods)\n return GraphicsView.mousePressEvent(self, QtGui.QMouseEvent(typ, pos, gpos, btn, btns, mods))\n\n def mouseMoveEvent(self, typ, pos, gpos, btn, btns, mods):\n typ = QtCore.QEvent.Type(typ)\n btn = QtCore.Qt.MouseButton(btn)\n btns = QtCore.Qt.MouseButtons(btns)\n mods = QtCore.Qt.KeyboardModifiers(mods)\n return GraphicsView.mouseMoveEvent(self, QtGui.QMouseEvent(typ, pos, gpos, btn, btns, mods))\n\n def mouseReleaseEvent(self, typ, pos, gpos, btn, btns, mods):\n typ = QtCore.QEvent.Type(typ)\n btn = QtCore.Qt.MouseButton(btn)\n btns = QtCore.Qt.MouseButtons(btns)\n mods = QtCore.Qt.KeyboardModifiers(mods)\n return GraphicsView.mouseReleaseEvent(self, QtGui.QMouseEvent(typ, pos, gpos, btn, btns, mods))\n\n def wheelEvent(self, pos, gpos, d, btns, mods, ori):\n btns = QtCore.Qt.MouseButtons(btns)\n mods = QtCore.Qt.KeyboardModifiers(mods)\n ori = (None, QtCore.Qt.Horizontal, QtCore.Qt.Vertical)[ori]\n return GraphicsView.wheelEvent(self, QtGui.QWheelEvent(pos, gpos, d, btns, mods, ori))\n\n def keyEvent(self, typ, mods, text, autorep, count):\n typ = QtCore.QEvent.Type(typ)\n mods = QtCore.Qt.KeyboardModifiers(mods)\n GraphicsView.keyEvent(self, QtGui.QKeyEvent(typ, mods, text, autorep, count))\n return ev.accepted()\n \n def enterEvent(self, typ):\n ev = QtCore.QEvent(QtCore.QEvent.Type(typ))\n return GraphicsView.enterEvent(self, ev)\n\n def leaveEvent(self, typ):\n ev = QtCore.QEvent(QtCore.QEvent.Type(typ))\n return GraphicsView.leaveEvent(self, ev)\n\n", "path": "pyqtgraph/widgets/RemoteGraphicsView.py"}], "after_files": [{"content": "from ..Qt import QtGui, QtCore, QT_LIB\nif QT_LIB in ['PyQt4', 'PyQt5']:\n import sip\nfrom .. import multiprocess as mp\nfrom .GraphicsView import GraphicsView\nfrom .. import CONFIG_OPTIONS\nimport numpy as np\nimport mmap, tempfile, ctypes, atexit, sys, random\n\n__all__ = ['RemoteGraphicsView']\n\nclass RemoteGraphicsView(QtGui.QWidget):\n \"\"\"\n Replacement for GraphicsView that does all scene management and rendering on a remote process,\n while displaying on the local widget.\n \n GraphicsItems must be created by proxy to the remote process.\n \n \"\"\"\n def __init__(self, parent=None, *args, **kwds):\n \"\"\"\n The keyword arguments 'useOpenGL' and 'backgound', if specified, are passed to the remote\n GraphicsView.__init__(). 
All other keyword arguments are passed to multiprocess.QtProcess.__init__().\n \"\"\"\n self._img = None\n self._imgReq = None\n self._sizeHint = (640,480) ## no clue why this is needed, but it seems to be the default sizeHint for GraphicsView.\n ## without it, the widget will not compete for space against another GraphicsView.\n QtGui.QWidget.__init__(self)\n\n # separate local keyword arguments from remote.\n remoteKwds = {}\n for kwd in ['useOpenGL', 'background']:\n if kwd in kwds:\n remoteKwds[kwd] = kwds.pop(kwd)\n\n self._proc = mp.QtProcess(**kwds)\n self.pg = self._proc._import('pyqtgraph')\n self.pg.setConfigOptions(**CONFIG_OPTIONS)\n rpgRemote = self._proc._import('pyqtgraph.widgets.RemoteGraphicsView')\n self._view = rpgRemote.Renderer(*args, **remoteKwds)\n self._view._setProxyOptions(deferGetattr=True)\n \n self.setFocusPolicy(QtCore.Qt.StrongFocus)\n self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)\n self.setMouseTracking(True)\n self.shm = None\n shmFileName = self._view.shmFileName()\n if sys.platform.startswith('win'):\n self.shmtag = shmFileName\n else:\n self.shmFile = open(shmFileName, 'r')\n \n self._view.sceneRendered.connect(mp.proxy(self.remoteSceneChanged)) #, callSync='off'))\n ## Note: we need synchronous signals\n ## even though there is no return value--\n ## this informs the renderer that it is \n ## safe to begin rendering again. \n \n for method in ['scene', 'setCentralItem']:\n setattr(self, method, getattr(self._view, method))\n \n def resizeEvent(self, ev):\n ret = QtGui.QWidget.resizeEvent(self, ev)\n self._view.resize(self.size(), _callSync='off')\n return ret\n \n def sizeHint(self):\n return QtCore.QSize(*self._sizeHint)\n \n def remoteSceneChanged(self, data):\n w, h, size, newfile = data\n #self._sizeHint = (whint, hhint)\n if self.shm is None or self.shm.size != size:\n if self.shm is not None:\n self.shm.close()\n if sys.platform.startswith('win'):\n self.shmtag = newfile ## on windows, we create a new tag for every resize\n self.shm = mmap.mmap(-1, size, self.shmtag) ## can't use tmpfile on windows because the file can only be opened once.\n elif sys.platform == 'darwin':\n self.shmFile.close()\n self.shmFile = open(self._view.shmFileName(), 'r')\n self.shm = mmap.mmap(self.shmFile.fileno(), size, mmap.MAP_SHARED, mmap.PROT_READ)\n else:\n self.shm = mmap.mmap(self.shmFile.fileno(), size, mmap.MAP_SHARED, mmap.PROT_READ)\n self.shm.seek(0)\n data = self.shm.read(w*h*4)\n self._img = QtGui.QImage(data, w, h, QtGui.QImage.Format_ARGB32)\n self._img.data = data # data must be kept alive or PySide 1.2.1 (and probably earlier) will crash.\n self.update()\n \n def paintEvent(self, ev):\n if self._img is None:\n return\n p = QtGui.QPainter(self)\n p.drawImage(self.rect(), self._img, QtCore.QRect(0, 0, self._img.width(), self._img.height()))\n p.end()\n \n def mousePressEvent(self, ev):\n self._view.mousePressEvent(int(ev.type()), ev.pos(), ev.globalPos(), int(ev.button()), int(ev.buttons()), int(ev.modifiers()), _callSync='off')\n ev.accept()\n return QtGui.QWidget.mousePressEvent(self, ev)\n\n def mouseReleaseEvent(self, ev):\n self._view.mouseReleaseEvent(int(ev.type()), ev.pos(), ev.globalPos(), int(ev.button()), int(ev.buttons()), int(ev.modifiers()), _callSync='off')\n ev.accept()\n return QtGui.QWidget.mouseReleaseEvent(self, ev)\n\n def mouseMoveEvent(self, ev):\n self._view.mouseMoveEvent(int(ev.type()), ev.pos(), ev.globalPos(), int(ev.button()), int(ev.buttons()), int(ev.modifiers()), _callSync='off')\n 
ev.accept()\n return QtGui.QWidget.mouseMoveEvent(self, ev)\n \n def wheelEvent(self, ev):\n self._view.wheelEvent(ev.pos(), ev.globalPos(), ev.delta(), int(ev.buttons()), int(ev.modifiers()), int(ev.orientation()), _callSync='off')\n ev.accept()\n return QtGui.QWidget.wheelEvent(self, ev)\n \n def keyEvent(self, ev):\n if self._view.keyEvent(int(ev.type()), int(ev.modifiers()), text, autorep, count):\n ev.accept()\n return QtGui.QWidget.keyEvent(self, ev)\n \n def enterEvent(self, ev):\n self._view.enterEvent(int(ev.type()), _callSync='off')\n return QtGui.QWidget.enterEvent(self, ev)\n \n def leaveEvent(self, ev):\n self._view.leaveEvent(int(ev.type()), _callSync='off')\n return QtGui.QWidget.leaveEvent(self, ev)\n \n def remoteProcess(self):\n \"\"\"Return the remote process handle. (see multiprocess.remoteproxy.RemoteEventHandler)\"\"\"\n return self._proc\n\n def close(self):\n \"\"\"Close the remote process. After this call, the widget will no longer be updated.\"\"\"\n self._proc.close()\n\n\nclass Renderer(GraphicsView):\n ## Created by the remote process to handle render requests\n \n sceneRendered = QtCore.Signal(object)\n \n def __init__(self, *args, **kwds):\n ## Create shared memory for rendered image\n #pg.dbg(namespace={'r': self})\n if sys.platform.startswith('win'):\n self.shmtag = \"pyqtgraph_shmem_\" + ''.join([chr((random.getrandbits(20)%25) + 97) for i in range(20)])\n self.shm = mmap.mmap(-1, mmap.PAGESIZE, self.shmtag) # use anonymous mmap on windows\n else:\n self.shmFile = tempfile.NamedTemporaryFile(prefix='pyqtgraph_shmem_')\n self.shmFile.write(b'\\x00' * (mmap.PAGESIZE+1))\n self.shmFile.flush()\n fd = self.shmFile.fileno()\n self.shm = mmap.mmap(fd, mmap.PAGESIZE, mmap.MAP_SHARED, mmap.PROT_WRITE)\n atexit.register(self.close)\n \n GraphicsView.__init__(self, *args, **kwds)\n self.scene().changed.connect(self.update)\n self.img = None\n self.renderTimer = QtCore.QTimer()\n self.renderTimer.timeout.connect(self.renderView)\n self.renderTimer.start(16)\n \n def close(self):\n self.shm.close()\n if not sys.platform.startswith('win'):\n self.shmFile.close()\n\n def shmFileName(self):\n if sys.platform.startswith('win'):\n return self.shmtag\n else:\n return self.shmFile.name\n \n def update(self):\n self.img = None\n return GraphicsView.update(self)\n \n def resize(self, size):\n oldSize = self.size()\n GraphicsView.resize(self, size)\n self.resizeEvent(QtGui.QResizeEvent(size, oldSize))\n self.update()\n \n def renderView(self):\n if self.img is None:\n ## make sure shm is large enough and get its address\n if self.width() == 0 or self.height() == 0:\n return\n size = self.width() * self.height() * 4\n if size > self.shm.size():\n if sys.platform.startswith('win'):\n ## windows says \"WindowsError: [Error 87] the parameter is incorrect\" if we try to resize the mmap\n self.shm.close()\n ## it also says (sometimes) 'access is denied' if we try to reuse the tag.\n self.shmtag = \"pyqtgraph_shmem_\" + ''.join([chr((random.getrandbits(20)%25) + 97) for i in range(20)])\n self.shm = mmap.mmap(-1, size, self.shmtag)\n elif sys.platform == 'darwin':\n self.shm.close()\n self.shmFile.close()\n self.shmFile = tempfile.NamedTemporaryFile(prefix='pyqtgraph_shmem_')\n self.shmFile.write(b'\\x00' * (size + 1))\n self.shmFile.flush()\n self.shm = mmap.mmap(self.shmFile.fileno(), size, mmap.MAP_SHARED, mmap.PROT_WRITE)\n else:\n self.shm.resize(size)\n \n ## render the scene directly to shared memory\n if QT_LIB in ['PySide', 'PySide2']:\n ch = 
ctypes.c_char.from_buffer(self.shm, 0)\n #ch = ctypes.c_char_p(address)\n self.img = QtGui.QImage(ch, self.width(), self.height(), QtGui.QImage.Format_ARGB32)\n else:\n address = ctypes.addressof(ctypes.c_char.from_buffer(self.shm, 0))\n\n # different versions of pyqt have different requirements here..\n try:\n self.img = QtGui.QImage(sip.voidptr(address), self.width(), self.height(), QtGui.QImage.Format_ARGB32)\n except TypeError:\n try:\n self.img = QtGui.QImage(memoryview(buffer(self.shm)), self.width(), self.height(), QtGui.QImage.Format_ARGB32)\n except TypeError:\n # Works on PyQt 4.9.6\n self.img = QtGui.QImage(address, self.width(), self.height(), QtGui.QImage.Format_ARGB32)\n self.img.fill(0xffffffff)\n p = QtGui.QPainter(self.img)\n self.render(p, self.viewRect(), self.rect())\n p.end()\n self.sceneRendered.emit((self.width(), self.height(), self.shm.size(), self.shmFileName()))\n\n def mousePressEvent(self, typ, pos, gpos, btn, btns, mods):\n typ = QtCore.QEvent.Type(typ)\n btn = QtCore.Qt.MouseButton(btn)\n btns = QtCore.Qt.MouseButtons(btns)\n mods = QtCore.Qt.KeyboardModifiers(mods)\n return GraphicsView.mousePressEvent(self, QtGui.QMouseEvent(typ, pos, gpos, btn, btns, mods))\n\n def mouseMoveEvent(self, typ, pos, gpos, btn, btns, mods):\n typ = QtCore.QEvent.Type(typ)\n btn = QtCore.Qt.MouseButton(btn)\n btns = QtCore.Qt.MouseButtons(btns)\n mods = QtCore.Qt.KeyboardModifiers(mods)\n return GraphicsView.mouseMoveEvent(self, QtGui.QMouseEvent(typ, pos, gpos, btn, btns, mods))\n\n def mouseReleaseEvent(self, typ, pos, gpos, btn, btns, mods):\n typ = QtCore.QEvent.Type(typ)\n btn = QtCore.Qt.MouseButton(btn)\n btns = QtCore.Qt.MouseButtons(btns)\n mods = QtCore.Qt.KeyboardModifiers(mods)\n return GraphicsView.mouseReleaseEvent(self, QtGui.QMouseEvent(typ, pos, gpos, btn, btns, mods))\n\n def wheelEvent(self, pos, gpos, d, btns, mods, ori):\n btns = QtCore.Qt.MouseButtons(btns)\n mods = QtCore.Qt.KeyboardModifiers(mods)\n ori = (None, QtCore.Qt.Horizontal, QtCore.Qt.Vertical)[ori]\n return GraphicsView.wheelEvent(self, QtGui.QWheelEvent(pos, gpos, d, btns, mods, ori))\n\n def keyEvent(self, typ, mods, text, autorep, count):\n typ = QtCore.QEvent.Type(typ)\n mods = QtCore.Qt.KeyboardModifiers(mods)\n GraphicsView.keyEvent(self, QtGui.QKeyEvent(typ, mods, text, autorep, count))\n return ev.accepted()\n \n def enterEvent(self, typ):\n ev = QtCore.QEvent(QtCore.QEvent.Type(typ))\n return GraphicsView.enterEvent(self, ev)\n\n def leaveEvent(self, typ):\n ev = QtCore.QEvent(QtCore.QEvent.Type(typ))\n return GraphicsView.leaveEvent(self, ev)\n\n", "path": "pyqtgraph/widgets/RemoteGraphicsView.py"}]} | 4,037 | 156 |
gh_patches_debug_7182 | rasdani/github-patches | git_diff | netbox-community__netbox-14794 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DH Group 15 (3072) not selectable in UI
### Deployment Type
Self-hosted
### NetBox Version
v3.7.0
### Python Version
3.11
### Steps to Reproduce
1. Open vpn/ike proposals in UI
2. Try to select DH group 15
### Expected Behavior
A dropdown containing group 15
### Observed Behavior
DH group 15 is missing
--- END ISSUE ---
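Judging from the DHGroupChoices class in the file below, GROUP_15 is defined as a constant but never listed in the CHOICES tuple, which would explain why the dropdown jumps from Group 14 to Group 16. A hypothetical sketch of where the missing entry would sit, reusing names from that file; this is a guess at the fix, not the actual patch:

```python
    CHOICES = (
        # ... existing entries ...
        (GROUP_14, _('Group {n}').format(n=14)),
        (GROUP_15, _('Group {n}').format(n=15)),  # entry currently absent from CHOICES
        (GROUP_16, _('Group {n}').format(n=16)),
        # ... remaining entries ...
    )
```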
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/vpn/choices.py`
Content:
```
1 from django.utils.translation import gettext_lazy as _
2
3 from utilities.choices import ChoiceSet
4
5
6 #
7 # Tunnels
8 #
9
10 class TunnelStatusChoices(ChoiceSet):
11 key = 'Tunnel.status'
12
13 STATUS_PLANNED = 'planned'
14 STATUS_ACTIVE = 'active'
15 STATUS_DISABLED = 'disabled'
16
17 CHOICES = [
18 (STATUS_PLANNED, _('Planned'), 'cyan'),
19 (STATUS_ACTIVE, _('Active'), 'green'),
20 (STATUS_DISABLED, _('Disabled'), 'red'),
21 ]
22
23
24 class TunnelEncapsulationChoices(ChoiceSet):
25 ENCAP_GRE = 'gre'
26 ENCAP_IP_IP = 'ip-ip'
27 ENCAP_IPSEC_TRANSPORT = 'ipsec-transport'
28 ENCAP_IPSEC_TUNNEL = 'ipsec-tunnel'
29
30 CHOICES = [
31 (ENCAP_IPSEC_TRANSPORT, _('IPsec - Transport')),
32 (ENCAP_IPSEC_TUNNEL, _('IPsec - Tunnel')),
33 (ENCAP_IP_IP, _('IP-in-IP')),
34 (ENCAP_GRE, _('GRE')),
35 ]
36
37
38 class TunnelTerminationTypeChoices(ChoiceSet):
39 # For TunnelCreateForm
40 TYPE_DEVICE = 'dcim.device'
41 TYPE_VIRTUALMACHINE = 'virtualization.virtualmachine'
42
43 CHOICES = (
44 (TYPE_DEVICE, _('Device')),
45 (TYPE_VIRTUALMACHINE, _('Virtual Machine')),
46 )
47
48
49 class TunnelTerminationRoleChoices(ChoiceSet):
50 ROLE_PEER = 'peer'
51 ROLE_HUB = 'hub'
52 ROLE_SPOKE = 'spoke'
53
54 CHOICES = [
55 (ROLE_PEER, _('Peer'), 'green'),
56 (ROLE_HUB, _('Hub'), 'blue'),
57 (ROLE_SPOKE, _('Spoke'), 'orange'),
58 ]
59
60
61 #
62 # Crypto
63 #
64
65 class IKEVersionChoices(ChoiceSet):
66 VERSION_1 = 1
67 VERSION_2 = 2
68
69 CHOICES = (
70 (VERSION_1, 'IKEv1'),
71 (VERSION_2, 'IKEv2'),
72 )
73
74
75 class IKEModeChoices(ChoiceSet):
76 AGGRESSIVE = 'aggressive'
77 MAIN = 'main'
78
79 CHOICES = (
80 (AGGRESSIVE, _('Aggressive')),
81 (MAIN, _('Main')),
82 )
83
84
85 class AuthenticationMethodChoices(ChoiceSet):
86 PRESHARED_KEYS = 'preshared-keys'
87 CERTIFICATES = 'certificates'
88 RSA_SIGNATURES = 'rsa-signatures'
89 DSA_SIGNATURES = 'dsa-signatures'
90
91 CHOICES = (
92 (PRESHARED_KEYS, _('Pre-shared keys')),
93 (CERTIFICATES, _('Certificates')),
94 (RSA_SIGNATURES, _('RSA signatures')),
95 (DSA_SIGNATURES, _('DSA signatures')),
96 )
97
98
99 class IPSecModeChoices(ChoiceSet):
100 ESP = 'esp'
101 AH = 'ah'
102
103 CHOICES = (
104 (ESP, 'ESP'),
105 (AH, 'AH'),
106 )
107
108
109 class EncryptionAlgorithmChoices(ChoiceSet):
110 ENCRYPTION_AES128_CBC = 'aes-128-cbc'
111 ENCRYPTION_AES128_GCM = 'aes-128-gcm'
112 ENCRYPTION_AES192_CBC = 'aes-192-cbc'
113 ENCRYPTION_AES192_GCM = 'aes-192-gcm'
114 ENCRYPTION_AES256_CBC = 'aes-256-cbc'
115 ENCRYPTION_AES256_GCM = 'aes-256-gcm'
116 ENCRYPTION_3DES = '3des-cbc'
117 ENCRYPTION_DES = 'des-cbc'
118
119 CHOICES = (
120 (ENCRYPTION_AES128_CBC, '128-bit AES (CBC)'),
121 (ENCRYPTION_AES128_GCM, '128-bit AES (GCM)'),
122 (ENCRYPTION_AES192_CBC, '192-bit AES (CBC)'),
123 (ENCRYPTION_AES192_GCM, '192-bit AES (GCM)'),
124 (ENCRYPTION_AES256_CBC, '256-bit AES (CBC)'),
125 (ENCRYPTION_AES256_GCM, '256-bit AES (GCM)'),
126 (ENCRYPTION_3DES, '3DES'),
127 (ENCRYPTION_3DES, 'DES'),
128 )
129
130
131 class AuthenticationAlgorithmChoices(ChoiceSet):
132 AUTH_HMAC_SHA1 = 'hmac-sha1'
133 AUTH_HMAC_SHA256 = 'hmac-sha256'
134 AUTH_HMAC_SHA384 = 'hmac-sha384'
135 AUTH_HMAC_SHA512 = 'hmac-sha512'
136 AUTH_HMAC_MD5 = 'hmac-md5'
137
138 CHOICES = (
139 (AUTH_HMAC_SHA1, 'SHA-1 HMAC'),
140 (AUTH_HMAC_SHA256, 'SHA-256 HMAC'),
141 (AUTH_HMAC_SHA384, 'SHA-384 HMAC'),
142 (AUTH_HMAC_SHA512, 'SHA-512 HMAC'),
143 (AUTH_HMAC_MD5, 'MD5 HMAC'),
144 )
145
146
147 class DHGroupChoices(ChoiceSet):
148 # https://www.iana.org/assignments/ikev2-parameters/ikev2-parameters.xhtml#ikev2-parameters-8
149 GROUP_1 = 1 # 768-bit MODP
150 GROUP_2 = 2 # 1024-but MODP
151 # Groups 3-4 reserved
152 GROUP_5 = 5 # 1536-bit MODP
153 # Groups 6-13 unassigned
154 GROUP_14 = 14 # 2048-bit MODP
155 GROUP_15 = 15 # 3072-bit MODP
156 GROUP_16 = 16 # 4096-bit MODP
157 GROUP_17 = 17 # 6144-bit MODP
158 GROUP_18 = 18 # 8192-bit MODP
159 GROUP_19 = 19 # 256-bit random ECP
160 GROUP_20 = 20 # 384-bit random ECP
161 GROUP_21 = 21 # 521-bit random ECP (521 is not a typo)
162 GROUP_22 = 22 # 1024-bit MODP w/160-bit prime
163 GROUP_23 = 23 # 2048-bit MODP w/224-bit prime
164 GROUP_24 = 24 # 2048-bit MODP w/256-bit prime
165 GROUP_25 = 25 # 192-bit ECP
166 GROUP_26 = 26 # 224-bit ECP
167 GROUP_27 = 27 # brainpoolP224r1
168 GROUP_28 = 28 # brainpoolP256r1
169 GROUP_29 = 29 # brainpoolP384r1
170 GROUP_30 = 30 # brainpoolP512r1
171 GROUP_31 = 31 # Curve25519
172 GROUP_32 = 32 # Curve448
173 GROUP_33 = 33 # GOST3410_2012_256
174 GROUP_34 = 34 # GOST3410_2012_512
175
176 CHOICES = (
177 # Strings are formatted in this manner to optimize translations
178 (GROUP_1, _('Group {n}').format(n=1)),
179 (GROUP_2, _('Group {n}').format(n=2)),
180 (GROUP_5, _('Group {n}').format(n=5)),
181 (GROUP_14, _('Group {n}').format(n=14)),
182 (GROUP_16, _('Group {n}').format(n=16)),
183 (GROUP_17, _('Group {n}').format(n=17)),
184 (GROUP_18, _('Group {n}').format(n=18)),
185 (GROUP_19, _('Group {n}').format(n=19)),
186 (GROUP_20, _('Group {n}').format(n=20)),
187 (GROUP_21, _('Group {n}').format(n=21)),
188 (GROUP_22, _('Group {n}').format(n=22)),
189 (GROUP_23, _('Group {n}').format(n=23)),
190 (GROUP_24, _('Group {n}').format(n=24)),
191 (GROUP_25, _('Group {n}').format(n=25)),
192 (GROUP_26, _('Group {n}').format(n=26)),
193 (GROUP_27, _('Group {n}').format(n=27)),
194 (GROUP_28, _('Group {n}').format(n=28)),
195 (GROUP_29, _('Group {n}').format(n=29)),
196 (GROUP_30, _('Group {n}').format(n=30)),
197 (GROUP_31, _('Group {n}').format(n=31)),
198 (GROUP_32, _('Group {n}').format(n=32)),
199 (GROUP_33, _('Group {n}').format(n=33)),
200 (GROUP_34, _('Group {n}').format(n=34)),
201 )
202
203
204 #
205 # L2VPN
206 #
207
208 class L2VPNTypeChoices(ChoiceSet):
209 TYPE_VPLS = 'vpls'
210 TYPE_VPWS = 'vpws'
211 TYPE_EPL = 'epl'
212 TYPE_EVPL = 'evpl'
213 TYPE_EPLAN = 'ep-lan'
214 TYPE_EVPLAN = 'evp-lan'
215 TYPE_EPTREE = 'ep-tree'
216 TYPE_EVPTREE = 'evp-tree'
217 TYPE_VXLAN = 'vxlan'
218 TYPE_VXLAN_EVPN = 'vxlan-evpn'
219 TYPE_MPLS_EVPN = 'mpls-evpn'
220 TYPE_PBB_EVPN = 'pbb-evpn'
221
222 CHOICES = (
223 ('VPLS', (
224 (TYPE_VPWS, 'VPWS'),
225 (TYPE_VPLS, 'VPLS'),
226 )),
227 ('VXLAN', (
228 (TYPE_VXLAN, 'VXLAN'),
229 (TYPE_VXLAN_EVPN, 'VXLAN-EVPN'),
230 )),
231 ('L2VPN E-VPN', (
232 (TYPE_MPLS_EVPN, 'MPLS EVPN'),
233 (TYPE_PBB_EVPN, 'PBB EVPN'),
234 )),
235 ('E-Line', (
236 (TYPE_EPL, 'EPL'),
237 (TYPE_EVPL, 'EVPL'),
238 )),
239 ('E-LAN', (
240 (TYPE_EPLAN, _('Ethernet Private LAN')),
241 (TYPE_EVPLAN, _('Ethernet Virtual Private LAN')),
242 )),
243 ('E-Tree', (
244 (TYPE_EPTREE, _('Ethernet Private Tree')),
245 (TYPE_EVPTREE, _('Ethernet Virtual Private Tree')),
246 )),
247 )
248
249 P2P = (
250 TYPE_VPWS,
251 TYPE_EPL,
252 TYPE_EPLAN,
253 TYPE_EPTREE
254 )
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netbox/vpn/choices.py b/netbox/vpn/choices.py
--- a/netbox/vpn/choices.py
+++ b/netbox/vpn/choices.py
@@ -179,6 +179,7 @@
(GROUP_2, _('Group {n}').format(n=2)),
(GROUP_5, _('Group {n}').format(n=5)),
(GROUP_14, _('Group {n}').format(n=14)),
+ (GROUP_15, _('Group {n}').format(n=15)),
(GROUP_16, _('Group {n}').format(n=16)),
(GROUP_17, _('Group {n}').format(n=17)),
(GROUP_18, _('Group {n}').format(n=18)),
| {"golden_diff": "diff --git a/netbox/vpn/choices.py b/netbox/vpn/choices.py\n--- a/netbox/vpn/choices.py\n+++ b/netbox/vpn/choices.py\n@@ -179,6 +179,7 @@\n (GROUP_2, _('Group {n}').format(n=2)),\n (GROUP_5, _('Group {n}').format(n=5)),\n (GROUP_14, _('Group {n}').format(n=14)),\n+ (GROUP_15, _('Group {n}').format(n=15)),\n (GROUP_16, _('Group {n}').format(n=16)),\n (GROUP_17, _('Group {n}').format(n=17)),\n (GROUP_18, _('Group {n}').format(n=18)),\n", "issue": "DH Group 15 (3072) not selectable in UI\n### Deployment Type\r\n\r\nSelf-hosted\r\n\r\n### NetBox Version\r\n\r\nv3.7.0\r\n\r\n### Python Version\r\n\r\n3.11\r\n\r\n### Steps to Reproduce\r\n\r\n1. Open vpn/ike proposals in UI\r\n2. Try to select DH group 15\r\n\r\n### Expected Behavior\r\n\r\nA dropdown containing group 15\r\n\r\n### Observed Behavior\r\n\r\nDH group 15 is missing\n", "before_files": [{"content": "from django.utils.translation import gettext_lazy as _\n\nfrom utilities.choices import ChoiceSet\n\n\n#\n# Tunnels\n#\n\nclass TunnelStatusChoices(ChoiceSet):\n key = 'Tunnel.status'\n\n STATUS_PLANNED = 'planned'\n STATUS_ACTIVE = 'active'\n STATUS_DISABLED = 'disabled'\n\n CHOICES = [\n (STATUS_PLANNED, _('Planned'), 'cyan'),\n (STATUS_ACTIVE, _('Active'), 'green'),\n (STATUS_DISABLED, _('Disabled'), 'red'),\n ]\n\n\nclass TunnelEncapsulationChoices(ChoiceSet):\n ENCAP_GRE = 'gre'\n ENCAP_IP_IP = 'ip-ip'\n ENCAP_IPSEC_TRANSPORT = 'ipsec-transport'\n ENCAP_IPSEC_TUNNEL = 'ipsec-tunnel'\n\n CHOICES = [\n (ENCAP_IPSEC_TRANSPORT, _('IPsec - Transport')),\n (ENCAP_IPSEC_TUNNEL, _('IPsec - Tunnel')),\n (ENCAP_IP_IP, _('IP-in-IP')),\n (ENCAP_GRE, _('GRE')),\n ]\n\n\nclass TunnelTerminationTypeChoices(ChoiceSet):\n # For TunnelCreateForm\n TYPE_DEVICE = 'dcim.device'\n TYPE_VIRTUALMACHINE = 'virtualization.virtualmachine'\n\n CHOICES = (\n (TYPE_DEVICE, _('Device')),\n (TYPE_VIRTUALMACHINE, _('Virtual Machine')),\n )\n\n\nclass TunnelTerminationRoleChoices(ChoiceSet):\n ROLE_PEER = 'peer'\n ROLE_HUB = 'hub'\n ROLE_SPOKE = 'spoke'\n\n CHOICES = [\n (ROLE_PEER, _('Peer'), 'green'),\n (ROLE_HUB, _('Hub'), 'blue'),\n (ROLE_SPOKE, _('Spoke'), 'orange'),\n ]\n\n\n#\n# Crypto\n#\n\nclass IKEVersionChoices(ChoiceSet):\n VERSION_1 = 1\n VERSION_2 = 2\n\n CHOICES = (\n (VERSION_1, 'IKEv1'),\n (VERSION_2, 'IKEv2'),\n )\n\n\nclass IKEModeChoices(ChoiceSet):\n AGGRESSIVE = 'aggressive'\n MAIN = 'main'\n\n CHOICES = (\n (AGGRESSIVE, _('Aggressive')),\n (MAIN, _('Main')),\n )\n\n\nclass AuthenticationMethodChoices(ChoiceSet):\n PRESHARED_KEYS = 'preshared-keys'\n CERTIFICATES = 'certificates'\n RSA_SIGNATURES = 'rsa-signatures'\n DSA_SIGNATURES = 'dsa-signatures'\n\n CHOICES = (\n (PRESHARED_KEYS, _('Pre-shared keys')),\n (CERTIFICATES, _('Certificates')),\n (RSA_SIGNATURES, _('RSA signatures')),\n (DSA_SIGNATURES, _('DSA signatures')),\n )\n\n\nclass IPSecModeChoices(ChoiceSet):\n ESP = 'esp'\n AH = 'ah'\n\n CHOICES = (\n (ESP, 'ESP'),\n (AH, 'AH'),\n )\n\n\nclass EncryptionAlgorithmChoices(ChoiceSet):\n ENCRYPTION_AES128_CBC = 'aes-128-cbc'\n ENCRYPTION_AES128_GCM = 'aes-128-gcm'\n ENCRYPTION_AES192_CBC = 'aes-192-cbc'\n ENCRYPTION_AES192_GCM = 'aes-192-gcm'\n ENCRYPTION_AES256_CBC = 'aes-256-cbc'\n ENCRYPTION_AES256_GCM = 'aes-256-gcm'\n ENCRYPTION_3DES = '3des-cbc'\n ENCRYPTION_DES = 'des-cbc'\n\n CHOICES = (\n (ENCRYPTION_AES128_CBC, '128-bit AES (CBC)'),\n (ENCRYPTION_AES128_GCM, '128-bit AES (GCM)'),\n (ENCRYPTION_AES192_CBC, '192-bit AES (CBC)'),\n (ENCRYPTION_AES192_GCM, '192-bit AES (GCM)'),\n (ENCRYPTION_AES256_CBC, '256-bit 
AES (CBC)'),\n (ENCRYPTION_AES256_GCM, '256-bit AES (GCM)'),\n (ENCRYPTION_3DES, '3DES'),\n (ENCRYPTION_3DES, 'DES'),\n )\n\n\nclass AuthenticationAlgorithmChoices(ChoiceSet):\n AUTH_HMAC_SHA1 = 'hmac-sha1'\n AUTH_HMAC_SHA256 = 'hmac-sha256'\n AUTH_HMAC_SHA384 = 'hmac-sha384'\n AUTH_HMAC_SHA512 = 'hmac-sha512'\n AUTH_HMAC_MD5 = 'hmac-md5'\n\n CHOICES = (\n (AUTH_HMAC_SHA1, 'SHA-1 HMAC'),\n (AUTH_HMAC_SHA256, 'SHA-256 HMAC'),\n (AUTH_HMAC_SHA384, 'SHA-384 HMAC'),\n (AUTH_HMAC_SHA512, 'SHA-512 HMAC'),\n (AUTH_HMAC_MD5, 'MD5 HMAC'),\n )\n\n\nclass DHGroupChoices(ChoiceSet):\n # https://www.iana.org/assignments/ikev2-parameters/ikev2-parameters.xhtml#ikev2-parameters-8\n GROUP_1 = 1 # 768-bit MODP\n GROUP_2 = 2 # 1024-but MODP\n # Groups 3-4 reserved\n GROUP_5 = 5 # 1536-bit MODP\n # Groups 6-13 unassigned\n GROUP_14 = 14 # 2048-bit MODP\n GROUP_15 = 15 # 3072-bit MODP\n GROUP_16 = 16 # 4096-bit MODP\n GROUP_17 = 17 # 6144-bit MODP\n GROUP_18 = 18 # 8192-bit MODP\n GROUP_19 = 19 # 256-bit random ECP\n GROUP_20 = 20 # 384-bit random ECP\n GROUP_21 = 21 # 521-bit random ECP (521 is not a typo)\n GROUP_22 = 22 # 1024-bit MODP w/160-bit prime\n GROUP_23 = 23 # 2048-bit MODP w/224-bit prime\n GROUP_24 = 24 # 2048-bit MODP w/256-bit prime\n GROUP_25 = 25 # 192-bit ECP\n GROUP_26 = 26 # 224-bit ECP\n GROUP_27 = 27 # brainpoolP224r1\n GROUP_28 = 28 # brainpoolP256r1\n GROUP_29 = 29 # brainpoolP384r1\n GROUP_30 = 30 # brainpoolP512r1\n GROUP_31 = 31 # Curve25519\n GROUP_32 = 32 # Curve448\n GROUP_33 = 33 # GOST3410_2012_256\n GROUP_34 = 34 # GOST3410_2012_512\n\n CHOICES = (\n # Strings are formatted in this manner to optimize translations\n (GROUP_1, _('Group {n}').format(n=1)),\n (GROUP_2, _('Group {n}').format(n=2)),\n (GROUP_5, _('Group {n}').format(n=5)),\n (GROUP_14, _('Group {n}').format(n=14)),\n (GROUP_16, _('Group {n}').format(n=16)),\n (GROUP_17, _('Group {n}').format(n=17)),\n (GROUP_18, _('Group {n}').format(n=18)),\n (GROUP_19, _('Group {n}').format(n=19)),\n (GROUP_20, _('Group {n}').format(n=20)),\n (GROUP_21, _('Group {n}').format(n=21)),\n (GROUP_22, _('Group {n}').format(n=22)),\n (GROUP_23, _('Group {n}').format(n=23)),\n (GROUP_24, _('Group {n}').format(n=24)),\n (GROUP_25, _('Group {n}').format(n=25)),\n (GROUP_26, _('Group {n}').format(n=26)),\n (GROUP_27, _('Group {n}').format(n=27)),\n (GROUP_28, _('Group {n}').format(n=28)),\n (GROUP_29, _('Group {n}').format(n=29)),\n (GROUP_30, _('Group {n}').format(n=30)),\n (GROUP_31, _('Group {n}').format(n=31)),\n (GROUP_32, _('Group {n}').format(n=32)),\n (GROUP_33, _('Group {n}').format(n=33)),\n (GROUP_34, _('Group {n}').format(n=34)),\n )\n\n\n#\n# L2VPN\n#\n\nclass L2VPNTypeChoices(ChoiceSet):\n TYPE_VPLS = 'vpls'\n TYPE_VPWS = 'vpws'\n TYPE_EPL = 'epl'\n TYPE_EVPL = 'evpl'\n TYPE_EPLAN = 'ep-lan'\n TYPE_EVPLAN = 'evp-lan'\n TYPE_EPTREE = 'ep-tree'\n TYPE_EVPTREE = 'evp-tree'\n TYPE_VXLAN = 'vxlan'\n TYPE_VXLAN_EVPN = 'vxlan-evpn'\n TYPE_MPLS_EVPN = 'mpls-evpn'\n TYPE_PBB_EVPN = 'pbb-evpn'\n\n CHOICES = (\n ('VPLS', (\n (TYPE_VPWS, 'VPWS'),\n (TYPE_VPLS, 'VPLS'),\n )),\n ('VXLAN', (\n (TYPE_VXLAN, 'VXLAN'),\n (TYPE_VXLAN_EVPN, 'VXLAN-EVPN'),\n )),\n ('L2VPN E-VPN', (\n (TYPE_MPLS_EVPN, 'MPLS EVPN'),\n (TYPE_PBB_EVPN, 'PBB EVPN'),\n )),\n ('E-Line', (\n (TYPE_EPL, 'EPL'),\n (TYPE_EVPL, 'EVPL'),\n )),\n ('E-LAN', (\n (TYPE_EPLAN, _('Ethernet Private LAN')),\n (TYPE_EVPLAN, _('Ethernet Virtual Private LAN')),\n )),\n ('E-Tree', (\n (TYPE_EPTREE, _('Ethernet Private Tree')),\n (TYPE_EVPTREE, _('Ethernet Virtual Private Tree')),\n )),\n 
)\n\n P2P = (\n TYPE_VPWS,\n TYPE_EPL,\n TYPE_EPLAN,\n TYPE_EPTREE\n )\n", "path": "netbox/vpn/choices.py"}], "after_files": [{"content": "from django.utils.translation import gettext_lazy as _\n\nfrom utilities.choices import ChoiceSet\n\n\n#\n# Tunnels\n#\n\nclass TunnelStatusChoices(ChoiceSet):\n key = 'Tunnel.status'\n\n STATUS_PLANNED = 'planned'\n STATUS_ACTIVE = 'active'\n STATUS_DISABLED = 'disabled'\n\n CHOICES = [\n (STATUS_PLANNED, _('Planned'), 'cyan'),\n (STATUS_ACTIVE, _('Active'), 'green'),\n (STATUS_DISABLED, _('Disabled'), 'red'),\n ]\n\n\nclass TunnelEncapsulationChoices(ChoiceSet):\n ENCAP_GRE = 'gre'\n ENCAP_IP_IP = 'ip-ip'\n ENCAP_IPSEC_TRANSPORT = 'ipsec-transport'\n ENCAP_IPSEC_TUNNEL = 'ipsec-tunnel'\n\n CHOICES = [\n (ENCAP_IPSEC_TRANSPORT, _('IPsec - Transport')),\n (ENCAP_IPSEC_TUNNEL, _('IPsec - Tunnel')),\n (ENCAP_IP_IP, _('IP-in-IP')),\n (ENCAP_GRE, _('GRE')),\n ]\n\n\nclass TunnelTerminationTypeChoices(ChoiceSet):\n # For TunnelCreateForm\n TYPE_DEVICE = 'dcim.device'\n TYPE_VIRTUALMACHINE = 'virtualization.virtualmachine'\n\n CHOICES = (\n (TYPE_DEVICE, _('Device')),\n (TYPE_VIRTUALMACHINE, _('Virtual Machine')),\n )\n\n\nclass TunnelTerminationRoleChoices(ChoiceSet):\n ROLE_PEER = 'peer'\n ROLE_HUB = 'hub'\n ROLE_SPOKE = 'spoke'\n\n CHOICES = [\n (ROLE_PEER, _('Peer'), 'green'),\n (ROLE_HUB, _('Hub'), 'blue'),\n (ROLE_SPOKE, _('Spoke'), 'orange'),\n ]\n\n\n#\n# Crypto\n#\n\nclass IKEVersionChoices(ChoiceSet):\n VERSION_1 = 1\n VERSION_2 = 2\n\n CHOICES = (\n (VERSION_1, 'IKEv1'),\n (VERSION_2, 'IKEv2'),\n )\n\n\nclass IKEModeChoices(ChoiceSet):\n AGGRESSIVE = 'aggressive'\n MAIN = 'main'\n\n CHOICES = (\n (AGGRESSIVE, _('Aggressive')),\n (MAIN, _('Main')),\n )\n\n\nclass AuthenticationMethodChoices(ChoiceSet):\n PRESHARED_KEYS = 'preshared-keys'\n CERTIFICATES = 'certificates'\n RSA_SIGNATURES = 'rsa-signatures'\n DSA_SIGNATURES = 'dsa-signatures'\n\n CHOICES = (\n (PRESHARED_KEYS, _('Pre-shared keys')),\n (CERTIFICATES, _('Certificates')),\n (RSA_SIGNATURES, _('RSA signatures')),\n (DSA_SIGNATURES, _('DSA signatures')),\n )\n\n\nclass IPSecModeChoices(ChoiceSet):\n ESP = 'esp'\n AH = 'ah'\n\n CHOICES = (\n (ESP, 'ESP'),\n (AH, 'AH'),\n )\n\n\nclass EncryptionAlgorithmChoices(ChoiceSet):\n ENCRYPTION_AES128_CBC = 'aes-128-cbc'\n ENCRYPTION_AES128_GCM = 'aes-128-gcm'\n ENCRYPTION_AES192_CBC = 'aes-192-cbc'\n ENCRYPTION_AES192_GCM = 'aes-192-gcm'\n ENCRYPTION_AES256_CBC = 'aes-256-cbc'\n ENCRYPTION_AES256_GCM = 'aes-256-gcm'\n ENCRYPTION_3DES = '3des-cbc'\n ENCRYPTION_DES = 'des-cbc'\n\n CHOICES = (\n (ENCRYPTION_AES128_CBC, '128-bit AES (CBC)'),\n (ENCRYPTION_AES128_GCM, '128-bit AES (GCM)'),\n (ENCRYPTION_AES192_CBC, '192-bit AES (CBC)'),\n (ENCRYPTION_AES192_GCM, '192-bit AES (GCM)'),\n (ENCRYPTION_AES256_CBC, '256-bit AES (CBC)'),\n (ENCRYPTION_AES256_GCM, '256-bit AES (GCM)'),\n (ENCRYPTION_3DES, '3DES'),\n (ENCRYPTION_3DES, 'DES'),\n )\n\n\nclass AuthenticationAlgorithmChoices(ChoiceSet):\n AUTH_HMAC_SHA1 = 'hmac-sha1'\n AUTH_HMAC_SHA256 = 'hmac-sha256'\n AUTH_HMAC_SHA384 = 'hmac-sha384'\n AUTH_HMAC_SHA512 = 'hmac-sha512'\n AUTH_HMAC_MD5 = 'hmac-md5'\n\n CHOICES = (\n (AUTH_HMAC_SHA1, 'SHA-1 HMAC'),\n (AUTH_HMAC_SHA256, 'SHA-256 HMAC'),\n (AUTH_HMAC_SHA384, 'SHA-384 HMAC'),\n (AUTH_HMAC_SHA512, 'SHA-512 HMAC'),\n (AUTH_HMAC_MD5, 'MD5 HMAC'),\n )\n\n\nclass DHGroupChoices(ChoiceSet):\n # https://www.iana.org/assignments/ikev2-parameters/ikev2-parameters.xhtml#ikev2-parameters-8\n GROUP_1 = 1 # 768-bit MODP\n GROUP_2 = 2 # 1024-but MODP\n # Groups 3-4 
reserved\n GROUP_5 = 5 # 1536-bit MODP\n # Groups 6-13 unassigned\n GROUP_14 = 14 # 2048-bit MODP\n GROUP_15 = 15 # 3072-bit MODP\n GROUP_16 = 16 # 4096-bit MODP\n GROUP_17 = 17 # 6144-bit MODP\n GROUP_18 = 18 # 8192-bit MODP\n GROUP_19 = 19 # 256-bit random ECP\n GROUP_20 = 20 # 384-bit random ECP\n GROUP_21 = 21 # 521-bit random ECP (521 is not a typo)\n GROUP_22 = 22 # 1024-bit MODP w/160-bit prime\n GROUP_23 = 23 # 2048-bit MODP w/224-bit prime\n GROUP_24 = 24 # 2048-bit MODP w/256-bit prime\n GROUP_25 = 25 # 192-bit ECP\n GROUP_26 = 26 # 224-bit ECP\n GROUP_27 = 27 # brainpoolP224r1\n GROUP_28 = 28 # brainpoolP256r1\n GROUP_29 = 29 # brainpoolP384r1\n GROUP_30 = 30 # brainpoolP512r1\n GROUP_31 = 31 # Curve25519\n GROUP_32 = 32 # Curve448\n GROUP_33 = 33 # GOST3410_2012_256\n GROUP_34 = 34 # GOST3410_2012_512\n\n CHOICES = (\n # Strings are formatted in this manner to optimize translations\n (GROUP_1, _('Group {n}').format(n=1)),\n (GROUP_2, _('Group {n}').format(n=2)),\n (GROUP_5, _('Group {n}').format(n=5)),\n (GROUP_14, _('Group {n}').format(n=14)),\n (GROUP_15, _('Group {n}').format(n=15)),\n (GROUP_16, _('Group {n}').format(n=16)),\n (GROUP_17, _('Group {n}').format(n=17)),\n (GROUP_18, _('Group {n}').format(n=18)),\n (GROUP_19, _('Group {n}').format(n=19)),\n (GROUP_20, _('Group {n}').format(n=20)),\n (GROUP_21, _('Group {n}').format(n=21)),\n (GROUP_22, _('Group {n}').format(n=22)),\n (GROUP_23, _('Group {n}').format(n=23)),\n (GROUP_24, _('Group {n}').format(n=24)),\n (GROUP_25, _('Group {n}').format(n=25)),\n (GROUP_26, _('Group {n}').format(n=26)),\n (GROUP_27, _('Group {n}').format(n=27)),\n (GROUP_28, _('Group {n}').format(n=28)),\n (GROUP_29, _('Group {n}').format(n=29)),\n (GROUP_30, _('Group {n}').format(n=30)),\n (GROUP_31, _('Group {n}').format(n=31)),\n (GROUP_32, _('Group {n}').format(n=32)),\n (GROUP_33, _('Group {n}').format(n=33)),\n (GROUP_34, _('Group {n}').format(n=34)),\n )\n\n\n#\n# L2VPN\n#\n\nclass L2VPNTypeChoices(ChoiceSet):\n TYPE_VPLS = 'vpls'\n TYPE_VPWS = 'vpws'\n TYPE_EPL = 'epl'\n TYPE_EVPL = 'evpl'\n TYPE_EPLAN = 'ep-lan'\n TYPE_EVPLAN = 'evp-lan'\n TYPE_EPTREE = 'ep-tree'\n TYPE_EVPTREE = 'evp-tree'\n TYPE_VXLAN = 'vxlan'\n TYPE_VXLAN_EVPN = 'vxlan-evpn'\n TYPE_MPLS_EVPN = 'mpls-evpn'\n TYPE_PBB_EVPN = 'pbb-evpn'\n\n CHOICES = (\n ('VPLS', (\n (TYPE_VPWS, 'VPWS'),\n (TYPE_VPLS, 'VPLS'),\n )),\n ('VXLAN', (\n (TYPE_VXLAN, 'VXLAN'),\n (TYPE_VXLAN_EVPN, 'VXLAN-EVPN'),\n )),\n ('L2VPN E-VPN', (\n (TYPE_MPLS_EVPN, 'MPLS EVPN'),\n (TYPE_PBB_EVPN, 'PBB EVPN'),\n )),\n ('E-Line', (\n (TYPE_EPL, 'EPL'),\n (TYPE_EVPL, 'EVPL'),\n )),\n ('E-LAN', (\n (TYPE_EPLAN, _('Ethernet Private LAN')),\n (TYPE_EVPLAN, _('Ethernet Virtual Private LAN')),\n )),\n ('E-Tree', (\n (TYPE_EPTREE, _('Ethernet Private Tree')),\n (TYPE_EVPTREE, _('Ethernet Virtual Private Tree')),\n )),\n )\n\n P2P = (\n TYPE_VPWS,\n TYPE_EPL,\n TYPE_EPLAN,\n TYPE_EPTREE\n )\n", "path": "netbox/vpn/choices.py"}]} | 3,560 | 184 |
gh_patches_debug_24840 | rasdani/github-patches | git_diff | comic__grand-challenge.org-3038 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Display number of unique users that have submitted to each phase on the challenge stats page
This would be handy for final phase forecasting.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/pages/views.py`
Content:
```
1 from datetime import datetime
2
3 from django.contrib.auth.mixins import UserPassesTestMixin
4 from django.contrib.messages.views import SuccessMessageMixin
5 from django.core.cache import cache
6 from django.db.models import Count, Q
7 from django.http import Http404
8 from django.views.generic import (
9 CreateView,
10 DeleteView,
11 DetailView,
12 ListView,
13 TemplateView,
14 UpdateView,
15 )
16 from django_countries import countries
17 from guardian.mixins import LoginRequiredMixin
18
19 from grandchallenge.challenges.models import ChallengeRequest
20 from grandchallenge.charts.specs import stacked_bar, world_map
21 from grandchallenge.core.guardian import ObjectPermissionRequiredMixin
22 from grandchallenge.evaluation.models import Submission
23 from grandchallenge.evaluation.utils import SubmissionKindChoices
24 from grandchallenge.pages.forms import PageCreateForm, PageUpdateForm
25 from grandchallenge.pages.models import Page
26 from grandchallenge.subdomains.utils import reverse, reverse_lazy
27
28
29 class ChallengeFilteredQuerysetMixin:
30 def get_queryset(self):
31 queryset = super().get_queryset()
32 return queryset.filter(Q(challenge=self.request.challenge))
33
34
35 class ChallengeFormKwargsMixin:
36 def get_form_kwargs(self):
37 kwargs = super().get_form_kwargs()
38 kwargs.update({"challenge": self.request.challenge})
39 return kwargs
40
41
42 class PageCreate(
43 LoginRequiredMixin,
44 ObjectPermissionRequiredMixin,
45 ChallengeFormKwargsMixin,
46 CreateView,
47 ):
48 model = Page
49 form_class = PageCreateForm
50 permission_required = "change_challenge"
51 raise_exception = True
52 login_url = reverse_lazy("account_login")
53
54 def get_permission_object(self):
55 return self.request.challenge
56
57 def form_valid(self, form):
58 form.instance.challenge = self.request.challenge
59 return super().form_valid(form)
60
61
62 class PageList(
63 LoginRequiredMixin,
64 ObjectPermissionRequiredMixin,
65 ChallengeFilteredQuerysetMixin,
66 ListView,
67 ):
68 model = Page
69 permission_required = "change_challenge"
70 raise_exception = True
71 login_url = reverse_lazy("account_login")
72
73 def get_permission_object(self):
74 return self.request.challenge
75
76
77 class PageDetail(
78 UserPassesTestMixin, ChallengeFilteredQuerysetMixin, DetailView
79 ):
80 model = Page
81 raise_exception = True
82 login_url = reverse_lazy("account_login")
83
84 def test_func(self):
85 user = self.request.user
86 page = self.get_object()
87 return page.can_be_viewed_by(user=user)
88
89
90 class ChallengeHome(PageDetail):
91 def get_object(self, queryset=None):
92 page = self.request.challenge.page_set.first()
93
94 if page is None:
95 raise Http404("No pages found for this challenge")
96
97 return page
98
99
100 class PageUpdate(
101 LoginRequiredMixin,
102 ObjectPermissionRequiredMixin,
103 ChallengeFilteredQuerysetMixin,
104 ChallengeFormKwargsMixin,
105 UpdateView,
106 ):
107 model = Page
108 form_class = PageUpdateForm
109 permission_required = "change_challenge"
110 raise_exception = True
111 login_url = reverse_lazy("account_login")
112
113 def get_permission_object(self):
114 return self.request.challenge
115
116 def form_valid(self, form):
117 response = super().form_valid(form)
118 self.object.move(form.cleaned_data["move"])
119 return response
120
121
122 class PageDelete(
123 LoginRequiredMixin,
124 ObjectPermissionRequiredMixin,
125 ChallengeFilteredQuerysetMixin,
126 SuccessMessageMixin,
127 DeleteView,
128 ):
129 model = Page
130 success_message = "Page was successfully deleted"
131 permission_required = "change_challenge"
132 raise_exception = True
133 login_url = reverse_lazy("account_login")
134
135 def get_permission_object(self):
136 return self.request.challenge
137
138 def get_success_url(self):
139 return reverse(
140 "pages:list",
141 kwargs={"challenge_short_name": self.request.challenge.short_name},
142 )
143
144
145 class ChallengeStatistics(TemplateView):
146 template_name = "pages/challenge_statistics.html"
147
148 def get_context_data(self, **kwargs):
149 context = super().get_context_data()
150
151 participants = (
152 self.request.challenge.get_participants().select_related(
153 "user_profile", "verification"
154 )
155 )
156
157 participants_countries = (
158 participants.exclude(user_profile__country="")
159 .values("user_profile__country")
160 .annotate(country_count=Count("user_profile__country"))
161 .order_by("-country_count")
162 .values_list("user_profile__country", "country_count")
163 )
164
165 public_phases = self.request.challenge.phase_set.filter(public=True)
166
167 submissions = (
168 Submission.objects.filter(phase__in=public_phases)
169 .values("phase__pk", "created__year", "created__month")
170 .annotate(object_count=Count("phase__slug"))
171 .order_by("created__year", "created__month", "phase__pk")
172 )
173
174 context.update(
175 {
176 "participants": world_map(
177 values=[
178 {
179 "id": countries.numeric(c[0], padded=True),
180 "participants": c[1],
181 }
182 for c in participants_countries
183 ]
184 ),
185 "participants_total": participants.count(),
186 "submissions": stacked_bar(
187 values=[
188 {
189 "Month": datetime(
190 datum["created__year"],
191 datum["created__month"],
192 1,
193 ).isoformat(),
194 "New Submissions": datum["object_count"],
195 "Phase": datum["phase__pk"],
196 }
197 for datum in submissions
198 ],
199 lookup="New Submissions",
200 title="New Submissions per Month",
201 facet="Phase",
202 domain=[
203 (phase.pk, phase.title) for phase in public_phases
204 ],
205 ),
206 "algorithm_phases": self.request.challenge.phase_set.prefetch_related(
207 "submission_set"
208 ).filter(
209 submission_kind=SubmissionKindChoices.ALGORITHM
210 ),
211 "statistics_for_phases": cache.get("statistics_for_phases"),
212 "challenge_request": ChallengeRequest.objects.filter(
213 short_name=self.request.challenge.short_name,
214 status=ChallengeRequest.ChallengeRequestStatusChoices.ACCEPTED,
215 ).first(),
216 }
217 )
218
219 return context
220
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/pages/views.py b/app/grandchallenge/pages/views.py
--- a/app/grandchallenge/pages/views.py
+++ b/app/grandchallenge/pages/views.py
@@ -171,6 +171,13 @@
.order_by("created__year", "created__month", "phase__pk")
)
+ creators = (
+ Submission.objects.filter(phase__in=public_phases)
+ .values("phase__pk")
+ .annotate(creators_count=Count("creator"))
+ .order_by("phase__pk")
+ )
+
context.update(
{
"participants": world_map(
@@ -213,6 +220,21 @@
short_name=self.request.challenge.short_name,
status=ChallengeRequest.ChallengeRequestStatusChoices.ACCEPTED,
).first(),
+ "creators": stacked_bar(
+ values=[
+ {
+ "Creators": datum["creators_count"],
+ "Phase": datum["phase__pk"],
+ }
+ for datum in creators
+ ],
+ lookup="Creators",
+ title="Creators per Phase",
+ facet="Phase",
+ domain=[
+ (phase.pk, phase.title) for phase in public_phases
+ ],
+ ),
}
)
| {"golden_diff": "diff --git a/app/grandchallenge/pages/views.py b/app/grandchallenge/pages/views.py\n--- a/app/grandchallenge/pages/views.py\n+++ b/app/grandchallenge/pages/views.py\n@@ -171,6 +171,13 @@\n .order_by(\"created__year\", \"created__month\", \"phase__pk\")\n )\n \n+ creators = (\n+ Submission.objects.filter(phase__in=public_phases)\n+ .values(\"phase__pk\")\n+ .annotate(creators_count=Count(\"creator\"))\n+ .order_by(\"phase__pk\")\n+ )\n+\n context.update(\n {\n \"participants\": world_map(\n@@ -213,6 +220,21 @@\n short_name=self.request.challenge.short_name,\n status=ChallengeRequest.ChallengeRequestStatusChoices.ACCEPTED,\n ).first(),\n+ \"creators\": stacked_bar(\n+ values=[\n+ {\n+ \"Creators\": datum[\"creators_count\"],\n+ \"Phase\": datum[\"phase__pk\"],\n+ }\n+ for datum in creators\n+ ],\n+ lookup=\"Creators\",\n+ title=\"Creators per Phase\",\n+ facet=\"Phase\",\n+ domain=[\n+ (phase.pk, phase.title) for phase in public_phases\n+ ],\n+ ),\n }\n )\n", "issue": "Display number of unique users that have submitted to each phase on the challenge stats page\nWould be handy for final phase forecasting.\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.core.cache import cache\nfrom django.db.models import Count, Q\nfrom django.http import Http404\nfrom django.views.generic import (\n CreateView,\n DeleteView,\n DetailView,\n ListView,\n TemplateView,\n UpdateView,\n)\nfrom django_countries import countries\nfrom guardian.mixins import LoginRequiredMixin\n\nfrom grandchallenge.challenges.models import ChallengeRequest\nfrom grandchallenge.charts.specs import stacked_bar, world_map\nfrom grandchallenge.core.guardian import ObjectPermissionRequiredMixin\nfrom grandchallenge.evaluation.models import Submission\nfrom grandchallenge.evaluation.utils import SubmissionKindChoices\nfrom grandchallenge.pages.forms import PageCreateForm, PageUpdateForm\nfrom grandchallenge.pages.models import Page\nfrom grandchallenge.subdomains.utils import reverse, reverse_lazy\n\n\nclass ChallengeFilteredQuerysetMixin:\n def get_queryset(self):\n queryset = super().get_queryset()\n return queryset.filter(Q(challenge=self.request.challenge))\n\n\nclass ChallengeFormKwargsMixin:\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"challenge\": self.request.challenge})\n return kwargs\n\n\nclass PageCreate(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n ChallengeFormKwargsMixin,\n CreateView,\n):\n model = Page\n form_class = PageCreateForm\n permission_required = \"change_challenge\"\n raise_exception = True\n login_url = reverse_lazy(\"account_login\")\n\n def get_permission_object(self):\n return self.request.challenge\n\n def form_valid(self, form):\n form.instance.challenge = self.request.challenge\n return super().form_valid(form)\n\n\nclass PageList(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n ChallengeFilteredQuerysetMixin,\n ListView,\n):\n model = Page\n permission_required = \"change_challenge\"\n raise_exception = True\n login_url = reverse_lazy(\"account_login\")\n\n def get_permission_object(self):\n return self.request.challenge\n\n\nclass PageDetail(\n UserPassesTestMixin, ChallengeFilteredQuerysetMixin, DetailView\n):\n model = Page\n raise_exception = True\n login_url = reverse_lazy(\"account_login\")\n\n def test_func(self):\n user = self.request.user\n page = 
self.get_object()\n return page.can_be_viewed_by(user=user)\n\n\nclass ChallengeHome(PageDetail):\n def get_object(self, queryset=None):\n page = self.request.challenge.page_set.first()\n\n if page is None:\n raise Http404(\"No pages found for this challenge\")\n\n return page\n\n\nclass PageUpdate(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n ChallengeFilteredQuerysetMixin,\n ChallengeFormKwargsMixin,\n UpdateView,\n):\n model = Page\n form_class = PageUpdateForm\n permission_required = \"change_challenge\"\n raise_exception = True\n login_url = reverse_lazy(\"account_login\")\n\n def get_permission_object(self):\n return self.request.challenge\n\n def form_valid(self, form):\n response = super().form_valid(form)\n self.object.move(form.cleaned_data[\"move\"])\n return response\n\n\nclass PageDelete(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n ChallengeFilteredQuerysetMixin,\n SuccessMessageMixin,\n DeleteView,\n):\n model = Page\n success_message = \"Page was successfully deleted\"\n permission_required = \"change_challenge\"\n raise_exception = True\n login_url = reverse_lazy(\"account_login\")\n\n def get_permission_object(self):\n return self.request.challenge\n\n def get_success_url(self):\n return reverse(\n \"pages:list\",\n kwargs={\"challenge_short_name\": self.request.challenge.short_name},\n )\n\n\nclass ChallengeStatistics(TemplateView):\n template_name = \"pages/challenge_statistics.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data()\n\n participants = (\n self.request.challenge.get_participants().select_related(\n \"user_profile\", \"verification\"\n )\n )\n\n participants_countries = (\n participants.exclude(user_profile__country=\"\")\n .values(\"user_profile__country\")\n .annotate(country_count=Count(\"user_profile__country\"))\n .order_by(\"-country_count\")\n .values_list(\"user_profile__country\", \"country_count\")\n )\n\n public_phases = self.request.challenge.phase_set.filter(public=True)\n\n submissions = (\n Submission.objects.filter(phase__in=public_phases)\n .values(\"phase__pk\", \"created__year\", \"created__month\")\n .annotate(object_count=Count(\"phase__slug\"))\n .order_by(\"created__year\", \"created__month\", \"phase__pk\")\n )\n\n context.update(\n {\n \"participants\": world_map(\n values=[\n {\n \"id\": countries.numeric(c[0], padded=True),\n \"participants\": c[1],\n }\n for c in participants_countries\n ]\n ),\n \"participants_total\": participants.count(),\n \"submissions\": stacked_bar(\n values=[\n {\n \"Month\": datetime(\n datum[\"created__year\"],\n datum[\"created__month\"],\n 1,\n ).isoformat(),\n \"New Submissions\": datum[\"object_count\"],\n \"Phase\": datum[\"phase__pk\"],\n }\n for datum in submissions\n ],\n lookup=\"New Submissions\",\n title=\"New Submissions per Month\",\n facet=\"Phase\",\n domain=[\n (phase.pk, phase.title) for phase in public_phases\n ],\n ),\n \"algorithm_phases\": self.request.challenge.phase_set.prefetch_related(\n \"submission_set\"\n ).filter(\n submission_kind=SubmissionKindChoices.ALGORITHM\n ),\n \"statistics_for_phases\": cache.get(\"statistics_for_phases\"),\n \"challenge_request\": ChallengeRequest.objects.filter(\n short_name=self.request.challenge.short_name,\n status=ChallengeRequest.ChallengeRequestStatusChoices.ACCEPTED,\n ).first(),\n }\n )\n\n return context\n", "path": "app/grandchallenge/pages/views.py"}], "after_files": [{"content": "from datetime import datetime\n\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom 
django.contrib.messages.views import SuccessMessageMixin\nfrom django.core.cache import cache\nfrom django.db.models import Count, Q\nfrom django.http import Http404\nfrom django.views.generic import (\n CreateView,\n DeleteView,\n DetailView,\n ListView,\n TemplateView,\n UpdateView,\n)\nfrom django_countries import countries\nfrom guardian.mixins import LoginRequiredMixin\n\nfrom grandchallenge.challenges.models import ChallengeRequest\nfrom grandchallenge.charts.specs import stacked_bar, world_map\nfrom grandchallenge.core.guardian import ObjectPermissionRequiredMixin\nfrom grandchallenge.evaluation.models import Submission\nfrom grandchallenge.evaluation.utils import SubmissionKindChoices\nfrom grandchallenge.pages.forms import PageCreateForm, PageUpdateForm\nfrom grandchallenge.pages.models import Page\nfrom grandchallenge.subdomains.utils import reverse, reverse_lazy\n\n\nclass ChallengeFilteredQuerysetMixin:\n def get_queryset(self):\n queryset = super().get_queryset()\n return queryset.filter(Q(challenge=self.request.challenge))\n\n\nclass ChallengeFormKwargsMixin:\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"challenge\": self.request.challenge})\n return kwargs\n\n\nclass PageCreate(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n ChallengeFormKwargsMixin,\n CreateView,\n):\n model = Page\n form_class = PageCreateForm\n permission_required = \"change_challenge\"\n raise_exception = True\n login_url = reverse_lazy(\"account_login\")\n\n def get_permission_object(self):\n return self.request.challenge\n\n def form_valid(self, form):\n form.instance.challenge = self.request.challenge\n return super().form_valid(form)\n\n\nclass PageList(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n ChallengeFilteredQuerysetMixin,\n ListView,\n):\n model = Page\n permission_required = \"change_challenge\"\n raise_exception = True\n login_url = reverse_lazy(\"account_login\")\n\n def get_permission_object(self):\n return self.request.challenge\n\n\nclass PageDetail(\n UserPassesTestMixin, ChallengeFilteredQuerysetMixin, DetailView\n):\n model = Page\n raise_exception = True\n login_url = reverse_lazy(\"account_login\")\n\n def test_func(self):\n user = self.request.user\n page = self.get_object()\n return page.can_be_viewed_by(user=user)\n\n\nclass ChallengeHome(PageDetail):\n def get_object(self, queryset=None):\n page = self.request.challenge.page_set.first()\n\n if page is None:\n raise Http404(\"No pages found for this challenge\")\n\n return page\n\n\nclass PageUpdate(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n ChallengeFilteredQuerysetMixin,\n ChallengeFormKwargsMixin,\n UpdateView,\n):\n model = Page\n form_class = PageUpdateForm\n permission_required = \"change_challenge\"\n raise_exception = True\n login_url = reverse_lazy(\"account_login\")\n\n def get_permission_object(self):\n return self.request.challenge\n\n def form_valid(self, form):\n response = super().form_valid(form)\n self.object.move(form.cleaned_data[\"move\"])\n return response\n\n\nclass PageDelete(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n ChallengeFilteredQuerysetMixin,\n SuccessMessageMixin,\n DeleteView,\n):\n model = Page\n success_message = \"Page was successfully deleted\"\n permission_required = \"change_challenge\"\n raise_exception = True\n login_url = reverse_lazy(\"account_login\")\n\n def get_permission_object(self):\n return self.request.challenge\n\n def get_success_url(self):\n return reverse(\n \"pages:list\",\n 
kwargs={\"challenge_short_name\": self.request.challenge.short_name},\n )\n\n\nclass ChallengeStatistics(TemplateView):\n template_name = \"pages/challenge_statistics.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data()\n\n participants = (\n self.request.challenge.get_participants().select_related(\n \"user_profile\", \"verification\"\n )\n )\n\n participants_countries = (\n participants.exclude(user_profile__country=\"\")\n .values(\"user_profile__country\")\n .annotate(country_count=Count(\"user_profile__country\"))\n .order_by(\"-country_count\")\n .values_list(\"user_profile__country\", \"country_count\")\n )\n\n public_phases = self.request.challenge.phase_set.filter(public=True)\n\n submissions = (\n Submission.objects.filter(phase__in=public_phases)\n .values(\"phase__pk\", \"created__year\", \"created__month\")\n .annotate(object_count=Count(\"phase__slug\"))\n .order_by(\"created__year\", \"created__month\", \"phase__pk\")\n )\n\n creators = (\n Submission.objects.filter(phase__in=public_phases)\n .values(\"phase__pk\")\n .annotate(creators_count=Count(\"creator\"))\n .order_by(\"phase__pk\")\n )\n\n context.update(\n {\n \"participants\": world_map(\n values=[\n {\n \"id\": countries.numeric(c[0], padded=True),\n \"participants\": c[1],\n }\n for c in participants_countries\n ]\n ),\n \"participants_total\": participants.count(),\n \"submissions\": stacked_bar(\n values=[\n {\n \"Month\": datetime(\n datum[\"created__year\"],\n datum[\"created__month\"],\n 1,\n ).isoformat(),\n \"New Submissions\": datum[\"object_count\"],\n \"Phase\": datum[\"phase__pk\"],\n }\n for datum in submissions\n ],\n lookup=\"New Submissions\",\n title=\"New Submissions per Month\",\n facet=\"Phase\",\n domain=[\n (phase.pk, phase.title) for phase in public_phases\n ],\n ),\n \"algorithm_phases\": self.request.challenge.phase_set.prefetch_related(\n \"submission_set\"\n ).filter(\n submission_kind=SubmissionKindChoices.ALGORITHM\n ),\n \"statistics_for_phases\": cache.get(\"statistics_for_phases\"),\n \"challenge_request\": ChallengeRequest.objects.filter(\n short_name=self.request.challenge.short_name,\n status=ChallengeRequest.ChallengeRequestStatusChoices.ACCEPTED,\n ).first(),\n \"creators\": stacked_bar(\n values=[\n {\n \"Creators\": datum[\"creators_count\"],\n \"Phase\": datum[\"phase__pk\"],\n }\n for datum in creators\n ],\n lookup=\"Creators\",\n title=\"Creators per Phase\",\n facet=\"Phase\",\n domain=[\n (phase.pk, phase.title) for phase in public_phases\n ],\n ),\n }\n )\n\n return context\n", "path": "app/grandchallenge/pages/views.py"}]} | 2,135 | 288 |
gh_patches_debug_17392 | rasdani/github-patches | git_diff | HypothesisWorks__hypothesis-1019 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Argument copying technique in LazyStrategy does not preserve custom list subtypes
I encountered an unexpected behavior when attempting to test a third-party library which makes use of custom `list` subtypes. Here's an example which reproduces the behavior:
Consider a class which inherits from the built-in Python `list` type and adds additional methods:
```python
class MyList(list):
def custom_method(self):
return "result"
def __copy__(self):
return MyList(list(self))
def __deepcopy__(self, table):
return self.__copy__()
```
Because I've overridden both `__copy__` and `__deepcopy__`, copying this list preserves its type:
```python
>>> from copy import copy, deepcopy
>>> my_list = MyList([])
>>> type(copy(my_list))
<class '__main__.MyList'>
>>> type(deepcopy(my_list))
<class '__main__.MyList'>
```
Let's say that I want to have a strategy which is parameterized by an instance of this class. This works as expected for some strategies:
```python
>>> from hypothesis import strategies as st
>>> type(st.just(my_list).example())
<class '__main__.MyList'>
```
However, I noticed that the argument type is not preserved when I use a composite strategy:
```python
>>> @st.composite
... def my_strategy(draw, my_list):
... return my_list
...
>>> type(my_strategy(my_list).example())
<type 'tuple'>
```
I believe that this behavior is due to how arguments are copied in `LazyStrategy`:
https://github.com/HypothesisWorks/hypothesis-python/blob/3ee500943938d60a8a97b7d3d948522d65f23e84/src/hypothesis/searchstrategy/lazy.py#L88
Each argument is being copied by `tupleize()`, which is defined as
```python
def tupleize(x):
if isinstance(x, (tuple, list)):
return tuple(x)
else:
return x
```
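(Editorial illustration, not part of the original report: given the `MyList` and `tupleize` definitions above, the subtype loss can be reproduced directly, because `tuple(x)` always builds a plain `tuple` no matter which `list` subclass it receives.)

```python
>>> tupleize(MyList([1, 2]))  # the MyList subtype is discarded by tuple(x)
(1, 2)
>>> tupleize("unchanged")     # non-list/tuple arguments pass through untouched
'unchanged'
```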
I'm not sure whether it would be safe to replace `tupleize` with `copy` here: converting lists to tuples guards against mutation both from code inside the composite strategy's body and from the code which calls / constructs the strategy, so safely using `copy` might also require additional `copy` calls elsewhere to guard against mutation during/after invocation.
I'm able to work around this behavior by wrapping my argument in an outer list (e.g. `[my_list]`) and unpacking the argument in my composite strategy.
I'm therefore not blocked by this behavior but I found it confusing and figured it might be worth reporting / documenting.
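(Editorial illustration, not from the original report: a minimal sketch of the wrapping workaround described above, assuming the `MyList` instance `my_list` and the `st` / `hypothesis.strategies` import from the earlier snippets.)

```python
>>> @st.composite
... def my_strategy_wrapped(draw, wrapped):
...     return wrapped[0]  # unpack: only the outer list is converted to a tuple
...
>>> type(my_strategy_wrapped([my_list]).example())
<class '__main__.MyList'>
```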
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/hypothesis/searchstrategy/lazy.py`
Content:
```
1 # coding=utf-8
2 #
3 # This file is part of Hypothesis, which may be found at
4 # https://github.com/HypothesisWorks/hypothesis-python
5 #
6 # Most of this work is copyright (C) 2013-2017 David R. MacIver
7 # ([email protected]), but it contains contributions by others. See
8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
9 # consult the git log if you need to determine who owns an individual
10 # contribution.
11 #
12 # This Source Code Form is subject to the terms of the Mozilla Public License,
13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
14 # obtain one at http://mozilla.org/MPL/2.0/.
15 #
16 # END HEADER
17
18 from __future__ import division, print_function, absolute_import
19
20 from hypothesis.internal.compat import getfullargspec
21 from hypothesis.internal.reflection import arg_string, \
22 convert_keyword_arguments, convert_positional_arguments
23 from hypothesis.searchstrategy.strategies import SearchStrategy
24
25
26 def tupleize(x):
27 if isinstance(x, (tuple, list)):
28 return tuple(x)
29 else:
30 return x
31
32
33 unwrap_cache = {}
34 unwrap_depth = 0
35
36
37 def unwrap_strategies(s):
38 global unwrap_depth
39
40 if not isinstance(s, SearchStrategy):
41 return s
42 try:
43 return unwrap_cache[s]
44 except KeyError:
45 pass
46
47 unwrap_cache[s] = s
48
49 try:
50 unwrap_depth += 1
51 try:
52 result = unwrap_strategies(s.wrapped_strategy)
53 unwrap_cache[s] = result
54 try:
55 assert result.force_has_reusable_values == \
56 s.force_has_reusable_values
57 except AttributeError:
58 pass
59
60 try:
61 result.force_has_reusable_values = s.force_has_reusable_values
62 except AttributeError:
63 pass
64 return result
65 except AttributeError:
66 return s
67 finally:
68 unwrap_depth -= 1
69 if unwrap_depth <= 0:
70 unwrap_cache.clear()
71 assert unwrap_depth >= 0
72
73
74 class LazyStrategy(SearchStrategy):
75
76 """A strategy which is defined purely by conversion to and from another
77 strategy.
78
79 Its parameter and distribution come from that other strategy.
80
81 """
82
83 def __init__(self, function, args, kwargs):
84 SearchStrategy.__init__(self)
85 self.__wrapped_strategy = None
86 self.__representation = None
87 self.__function = function
88 self.__args = tuple(map(tupleize, args))
89 self.__kwargs = dict(
90 (k, tupleize(v)) for k, v in kwargs.items()
91 )
92
93 @property
94 def supports_find(self):
95 return self.wrapped_strategy.supports_find
96
97 def calc_is_empty(self, recur):
98 return recur(self.wrapped_strategy)
99
100 def calc_has_reusable_values(self, recur):
101 return recur(self.wrapped_strategy)
102
103 def calc_is_cacheable(self, recur):
104 for source in (self.__args, self.__kwargs.values()):
105 for v in source:
106 if isinstance(v, SearchStrategy) and not v.is_cacheable:
107 return False
108 return True
109
110 @property
111 def wrapped_strategy(self):
112 if self.__wrapped_strategy is None:
113 unwrapped_args = tuple(
114 unwrap_strategies(s) for s in self.__args)
115 unwrapped_kwargs = {
116 k: unwrap_strategies(v)
117 for k, v in self.__kwargs.items()
118 }
119
120 base = self.__function(
121 *self.__args, **self.__kwargs
122 )
123 if (
124 unwrapped_args == self.__args and
125 unwrapped_kwargs == self.__kwargs
126 ):
127 self.__wrapped_strategy = base
128 else:
129 self.__wrapped_strategy = self.__function(
130 *unwrapped_args,
131 **unwrapped_kwargs)
132 return self.__wrapped_strategy
133
134 def do_validate(self):
135 w = self.wrapped_strategy
136 assert isinstance(w, SearchStrategy), \
137 '%r returned non-strategy %r' % (self, w)
138 w.validate()
139
140 def __repr__(self):
141 if self.__representation is None:
142 _args = self.__args
143 _kwargs = self.__kwargs
144 argspec = getfullargspec(self.__function)
145 defaults = dict(argspec.kwonlydefaults or {})
146 if argspec.defaults is not None:
147 for name, value in zip(reversed(argspec.args),
148 reversed(argspec.defaults)):
149 defaults[name] = value
150 if len(argspec.args) > 1 or argspec.defaults:
151 _args, _kwargs = convert_positional_arguments(
152 self.__function, _args, _kwargs)
153 else:
154 _args, _kwargs = convert_keyword_arguments(
155 self.__function, _args, _kwargs)
156 kwargs_for_repr = dict(_kwargs)
157 for k, v in defaults.items():
158 if k in kwargs_for_repr and kwargs_for_repr[k] is defaults[k]:
159 del kwargs_for_repr[k]
160 self.__representation = '%s(%s)' % (
161 self.__function.__name__,
162 arg_string(
163 self.__function, _args, kwargs_for_repr, reorder=False),
164 )
165 return self.__representation
166
167 def do_draw(self, data):
168 return data.draw(self.wrapped_strategy)
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/hypothesis/searchstrategy/lazy.py b/src/hypothesis/searchstrategy/lazy.py
--- a/src/hypothesis/searchstrategy/lazy.py
+++ b/src/hypothesis/searchstrategy/lazy.py
@@ -22,14 +22,6 @@
convert_keyword_arguments, convert_positional_arguments
from hypothesis.searchstrategy.strategies import SearchStrategy
-
-def tupleize(x):
- if isinstance(x, (tuple, list)):
- return tuple(x)
- else:
- return x
-
-
unwrap_cache = {}
unwrap_depth = 0
@@ -85,10 +77,8 @@
self.__wrapped_strategy = None
self.__representation = None
self.__function = function
- self.__args = tuple(map(tupleize, args))
- self.__kwargs = dict(
- (k, tupleize(v)) for k, v in kwargs.items()
- )
+ self.__args = args
+ self.__kwargs = kwargs
@property
def supports_find(self):
| {"golden_diff": "diff --git a/src/hypothesis/searchstrategy/lazy.py b/src/hypothesis/searchstrategy/lazy.py\n--- a/src/hypothesis/searchstrategy/lazy.py\n+++ b/src/hypothesis/searchstrategy/lazy.py\n@@ -22,14 +22,6 @@\n convert_keyword_arguments, convert_positional_arguments\n from hypothesis.searchstrategy.strategies import SearchStrategy\n \n-\n-def tupleize(x):\n- if isinstance(x, (tuple, list)):\n- return tuple(x)\n- else:\n- return x\n-\n-\n unwrap_cache = {}\n unwrap_depth = 0\n \n@@ -85,10 +77,8 @@\n self.__wrapped_strategy = None\n self.__representation = None\n self.__function = function\n- self.__args = tuple(map(tupleize, args))\n- self.__kwargs = dict(\n- (k, tupleize(v)) for k, v in kwargs.items()\n- )\n+ self.__args = args\n+ self.__kwargs = kwargs\n \n @property\n def supports_find(self):\n", "issue": "Argument copying technique in LazyStrategy does not preserve custom list subtypes\nI encountered an unexpected behavior when attempting to test a third-party library which makes use of custom `list` subtypes. Here's an example which reproduces the behavior:\r\n\r\nConsider a class which inherits from the built-in Python `list` type and adds additional methods:\r\n\r\n```python\r\nclass MyList(list):\r\n def custom_method(self):\r\n return \"result\"\r\n \r\n def __copy__(self):\r\n return MyList(list(self))\r\n \r\n def __deepcopy__(self, table):\r\n return self.__copy__()\r\n```\r\n\r\nBecause I've overridden both `__copy__` and `__deepcopy__`, copying this list preserves its type:\r\n\r\n```python\r\n>>> from copy import copy, deepcopy\r\n>>> my_list = MyList([])\r\n>>> type(copy(my_list))\r\n<class '__main__.MyList'>\r\n>>> type(deepcopy(my_list))\r\n<class '__main__.MyList'>\r\n```\r\n\r\nLet's say that I want to have a strategy which is parameterized by an instance of this class. This works as expected for some strategies:\r\n\r\n```python\r\n>>> from hypothesis import strategies as st\r\n>>> type(st.just(my_list).example())\r\n<class '__main__.MyList'>\r\n```\r\n\r\nHowever, I noticed that the argument type is not preserved when I use a composite strategy:\r\n\r\n```python\r\n>>> @st.composite\r\n... def my_strategy(draw, my_list):\r\n... return my_list\r\n...\r\n>>> type(my_strategy(my_list).example())\r\n<type 'tuple'>\r\n```\r\n\r\nI believe that this behavior is due to how arguments are copied in `LazyStrategy`: \r\n\r\nhttps://github.com/HypothesisWorks/hypothesis-python/blob/3ee500943938d60a8a97b7d3d948522d65f23e84/src/hypothesis/searchstrategy/lazy.py#L88\r\n\r\nEach argument is being copied by `tupelize()`, which is defined as\r\n\r\n```python\r\ndef tupleize(x):\r\n if isinstance(x, (tuple, list)):\r\n return tuple(x)\r\n else:\r\n return x\r\n```\r\n\r\nI'm not sure whether it would be safe to replace `tupelize` with `copy` here: converting lists to tuples here guards against mutation from both code inside of the composite strategy's body as well as the code which calls / constructs the strategy, so safely using `copy` here might also require additional `copy` calls elsewhere to guard against mutation during/after invocation.\r\n\r\nI'm able to work around this behavior by wrapping my argument in an outer list (e.g. 
`[my_list]`) and unpacking the argument in my composite strategy.\r\n\r\nI'm therefore not blocked by this behavior but I found it confusing and figured it might be worth reporting / documenting.\n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2017 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom hypothesis.internal.compat import getfullargspec\nfrom hypothesis.internal.reflection import arg_string, \\\n convert_keyword_arguments, convert_positional_arguments\nfrom hypothesis.searchstrategy.strategies import SearchStrategy\n\n\ndef tupleize(x):\n if isinstance(x, (tuple, list)):\n return tuple(x)\n else:\n return x\n\n\nunwrap_cache = {}\nunwrap_depth = 0\n\n\ndef unwrap_strategies(s):\n global unwrap_depth\n\n if not isinstance(s, SearchStrategy):\n return s\n try:\n return unwrap_cache[s]\n except KeyError:\n pass\n\n unwrap_cache[s] = s\n\n try:\n unwrap_depth += 1\n try:\n result = unwrap_strategies(s.wrapped_strategy)\n unwrap_cache[s] = result\n try:\n assert result.force_has_reusable_values == \\\n s.force_has_reusable_values\n except AttributeError:\n pass\n\n try:\n result.force_has_reusable_values = s.force_has_reusable_values\n except AttributeError:\n pass\n return result\n except AttributeError:\n return s\n finally:\n unwrap_depth -= 1\n if unwrap_depth <= 0:\n unwrap_cache.clear()\n assert unwrap_depth >= 0\n\n\nclass LazyStrategy(SearchStrategy):\n\n \"\"\"A strategy which is defined purely by conversion to and from another\n strategy.\n\n Its parameter and distribution come from that other strategy.\n\n \"\"\"\n\n def __init__(self, function, args, kwargs):\n SearchStrategy.__init__(self)\n self.__wrapped_strategy = None\n self.__representation = None\n self.__function = function\n self.__args = tuple(map(tupleize, args))\n self.__kwargs = dict(\n (k, tupleize(v)) for k, v in kwargs.items()\n )\n\n @property\n def supports_find(self):\n return self.wrapped_strategy.supports_find\n\n def calc_is_empty(self, recur):\n return recur(self.wrapped_strategy)\n\n def calc_has_reusable_values(self, recur):\n return recur(self.wrapped_strategy)\n\n def calc_is_cacheable(self, recur):\n for source in (self.__args, self.__kwargs.values()):\n for v in source:\n if isinstance(v, SearchStrategy) and not v.is_cacheable:\n return False\n return True\n\n @property\n def wrapped_strategy(self):\n if self.__wrapped_strategy is None:\n unwrapped_args = tuple(\n unwrap_strategies(s) for s in self.__args)\n unwrapped_kwargs = {\n k: unwrap_strategies(v)\n for k, v in self.__kwargs.items()\n }\n\n base = self.__function(\n *self.__args, **self.__kwargs\n )\n if (\n unwrapped_args == self.__args and\n unwrapped_kwargs == self.__kwargs\n ):\n self.__wrapped_strategy = base\n else:\n self.__wrapped_strategy = self.__function(\n *unwrapped_args,\n **unwrapped_kwargs)\n return self.__wrapped_strategy\n\n def do_validate(self):\n w = 
self.wrapped_strategy\n assert isinstance(w, SearchStrategy), \\\n '%r returned non-strategy %r' % (self, w)\n w.validate()\n\n def __repr__(self):\n if self.__representation is None:\n _args = self.__args\n _kwargs = self.__kwargs\n argspec = getfullargspec(self.__function)\n defaults = dict(argspec.kwonlydefaults or {})\n if argspec.defaults is not None:\n for name, value in zip(reversed(argspec.args),\n reversed(argspec.defaults)):\n defaults[name] = value\n if len(argspec.args) > 1 or argspec.defaults:\n _args, _kwargs = convert_positional_arguments(\n self.__function, _args, _kwargs)\n else:\n _args, _kwargs = convert_keyword_arguments(\n self.__function, _args, _kwargs)\n kwargs_for_repr = dict(_kwargs)\n for k, v in defaults.items():\n if k in kwargs_for_repr and kwargs_for_repr[k] is defaults[k]:\n del kwargs_for_repr[k]\n self.__representation = '%s(%s)' % (\n self.__function.__name__,\n arg_string(\n self.__function, _args, kwargs_for_repr, reorder=False),\n )\n return self.__representation\n\n def do_draw(self, data):\n return data.draw(self.wrapped_strategy)\n", "path": "src/hypothesis/searchstrategy/lazy.py"}], "after_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2017 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom hypothesis.internal.compat import getfullargspec\nfrom hypothesis.internal.reflection import arg_string, \\\n convert_keyword_arguments, convert_positional_arguments\nfrom hypothesis.searchstrategy.strategies import SearchStrategy\n\nunwrap_cache = {}\nunwrap_depth = 0\n\n\ndef unwrap_strategies(s):\n global unwrap_depth\n\n if not isinstance(s, SearchStrategy):\n return s\n try:\n return unwrap_cache[s]\n except KeyError:\n pass\n\n unwrap_cache[s] = s\n\n try:\n unwrap_depth += 1\n try:\n result = unwrap_strategies(s.wrapped_strategy)\n unwrap_cache[s] = result\n try:\n assert result.force_has_reusable_values == \\\n s.force_has_reusable_values\n except AttributeError:\n pass\n\n try:\n result.force_has_reusable_values = s.force_has_reusable_values\n except AttributeError:\n pass\n return result\n except AttributeError:\n return s\n finally:\n unwrap_depth -= 1\n if unwrap_depth <= 0:\n unwrap_cache.clear()\n assert unwrap_depth >= 0\n\n\nclass LazyStrategy(SearchStrategy):\n\n \"\"\"A strategy which is defined purely by conversion to and from another\n strategy.\n\n Its parameter and distribution come from that other strategy.\n\n \"\"\"\n\n def __init__(self, function, args, kwargs):\n SearchStrategy.__init__(self)\n self.__wrapped_strategy = None\n self.__representation = None\n self.__function = function\n self.__args = args\n self.__kwargs = kwargs\n\n @property\n def supports_find(self):\n return self.wrapped_strategy.supports_find\n\n def calc_is_empty(self, recur):\n return recur(self.wrapped_strategy)\n\n def calc_has_reusable_values(self, recur):\n return recur(self.wrapped_strategy)\n\n def calc_is_cacheable(self, 
recur):\n for source in (self.__args, self.__kwargs.values()):\n for v in source:\n if isinstance(v, SearchStrategy) and not v.is_cacheable:\n return False\n return True\n\n @property\n def wrapped_strategy(self):\n if self.__wrapped_strategy is None:\n unwrapped_args = tuple(\n unwrap_strategies(s) for s in self.__args)\n unwrapped_kwargs = {\n k: unwrap_strategies(v)\n for k, v in self.__kwargs.items()\n }\n\n base = self.__function(\n *self.__args, **self.__kwargs\n )\n if (\n unwrapped_args == self.__args and\n unwrapped_kwargs == self.__kwargs\n ):\n self.__wrapped_strategy = base\n else:\n self.__wrapped_strategy = self.__function(\n *unwrapped_args,\n **unwrapped_kwargs)\n return self.__wrapped_strategy\n\n def do_validate(self):\n w = self.wrapped_strategy\n assert isinstance(w, SearchStrategy), \\\n '%r returned non-strategy %r' % (self, w)\n w.validate()\n\n def __repr__(self):\n if self.__representation is None:\n _args = self.__args\n _kwargs = self.__kwargs\n argspec = getfullargspec(self.__function)\n defaults = dict(argspec.kwonlydefaults or {})\n if argspec.defaults is not None:\n for name, value in zip(reversed(argspec.args),\n reversed(argspec.defaults)):\n defaults[name] = value\n if len(argspec.args) > 1 or argspec.defaults:\n _args, _kwargs = convert_positional_arguments(\n self.__function, _args, _kwargs)\n else:\n _args, _kwargs = convert_keyword_arguments(\n self.__function, _args, _kwargs)\n kwargs_for_repr = dict(_kwargs)\n for k, v in defaults.items():\n if k in kwargs_for_repr and kwargs_for_repr[k] is defaults[k]:\n del kwargs_for_repr[k]\n self.__representation = '%s(%s)' % (\n self.__function.__name__,\n arg_string(\n self.__function, _args, kwargs_for_repr, reorder=False),\n )\n return self.__representation\n\n def do_draw(self, data):\n return data.draw(self.wrapped_strategy)\n", "path": "src/hypothesis/searchstrategy/lazy.py"}]} | 2,392 | 227 |
gh_patches_debug_21801 | rasdani/github-patches | git_diff | SciTools__cartopy-2287 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove "cfeature.LAND" from "Feature Creation" example ?
In the nice example provided on the [Feature Creation page](https://scitools.org.uk/cartopy/docs/latest/gallery/feature_creation.html), you could probably remove the useless line:
B) `ax.add_feature(cfeature.LAND)`
because you have already called
A) `ax.stock_img()`
As a matter of fact, since B) is called after A), it should theoretically be plotted above A). I see you are using a default `zorder=-1` for LAND, so it is probably drawn below everything else (I did not know you could use negative zorder values).
```
>>> cfeature.LAND.kwargs
{'edgecolor': 'face', 'zorder': -1, 'facecolor': array([0.9375 , 0.9375 , 0.859375])}
```
If I use `ax.add_feature(cfeature.LAND, zorder=10)`, I do get land with a uniform color above the stock image on land (and the stock ocean elsewhere).
--- END ISSUE ---
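For illustration, a minimal sketch of the layering behaviour described in the issue, assuming the standard cartopy/matplotlib APIs (the explicit `zorder=10` is just an example value):

```python
import matplotlib.pyplot as plt

import cartopy.crs as ccrs
import cartopy.feature as cfeature

ax = plt.axes(projection=ccrs.PlateCarree())
ax.stock_img()                              # background raster (A)
# LAND defaults to zorder=-1, so without an override it is drawn *behind*
# the stock image and has no visible effect (B).
ax.add_feature(cfeature.LAND, zorder=10)    # now drawn on top of the stock image
ax.add_feature(cfeature.COASTLINE)
plt.show()
```

This matches the reporter's observation: with the default negative zorder, the LAND call is effectively a no-op on top of `stock_img()`.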
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/lines_and_polygons/feature_creation.py`
Content:
```
1 """
2 Feature Creation
3 ----------------
4
5 This example manually instantiates a
6 :class:`cartopy.feature.NaturalEarthFeature` to access administrative
7 boundaries (states and provinces).
8
9 Note that this example is intended to illustrate the ability to construct
10 Natural Earth features that cartopy does not necessarily know about
11 *a priori*.
12 In this instance however, it would be possible to make use of the
13 pre-defined :data:`cartopy.feature.STATES` constant.
14
15 """
16 from matplotlib.offsetbox import AnchoredText
17 import matplotlib.pyplot as plt
18
19 import cartopy.crs as ccrs
20 import cartopy.feature as cfeature
21
22
23 def main():
24 fig = plt.figure()
25 ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
26 ax.set_extent([80, 170, -45, 30], crs=ccrs.PlateCarree())
27
28 # Put a background image on for nice sea rendering.
29 ax.stock_img()
30
31 # Create a feature for States/Admin 1 regions at 1:50m from Natural Earth
32 states_provinces = cfeature.NaturalEarthFeature(
33 category='cultural',
34 name='admin_1_states_provinces_lines',
35 scale='50m',
36 facecolor='none')
37
38 SOURCE = 'Natural Earth'
39 LICENSE = 'public domain'
40
41 ax.add_feature(cfeature.LAND)
42 ax.add_feature(cfeature.COASTLINE)
43 ax.add_feature(states_provinces, edgecolor='gray')
44
45 # Add a text annotation for the license information to the
46 # the bottom right corner.
47 text = AnchoredText('\u00A9 {}; license: {}'
48 ''.format(SOURCE, LICENSE),
49 loc=4, prop={'size': 12}, frameon=True)
50 ax.add_artist(text)
51
52 plt.show()
53
54
55 if __name__ == '__main__':
56 main()
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/lines_and_polygons/feature_creation.py b/examples/lines_and_polygons/feature_creation.py
--- a/examples/lines_and_polygons/feature_creation.py
+++ b/examples/lines_and_polygons/feature_creation.py
@@ -28,7 +28,7 @@
# Put a background image on for nice sea rendering.
ax.stock_img()
- # Create a feature for States/Admin 1 regions at 1:50m from Natural Earth
+ # Create a feature for States/Admin 1 regions at 1:50m from Natural Earth.
states_provinces = cfeature.NaturalEarthFeature(
category='cultural',
name='admin_1_states_provinces_lines',
@@ -38,9 +38,11 @@
SOURCE = 'Natural Earth'
LICENSE = 'public domain'
- ax.add_feature(cfeature.LAND)
- ax.add_feature(cfeature.COASTLINE)
+ # Add our states feature.
ax.add_feature(states_provinces, edgecolor='gray')
+ # Add land feature, overriding the default negative zorder so it shows
+ # above the background image.
+ ax.add_feature(cfeature.LAND, zorder=1, edgecolor='k')
# Add a text annotation for the license information to the
# the bottom right corner.
| {"golden_diff": "diff --git a/examples/lines_and_polygons/feature_creation.py b/examples/lines_and_polygons/feature_creation.py\n--- a/examples/lines_and_polygons/feature_creation.py\n+++ b/examples/lines_and_polygons/feature_creation.py\n@@ -28,7 +28,7 @@\n # Put a background image on for nice sea rendering.\n ax.stock_img()\n \n- # Create a feature for States/Admin 1 regions at 1:50m from Natural Earth\n+ # Create a feature for States/Admin 1 regions at 1:50m from Natural Earth.\n states_provinces = cfeature.NaturalEarthFeature(\n category='cultural',\n name='admin_1_states_provinces_lines',\n@@ -38,9 +38,11 @@\n SOURCE = 'Natural Earth'\n LICENSE = 'public domain'\n \n- ax.add_feature(cfeature.LAND)\n- ax.add_feature(cfeature.COASTLINE)\n+ # Add our states feature.\n ax.add_feature(states_provinces, edgecolor='gray')\n+ # Add land feature, overriding the default negative zorder so it shows\n+ # above the background image.\n+ ax.add_feature(cfeature.LAND, zorder=1, edgecolor='k')\n \n # Add a text annotation for the license information to the\n # the bottom right corner.\n", "issue": "Remove \"cfeature.LAND\" from \"Feature Creation\" example ?\nIn the nice example provided on the [Feature Creation page](https://scitools.org.uk/cartopy/docs/latest/gallery/feature_creation.html), you could probably remove the useless line:\r\nB) `ax.add_feature(cfeature.LAND)`\r\nbecause you have already called\r\nA) `ax.stock_img()`\r\n\r\nAs a matter of fact, B) being called after A), it should theoretically be plotted above A). I see you are using a default `zorder=-1` for LAND, so it is probably below everything else (did not know you could use negative zorder values)\r\n```\r\n>>> cfeature.LAND.kwargs\r\n{'edgecolor': 'face', 'zorder': -1, 'facecolor': array([0.9375 , 0.9375 , 0.859375])}\r\n```\r\nIf I use `ax.add_feature(cfeature.LAND, zorder=10)`, I do get land with a uniform color above the stock image on land (and the stock ocean elsewhere)\n", "before_files": [{"content": "\"\"\"\nFeature Creation\n----------------\n\nThis example manually instantiates a\n:class:`cartopy.feature.NaturalEarthFeature` to access administrative\nboundaries (states and provinces).\n\nNote that this example is intended to illustrate the ability to construct\nNatural Earth features that cartopy does not necessarily know about\n*a priori*.\nIn this instance however, it would be possible to make use of the\npre-defined :data:`cartopy.feature.STATES` constant.\n\n\"\"\"\nfrom matplotlib.offsetbox import AnchoredText\nimport matplotlib.pyplot as plt\n\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\n\n\ndef main():\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())\n ax.set_extent([80, 170, -45, 30], crs=ccrs.PlateCarree())\n\n # Put a background image on for nice sea rendering.\n ax.stock_img()\n\n # Create a feature for States/Admin 1 regions at 1:50m from Natural Earth\n states_provinces = cfeature.NaturalEarthFeature(\n category='cultural',\n name='admin_1_states_provinces_lines',\n scale='50m',\n facecolor='none')\n\n SOURCE = 'Natural Earth'\n LICENSE = 'public domain'\n\n ax.add_feature(cfeature.LAND)\n ax.add_feature(cfeature.COASTLINE)\n ax.add_feature(states_provinces, edgecolor='gray')\n\n # Add a text annotation for the license information to the\n # the bottom right corner.\n text = AnchoredText('\\u00A9 {}; license: {}'\n ''.format(SOURCE, LICENSE),\n loc=4, prop={'size': 12}, frameon=True)\n ax.add_artist(text)\n\n plt.show()\n\n\nif __name__ == 
'__main__':\n main()\n", "path": "examples/lines_and_polygons/feature_creation.py"}], "after_files": [{"content": "\"\"\"\nFeature Creation\n----------------\n\nThis example manually instantiates a\n:class:`cartopy.feature.NaturalEarthFeature` to access administrative\nboundaries (states and provinces).\n\nNote that this example is intended to illustrate the ability to construct\nNatural Earth features that cartopy does not necessarily know about\n*a priori*.\nIn this instance however, it would be possible to make use of the\npre-defined :data:`cartopy.feature.STATES` constant.\n\n\"\"\"\nfrom matplotlib.offsetbox import AnchoredText\nimport matplotlib.pyplot as plt\n\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\n\n\ndef main():\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())\n ax.set_extent([80, 170, -45, 30], crs=ccrs.PlateCarree())\n\n # Put a background image on for nice sea rendering.\n ax.stock_img()\n\n # Create a feature for States/Admin 1 regions at 1:50m from Natural Earth.\n states_provinces = cfeature.NaturalEarthFeature(\n category='cultural',\n name='admin_1_states_provinces_lines',\n scale='50m',\n facecolor='none')\n\n SOURCE = 'Natural Earth'\n LICENSE = 'public domain'\n\n # Add our states feature.\n ax.add_feature(states_provinces, edgecolor='gray')\n # Add land feature, overriding the default negative zorder so it shows\n # above the background image.\n ax.add_feature(cfeature.LAND, zorder=1, edgecolor='k')\n\n # Add a text annotation for the license information to the\n # the bottom right corner.\n text = AnchoredText('\\u00A9 {}; license: {}'\n ''.format(SOURCE, LICENSE),\n loc=4, prop={'size': 12}, frameon=True)\n ax.add_artist(text)\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/lines_and_polygons/feature_creation.py"}]} | 1,016 | 293 |
gh_patches_debug_26290 | rasdani/github-patches | git_diff | scrapy__scrapy-4052 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exception when using DummyStatsCollector
### Description
Using the DummyStatsCollector results in an exception:
```
2019-09-09 13:51:23 [scrapy.utils.signal] ERROR: Error caught on signal handler: <bound method CoreStats.spider_closed of <scrapy.extensions.corestats.CoreStats object at 0x7f86269cac18>>
Traceback (most recent call last):
File ".../lib/python3.6/site-packages/twisted/internet/defer.py", line 150, in maybeDeferred
result = f(*args, **kw)
File ".../lib/python3.6/site-packages/pydispatch/robustapply.py", line 55, in robustApply
return receiver(*arguments, **named)
File ".../lib/python3.6/site-packages/scrapy/extensions/corestats.py", line 28, in spider_closed
elapsed_time = finish_time - self.stats.get_value('start_time')
TypeError: unsupported operand type(s) for -: 'datetime.datetime' and 'NoneType'
```
This problem has been introduced in aa46e1995cd5cb1099aba17535372b538bd656b3.
### Steps to Reproduce
Set `STATS_CLASS = "scrapy.statscollectors.DummyStatsCollector"` in the settings module as described in the documentation (https://docs.scrapy.org/en/latest/topics/stats.html#dummystatscollector).
**Expected behavior:** no exception
**Actual behavior:** exception thrown
**Reproduces how often:** always
### Versions
At least master as of 534de7395da3a53b5a2c89960db9ec5d8fdab60c
### Fix
A possible fix is to pass `finish_time` as the default to `get_value()` so that it does not return None (the elapsed time then becomes zero in that edge case). I can prepare a PR if needed.
```diff
--- a/scrapy/extensions/corestats.py
+++ b/scrapy/extensions/corestats.py
@@ -25,7 +25,7 @@ class CoreStats(object):
def spider_closed(self, spider, reason):
finish_time = datetime.datetime.utcnow()
- elapsed_time = finish_time - self.stats.get_value('start_time')
+ elapsed_time = finish_time - self.stats.get_value('start_time', finish_time)
elapsed_time_seconds = elapsed_time.total_seconds()
self.stats.set_value('elapsed_time_seconds', elapsed_time_seconds, spider=spider)
self.stats.set_value('finish_time', finish_time, spider=spider)
```
--- END ISSUE ---
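As a quick illustration of the failure mode (a hedged sketch, not Scrapy code): `DummyStatsCollector.get_value('start_time')` falls back to its default of `None`, so the subtraction in `spider_closed` fails:

```python
from datetime import datetime

start_time = None                    # what the dummy collector returns for 'start_time'
finish_time = datetime.utcnow()
elapsed = finish_time - start_time   # TypeError: unsupported operand type(s) for -: ...
```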
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/extensions/corestats.py`
Content:
```
1 """
2 Extension for collecting core stats like items scraped and start/finish times
3 """
4 import datetime
5
6 from scrapy import signals
7
8 class CoreStats(object):
9
10 def __init__(self, stats):
11 self.stats = stats
12
13 @classmethod
14 def from_crawler(cls, crawler):
15 o = cls(crawler.stats)
16 crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
17 crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)
18 crawler.signals.connect(o.item_scraped, signal=signals.item_scraped)
19 crawler.signals.connect(o.item_dropped, signal=signals.item_dropped)
20 crawler.signals.connect(o.response_received, signal=signals.response_received)
21 return o
22
23 def spider_opened(self, spider):
24 self.stats.set_value('start_time', datetime.datetime.utcnow(), spider=spider)
25
26 def spider_closed(self, spider, reason):
27 finish_time = datetime.datetime.utcnow()
28 elapsed_time = finish_time - self.stats.get_value('start_time')
29 elapsed_time_seconds = elapsed_time.total_seconds()
30 self.stats.set_value('elapsed_time_seconds', elapsed_time_seconds, spider=spider)
31 self.stats.set_value('finish_time', finish_time, spider=spider)
32 self.stats.set_value('finish_reason', reason, spider=spider)
33
34 def item_scraped(self, item, spider):
35 self.stats.inc_value('item_scraped_count', spider=spider)
36
37 def response_received(self, spider):
38 self.stats.inc_value('response_received_count', spider=spider)
39
40 def item_dropped(self, item, spider, exception):
41 reason = exception.__class__.__name__
42 self.stats.inc_value('item_dropped_count', spider=spider)
43 self.stats.inc_value('item_dropped_reasons_count/%s' % reason, spider=spider)
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/extensions/corestats.py b/scrapy/extensions/corestats.py
--- a/scrapy/extensions/corestats.py
+++ b/scrapy/extensions/corestats.py
@@ -1,14 +1,16 @@
"""
Extension for collecting core stats like items scraped and start/finish times
"""
-import datetime
+from datetime import datetime
from scrapy import signals
+
class CoreStats(object):
def __init__(self, stats):
self.stats = stats
+ self.start_time = None
@classmethod
def from_crawler(cls, crawler):
@@ -21,11 +23,12 @@
return o
def spider_opened(self, spider):
- self.stats.set_value('start_time', datetime.datetime.utcnow(), spider=spider)
+ self.start_time = datetime.utcnow()
+ self.stats.set_value('start_time', self.start_time, spider=spider)
def spider_closed(self, spider, reason):
- finish_time = datetime.datetime.utcnow()
- elapsed_time = finish_time - self.stats.get_value('start_time')
+ finish_time = datetime.utcnow()
+ elapsed_time = finish_time - self.start_time
elapsed_time_seconds = elapsed_time.total_seconds()
self.stats.set_value('elapsed_time_seconds', elapsed_time_seconds, spider=spider)
self.stats.set_value('finish_time', finish_time, spider=spider)
| {"golden_diff": "diff --git a/scrapy/extensions/corestats.py b/scrapy/extensions/corestats.py\n--- a/scrapy/extensions/corestats.py\n+++ b/scrapy/extensions/corestats.py\n@@ -1,14 +1,16 @@\n \"\"\"\n Extension for collecting core stats like items scraped and start/finish times\n \"\"\"\n-import datetime\n+from datetime import datetime\n \n from scrapy import signals\n \n+\n class CoreStats(object):\n \n def __init__(self, stats):\n self.stats = stats\n+ self.start_time = None\n \n @classmethod\n def from_crawler(cls, crawler):\n@@ -21,11 +23,12 @@\n return o\n \n def spider_opened(self, spider):\n- self.stats.set_value('start_time', datetime.datetime.utcnow(), spider=spider)\n+ self.start_time = datetime.utcnow()\n+ self.stats.set_value('start_time', self.start_time, spider=spider)\n \n def spider_closed(self, spider, reason):\n- finish_time = datetime.datetime.utcnow()\n- elapsed_time = finish_time - self.stats.get_value('start_time')\n+ finish_time = datetime.utcnow()\n+ elapsed_time = finish_time - self.start_time\n elapsed_time_seconds = elapsed_time.total_seconds()\n self.stats.set_value('elapsed_time_seconds', elapsed_time_seconds, spider=spider)\n self.stats.set_value('finish_time', finish_time, spider=spider)\n", "issue": "Exception when using DummyStatsCollector\n### Description\r\n\r\nUsing the DummyStatsCollector results in an exception:\r\n\r\n```\r\n2019-09-09 13:51:23 [scrapy.utils.signal] ERROR: Error caught on signal handler: <bound method CoreStats.spider_closed of <scrapy.extensions.corestats.CoreStats object at 0x7f86269cac18>>\r\nTraceback (most recent call last):\r\n File \".../lib/python3.6/site-packages/twisted/internet/defer.py\", line 150, in maybeDeferred\r\n result = f(*args, **kw)\r\n File \".../lib/python3.6/site-packages/pydispatch/robustapply.py\", line 55, in robustApply\r\n return receiver(*arguments, **named)\r\n File \".../lib/python3.6/site-packages/scrapy/extensions/corestats.py\", line 28, in spider_closed\r\n elapsed_time = finish_time - self.stats.get_value('start_time')\r\nTypeError: unsupported operand type(s) for -: 'datetime.datetime' and 'NoneType'\r\n```\r\n\r\nThis problem has been introduced in aa46e1995cd5cb1099aba17535372b538bd656b3.\r\n\r\n### Steps to Reproduce\r\n\r\nSet `STATS_CLASS = \"scrapy.statscollectors.DummyStatsCollector\"` in the settings module as described in the documentation (https://docs.scrapy.org/en/latest/topics/stats.html#dummystatscollector).\r\n\r\n**Expected behavior:** no exception\r\n**Actual behavior:** exception thrown\r\n**Reproduces how often:** always\r\n\r\n### Versions\r\n\r\nAt least master as of 534de7395da3a53b5a2c89960db9ec5d8fdab60c\r\n\r\n### Fix\r\n\r\nA possible fix is to use the elapsed time as a default argument so that `get_value()` does not return None. 
I can prepare a PR if needed.\r\n\r\n```diff\r\n--- a/scrapy/extensions/corestats.py\r\n+++ b/scrapy/extensions/corestats.py\r\n@@ -25,7 +25,7 @@ class CoreStats(object):\r\n \r\n def spider_closed(self, spider, reason):\r\n finish_time = datetime.datetime.utcnow()\r\n- elapsed_time = finish_time - self.stats.get_value('start_time')\r\n+ elapsed_time = finish_time - self.stats.get_value('start_time', finish_time)\r\n elapsed_time_seconds = elapsed_time.total_seconds()\r\n self.stats.set_value('elapsed_time_seconds', elapsed_time_seconds, spider=spider)\r\n self.stats.set_value('finish_time', finish_time, spider=spider)\r\n```\n", "before_files": [{"content": "\"\"\"\nExtension for collecting core stats like items scraped and start/finish times\n\"\"\"\nimport datetime\n\nfrom scrapy import signals\n\nclass CoreStats(object):\n\n def __init__(self, stats):\n self.stats = stats\n\n @classmethod\n def from_crawler(cls, crawler):\n o = cls(crawler.stats)\n crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)\n crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)\n crawler.signals.connect(o.item_scraped, signal=signals.item_scraped)\n crawler.signals.connect(o.item_dropped, signal=signals.item_dropped)\n crawler.signals.connect(o.response_received, signal=signals.response_received)\n return o\n\n def spider_opened(self, spider):\n self.stats.set_value('start_time', datetime.datetime.utcnow(), spider=spider)\n\n def spider_closed(self, spider, reason):\n finish_time = datetime.datetime.utcnow()\n elapsed_time = finish_time - self.stats.get_value('start_time')\n elapsed_time_seconds = elapsed_time.total_seconds()\n self.stats.set_value('elapsed_time_seconds', elapsed_time_seconds, spider=spider)\n self.stats.set_value('finish_time', finish_time, spider=spider)\n self.stats.set_value('finish_reason', reason, spider=spider)\n\n def item_scraped(self, item, spider):\n self.stats.inc_value('item_scraped_count', spider=spider)\n\n def response_received(self, spider):\n self.stats.inc_value('response_received_count', spider=spider)\n\n def item_dropped(self, item, spider, exception):\n reason = exception.__class__.__name__\n self.stats.inc_value('item_dropped_count', spider=spider)\n self.stats.inc_value('item_dropped_reasons_count/%s' % reason, spider=spider)\n", "path": "scrapy/extensions/corestats.py"}], "after_files": [{"content": "\"\"\"\nExtension for collecting core stats like items scraped and start/finish times\n\"\"\"\nfrom datetime import datetime\n\nfrom scrapy import signals\n\n\nclass CoreStats(object):\n\n def __init__(self, stats):\n self.stats = stats\n self.start_time = None\n\n @classmethod\n def from_crawler(cls, crawler):\n o = cls(crawler.stats)\n crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)\n crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)\n crawler.signals.connect(o.item_scraped, signal=signals.item_scraped)\n crawler.signals.connect(o.item_dropped, signal=signals.item_dropped)\n crawler.signals.connect(o.response_received, signal=signals.response_received)\n return o\n\n def spider_opened(self, spider):\n self.start_time = datetime.utcnow()\n self.stats.set_value('start_time', self.start_time, spider=spider)\n\n def spider_closed(self, spider, reason):\n finish_time = datetime.utcnow()\n elapsed_time = finish_time - self.start_time\n elapsed_time_seconds = elapsed_time.total_seconds()\n self.stats.set_value('elapsed_time_seconds', elapsed_time_seconds, spider=spider)\n 
self.stats.set_value('finish_time', finish_time, spider=spider)\n self.stats.set_value('finish_reason', reason, spider=spider)\n\n def item_scraped(self, item, spider):\n self.stats.inc_value('item_scraped_count', spider=spider)\n\n def response_received(self, spider):\n self.stats.inc_value('response_received_count', spider=spider)\n\n def item_dropped(self, item, spider, exception):\n reason = exception.__class__.__name__\n self.stats.inc_value('item_dropped_count', spider=spider)\n self.stats.inc_value('item_dropped_reasons_count/%s' % reason, spider=spider)\n", "path": "scrapy/extensions/corestats.py"}]} | 1,305 | 297 |
gh_patches_debug_12903 | rasdani/github-patches | git_diff | optuna__optuna-1814 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improvement proposal for TensorBoard integration
## Motivation
- Improve the display of TensorBoard
## Description
### 1. Trial ID
I guess using the directory name as the trial ID would make it easier to find a good trial.
The current integration uses the default hash.
**Current display**

**Proposal**

### 2. Use trial number as horizontal axis
**Current display**

**Proposal**

--- END ISSUE ---
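For reference, a rough sketch of what the two proposals amount to when writing a trial to TensorBoard; the helper function and variable names are illustrative, but `hp.hparams(..., trial_id=...)` and the `step` argument are part of the public APIs:

```python
import tensorflow as tf
from tensorboard.plugins.hparams import api as hp


def log_trial(run_dir, hparams, metric_name, trial_value, trial_number):
    # Proposal 1: use a readable run name ("trial-N") instead of the default hash.
    # Proposal 2: use the trial number as the scalar step (horizontal axis).
    with tf.summary.create_file_writer(run_dir).as_default():
        hp.hparams(hparams, trial_id="trial-%d" % trial_number)
        tf.summary.scalar(metric_name, trial_value, step=trial_number)
```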
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optuna/integration/tensorboard.py`
Content:
```
1 import os
2 from typing import Dict
3
4 import optuna
5 from optuna._experimental import experimental
6 from optuna._imports import try_import
7
8 with try_import() as _imports:
9 from tensorboard.plugins.hparams import api as hp
10 import tensorflow as tf
11
12
13 @experimental("2.0.0")
14 class TensorBoardCallback(object):
15 """Callback to track Optuna trials with TensorBoard.
16
17 This callback adds relevant information that is tracked by Optuna to TensorBoard.
18
19 See `the example <https://github.com/optuna/optuna/blob/master/
20 examples/tensorboard_simple.py>`_.
21
22 Args:
23 dirname:
24 Directory to store TensorBoard logs.
25 metric_name:
26 Name of the metric. Since the metric itself is just a number,
27 `metric_name` can be used to give it a name. So you know later
28 if it was roc-auc or accuracy.
29
30 """
31
32 def __init__(self, dirname: str, metric_name: str) -> None:
33 _imports.check()
34 self._dirname = dirname
35 self._metric_name = metric_name
36 self._hp_params = dict() # type: Dict[str, hp.HParam]
37
38 def __call__(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:
39 if len(self._hp_params) == 0:
40 self._initialization(study)
41 if trial.state != optuna.trial.TrialState.COMPLETE:
42 return
43 trial_value = trial.value if trial.value is not None else float("nan")
44 hparams = dict()
45 for param_name, param_value in trial.params.items():
46 if param_name not in self._hp_params:
47 self._add_distributions(trial.distributions)
48 hparams[self._hp_params[param_name]] = param_value
49 run_name = "trial-%d" % trial.number
50 run_dir = os.path.join(self._dirname, run_name)
51 with tf.summary.create_file_writer(run_dir).as_default():
52 hp.hparams(hparams) # record the values used in this trial
53 tf.summary.scalar(self._metric_name, trial_value, step=1)
54
55 def _add_distributions(
56 self, distributions: Dict[str, optuna.distributions.BaseDistribution]
57 ) -> None:
58 for param_name, param_distribution in distributions.items():
59 if isinstance(param_distribution, optuna.distributions.UniformDistribution):
60 self._hp_params[param_name] = hp.HParam(
61 param_name, hp.RealInterval(param_distribution.low, param_distribution.high)
62 )
63 elif isinstance(param_distribution, optuna.distributions.LogUniformDistribution):
64 self._hp_params[param_name] = hp.HParam(
65 param_name, hp.RealInterval(param_distribution.low, param_distribution.high)
66 )
67 elif isinstance(param_distribution, optuna.distributions.DiscreteUniformDistribution):
68 self._hp_params[param_name] = hp.HParam(
69 param_name, hp.Discrete(param_distribution.low, param_distribution.high)
70 )
71 elif isinstance(param_distribution, optuna.distributions.IntUniformDistribution):
72 self._hp_params[param_name] = hp.HParam(
73 param_name, hp.IntInterval(param_distribution.low, param_distribution.high)
74 )
75 elif isinstance(param_distribution, optuna.distributions.CategoricalDistribution):
76 self._hp_params[param_name] = hp.HParam(
77 param_name, hp.Discrete(param_distribution.choices)
78 )
79 else:
80 distribution_list = [
81 optuna.distributions.UniformDistribution.__name__,
82 optuna.distributions.LogUniformDistribution.__name__,
83 optuna.distributions.DiscreteUniformDistribution.__name__,
84 optuna.distributions.IntUniformDistribution.__name__,
85 optuna.distributions.CategoricalDistribution.__name__,
86 ]
87 raise NotImplementedError(
88 "The distribution {} is not implemented. "
89 "The parameter distribution should be one of the {}".format(
90 param_distribution, distribution_list
91 )
92 )
93
94 def _initialization(self, study: optuna.Study) -> None:
95 completed_trials = [
96 trial
97 for trial in study.get_trials(deepcopy=False)
98 if trial.state == optuna.trial.TrialState.COMPLETE
99 ]
100 for trial in completed_trials:
101 self._add_distributions(trial.distributions)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/optuna/integration/tensorboard.py b/optuna/integration/tensorboard.py
--- a/optuna/integration/tensorboard.py
+++ b/optuna/integration/tensorboard.py
@@ -49,8 +49,8 @@
run_name = "trial-%d" % trial.number
run_dir = os.path.join(self._dirname, run_name)
with tf.summary.create_file_writer(run_dir).as_default():
- hp.hparams(hparams) # record the values used in this trial
- tf.summary.scalar(self._metric_name, trial_value, step=1)
+ hp.hparams(hparams, trial_id=run_name) # record the values used in this trial
+ tf.summary.scalar(self._metric_name, trial_value, step=trial.number)
def _add_distributions(
self, distributions: Dict[str, optuna.distributions.BaseDistribution]
| {"golden_diff": "diff --git a/optuna/integration/tensorboard.py b/optuna/integration/tensorboard.py\n--- a/optuna/integration/tensorboard.py\n+++ b/optuna/integration/tensorboard.py\n@@ -49,8 +49,8 @@\n run_name = \"trial-%d\" % trial.number\n run_dir = os.path.join(self._dirname, run_name)\n with tf.summary.create_file_writer(run_dir).as_default():\n- hp.hparams(hparams) # record the values used in this trial\n- tf.summary.scalar(self._metric_name, trial_value, step=1)\n+ hp.hparams(hparams, trial_id=run_name) # record the values used in this trial\n+ tf.summary.scalar(self._metric_name, trial_value, step=trial.number)\n \n def _add_distributions(\n self, distributions: Dict[str, optuna.distributions.BaseDistribution]\n", "issue": "Improvement proposal for TensorBoard integration\n## Motivation\r\n\r\n- Improve the display of TensorBoard\r\n\r\n## Description\r\n\r\n### 1. Trial ID\r\n\r\nI guess using directory name as trial ID is helpful to find a good trial.\r\nCurrent integration uses default hash.\r\n\r\n**Current display**\r\n\r\n\r\n**Proposal**\r\n\r\n\r\n\r\n### 2. Use trial number as horizontal axis\r\n\r\n**Current display**\r\n\r\n\r\n**Proposal**\r\n\r\n\n", "before_files": [{"content": "import os\nfrom typing import Dict\n\nimport optuna\nfrom optuna._experimental import experimental\nfrom optuna._imports import try_import\n\nwith try_import() as _imports:\n from tensorboard.plugins.hparams import api as hp\n import tensorflow as tf\n\n\n@experimental(\"2.0.0\")\nclass TensorBoardCallback(object):\n \"\"\"Callback to track Optuna trials with TensorBoard.\n\n This callback adds relevant information that is tracked by Optuna to TensorBoard.\n\n See `the example <https://github.com/optuna/optuna/blob/master/\n examples/tensorboard_simple.py>`_.\n\n Args:\n dirname:\n Directory to store TensorBoard logs.\n metric_name:\n Name of the metric. Since the metric itself is just a number,\n `metric_name` can be used to give it a name. 
So you know later\n if it was roc-auc or accuracy.\n\n \"\"\"\n\n def __init__(self, dirname: str, metric_name: str) -> None:\n _imports.check()\n self._dirname = dirname\n self._metric_name = metric_name\n self._hp_params = dict() # type: Dict[str, hp.HParam]\n\n def __call__(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:\n if len(self._hp_params) == 0:\n self._initialization(study)\n if trial.state != optuna.trial.TrialState.COMPLETE:\n return\n trial_value = trial.value if trial.value is not None else float(\"nan\")\n hparams = dict()\n for param_name, param_value in trial.params.items():\n if param_name not in self._hp_params:\n self._add_distributions(trial.distributions)\n hparams[self._hp_params[param_name]] = param_value\n run_name = \"trial-%d\" % trial.number\n run_dir = os.path.join(self._dirname, run_name)\n with tf.summary.create_file_writer(run_dir).as_default():\n hp.hparams(hparams) # record the values used in this trial\n tf.summary.scalar(self._metric_name, trial_value, step=1)\n\n def _add_distributions(\n self, distributions: Dict[str, optuna.distributions.BaseDistribution]\n ) -> None:\n for param_name, param_distribution in distributions.items():\n if isinstance(param_distribution, optuna.distributions.UniformDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name, hp.RealInterval(param_distribution.low, param_distribution.high)\n )\n elif isinstance(param_distribution, optuna.distributions.LogUniformDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name, hp.RealInterval(param_distribution.low, param_distribution.high)\n )\n elif isinstance(param_distribution, optuna.distributions.DiscreteUniformDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name, hp.Discrete(param_distribution.low, param_distribution.high)\n )\n elif isinstance(param_distribution, optuna.distributions.IntUniformDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name, hp.IntInterval(param_distribution.low, param_distribution.high)\n )\n elif isinstance(param_distribution, optuna.distributions.CategoricalDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name, hp.Discrete(param_distribution.choices)\n )\n else:\n distribution_list = [\n optuna.distributions.UniformDistribution.__name__,\n optuna.distributions.LogUniformDistribution.__name__,\n optuna.distributions.DiscreteUniformDistribution.__name__,\n optuna.distributions.IntUniformDistribution.__name__,\n optuna.distributions.CategoricalDistribution.__name__,\n ]\n raise NotImplementedError(\n \"The distribution {} is not implemented. 
\"\n \"The parameter distribution should be one of the {}\".format(\n param_distribution, distribution_list\n )\n )\n\n def _initialization(self, study: optuna.Study) -> None:\n completed_trials = [\n trial\n for trial in study.get_trials(deepcopy=False)\n if trial.state == optuna.trial.TrialState.COMPLETE\n ]\n for trial in completed_trials:\n self._add_distributions(trial.distributions)\n", "path": "optuna/integration/tensorboard.py"}], "after_files": [{"content": "import os\nfrom typing import Dict\n\nimport optuna\nfrom optuna._experimental import experimental\nfrom optuna._imports import try_import\n\nwith try_import() as _imports:\n from tensorboard.plugins.hparams import api as hp\n import tensorflow as tf\n\n\n@experimental(\"2.0.0\")\nclass TensorBoardCallback(object):\n \"\"\"Callback to track Optuna trials with TensorBoard.\n\n This callback adds relevant information that is tracked by Optuna to TensorBoard.\n\n See `the example <https://github.com/optuna/optuna/blob/master/\n examples/tensorboard_simple.py>`_.\n\n Args:\n dirname:\n Directory to store TensorBoard logs.\n metric_name:\n Name of the metric. Since the metric itself is just a number,\n `metric_name` can be used to give it a name. So you know later\n if it was roc-auc or accuracy.\n\n \"\"\"\n\n def __init__(self, dirname: str, metric_name: str) -> None:\n _imports.check()\n self._dirname = dirname\n self._metric_name = metric_name\n self._hp_params = dict() # type: Dict[str, hp.HParam]\n\n def __call__(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:\n if len(self._hp_params) == 0:\n self._initialization(study)\n if trial.state != optuna.trial.TrialState.COMPLETE:\n return\n trial_value = trial.value if trial.value is not None else float(\"nan\")\n hparams = dict()\n for param_name, param_value in trial.params.items():\n if param_name not in self._hp_params:\n self._add_distributions(trial.distributions)\n hparams[self._hp_params[param_name]] = param_value\n run_name = \"trial-%d\" % trial.number\n run_dir = os.path.join(self._dirname, run_name)\n with tf.summary.create_file_writer(run_dir).as_default():\n hp.hparams(hparams, trial_id=run_name) # record the values used in this trial\n tf.summary.scalar(self._metric_name, trial_value, step=trial.number)\n\n def _add_distributions(\n self, distributions: Dict[str, optuna.distributions.BaseDistribution]\n ) -> None:\n for param_name, param_distribution in distributions.items():\n if isinstance(param_distribution, optuna.distributions.UniformDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name, hp.RealInterval(param_distribution.low, param_distribution.high)\n )\n elif isinstance(param_distribution, optuna.distributions.LogUniformDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name, hp.RealInterval(param_distribution.low, param_distribution.high)\n )\n elif isinstance(param_distribution, optuna.distributions.DiscreteUniformDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name, hp.Discrete(param_distribution.low, param_distribution.high)\n )\n elif isinstance(param_distribution, optuna.distributions.IntUniformDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name, hp.IntInterval(param_distribution.low, param_distribution.high)\n )\n elif isinstance(param_distribution, optuna.distributions.CategoricalDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name, hp.Discrete(param_distribution.choices)\n )\n else:\n distribution_list = [\n 
optuna.distributions.UniformDistribution.__name__,\n optuna.distributions.LogUniformDistribution.__name__,\n optuna.distributions.DiscreteUniformDistribution.__name__,\n optuna.distributions.IntUniformDistribution.__name__,\n optuna.distributions.CategoricalDistribution.__name__,\n ]\n raise NotImplementedError(\n \"The distribution {} is not implemented. \"\n \"The parameter distribution should be one of the {}\".format(\n param_distribution, distribution_list\n )\n )\n\n def _initialization(self, study: optuna.Study) -> None:\n completed_trials = [\n trial\n for trial in study.get_trials(deepcopy=False)\n if trial.state == optuna.trial.TrialState.COMPLETE\n ]\n for trial in completed_trials:\n self._add_distributions(trial.distributions)\n", "path": "optuna/integration/tensorboard.py"}]} | 1,777 | 196 |
gh_patches_debug_29 | rasdani/github-patches | git_diff | MongoEngine__mongoengine-2424 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When will the new release be?
When are you planning a new release? Some features have been added since the last one in May. Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mongoengine/__init__.py`
Content:
```
1 # Import submodules so that we can expose their __all__
2 from mongoengine import connection
3 from mongoengine import document
4 from mongoengine import errors
5 from mongoengine import fields
6 from mongoengine import queryset
7 from mongoengine import signals
8
9 # Import everything from each submodule so that it can be accessed via
10 # mongoengine, e.g. instead of `from mongoengine.connection import connect`,
11 # users can simply use `from mongoengine import connect`, or even
12 # `from mongoengine import *` and then `connect('testdb')`.
13 from mongoengine.connection import *
14 from mongoengine.document import *
15 from mongoengine.errors import *
16 from mongoengine.fields import *
17 from mongoengine.queryset import *
18 from mongoengine.signals import *
19
20
21 __all__ = (
22 list(document.__all__)
23 + list(fields.__all__)
24 + list(connection.__all__)
25 + list(queryset.__all__)
26 + list(signals.__all__)
27 + list(errors.__all__)
28 )
29
30
31 VERSION = (0, 20, 0)
32
33
34 def get_version():
35 """Return the VERSION as a string.
36
37 For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.
38 """
39 return ".".join(map(str, VERSION))
40
41
42 __version__ = get_version()
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mongoengine/__init__.py b/mongoengine/__init__.py
--- a/mongoengine/__init__.py
+++ b/mongoengine/__init__.py
@@ -28,7 +28,7 @@
)
-VERSION = (0, 20, 0)
+VERSION = (0, 21, 0)
def get_version():
| {"golden_diff": "diff --git a/mongoengine/__init__.py b/mongoengine/__init__.py\n--- a/mongoengine/__init__.py\n+++ b/mongoengine/__init__.py\n@@ -28,7 +28,7 @@\n )\n \n \n-VERSION = (0, 20, 0)\n+VERSION = (0, 21, 0)\n \n \n def get_version():\n", "issue": "When will be new release?\nWhen are you planning a new release? Some features has been added since last one in May. Thanks!\n", "before_files": [{"content": "# Import submodules so that we can expose their __all__\nfrom mongoengine import connection\nfrom mongoengine import document\nfrom mongoengine import errors\nfrom mongoengine import fields\nfrom mongoengine import queryset\nfrom mongoengine import signals\n\n# Import everything from each submodule so that it can be accessed via\n# mongoengine, e.g. instead of `from mongoengine.connection import connect`,\n# users can simply use `from mongoengine import connect`, or even\n# `from mongoengine import *` and then `connect('testdb')`.\nfrom mongoengine.connection import *\nfrom mongoengine.document import *\nfrom mongoengine.errors import *\nfrom mongoengine.fields import *\nfrom mongoengine.queryset import *\nfrom mongoengine.signals import *\n\n\n__all__ = (\n list(document.__all__)\n + list(fields.__all__)\n + list(connection.__all__)\n + list(queryset.__all__)\n + list(signals.__all__)\n + list(errors.__all__)\n)\n\n\nVERSION = (0, 20, 0)\n\n\ndef get_version():\n \"\"\"Return the VERSION as a string.\n\n For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.\n \"\"\"\n return \".\".join(map(str, VERSION))\n\n\n__version__ = get_version()\n", "path": "mongoengine/__init__.py"}], "after_files": [{"content": "# Import submodules so that we can expose their __all__\nfrom mongoengine import connection\nfrom mongoengine import document\nfrom mongoengine import errors\nfrom mongoengine import fields\nfrom mongoengine import queryset\nfrom mongoengine import signals\n\n# Import everything from each submodule so that it can be accessed via\n# mongoengine, e.g. instead of `from mongoengine.connection import connect`,\n# users can simply use `from mongoengine import connect`, or even\n# `from mongoengine import *` and then `connect('testdb')`.\nfrom mongoengine.connection import *\nfrom mongoengine.document import *\nfrom mongoengine.errors import *\nfrom mongoengine.fields import *\nfrom mongoengine.queryset import *\nfrom mongoengine.signals import *\n\n\n__all__ = (\n list(document.__all__)\n + list(fields.__all__)\n + list(connection.__all__)\n + list(queryset.__all__)\n + list(signals.__all__)\n + list(errors.__all__)\n)\n\n\nVERSION = (0, 21, 0)\n\n\ndef get_version():\n \"\"\"Return the VERSION as a string.\n\n For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.\n \"\"\"\n return \".\".join(map(str, VERSION))\n\n\n__version__ = get_version()\n", "path": "mongoengine/__init__.py"}]} | 644 | 85 |
gh_patches_debug_37916 | rasdani/github-patches | git_diff | dotkom__onlineweb4-1122 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a redirect to the newly created item after creating it in the dashboard inventory
As title states.
--- END ISSUE ---
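A minimal sketch of the requested behaviour in the `new` view (hedged; it reuses the `details` view and `inventory_form` names already defined in the file below):

```python
from django.contrib import messages
from django.shortcuts import redirect


def new(request):
    # ... permission check and form construction as in the existing view ...
    if inventory_form.is_valid():
        item = inventory_form.save()
        messages.success(request, u'Varen ble opprettet')
        # Redirect to the detail page of the item that was just created,
        # instead of back to the inventory index.
        return redirect(details, item.id)
```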
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/inventory/dashboard/views.py`
Content:
```
1 # -*- encoding: utf-8 -*-
2
3 from datetime import datetime
4
5 from django.contrib import messages
6 from django.contrib.auth.decorators import login_required
7 from django.core.exceptions import PermissionDenied
8 from django.shortcuts import render, get_object_or_404, redirect
9 from django.http import HttpResponseBadRequest, HttpResponse, JsonResponse
10
11 from guardian.decorators import permission_required
12
13 from apps.dashboard.tools import has_access, get_base_context
14 from apps.inventory.dashboard.forms import ItemForm, BatchForm
15 from apps.inventory.models import Item, Batch
16
17
18 @login_required
19 @permission_required('inventory.view_item', return_403=True)
20 def index(request):
21
22 # Generic check to see if user has access to dashboard. (In Komiteer or superuser)
23 if not has_access(request):
24 raise PermissionDenied
25
26 # Create the base context needed for the sidebar
27 context = get_base_context(request)
28
29 context['items'] = Item.objects.all().order_by('name')
30
31 return render(request, 'inventory/dashboard/index.html', context)
32
33
34 @login_required
35 @permission_required('inventory.add_item', return_403=True)
36 def new(request):
37
38 if not has_access(request):
39 raise PermissionDenied
40
41 # Get base context
42 context = get_base_context(request)
43
44 if request.method == 'POST':
45 inventory_form = ItemForm(request.POST)
46
47 if not inventory_form.is_valid():
48 messages.error(request, u'Noen av de påkrevde feltene inneholder feil.')
49 else:
50 inventory_form.save()
51 messages.success(request, u'Varen ble opprettet')
52 return redirect(index)
53
54 context['form'] = inventory_form
55
56 else:
57 context['form'] = ItemForm()
58
59 return render(request, 'inventory/dashboard/new.html', context)
60
61
62 @login_required
63 @permission_required('inventory.view_item', return_403=True)
64 def details(request, item_pk):
65 # Generic check to see if user has access to dashboard. (In Komiteer or superuser)
66 if not has_access(request):
67 raise PermissionDenied
68
69 # Create the base context needed for the sidebar
70 context = get_base_context(request)
71
72 context['item'] = get_object_or_404(Item, pk=item_pk)
73
74 if request.method == 'POST':
75 if 'inventory.change_item' not in context['user_permissions']:
76 raise PermissionDenied
77
78 item_form = ItemForm(request.POST, instance=context['item'])
79 if not item_form.is_valid():
80 messages.error(request, u'Noen av de påkrevde feltene inneholder feil.')
81 else:
82 item_form.save()
83 messages.success(request, u'Varen ble oppdatert')
84 context['item_form'] = item_form
85 else:
86 context['item_form'] = ItemForm(instance=context['item'])
87
88 context['new_batch_form'] = BatchForm()
89
90 context['batch_forms'] = [(batch.id, BatchForm(instance=batch)) for batch in Batch.objects.filter(item=context['item'])]
91
92 return render(request, 'inventory/dashboard/details.html', context)
93
94 @login_required
95 @permission_required('inventory.delete_item', return_403=True)
96 def item_delete(request, item_pk):
97 if not has_access(request):
98 raise PermissionDenied
99
100 item = get_object_or_404(Item, pk=item_pk)
101
102 item.delete()
103
104 messages.success(request, u'Varen %s ble slettet.' % item.name)
105
106 return redirect(index)
107
108 @login_required
109 @permission_required('inventory.add_batch', return_403=True)
110 def batch_new(request, item_pk):
111 if not has_access(request):
112 raise PermissionDenied
113
114 # Get base context
115
116 item = get_object_or_404(Item, pk=item_pk)
117
118 if request.method == 'POST':
119 batch_form = BatchForm(request.POST)
120 batch = batch_form.save(commit=False)
121 batch.item = item
122
123 if not batch_form.is_valid():
124 messages.error(request, u'Noen av de påkrevde feltene inneholder feil.')
125 else:
126 batch.save()
127 messages.success(request, u'Batchen ble lagt til.')
128
129 return redirect(details, item_pk=item_pk)
130
131 raise PermissionDenied
132
133 @login_required
134 @permission_required('inventory.change_batch', return_403=True)
135 def batch(request, item_pk, batch_pk):
136 if not has_access(request):
137 raise PermissionDenied
138
139 # Get base context
140
141 item = get_object_or_404(Item, pk=item_pk)
142 batch = get_object_or_404(Batch, pk=batch_pk)
143
144 if request.method == 'POST':
145 batch_form = BatchForm(request.POST, instance=batch)
146
147 if not batch_form.is_valid():
148 messages.error(request, u'Noen av de påkrevde feltene inneholder feil.')
149 else:
150 batch_form.save()
151 messages.success(request, u'Batchen ble oppdatert.')
152
153 return redirect(details, item_pk=item_pk)
154
155 raise PermissionDenied
156
157
158 @login_required
159 @permission_required('inventory.delete_batch', return_403=True)
160 def batch_delete(request, item_pk, batch_pk):
161 if not has_access(request):
162 raise PermissionDenied
163
164 batch = get_object_or_404(Batch, pk=batch_pk)
165
166 batch.delete()
167
168 messages.success(request, u'Batchen ble slettet.')
169
170 return redirect(details, item_pk=item_pk)
171
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/inventory/dashboard/views.py b/apps/inventory/dashboard/views.py
--- a/apps/inventory/dashboard/views.py
+++ b/apps/inventory/dashboard/views.py
@@ -47,9 +47,9 @@
if not inventory_form.is_valid():
messages.error(request, u'Noen av de påkrevde feltene inneholder feil.')
else:
- inventory_form.save()
+ item = inventory_form.save()
messages.success(request, u'Varen ble opprettet')
- return redirect(index)
+ return redirect(details, item.id)
context['form'] = inventory_form
@@ -99,11 +99,15 @@
item = get_object_or_404(Item, pk=item_pk)
- item.delete()
+ if request.method == 'POST':
+
+ item.delete()
+
+ messages.success(request, u'Varen %s ble slettet.' % item.name)
- messages.success(request, u'Varen %s ble slettet.' % item.name)
+ return redirect(index)
- return redirect(index)
+ raise PermissionDenied
@login_required
@permission_required('inventory.add_batch', return_403=True)
@@ -111,18 +115,27 @@
if not has_access(request):
raise PermissionDenied
- # Get base context
+ # Field mapper
+ fieldmap = {
+ 'amount': u'Mengde',
+ 'expiration_date': u'Utløpsdato',
+ }
item = get_object_or_404(Item, pk=item_pk)
if request.method == 'POST':
batch_form = BatchForm(request.POST)
- batch = batch_form.save(commit=False)
- batch.item = item
if not batch_form.is_valid():
- messages.error(request, u'Noen av de påkrevde feltene inneholder feil.')
+ # Dirty hack to display errors since the form is not passed in redirect context
+ error_reply = u"Feil i felt:"
+ for field, error in batch_form.errors.items():
+ error_reply += ' ' + fieldmap[field] + ' (' + batch_form.error_class.as_text(error) + '),'
+
+ messages.error(request, error_reply.rstrip(','))
else:
+ batch = batch_form.save(commit=False)
+ batch.item = item
batch.save()
messages.success(request, u'Batchen ble lagt til.')
@@ -163,9 +176,12 @@
batch = get_object_or_404(Batch, pk=batch_pk)
- batch.delete()
+ if request.method == 'POST':
- messages.success(request, u'Batchen ble slettet.')
+ batch.delete()
+ messages.success(request, u'Batchen ble slettet.')
- return redirect(details, item_pk=item_pk)
+ return redirect(details, item_pk=item_pk)
+
+ raise PermissionDenied
| {"golden_diff": "diff --git a/apps/inventory/dashboard/views.py b/apps/inventory/dashboard/views.py\n--- a/apps/inventory/dashboard/views.py\n+++ b/apps/inventory/dashboard/views.py\n@@ -47,9 +47,9 @@\n if not inventory_form.is_valid():\n messages.error(request, u'Noen av de p\u00e5krevde feltene inneholder feil.')\n else:\n- inventory_form.save()\n+ item = inventory_form.save()\n messages.success(request, u'Varen ble opprettet')\n- return redirect(index)\n+ return redirect(details, item.id)\n \n context['form'] = inventory_form\n \n@@ -99,11 +99,15 @@\n \n item = get_object_or_404(Item, pk=item_pk)\n \n- item.delete()\n+ if request.method == 'POST':\n+\n+ item.delete()\n+\n+ messages.success(request, u'Varen %s ble slettet.' % item.name)\n \n- messages.success(request, u'Varen %s ble slettet.' % item.name)\n+ return redirect(index)\n \n- return redirect(index)\n+ raise PermissionDenied\n \n @login_required\n @permission_required('inventory.add_batch', return_403=True)\n@@ -111,18 +115,27 @@\n if not has_access(request):\n raise PermissionDenied\n \n- # Get base context\n+ # Field mapper\n+ fieldmap = {\n+ 'amount': u'Mengde',\n+ 'expiration_date': u'Utl\u00f8psdato',\n+ }\n \n item = get_object_or_404(Item, pk=item_pk)\n \n if request.method == 'POST':\n batch_form = BatchForm(request.POST)\n- batch = batch_form.save(commit=False)\n- batch.item = item\n \n if not batch_form.is_valid():\n- messages.error(request, u'Noen av de p\u00e5krevde feltene inneholder feil.')\n+ # Dirty hack to display errors since the form is not passed in redirect context\n+ error_reply = u\"Feil i felt:\"\n+ for field, error in batch_form.errors.items():\n+ error_reply += ' ' + fieldmap[field] + ' (' + batch_form.error_class.as_text(error) + '),'\n+\n+ messages.error(request, error_reply.rstrip(','))\n else:\n+ batch = batch_form.save(commit=False)\n+ batch.item = item\n batch.save()\n messages.success(request, u'Batchen ble lagt til.')\n \n@@ -163,9 +176,12 @@\n \n batch = get_object_or_404(Batch, pk=batch_pk)\n \n- batch.delete()\n+ if request.method == 'POST':\n \n- messages.success(request, u'Batchen ble slettet.')\n+ batch.delete()\n+ messages.success(request, u'Batchen ble slettet.')\n \n- return redirect(details, item_pk=item_pk)\n+ return redirect(details, item_pk=item_pk)\n+\n+ raise PermissionDenied\n", "issue": "Add redirect to newly created item after creating it in dashboard inventory\nAs title states.\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\n\nfrom datetime import datetime\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponseBadRequest, HttpResponse, JsonResponse\n\nfrom guardian.decorators import permission_required\n\nfrom apps.dashboard.tools import has_access, get_base_context\nfrom apps.inventory.dashboard.forms import ItemForm, BatchForm\nfrom apps.inventory.models import Item, Batch\n\n\n@login_required\n@permission_required('inventory.view_item', return_403=True)\ndef index(request):\n\n # Generic check to see if user has access to dashboard. 
(In Komiteer or superuser)\n if not has_access(request):\n raise PermissionDenied\n\n # Create the base context needed for the sidebar\n context = get_base_context(request)\n\n context['items'] = Item.objects.all().order_by('name')\n\n return render(request, 'inventory/dashboard/index.html', context)\n\n\n@login_required\n@permission_required('inventory.add_item', return_403=True)\ndef new(request):\n\n if not has_access(request):\n raise PermissionDenied\n\n # Get base context\n context = get_base_context(request)\n\n if request.method == 'POST':\n inventory_form = ItemForm(request.POST)\n\n if not inventory_form.is_valid():\n messages.error(request, u'Noen av de p\u00e5krevde feltene inneholder feil.')\n else:\n inventory_form.save()\n messages.success(request, u'Varen ble opprettet')\n return redirect(index)\n\n context['form'] = inventory_form\n\n else:\n context['form'] = ItemForm()\n\n return render(request, 'inventory/dashboard/new.html', context)\n\n\n@login_required\n@permission_required('inventory.view_item', return_403=True)\ndef details(request, item_pk):\n # Generic check to see if user has access to dashboard. (In Komiteer or superuser)\n if not has_access(request):\n raise PermissionDenied\n\n # Create the base context needed for the sidebar\n context = get_base_context(request)\n\n context['item'] = get_object_or_404(Item, pk=item_pk)\n\n if request.method == 'POST':\n if 'inventory.change_item' not in context['user_permissions']:\n raise PermissionDenied\n\n item_form = ItemForm(request.POST, instance=context['item'])\n if not item_form.is_valid():\n messages.error(request, u'Noen av de p\u00e5krevde feltene inneholder feil.')\n else:\n item_form.save()\n messages.success(request, u'Varen ble oppdatert')\n context['item_form'] = item_form\n else:\n context['item_form'] = ItemForm(instance=context['item'])\n\n context['new_batch_form'] = BatchForm()\n\n context['batch_forms'] = [(batch.id, BatchForm(instance=batch)) for batch in Batch.objects.filter(item=context['item'])]\n\n return render(request, 'inventory/dashboard/details.html', context)\n\n@login_required\n@permission_required('inventory.delete_item', return_403=True)\ndef item_delete(request, item_pk):\n if not has_access(request):\n raise PermissionDenied\n\n item = get_object_or_404(Item, pk=item_pk)\n\n item.delete()\n\n messages.success(request, u'Varen %s ble slettet.' 
% item.name)\n\n return redirect(index)\n\n@login_required\n@permission_required('inventory.add_batch', return_403=True)\ndef batch_new(request, item_pk):\n if not has_access(request):\n raise PermissionDenied\n\n # Get base context\n\n item = get_object_or_404(Item, pk=item_pk)\n\n if request.method == 'POST':\n batch_form = BatchForm(request.POST)\n batch = batch_form.save(commit=False)\n batch.item = item\n\n if not batch_form.is_valid():\n messages.error(request, u'Noen av de p\u00e5krevde feltene inneholder feil.')\n else:\n batch.save()\n messages.success(request, u'Batchen ble lagt til.')\n\n return redirect(details, item_pk=item_pk)\n\n raise PermissionDenied\n\n@login_required\n@permission_required('inventory.change_batch', return_403=True)\ndef batch(request, item_pk, batch_pk):\n if not has_access(request):\n raise PermissionDenied\n\n # Get base context\n\n item = get_object_or_404(Item, pk=item_pk)\n batch = get_object_or_404(Batch, pk=batch_pk)\n\n if request.method == 'POST':\n batch_form = BatchForm(request.POST, instance=batch)\n\n if not batch_form.is_valid():\n messages.error(request, u'Noen av de p\u00e5krevde feltene inneholder feil.')\n else:\n batch_form.save()\n messages.success(request, u'Batchen ble oppdatert.')\n\n return redirect(details, item_pk=item_pk)\n\n raise PermissionDenied\n\n\n@login_required\n@permission_required('inventory.delete_batch', return_403=True)\ndef batch_delete(request, item_pk, batch_pk):\n if not has_access(request):\n raise PermissionDenied\n\n batch = get_object_or_404(Batch, pk=batch_pk)\n\n batch.delete()\n\n messages.success(request, u'Batchen ble slettet.')\n\n return redirect(details, item_pk=item_pk)\n\n", "path": "apps/inventory/dashboard/views.py"}], "after_files": [{"content": "# -*- encoding: utf-8 -*-\n\nfrom datetime import datetime\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponseBadRequest, HttpResponse, JsonResponse\n\nfrom guardian.decorators import permission_required\n\nfrom apps.dashboard.tools import has_access, get_base_context\nfrom apps.inventory.dashboard.forms import ItemForm, BatchForm\nfrom apps.inventory.models import Item, Batch\n\n\n@login_required\n@permission_required('inventory.view_item', return_403=True)\ndef index(request):\n\n # Generic check to see if user has access to dashboard. 
(In Komiteer or superuser)\n if not has_access(request):\n raise PermissionDenied\n\n # Create the base context needed for the sidebar\n context = get_base_context(request)\n\n context['items'] = Item.objects.all().order_by('name')\n\n return render(request, 'inventory/dashboard/index.html', context)\n\n\n@login_required\n@permission_required('inventory.add_item', return_403=True)\ndef new(request):\n\n if not has_access(request):\n raise PermissionDenied\n\n # Get base context\n context = get_base_context(request)\n\n if request.method == 'POST':\n inventory_form = ItemForm(request.POST)\n\n if not inventory_form.is_valid():\n messages.error(request, u'Noen av de p\u00e5krevde feltene inneholder feil.')\n else:\n item = inventory_form.save()\n messages.success(request, u'Varen ble opprettet')\n return redirect(details, item.id)\n\n context['form'] = inventory_form\n\n else:\n context['form'] = ItemForm()\n\n return render(request, 'inventory/dashboard/new.html', context)\n\n\n@login_required\n@permission_required('inventory.view_item', return_403=True)\ndef details(request, item_pk):\n # Generic check to see if user has access to dashboard. (In Komiteer or superuser)\n if not has_access(request):\n raise PermissionDenied\n\n # Create the base context needed for the sidebar\n context = get_base_context(request)\n\n context['item'] = get_object_or_404(Item, pk=item_pk)\n\n if request.method == 'POST':\n if 'inventory.change_item' not in context['user_permissions']:\n raise PermissionDenied\n\n item_form = ItemForm(request.POST, instance=context['item'])\n if not item_form.is_valid():\n messages.error(request, u'Noen av de p\u00e5krevde feltene inneholder feil.')\n else:\n item_form.save()\n messages.success(request, u'Varen ble oppdatert')\n context['item_form'] = item_form\n else:\n context['item_form'] = ItemForm(instance=context['item'])\n\n context['new_batch_form'] = BatchForm()\n\n context['batch_forms'] = [(batch.id, BatchForm(instance=batch)) for batch in Batch.objects.filter(item=context['item'])]\n\n return render(request, 'inventory/dashboard/details.html', context)\n\n@login_required\n@permission_required('inventory.delete_item', return_403=True)\ndef item_delete(request, item_pk):\n if not has_access(request):\n raise PermissionDenied\n\n item = get_object_or_404(Item, pk=item_pk)\n\n if request.method == 'POST':\n\n item.delete()\n\n messages.success(request, u'Varen %s ble slettet.' 
% item.name)\n\n return redirect(index)\n\n raise PermissionDenied\n\n@login_required\n@permission_required('inventory.add_batch', return_403=True)\ndef batch_new(request, item_pk):\n if not has_access(request):\n raise PermissionDenied\n\n # Field mapper\n fieldmap = {\n 'amount': u'Mengde',\n 'expiration_date': u'Utl\u00f8psdato',\n }\n\n item = get_object_or_404(Item, pk=item_pk)\n\n if request.method == 'POST':\n batch_form = BatchForm(request.POST)\n\n if not batch_form.is_valid():\n # Dirty hack to display errors since the form is not passed in redirect context\n error_reply = u\"Feil i felt:\"\n for field, error in batch_form.errors.items():\n error_reply += ' ' + fieldmap[field] + ' (' + batch_form.error_class.as_text(error) + '),'\n\n messages.error(request, error_reply.rstrip(','))\n else:\n batch = batch_form.save(commit=False)\n batch.item = item\n batch.save()\n messages.success(request, u'Batchen ble lagt til.')\n\n return redirect(details, item_pk=item_pk)\n\n raise PermissionDenied\n\n@login_required\n@permission_required('inventory.change_batch', return_403=True)\ndef batch(request, item_pk, batch_pk):\n if not has_access(request):\n raise PermissionDenied\n\n # Get base context\n\n item = get_object_or_404(Item, pk=item_pk)\n batch = get_object_or_404(Batch, pk=batch_pk)\n\n if request.method == 'POST':\n batch_form = BatchForm(request.POST, instance=batch)\n\n if not batch_form.is_valid():\n messages.error(request, u'Noen av de p\u00e5krevde feltene inneholder feil.')\n else:\n batch_form.save()\n messages.success(request, u'Batchen ble oppdatert.')\n\n return redirect(details, item_pk=item_pk)\n\n raise PermissionDenied\n\n\n@login_required\n@permission_required('inventory.delete_batch', return_403=True)\ndef batch_delete(request, item_pk, batch_pk):\n if not has_access(request):\n raise PermissionDenied\n\n batch = get_object_or_404(Batch, pk=batch_pk)\n\n if request.method == 'POST':\n\n batch.delete()\n messages.success(request, u'Batchen ble slettet.')\n\n return redirect(details, item_pk=item_pk)\n\n raise PermissionDenied\n\n", "path": "apps/inventory/dashboard/views.py"}]} | 1,870 | 659 |
gh_patches_debug_35255 | rasdani/github-patches | git_diff | deepchecks__deepchecks-980 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG][CV] Confusion matrix display labels are inverted
The "True value" and "Predicted value" are inverted. The matrix should be transposed prior to display.
--- END ISSUE ---
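A minimal sketch of the display-time transpose the issue suggests (swapping the row/column indices where the matrix is accumulated would be an equivalent fix); the class names and counts below are invented for illustration:

```python
import numpy as np
from plotly.express import imshow

# Pretend the check accumulated counts as matrix[predicted_class, true_class].
matrix = np.array([[5, 2],
                   [1, 7]])

# Transposing before plotting puts true classes on the rows (y axis) and
# predicted classes on the columns (x axis), matching the axis titles.
fig = imshow(matrix.T,
             x=['cat', 'dog'],
             y=['cat', 'dog'],
             text_auto=True)
fig.update_xaxes(title='Predicted Value')
fig.update_yaxes(title='True value')
fig.show()
```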
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepchecks/vision/checks/performance/confusion_matrix.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Module containing confusion matrix report check."""
12 from typing import Any
13
14 import numpy as np
15 from plotly.express import imshow
16 from queue import PriorityQueue
17
18 from deepchecks.core import CheckResult, DatasetKind
19 from deepchecks.vision import SingleDatasetCheck, Context
20 from deepchecks.vision.vision_data import TaskType
21 from deepchecks.vision.metrics_utils.iou_utils import jaccard_iou
22
23 __all__ = ['ConfusionMatrixReport']
24
25
26 def filter_confusion_matrix(confusion_matrix, number_of_categories):
27 pq = PriorityQueue()
28 for row, values in enumerate(confusion_matrix):
29 for col, value in enumerate(values):
30 if row != col:
31 pq.put((-value, (row, col)))
32 categories = set()
33 while not pq.empty():
34 if len(categories) >= number_of_categories:
35 break
36 _, (row, col) = pq.get()
37 categories.add(row)
38 categories.add(col)
39 categories = sorted(categories)
40 return confusion_matrix[np.ix_(categories, categories)], categories
41
42
43 class ConfusionMatrixReport(SingleDatasetCheck):
44 """Calculate the confusion matrix of the model on the given dataset.
45
46 For object detection, each detected bounding box calculates the IoU for each label and then is that label class is
47 used for the confusion matrix. detected bounding boxes that don't match a label has their own class and same
48 for labels without detected bounding boxes.
49
50 Parameters
51 ----------
52 categories_to_display (int, default 10):
53 Maximum number of categories to display
54 confidence_threshold (float, default 0.3):
55 Threshold to consider bounding box as detected.
56 iou_threshold (float, default 0.5):
57 Threshold to consider detected bounding box as labeled bounding box.
58 """
59
60 def __init__(self,
61 categories_to_display: int = 10,
62 confidence_threshold: float = 0.3,
63 iou_threshold: float = 0.5):
64 super().__init__()
65 self.confidence_threshold = confidence_threshold
66 self.categories_to_display = categories_to_display
67 self.iou_threshold = iou_threshold
68 self.matrix = None
69 self.num_classes = 0
70 self.task_type = None
71
72 def initialize_run(self, context: Context, dataset_kind: DatasetKind = None):
73 """Initialize run by creating an empty matrix the size of the data."""
74 context.assert_task_type(TaskType.CLASSIFICATION, TaskType.OBJECT_DETECTION)
75
76 if dataset_kind == DatasetKind.TRAIN:
77 dataset = context.train
78 else:
79 dataset = context.test
80 self.task_type = dataset.task_type
81 self.num_classes = dataset.num_classes
82
83 matrix_size = self.num_classes if self.task_type == TaskType.CLASSIFICATION else self.num_classes + 1
84
85 self.matrix = np.zeros((matrix_size, matrix_size))
86
87 def update(self, context: Context, batch: Any, dataset_kind: DatasetKind = DatasetKind.TRAIN):
88 """Add batch to confusion matrix."""
89 if dataset_kind == DatasetKind.TRAIN:
90 dataset = context.train
91 else:
92 dataset = context.test
93
94 labels = dataset.batch_to_labels(batch)
95 predictions = context.infer(batch, dataset_kind)
96
97 if self.task_type == TaskType.CLASSIFICATION:
98 self.update_classification(predictions, labels)
99 elif self.task_type == TaskType.OBJECT_DETECTION:
100 self.update_object_detection(predictions, labels)
101
102 def compute(self, context: Context, dataset_kind: DatasetKind = None) -> CheckResult:
103 """Compute and plot confusion matrix after all batches were processed."""
104 if dataset_kind == DatasetKind.TRAIN:
105 dataset = context.train
106 else:
107 dataset = context.test
108 display_confusion_matrix, categories = filter_confusion_matrix(self.matrix, self.categories_to_display)
109
110 description = ''
111
112 display_categories = []
113 for category in categories:
114 if self.num_classes == category:
115 description += ('last category are detections that do not overlap with labeled data'
116 ' and labels that have not been detected. ')
117 display_categories.append('not found')
118 else:
119 display_categories.append(dataset.label_id_to_name(category))
120
121 description += f'Showing {self.categories_to_display} of {self.num_classes} classes:'
122
123 fig = imshow(display_confusion_matrix,
124 x=display_categories,
125 y=display_categories,
126 text_auto=True)
127
128 fig.update_layout(width=600, height=600)
129 fig.update_xaxes(title='Predicted Value', type='category')
130 fig.update_yaxes(title='True value', type='category')
131
132 return CheckResult(
133 self.matrix,
134 header='Confusion Matrix',
135 display=[description, fig]
136 )
137
138 def update_object_detection(self, predictions, labels):
139 """Update the confusion matrix by batch for object detection task."""
140 for image_detections, image_labels in zip(predictions, labels):
141 detections_passed_threshold = [
142 detection for detection in image_detections if detection[4] > self.confidence_threshold
143 ]
144 if len(detections_passed_threshold) == 0:
145 # detections are empty, update matrix for labels
146 for label in image_labels:
147 gt_class = int(label[0].item())
148 self.matrix[self.num_classes, gt_class] += 1
149 continue
150
151 all_ious = np.zeros((len(image_labels), len(detections_passed_threshold)))
152
153 for label_index, label in enumerate(image_labels):
154 for detected_index, detected in enumerate(detections_passed_threshold):
155 all_ious[label_index, detected_index] = jaccard_iou(detected, label)
156
157 want_idx = np.where(all_ious > self.iou_threshold)
158
159 all_matches = [[want_idx[0][i], want_idx[1][i], all_ious[want_idx[0][i], want_idx[1][i]]]
160 for i in range(want_idx[0].shape[0])]
161 all_matches = np.array(all_matches)
162
163 # remove duplicate matches
164 if all_matches.shape[0] > 0:
165 all_matches = all_matches[all_matches[:, 2].argsort()[::-1]]
166
167 all_matches = all_matches[np.unique(all_matches[:, 1], return_index=True)[1]]
168
169 all_matches = all_matches[all_matches[:, 2].argsort()[::-1]]
170
171 all_matches = all_matches[np.unique(all_matches[:, 0], return_index=True)[1]]
172
173 for i, label in enumerate(image_labels):
174 gt_class = int(label[0])
175 if all_matches.shape[0] > 0 and all_matches[all_matches[:, 0] == i].shape[0] == 1:
176 detection_class = int(image_detections[int(all_matches[all_matches[:, 0] == i, 1][0])][5])
177 self.matrix[detection_class, gt_class] += 1
178 else:
179 self.matrix[self.num_classes, gt_class] += 1
180
181 for i, detection in enumerate(image_detections):
182 if all_matches.shape[0] and all_matches[all_matches[:, 1] == i].shape[0] == 0:
183 detection_class = int(detection[5])
184 self.matrix[detection_class, self.num_classes] += 1
185
186 def update_classification(self, predictions, labels):
187 """Update the confusion matrix by batch for classification task."""
188 for predicted_classes, image_labels in zip(predictions, labels):
189 detected_class = max(range(len(predicted_classes)), key=predicted_classes.__getitem__)
190
191 self.matrix[detected_class, image_labels] += 1
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/deepchecks/vision/checks/performance/confusion_matrix.py b/deepchecks/vision/checks/performance/confusion_matrix.py
--- a/deepchecks/vision/checks/performance/confusion_matrix.py
+++ b/deepchecks/vision/checks/performance/confusion_matrix.py
@@ -145,7 +145,7 @@
# detections are empty, update matrix for labels
for label in image_labels:
gt_class = int(label[0].item())
- self.matrix[self.num_classes, gt_class] += 1
+ self.matrix[gt_class, self.num_classes] += 1
continue
all_ious = np.zeros((len(image_labels), len(detections_passed_threshold)))
@@ -174,18 +174,18 @@
gt_class = int(label[0])
if all_matches.shape[0] > 0 and all_matches[all_matches[:, 0] == i].shape[0] == 1:
detection_class = int(image_detections[int(all_matches[all_matches[:, 0] == i, 1][0])][5])
- self.matrix[detection_class, gt_class] += 1
+ self.matrix[gt_class, detection_class] += 1
else:
- self.matrix[self.num_classes, gt_class] += 1
+ self.matrix[gt_class, self.num_classes] += 1
for i, detection in enumerate(image_detections):
if all_matches.shape[0] and all_matches[all_matches[:, 1] == i].shape[0] == 0:
detection_class = int(detection[5])
- self.matrix[detection_class, self.num_classes] += 1
+ self.matrix[self.num_classes, detection_class] += 1
def update_classification(self, predictions, labels):
"""Update the confusion matrix by batch for classification task."""
for predicted_classes, image_labels in zip(predictions, labels):
detected_class = max(range(len(predicted_classes)), key=predicted_classes.__getitem__)
- self.matrix[detected_class, image_labels] += 1
+ self.matrix[image_labels, detected_class] += 1
| {"golden_diff": "diff --git a/deepchecks/vision/checks/performance/confusion_matrix.py b/deepchecks/vision/checks/performance/confusion_matrix.py\n--- a/deepchecks/vision/checks/performance/confusion_matrix.py\n+++ b/deepchecks/vision/checks/performance/confusion_matrix.py\n@@ -145,7 +145,7 @@\n # detections are empty, update matrix for labels\n for label in image_labels:\n gt_class = int(label[0].item())\n- self.matrix[self.num_classes, gt_class] += 1\n+ self.matrix[gt_class, self.num_classes] += 1\n continue\n \n all_ious = np.zeros((len(image_labels), len(detections_passed_threshold)))\n@@ -174,18 +174,18 @@\n gt_class = int(label[0])\n if all_matches.shape[0] > 0 and all_matches[all_matches[:, 0] == i].shape[0] == 1:\n detection_class = int(image_detections[int(all_matches[all_matches[:, 0] == i, 1][0])][5])\n- self.matrix[detection_class, gt_class] += 1\n+ self.matrix[gt_class, detection_class] += 1\n else:\n- self.matrix[self.num_classes, gt_class] += 1\n+ self.matrix[gt_class, self.num_classes] += 1\n \n for i, detection in enumerate(image_detections):\n if all_matches.shape[0] and all_matches[all_matches[:, 1] == i].shape[0] == 0:\n detection_class = int(detection[5])\n- self.matrix[detection_class, self.num_classes] += 1\n+ self.matrix[self.num_classes, detection_class] += 1\n \n def update_classification(self, predictions, labels):\n \"\"\"Update the confusion matrix by batch for classification task.\"\"\"\n for predicted_classes, image_labels in zip(predictions, labels):\n detected_class = max(range(len(predicted_classes)), key=predicted_classes.__getitem__)\n \n- self.matrix[detected_class, image_labels] += 1\n+ self.matrix[image_labels, detected_class] += 1\n", "issue": "[BUG][CV] Confusion matrix display labels are inverted\nThe \"True value\" and \"Predicted value\" are inverted. The matrix should be transposed prior to display. \r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module containing confusion matrix report check.\"\"\"\nfrom typing import Any\n\nimport numpy as np\nfrom plotly.express import imshow\nfrom queue import PriorityQueue\n\nfrom deepchecks.core import CheckResult, DatasetKind\nfrom deepchecks.vision import SingleDatasetCheck, Context\nfrom deepchecks.vision.vision_data import TaskType\nfrom deepchecks.vision.metrics_utils.iou_utils import jaccard_iou\n\n__all__ = ['ConfusionMatrixReport']\n\n\ndef filter_confusion_matrix(confusion_matrix, number_of_categories):\n pq = PriorityQueue()\n for row, values in enumerate(confusion_matrix):\n for col, value in enumerate(values):\n if row != col:\n pq.put((-value, (row, col)))\n categories = set()\n while not pq.empty():\n if len(categories) >= number_of_categories:\n break\n _, (row, col) = pq.get()\n categories.add(row)\n categories.add(col)\n categories = sorted(categories)\n return confusion_matrix[np.ix_(categories, categories)], categories\n\n\nclass ConfusionMatrixReport(SingleDatasetCheck):\n \"\"\"Calculate the confusion matrix of the model on the given dataset.\n\n For object detection, each detected bounding box calculates the IoU for each label and then is that label class is\n used for the confusion matrix. detected bounding boxes that don't match a label has their own class and same\n for labels without detected bounding boxes.\n\n Parameters\n ----------\n categories_to_display (int, default 10):\n Maximum number of categories to display\n confidence_threshold (float, default 0.3):\n Threshold to consider bounding box as detected.\n iou_threshold (float, default 0.5):\n Threshold to consider detected bounding box as labeled bounding box.\n \"\"\"\n\n def __init__(self,\n categories_to_display: int = 10,\n confidence_threshold: float = 0.3,\n iou_threshold: float = 0.5):\n super().__init__()\n self.confidence_threshold = confidence_threshold\n self.categories_to_display = categories_to_display\n self.iou_threshold = iou_threshold\n self.matrix = None\n self.num_classes = 0\n self.task_type = None\n\n def initialize_run(self, context: Context, dataset_kind: DatasetKind = None):\n \"\"\"Initialize run by creating an empty matrix the size of the data.\"\"\"\n context.assert_task_type(TaskType.CLASSIFICATION, TaskType.OBJECT_DETECTION)\n\n if dataset_kind == DatasetKind.TRAIN:\n dataset = context.train\n else:\n dataset = context.test\n self.task_type = dataset.task_type\n self.num_classes = dataset.num_classes\n\n matrix_size = self.num_classes if self.task_type == TaskType.CLASSIFICATION else self.num_classes + 1\n\n self.matrix = np.zeros((matrix_size, matrix_size))\n\n def update(self, context: Context, batch: Any, dataset_kind: DatasetKind = DatasetKind.TRAIN):\n \"\"\"Add batch to confusion matrix.\"\"\"\n if dataset_kind == DatasetKind.TRAIN:\n dataset = context.train\n else:\n dataset = context.test\n\n labels = dataset.batch_to_labels(batch)\n predictions = context.infer(batch, dataset_kind)\n\n if self.task_type == TaskType.CLASSIFICATION:\n self.update_classification(predictions, labels)\n elif self.task_type == TaskType.OBJECT_DETECTION:\n self.update_object_detection(predictions, labels)\n\n def compute(self, context: Context, dataset_kind: DatasetKind = None) -> CheckResult:\n \"\"\"Compute and plot confusion matrix after all batches were processed.\"\"\"\n if dataset_kind == DatasetKind.TRAIN:\n dataset = context.train\n else:\n dataset = 
context.test\n display_confusion_matrix, categories = filter_confusion_matrix(self.matrix, self.categories_to_display)\n\n description = ''\n\n display_categories = []\n for category in categories:\n if self.num_classes == category:\n description += ('last category are detections that do not overlap with labeled data'\n ' and labels that have not been detected. ')\n display_categories.append('not found')\n else:\n display_categories.append(dataset.label_id_to_name(category))\n\n description += f'Showing {self.categories_to_display} of {self.num_classes} classes:'\n\n fig = imshow(display_confusion_matrix,\n x=display_categories,\n y=display_categories,\n text_auto=True)\n\n fig.update_layout(width=600, height=600)\n fig.update_xaxes(title='Predicted Value', type='category')\n fig.update_yaxes(title='True value', type='category')\n\n return CheckResult(\n self.matrix,\n header='Confusion Matrix',\n display=[description, fig]\n )\n\n def update_object_detection(self, predictions, labels):\n \"\"\"Update the confusion matrix by batch for object detection task.\"\"\"\n for image_detections, image_labels in zip(predictions, labels):\n detections_passed_threshold = [\n detection for detection in image_detections if detection[4] > self.confidence_threshold\n ]\n if len(detections_passed_threshold) == 0:\n # detections are empty, update matrix for labels\n for label in image_labels:\n gt_class = int(label[0].item())\n self.matrix[self.num_classes, gt_class] += 1\n continue\n\n all_ious = np.zeros((len(image_labels), len(detections_passed_threshold)))\n\n for label_index, label in enumerate(image_labels):\n for detected_index, detected in enumerate(detections_passed_threshold):\n all_ious[label_index, detected_index] = jaccard_iou(detected, label)\n\n want_idx = np.where(all_ious > self.iou_threshold)\n\n all_matches = [[want_idx[0][i], want_idx[1][i], all_ious[want_idx[0][i], want_idx[1][i]]]\n for i in range(want_idx[0].shape[0])]\n all_matches = np.array(all_matches)\n\n # remove duplicate matches\n if all_matches.shape[0] > 0:\n all_matches = all_matches[all_matches[:, 2].argsort()[::-1]]\n\n all_matches = all_matches[np.unique(all_matches[:, 1], return_index=True)[1]]\n\n all_matches = all_matches[all_matches[:, 2].argsort()[::-1]]\n\n all_matches = all_matches[np.unique(all_matches[:, 0], return_index=True)[1]]\n\n for i, label in enumerate(image_labels):\n gt_class = int(label[0])\n if all_matches.shape[0] > 0 and all_matches[all_matches[:, 0] == i].shape[0] == 1:\n detection_class = int(image_detections[int(all_matches[all_matches[:, 0] == i, 1][0])][5])\n self.matrix[detection_class, gt_class] += 1\n else:\n self.matrix[self.num_classes, gt_class] += 1\n\n for i, detection in enumerate(image_detections):\n if all_matches.shape[0] and all_matches[all_matches[:, 1] == i].shape[0] == 0:\n detection_class = int(detection[5])\n self.matrix[detection_class, self.num_classes] += 1\n\n def update_classification(self, predictions, labels):\n \"\"\"Update the confusion matrix by batch for classification task.\"\"\"\n for predicted_classes, image_labels in zip(predictions, labels):\n detected_class = max(range(len(predicted_classes)), key=predicted_classes.__getitem__)\n\n self.matrix[detected_class, image_labels] += 1\n", "path": "deepchecks/vision/checks/performance/confusion_matrix.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# 
Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module containing confusion matrix report check.\"\"\"\nfrom typing import Any\n\nimport numpy as np\nfrom plotly.express import imshow\nfrom queue import PriorityQueue\n\nfrom deepchecks.core import CheckResult, DatasetKind\nfrom deepchecks.vision import SingleDatasetCheck, Context\nfrom deepchecks.vision.vision_data import TaskType\nfrom deepchecks.vision.metrics_utils.iou_utils import jaccard_iou\n\n__all__ = ['ConfusionMatrixReport']\n\n\ndef filter_confusion_matrix(confusion_matrix, number_of_categories):\n pq = PriorityQueue()\n for row, values in enumerate(confusion_matrix):\n for col, value in enumerate(values):\n if row != col:\n pq.put((-value, (row, col)))\n categories = set()\n while not pq.empty():\n if len(categories) >= number_of_categories:\n break\n _, (row, col) = pq.get()\n categories.add(row)\n categories.add(col)\n categories = sorted(categories)\n return confusion_matrix[np.ix_(categories, categories)], categories\n\n\nclass ConfusionMatrixReport(SingleDatasetCheck):\n \"\"\"Calculate the confusion matrix of the model on the given dataset.\n\n For object detection, each detected bounding box calculates the IoU for each label and then is that label class is\n used for the confusion matrix. detected bounding boxes that don't match a label has their own class and same\n for labels without detected bounding boxes.\n\n Parameters\n ----------\n categories_to_display (int, default 10):\n Maximum number of categories to display\n confidence_threshold (float, default 0.3):\n Threshold to consider bounding box as detected.\n iou_threshold (float, default 0.5):\n Threshold to consider detected bounding box as labeled bounding box.\n \"\"\"\n\n def __init__(self,\n categories_to_display: int = 10,\n confidence_threshold: float = 0.3,\n iou_threshold: float = 0.5):\n super().__init__()\n self.confidence_threshold = confidence_threshold\n self.categories_to_display = categories_to_display\n self.iou_threshold = iou_threshold\n self.matrix = None\n self.num_classes = 0\n self.task_type = None\n\n def initialize_run(self, context: Context, dataset_kind: DatasetKind = None):\n \"\"\"Initialize run by creating an empty matrix the size of the data.\"\"\"\n context.assert_task_type(TaskType.CLASSIFICATION, TaskType.OBJECT_DETECTION)\n\n if dataset_kind == DatasetKind.TRAIN:\n dataset = context.train\n else:\n dataset = context.test\n self.task_type = dataset.task_type\n self.num_classes = dataset.num_classes\n\n matrix_size = self.num_classes if self.task_type == TaskType.CLASSIFICATION else self.num_classes + 1\n\n self.matrix = np.zeros((matrix_size, matrix_size))\n\n def update(self, context: Context, batch: Any, dataset_kind: DatasetKind = DatasetKind.TRAIN):\n \"\"\"Add batch to confusion matrix.\"\"\"\n if dataset_kind == DatasetKind.TRAIN:\n dataset = context.train\n else:\n dataset = context.test\n\n labels = dataset.batch_to_labels(batch)\n predictions = context.infer(batch, dataset_kind)\n\n if self.task_type == TaskType.CLASSIFICATION:\n self.update_classification(predictions, labels)\n elif self.task_type == TaskType.OBJECT_DETECTION:\n self.update_object_detection(predictions, labels)\n\n def compute(self, context: Context, 
dataset_kind: DatasetKind = None) -> CheckResult:\n \"\"\"Compute and plot confusion matrix after all batches were processed.\"\"\"\n if dataset_kind == DatasetKind.TRAIN:\n dataset = context.train\n else:\n dataset = context.test\n display_confusion_matrix, categories = filter_confusion_matrix(self.matrix, self.categories_to_display)\n\n description = ''\n\n display_categories = []\n for category in categories:\n if self.num_classes == category:\n description += ('last category are detections that do not overlap with labeled data'\n ' and labels that have not been detected. ')\n display_categories.append('not found')\n else:\n display_categories.append(dataset.label_id_to_name(category))\n\n description += f'Showing {self.categories_to_display} of {self.num_classes} classes:'\n\n fig = imshow(display_confusion_matrix,\n x=display_categories,\n y=display_categories,\n text_auto=True)\n\n fig.update_layout(width=600, height=600)\n fig.update_xaxes(title='Predicted Value', type='category')\n fig.update_yaxes(title='True value', type='category')\n\n return CheckResult(\n self.matrix,\n header='Confusion Matrix',\n display=[description, fig]\n )\n\n def update_object_detection(self, predictions, labels):\n \"\"\"Update the confusion matrix by batch for object detection task.\"\"\"\n for image_detections, image_labels in zip(predictions, labels):\n detections_passed_threshold = [\n detection for detection in image_detections if detection[4] > self.confidence_threshold\n ]\n if len(detections_passed_threshold) == 0:\n # detections are empty, update matrix for labels\n for label in image_labels:\n gt_class = int(label[0].item())\n self.matrix[gt_class, self.num_classes] += 1\n continue\n\n all_ious = np.zeros((len(image_labels), len(detections_passed_threshold)))\n\n for label_index, label in enumerate(image_labels):\n for detected_index, detected in enumerate(detections_passed_threshold):\n all_ious[label_index, detected_index] = jaccard_iou(detected, label)\n\n want_idx = np.where(all_ious > self.iou_threshold)\n\n all_matches = [[want_idx[0][i], want_idx[1][i], all_ious[want_idx[0][i], want_idx[1][i]]]\n for i in range(want_idx[0].shape[0])]\n all_matches = np.array(all_matches)\n\n # remove duplicate matches\n if all_matches.shape[0] > 0:\n all_matches = all_matches[all_matches[:, 2].argsort()[::-1]]\n\n all_matches = all_matches[np.unique(all_matches[:, 1], return_index=True)[1]]\n\n all_matches = all_matches[all_matches[:, 2].argsort()[::-1]]\n\n all_matches = all_matches[np.unique(all_matches[:, 0], return_index=True)[1]]\n\n for i, label in enumerate(image_labels):\n gt_class = int(label[0])\n if all_matches.shape[0] > 0 and all_matches[all_matches[:, 0] == i].shape[0] == 1:\n detection_class = int(image_detections[int(all_matches[all_matches[:, 0] == i, 1][0])][5])\n self.matrix[gt_class, detection_class] += 1\n else:\n self.matrix[gt_class, self.num_classes] += 1\n\n for i, detection in enumerate(image_detections):\n if all_matches.shape[0] and all_matches[all_matches[:, 1] == i].shape[0] == 0:\n detection_class = int(detection[5])\n self.matrix[self.num_classes, detection_class] += 1\n\n def update_classification(self, predictions, labels):\n \"\"\"Update the confusion matrix by batch for classification task.\"\"\"\n for predicted_classes, image_labels in zip(predictions, labels):\n detected_class = max(range(len(predicted_classes)), key=predicted_classes.__getitem__)\n\n self.matrix[image_labels, detected_class] += 1\n", "path": 
"deepchecks/vision/checks/performance/confusion_matrix.py"}]} | 2,484 | 475 |
gh_patches_debug_1519 | rasdani/github-patches | git_diff | databricks__koalas-1959 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plot does not work in Ubuntu
On Ubuntu, plotting with `matplotlib` 3.3.0+ causes an exception because of the Pillow dependency added in https://github.com/matplotlib/matplotlib/commit/370e9a2d5d9e637abc90b3270d368642c69f66c6#diff-60f61ab7a8d1910d86d9fda2261620314edcae5894d5aaa236b821c7256badd7:
```python
import databricks.koalas as ks
df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6]})
df.plot...
```
```
ImportError: /.../lib/python3.8/site-packages/PIL/_imaging.cpython-38-x86_64-linux-gnu.so: ELF load command address/offset not properly aligned
```
--- END ISSUE ---
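A sketch of the workaround that caps matplotlib below 3.3.0 (the release that made Pillow a required dependency, per the commit linked above), shown as a stripped-down `setup.py`; the surrounding pins are copied from the project's file:

```python
from setuptools import setup

setup(
    name='koalas',
    install_requires=[
        'pandas>=0.23.2',
        'pyarrow>=0.10',
        'numpy>=1.14',
        # matplotlib 3.3.0+ pulls in Pillow, whose import fails with the ELF
        # error shown above on this Ubuntu environment, so stay below 3.3.0.
        'matplotlib>=3.0.0,<3.3.0',
    ],
)
```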
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 #
4 # Copyright (C) 2019 Databricks, Inc.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 #
18 from __future__ import print_function
19
20 from io import open
21 import sys
22 from setuptools import setup
23 from os import path
24
25 DESCRIPTION = "Koalas: pandas API on Apache Spark"
26
27 this_directory = path.abspath(path.dirname(__file__))
28 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
29 LONG_DESCRIPTION = f.read()
30
31 try:
32 exec(open('databricks/koalas/version.py').read())
33 except IOError:
34 print("Failed to load Koalas version file for packaging. You must be in Koalas root dir.",
35 file=sys.stderr)
36 sys.exit(-1)
37 VERSION = __version__ # noqa
38
39 setup(
40 name='koalas',
41 version=VERSION,
42 packages=[
43 'databricks',
44 'databricks.koalas',
45 'databricks.koalas.missing',
46 'databricks.koalas.plot',
47 'databricks.koalas.spark',
48 'databricks.koalas.typedef',
49 'databricks.koalas.usage_logging'],
50 extras_require={
51 'spark': ['pyspark>=2.4.0'],
52 'mlflow': ['mlflow>=1.0'],
53 'plotly': ['plotly>=4.8'],
54 },
55 python_requires='>=3.5,<3.9',
56 install_requires=[
57 'pandas>=0.23.2',
58 'pyarrow>=0.10',
59 'numpy>=1.14',
60 'matplotlib>=3.0.0',
61 ],
62 author="Databricks",
63 author_email="[email protected]",
64 license='http://www.apache.org/licenses/LICENSE-2.0',
65 url="https://github.com/databricks/koalas",
66 project_urls={
67 'Bug Tracker': 'https://github.com/databricks/koalas/issues',
68 'Documentation': 'https://koalas.readthedocs.io/',
69 'Source Code': 'https://github.com/databricks/koalas'
70 },
71 description=DESCRIPTION,
72 long_description=LONG_DESCRIPTION,
73 long_description_content_type='text/markdown',
74 classifiers=[
75 'Programming Language :: Python :: 3.5',
76 'Programming Language :: Python :: 3.6',
77 'Programming Language :: Python :: 3.7',
78 'Programming Language :: Python :: 3.8',
79 ],
80 )
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@
'pandas>=0.23.2',
'pyarrow>=0.10',
'numpy>=1.14',
- 'matplotlib>=3.0.0',
+ 'matplotlib>=3.0.0,<3.3.0',
],
author="Databricks",
author_email="[email protected]",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,7 +57,7 @@\n 'pandas>=0.23.2',\n 'pyarrow>=0.10',\n 'numpy>=1.14',\n- 'matplotlib>=3.0.0',\n+ 'matplotlib>=3.0.0,<3.3.0',\n ],\n author=\"Databricks\",\n author_email=\"[email protected]\",\n", "issue": "plot does not work in Ubuntu\nPlotting with `matplotlib` 3.3.0+ causes an exception because of Pillow dependency added at https://github.com/matplotlib/matplotlib/commit/370e9a2d5d9e637abc90b3270d368642c69f66c6#diff-60f61ab7a8d1910d86d9fda2261620314edcae5894d5aaa236b821c7256badd7 on Ubuntu:\r\n\r\n```python\r\nimport databricks.koalas as ks\r\ndf = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6]})\r\ndf.plot...\r\n```\r\n\r\n```\r\nImportError: /.../lib/python3.8/site-packages/PIL/_imaging.cpython-38-x86_64-linux-gnu.so: ELF load command address/offset not properly aligned\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\n#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import print_function\n\nfrom io import open\nimport sys\nfrom setuptools import setup\nfrom os import path\n\nDESCRIPTION = \"Koalas: pandas API on Apache Spark\"\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\ntry:\n exec(open('databricks/koalas/version.py').read())\nexcept IOError:\n print(\"Failed to load Koalas version file for packaging. 
You must be in Koalas root dir.\",\n file=sys.stderr)\n sys.exit(-1)\nVERSION = __version__ # noqa\n\nsetup(\n name='koalas',\n version=VERSION,\n packages=[\n 'databricks',\n 'databricks.koalas',\n 'databricks.koalas.missing',\n 'databricks.koalas.plot',\n 'databricks.koalas.spark',\n 'databricks.koalas.typedef',\n 'databricks.koalas.usage_logging'],\n extras_require={\n 'spark': ['pyspark>=2.4.0'],\n 'mlflow': ['mlflow>=1.0'],\n 'plotly': ['plotly>=4.8'],\n },\n python_requires='>=3.5,<3.9',\n install_requires=[\n 'pandas>=0.23.2',\n 'pyarrow>=0.10',\n 'numpy>=1.14',\n 'matplotlib>=3.0.0',\n ],\n author=\"Databricks\",\n author_email=\"[email protected]\",\n license='http://www.apache.org/licenses/LICENSE-2.0',\n url=\"https://github.com/databricks/koalas\",\n project_urls={\n 'Bug Tracker': 'https://github.com/databricks/koalas/issues',\n 'Documentation': 'https://koalas.readthedocs.io/',\n 'Source Code': 'https://github.com/databricks/koalas'\n },\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import print_function\n\nfrom io import open\nimport sys\nfrom setuptools import setup\nfrom os import path\n\nDESCRIPTION = \"Koalas: pandas API on Apache Spark\"\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\ntry:\n exec(open('databricks/koalas/version.py').read())\nexcept IOError:\n print(\"Failed to load Koalas version file for packaging. 
You must be in Koalas root dir.\",\n file=sys.stderr)\n sys.exit(-1)\nVERSION = __version__ # noqa\n\nsetup(\n name='koalas',\n version=VERSION,\n packages=[\n 'databricks',\n 'databricks.koalas',\n 'databricks.koalas.missing',\n 'databricks.koalas.plot',\n 'databricks.koalas.spark',\n 'databricks.koalas.typedef',\n 'databricks.koalas.usage_logging'],\n extras_require={\n 'spark': ['pyspark>=2.4.0'],\n 'mlflow': ['mlflow>=1.0'],\n 'plotly': ['plotly>=4.8'],\n },\n python_requires='>=3.5,<3.9',\n install_requires=[\n 'pandas>=0.23.2',\n 'pyarrow>=0.10',\n 'numpy>=1.14',\n 'matplotlib>=3.0.0,<3.3.0',\n ],\n author=\"Databricks\",\n author_email=\"[email protected]\",\n license='http://www.apache.org/licenses/LICENSE-2.0',\n url=\"https://github.com/databricks/koalas\",\n project_urls={\n 'Bug Tracker': 'https://github.com/databricks/koalas/issues',\n 'Documentation': 'https://koalas.readthedocs.io/',\n 'Source Code': 'https://github.com/databricks/koalas'\n },\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n)\n", "path": "setup.py"}]} | 1,319 | 112 |
gh_patches_debug_30358 | rasdani/github-patches | git_diff | nvaccess__nvda-10338 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
winVersion.isWin10: modify it to return early if major is not 10
Hi,
This feature request should not be done for 2019.3 in order to avoid introducing possible regressions, but perhaps defer it to 2020.1:
### Is your feature request related to a problem? Please describe.
In NVDA 2019.2, as part of UIA console support, the winVersion module introduced an isWin10 function, which returns whether the system is running a specific version of Windows 10 or later. At the moment it checks the major version in two places:
* If "at least" flag is specified: return False if version is less than 10.
* If "at least" flag is not specified: return False if major version is not 10.
At least it is understandable. But I would argue that, for the sake of performance and to keep the subject line and body in sync, I propose the modification below.
### Describe the solution you'd like
Instead of checking flags after defining the version information map, return early if the major version isn't even 10. Not only can the flag checks be avoided on Windows 8.1 and earlier, it also brings the function body in line with what the function title says.
Thus I propose:
1. First, check winVersion.major and return False if it isn't 10.
2. Define version to build map.
3. Same as current function.
### Describe alternatives you've considered
Keep the function as is.
### Additional context
I'm sure there might be other opportunities to optimize this function, but that's in the future.
Thanks.
--- END ISSUE ---
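A rough sketch of the three proposed steps (the version-to-build map is trimmed and the unknown-version error handling is left out); `winVersion` is obtained the same way the module already obtains it:

```python
import sys

winVersion = sys.getwindowsversion()


def isWin10(version: int = 1507, atLeast: bool = True) -> bool:
    # Step 1: return early on anything that is not Windows 10, so Windows 8.1
    # and earlier never reach the build lookup below.
    if winVersion.major != 10:
        return False
    # Step 2: only now define the release-to-build map (trimmed here).
    win10VersionsToBuilds = {
        1507: 10240,
        1511: 10586,
        1607: 14393,
        1903: 18362,
        1909: 18363,
    }
    # Step 3: same comparison as the current function.
    if atLeast:
        return winVersion.build >= win10VersionsToBuilds[version]
    return winVersion.build == win10VersionsToBuilds[version]
```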
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/winVersion.py`
Content:
```
1 # A part of NonVisual Desktop Access (NVDA)
2 # Copyright (C) 2006-2019 NV Access Limited
3 # This file is covered by the GNU General Public License.
4 # See the file COPYING for more details.
5
6 import sys
7 import os
8 import winUser
9
10 winVersion=sys.getwindowsversion()
11 winVersionText="{v.major}.{v.minor}.{v.build}".format(v=winVersion)
12 if winVersion.service_pack_major!=0:
13 winVersionText+=" service pack %d"%winVersion.service_pack_major
14 if winVersion.service_pack_minor!=0:
15 winVersionText+=".%d"%winVersion.service_pack_minor
16 winVersionText+=" %s" % ("workstation","domain controller","server")[winVersion.product_type-1]
17
18 def isSupportedOS():
19 # NVDA can only run on Windows 7 Service pack 1 and above
20 return (winVersion.major,winVersion.minor,winVersion.service_pack_major) >= (6,1,1)
21
22 def canRunVc2010Builds():
23 return isSupportedOS()
24
25 UWP_OCR_DATA_PATH = os.path.expandvars(r"$windir\OCR")
26 def isUwpOcrAvailable():
27 return os.path.isdir(UWP_OCR_DATA_PATH)
28
29 def isWin10(version=1507, atLeast=True):
30 """
31 Returns True if NVDA is running on the supplied release version of Windows 10. If no argument is supplied, returns True for all public Windows 10 releases.
32 @param version: a release version of Windows 10 (such as 1903).
33 @param atLeast: return True if NVDA is running on at least this Windows 10 build (i.e. this version or higher).
34 """
35 from logHandler import log
36 win10VersionsToBuilds={
37 1507: 10240,
38 1511: 10586,
39 1607: 14393,
40 1703: 15063,
41 1709: 16299,
42 1803: 17134,
43 1809: 17763,
44 1903: 18362,
45 1909: 18363,
46 }
47 if atLeast and winVersion.major < 10:
48 return False
49 elif not atLeast and winVersion.major != 10:
50 return False
51 try:
52 if atLeast:
53 return winVersion.build >= win10VersionsToBuilds[version]
54 else:
55 return winVersion.build == win10VersionsToBuilds[version]
56 except KeyError:
57 log.error("Unknown Windows 10 version {}".format(version))
58 return False
59
60
61 def isFullScreenMagnificationAvailable():
62 return (winVersion.major, winVersion.minor) >= (6, 2)
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/source/winVersion.py b/source/winVersion.py
--- a/source/winVersion.py
+++ b/source/winVersion.py
@@ -26,34 +26,35 @@
def isUwpOcrAvailable():
return os.path.isdir(UWP_OCR_DATA_PATH)
-def isWin10(version=1507, atLeast=True):
+
+WIN10_VERSIONS_TO_BUILDS = {
+ 1507: 10240,
+ 1511: 10586,
+ 1607: 14393,
+ 1703: 15063,
+ 1709: 16299,
+ 1803: 17134,
+ 1809: 17763,
+ 1903: 18362,
+ 1909: 18363,
+}
+
+
+def isWin10(version: int = 1507, atLeast: bool = True):
"""
Returns True if NVDA is running on the supplied release version of Windows 10. If no argument is supplied, returns True for all public Windows 10 releases.
@param version: a release version of Windows 10 (such as 1903).
@param atLeast: return True if NVDA is running on at least this Windows 10 build (i.e. this version or higher).
"""
- from logHandler import log
- win10VersionsToBuilds={
- 1507: 10240,
- 1511: 10586,
- 1607: 14393,
- 1703: 15063,
- 1709: 16299,
- 1803: 17134,
- 1809: 17763,
- 1903: 18362,
- 1909: 18363,
- }
- if atLeast and winVersion.major < 10:
- return False
- elif not atLeast and winVersion.major != 10:
+ if winVersion.major != 10:
return False
try:
if atLeast:
- return winVersion.build >= win10VersionsToBuilds[version]
+ return winVersion.build >= WIN10_VERSIONS_TO_BUILDS[version]
else:
- return winVersion.build == win10VersionsToBuilds[version]
+ return winVersion.build == WIN10_VERSIONS_TO_BUILDS[version]
except KeyError:
+ from logHandler import log
log.error("Unknown Windows 10 version {}".format(version))
return False
| {"golden_diff": "diff --git a/source/winVersion.py b/source/winVersion.py\n--- a/source/winVersion.py\n+++ b/source/winVersion.py\n@@ -26,34 +26,35 @@\n def isUwpOcrAvailable():\r\n \treturn os.path.isdir(UWP_OCR_DATA_PATH)\r\n \r\n-def isWin10(version=1507, atLeast=True):\r\n+\r\n+WIN10_VERSIONS_TO_BUILDS = {\r\n+\t1507: 10240,\r\n+\t1511: 10586,\r\n+\t1607: 14393,\r\n+\t1703: 15063,\r\n+\t1709: 16299,\r\n+\t1803: 17134,\r\n+\t1809: 17763,\r\n+\t1903: 18362,\r\n+\t1909: 18363,\r\n+}\r\n+\r\n+\r\n+def isWin10(version: int = 1507, atLeast: bool = True):\r\n \t\"\"\"\r\n \tReturns True if NVDA is running on the supplied release version of Windows 10. If no argument is supplied, returns True for all public Windows 10 releases.\r\n \t@param version: a release version of Windows 10 (such as 1903).\r\n \t@param atLeast: return True if NVDA is running on at least this Windows 10 build (i.e. this version or higher).\r\n \t\"\"\"\r\n-\tfrom logHandler import log\r\n-\twin10VersionsToBuilds={\r\n-\t\t1507: 10240,\r\n-\t\t1511: 10586,\r\n-\t\t1607: 14393,\r\n-\t\t1703: 15063,\r\n-\t\t1709: 16299,\r\n-\t\t1803: 17134,\r\n-\t\t1809: 17763,\r\n-\t\t1903: 18362,\r\n-\t\t1909: 18363,\r\n-\t}\r\n-\tif atLeast and winVersion.major < 10:\r\n-\t\treturn False\r\n-\telif not atLeast and winVersion.major != 10:\r\n+\tif winVersion.major != 10:\r\n \t\treturn False\r\n \ttry:\r\n \t\tif atLeast:\r\n-\t\t\treturn winVersion.build >= win10VersionsToBuilds[version]\r\n+\t\t\treturn winVersion.build >= WIN10_VERSIONS_TO_BUILDS[version]\r\n \t\telse:\r\n-\t\t\treturn winVersion.build == win10VersionsToBuilds[version]\r\n+\t\t\treturn winVersion.build == WIN10_VERSIONS_TO_BUILDS[version]\r\n \texcept KeyError:\r\n+\t\tfrom logHandler import log\r\n \t\tlog.error(\"Unknown Windows 10 version {}\".format(version))\r\n \t\treturn False\n", "issue": "winVersion.isWin10: modify it to return early if major is not 10\nHi,\r\n\r\nThis feature request should not be done for 2019.3 in order to avoid introducing possible regressions, but perhaps defer it to 2020.1:\r\n\r\n### Is your feature request related to a problem? Please describe.\r\nIn NVDA 2019.2, as part of UIA console support, winVersion module introduced isWin10 function which returns whether a system is running a specific version of Windows 10 or later. At the moment it checks major version in two places:\r\n\r\n* If \"at least\" flag is specified: return False if version is less than 10.\r\n* If \"at least\" flag is not specified: return False if major version is not 10.\r\n\r\nAt least it is understandable. But I would argue that for sake of performance and keeping the subject line and body in sync, I propose the below modification.\r\n\r\n### Describe the solution you'd like\r\nInstead of checking flags after defining version information map, return early if major version isn't even 10. Not only flag checks can be avoided on Windows 8.1 and earlier, it can make the function body in line with what the function title says.\r\n\r\nThus I propose:\r\n\r\n1. First, check winVersion.major and return False if it isn't 10.\r\n2. Define version to build map.\r\n3. 
Same as current function.\r\n\r\n### Describe alternatives you've considered\r\nKeep the function as is.\r\n\r\n### Additional context\r\nI'm sure there might be other opportunities to optimize this function, but that's in the future.\r\n\r\nThanks.\n", "before_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2019 NV Access Limited\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n\r\nimport sys\r\nimport os\r\nimport winUser\r\n\r\nwinVersion=sys.getwindowsversion()\r\nwinVersionText=\"{v.major}.{v.minor}.{v.build}\".format(v=winVersion)\r\nif winVersion.service_pack_major!=0:\r\n\twinVersionText+=\" service pack %d\"%winVersion.service_pack_major\r\n\tif winVersion.service_pack_minor!=0:\r\n\t\twinVersionText+=\".%d\"%winVersion.service_pack_minor\r\nwinVersionText+=\" %s\" % (\"workstation\",\"domain controller\",\"server\")[winVersion.product_type-1]\r\n\r\ndef isSupportedOS():\r\n\t# NVDA can only run on Windows 7 Service pack 1 and above\r\n\treturn (winVersion.major,winVersion.minor,winVersion.service_pack_major) >= (6,1,1)\r\n\r\ndef canRunVc2010Builds():\r\n\treturn isSupportedOS()\r\n\r\nUWP_OCR_DATA_PATH = os.path.expandvars(r\"$windir\\OCR\")\r\ndef isUwpOcrAvailable():\r\n\treturn os.path.isdir(UWP_OCR_DATA_PATH)\r\n\r\ndef isWin10(version=1507, atLeast=True):\r\n\t\"\"\"\r\n\tReturns True if NVDA is running on the supplied release version of Windows 10. If no argument is supplied, returns True for all public Windows 10 releases.\r\n\t@param version: a release version of Windows 10 (such as 1903).\r\n\t@param atLeast: return True if NVDA is running on at least this Windows 10 build (i.e. this version or higher).\r\n\t\"\"\"\r\n\tfrom logHandler import log\r\n\twin10VersionsToBuilds={\r\n\t\t1507: 10240,\r\n\t\t1511: 10586,\r\n\t\t1607: 14393,\r\n\t\t1703: 15063,\r\n\t\t1709: 16299,\r\n\t\t1803: 17134,\r\n\t\t1809: 17763,\r\n\t\t1903: 18362,\r\n\t\t1909: 18363,\r\n\t}\r\n\tif atLeast and winVersion.major < 10:\r\n\t\treturn False\r\n\telif not atLeast and winVersion.major != 10:\r\n\t\treturn False\r\n\ttry:\r\n\t\tif atLeast:\r\n\t\t\treturn winVersion.build >= win10VersionsToBuilds[version]\r\n\t\telse:\r\n\t\t\treturn winVersion.build == win10VersionsToBuilds[version]\r\n\texcept KeyError:\r\n\t\tlog.error(\"Unknown Windows 10 version {}\".format(version))\r\n\t\treturn False\r\n\r\n\r\ndef isFullScreenMagnificationAvailable():\r\n\treturn (winVersion.major, winVersion.minor) >= (6, 2)\r\n", "path": "source/winVersion.py"}], "after_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2019 NV Access Limited\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n\r\nimport sys\r\nimport os\r\nimport winUser\r\n\r\nwinVersion=sys.getwindowsversion()\r\nwinVersionText=\"{v.major}.{v.minor}.{v.build}\".format(v=winVersion)\r\nif winVersion.service_pack_major!=0:\r\n\twinVersionText+=\" service pack %d\"%winVersion.service_pack_major\r\n\tif winVersion.service_pack_minor!=0:\r\n\t\twinVersionText+=\".%d\"%winVersion.service_pack_minor\r\nwinVersionText+=\" %s\" % (\"workstation\",\"domain controller\",\"server\")[winVersion.product_type-1]\r\n\r\ndef isSupportedOS():\r\n\t# NVDA can only run on Windows 7 Service pack 1 and above\r\n\treturn (winVersion.major,winVersion.minor,winVersion.service_pack_major) >= (6,1,1)\r\n\r\ndef canRunVc2010Builds():\r\n\treturn 
isSupportedOS()\r\n\r\nUWP_OCR_DATA_PATH = os.path.expandvars(r\"$windir\\OCR\")\r\ndef isUwpOcrAvailable():\r\n\treturn os.path.isdir(UWP_OCR_DATA_PATH)\r\n\r\n\r\nWIN10_VERSIONS_TO_BUILDS = {\r\n\t1507: 10240,\r\n\t1511: 10586,\r\n\t1607: 14393,\r\n\t1703: 15063,\r\n\t1709: 16299,\r\n\t1803: 17134,\r\n\t1809: 17763,\r\n\t1903: 18362,\r\n\t1909: 18363,\r\n}\r\n\r\n\r\ndef isWin10(version: int = 1507, atLeast: bool = True):\r\n\t\"\"\"\r\n\tReturns True if NVDA is running on the supplied release version of Windows 10. If no argument is supplied, returns True for all public Windows 10 releases.\r\n\t@param version: a release version of Windows 10 (such as 1903).\r\n\t@param atLeast: return True if NVDA is running on at least this Windows 10 build (i.e. this version or higher).\r\n\t\"\"\"\r\n\tif winVersion.major != 10:\r\n\t\treturn False\r\n\ttry:\r\n\t\tif atLeast:\r\n\t\t\treturn winVersion.build >= WIN10_VERSIONS_TO_BUILDS[version]\r\n\t\telse:\r\n\t\t\treturn winVersion.build == WIN10_VERSIONS_TO_BUILDS[version]\r\n\texcept KeyError:\r\n\t\tfrom logHandler import log\r\n\t\tlog.error(\"Unknown Windows 10 version {}\".format(version))\r\n\t\treturn False\r\n\r\n\r\ndef isFullScreenMagnificationAvailable():\r\n\treturn (winVersion.major, winVersion.minor) >= (6, 2)\r\n", "path": "source/winVersion.py"}]} | 1,375 | 657 |
gh_patches_debug_15883 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1311 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
South Glos UK doesn't return all Recycling / Food Bin dates
The South Gloucester UK source is only returning the Food Bin and Recycling bins every other week, when they should be every week.
Calendar in HA shows like this for me:

The South Glos webpage shows:

And the service endpoint that you are querying replies with:

I could put a PR in to treat the Rx dates as Recycling and Food as well?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py`
Content:
```
1 import datetime
2 import json
3
4 import requests
5 from waste_collection_schedule import Collection
6
7 TITLE = "South Gloucestershire Council" # Title will show up in README.md and info.md
8 DESCRIPTION = "Source script for southglos.gov.uk" # Describe your source
9 URL = "https://southglos.gov.uk" # Insert url to service homepage. URL will show up in README.md and info.md
10 TEST_CASES = { # Insert arguments for test cases to be used by test_sources.py script
11 "Test_001": {"uprn": "643346"},
12 "Test_002": {"uprn": "641084"}
13 }
14
15 ICON_MAP = { # Optional: Dict of waste types and suitable mdi icons
16 "BLACK BIN": "mdi:trash-can",
17 "RECYCLING": "mdi:recycle",
18 "GARDEN WASTE": "mdi:leaf",
19 "FOOD BIN": "mdi:food"
20 }
21
22
23 class Source:
24 def __init__(self, uprn: str): # argX correspond to the args dict in the source configuration
25 self._uprn = uprn
26
27 def fetch(self):
28 session = requests.Session()
29 r = session.get(
30 f"https://webapps.southglos.gov.uk/Webservices/SGC.RefuseCollectionService/RefuseCollectionService.svc"
31 f"/getCollections/{self._uprn}")
32 r.raise_for_status()
33 output = r.text.strip('[]')
34 output = json.loads(output)
35 recycling_and_food_bin_dates = [output['C1'], output['C2'], output['C3']]
36 black_bin_dates = [output['R1'], output['R2'], output['R3']]
37 garden_bin_dates = [output['G1'], output['G2'], output['G3']]
38 entries = [] # List that holds collection schedule
39
40 for collection in recycling_and_food_bin_dates:
41 entries.append(
42 Collection(
43 date=datetime.datetime.strptime(collection, "%d/%m/%Y").date(),
44 t="RECYCLING",
45 icon=ICON_MAP.get("RECYCLING"),
46 )
47 )
48 entries.append(
49 Collection(
50 date=datetime.datetime.strptime(collection, "%d/%m/%Y").date(),
51 t="FOOD BIN",
52 icon=ICON_MAP.get("FOOD BIN"),
53 )
54 )
55
56 for collection in black_bin_dates:
57 entries.append(
58 Collection(
59 date=datetime.datetime.strptime(collection, "%d/%m/%Y").date(),
60 t="BLACK BIN",
61 icon=ICON_MAP.get("BLACK BIN"),
62 )
63 )
64
65 if garden_bin_dates[1] != '': #
66 for collection in garden_bin_dates:
67 entries.append(
68 Collection(
69 date=datetime.datetime.strptime(collection, "%d/%m/%Y").date(),
70 t="GARDEN WASTE",
71 icon=ICON_MAP.get("GARDEN WASTE"),
72 )
73 )
74
75 return entries
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py
@@ -32,8 +32,11 @@
r.raise_for_status()
output = r.text.strip('[]')
output = json.loads(output)
- recycling_and_food_bin_dates = [output['C1'], output['C2'], output['C3']]
+ # Recycling and food are fields starting with C and R
+ recycling_and_food_bin_dates = [output['C1'], output['C2'], output['C3'], output['R1'], output['R2'], output['R3']]
+ # Black bin dates are fields starting R
black_bin_dates = [output['R1'], output['R2'], output['R3']]
+ # Garden bin dates are fields starting G
garden_bin_dates = [output['G1'], output['G2'], output['G3']]
entries = [] # List that holds collection schedule
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py\n@@ -32,8 +32,11 @@\n r.raise_for_status()\n output = r.text.strip('[]')\n output = json.loads(output)\n- recycling_and_food_bin_dates = [output['C1'], output['C2'], output['C3']]\n+ # Recycling and food are fields starting with C and R\n+ recycling_and_food_bin_dates = [output['C1'], output['C2'], output['C3'], output['R1'], output['R2'], output['R3']]\n+ # Black bin dates are fields starting R\n black_bin_dates = [output['R1'], output['R2'], output['R3']]\n+ # Garden bin dates are fields starting G\n garden_bin_dates = [output['G1'], output['G2'], output['G3']]\n entries = [] # List that holds collection schedule\n", "issue": "South Glos UK doesn't return all Recycling / Food Bin dates\nThe South Gloucester UK source is only returning the Food Bin and Recycling bins every other week, when they should be every week.\r\n\r\nCalendar in HA shows like this for me:\r\n\r\n\r\nThe South Glos webpage shows:\r\n\r\n\r\nAnd the service endpoint that you are querying replies with:\r\n\r\n\r\nI could put a PR in to treat the Rx dates as Recycling and Food as well?\n", "before_files": [{"content": "import datetime\nimport json\n\nimport requests\nfrom waste_collection_schedule import Collection\n\nTITLE = \"South Gloucestershire Council\" # Title will show up in README.md and info.md\nDESCRIPTION = \"Source script for southglos.gov.uk\" # Describe your source\nURL = \"https://southglos.gov.uk\" # Insert url to service homepage. 
URL will show up in README.md and info.md\nTEST_CASES = { # Insert arguments for test cases to be used by test_sources.py script\n \"Test_001\": {\"uprn\": \"643346\"},\n \"Test_002\": {\"uprn\": \"641084\"}\n}\n\nICON_MAP = { # Optional: Dict of waste types and suitable mdi icons\n \"BLACK BIN\": \"mdi:trash-can\",\n \"RECYCLING\": \"mdi:recycle\",\n \"GARDEN WASTE\": \"mdi:leaf\",\n \"FOOD BIN\": \"mdi:food\"\n}\n\n\nclass Source:\n def __init__(self, uprn: str): # argX correspond to the args dict in the source configuration\n self._uprn = uprn\n\n def fetch(self):\n session = requests.Session()\n r = session.get(\n f\"https://webapps.southglos.gov.uk/Webservices/SGC.RefuseCollectionService/RefuseCollectionService.svc\"\n f\"/getCollections/{self._uprn}\")\n r.raise_for_status()\n output = r.text.strip('[]')\n output = json.loads(output)\n recycling_and_food_bin_dates = [output['C1'], output['C2'], output['C3']]\n black_bin_dates = [output['R1'], output['R2'], output['R3']]\n garden_bin_dates = [output['G1'], output['G2'], output['G3']]\n entries = [] # List that holds collection schedule\n\n for collection in recycling_and_food_bin_dates:\n entries.append(\n Collection(\n date=datetime.datetime.strptime(collection, \"%d/%m/%Y\").date(),\n t=\"RECYCLING\",\n icon=ICON_MAP.get(\"RECYCLING\"),\n )\n )\n entries.append(\n Collection(\n date=datetime.datetime.strptime(collection, \"%d/%m/%Y\").date(),\n t=\"FOOD BIN\",\n icon=ICON_MAP.get(\"FOOD BIN\"),\n )\n )\n\n for collection in black_bin_dates:\n entries.append(\n Collection(\n date=datetime.datetime.strptime(collection, \"%d/%m/%Y\").date(),\n t=\"BLACK BIN\",\n icon=ICON_MAP.get(\"BLACK BIN\"),\n )\n )\n\n if garden_bin_dates[1] != '': #\n for collection in garden_bin_dates:\n entries.append(\n Collection(\n date=datetime.datetime.strptime(collection, \"%d/%m/%Y\").date(),\n t=\"GARDEN WASTE\",\n icon=ICON_MAP.get(\"GARDEN WASTE\"),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py"}], "after_files": [{"content": "import datetime\nimport json\n\nimport requests\nfrom waste_collection_schedule import Collection\n\nTITLE = \"South Gloucestershire Council\" # Title will show up in README.md and info.md\nDESCRIPTION = \"Source script for southglos.gov.uk\" # Describe your source\nURL = \"https://southglos.gov.uk\" # Insert url to service homepage. 
URL will show up in README.md and info.md\nTEST_CASES = { # Insert arguments for test cases to be used by test_sources.py script\n \"Test_001\": {\"uprn\": \"643346\"},\n \"Test_002\": {\"uprn\": \"641084\"}\n}\n\nICON_MAP = { # Optional: Dict of waste types and suitable mdi icons\n \"BLACK BIN\": \"mdi:trash-can\",\n \"RECYCLING\": \"mdi:recycle\",\n \"GARDEN WASTE\": \"mdi:leaf\",\n \"FOOD BIN\": \"mdi:food\"\n}\n\n\nclass Source:\n def __init__(self, uprn: str): # argX correspond to the args dict in the source configuration\n self._uprn = uprn\n\n def fetch(self):\n session = requests.Session()\n r = session.get(\n f\"https://webapps.southglos.gov.uk/Webservices/SGC.RefuseCollectionService/RefuseCollectionService.svc\"\n f\"/getCollections/{self._uprn}\")\n r.raise_for_status()\n output = r.text.strip('[]')\n output = json.loads(output)\n # Recycling and food are fields starting with C and R\n recycling_and_food_bin_dates = [output['C1'], output['C2'], output['C3'], output['R1'], output['R2'], output['R3']]\n # Black bin dates are fields starting R\n black_bin_dates = [output['R1'], output['R2'], output['R3']]\n # Garden bin dates are fields starting G\n garden_bin_dates = [output['G1'], output['G2'], output['G3']]\n entries = [] # List that holds collection schedule\n\n for collection in recycling_and_food_bin_dates:\n entries.append(\n Collection(\n date=datetime.datetime.strptime(collection, \"%d/%m/%Y\").date(),\n t=\"RECYCLING\",\n icon=ICON_MAP.get(\"RECYCLING\"),\n )\n )\n entries.append(\n Collection(\n date=datetime.datetime.strptime(collection, \"%d/%m/%Y\").date(),\n t=\"FOOD BIN\",\n icon=ICON_MAP.get(\"FOOD BIN\"),\n )\n )\n\n for collection in black_bin_dates:\n entries.append(\n Collection(\n date=datetime.datetime.strptime(collection, \"%d/%m/%Y\").date(),\n t=\"BLACK BIN\",\n icon=ICON_MAP.get(\"BLACK BIN\"),\n )\n )\n\n if garden_bin_dates[1] != '': #\n for collection in garden_bin_dates:\n entries.append(\n Collection(\n date=datetime.datetime.strptime(collection, \"%d/%m/%Y\").date(),\n t=\"GARDEN WASTE\",\n icon=ICON_MAP.get(\"GARDEN WASTE\"),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py"}]} | 1,329 | 280 |
gh_patches_debug_64458 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1834 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
E3021 after upgrading to cfn_lint-0.44.0
cfn-lint version: cfn_lint-0.44.0
Hi!
After upgrading to cfn_lint version 0.44.0, AWS::Events::Rule resources no longer validate
This was reported valid since yesterday:
```yaml
TaskSchedule:
Type: AWS::Events::Rule
Properties:
Description: xxxxxxxxx
Name: !Join ["-", [!Ref EnvironmentName, scheduled, !Ref App]]
ScheduleExpression: "cron(15 9 * * ? *)"
State: "ENABLED"
Targets:
- Arn:
Fn::ImportValue: !Sub ${EnvironmentName}-microservice-cluster-arn
RoleArn: !GetAtt ServiceRole.Arn
Id: my-task
EcsParameters:
TaskCount: 1
TaskDefinitionArn: !Ref TaskDefinition
```
The error reported is: `E3021 An Events Rule can have up to 5 Targets`
There is only one Target, so this should still be valid
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/events/RuleTargetsLimit.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 from cfnlint.rules import CloudFormationLintRule
6 from cfnlint.rules import RuleMatch
7
8
9 class RuleTargetsLimit(CloudFormationLintRule):
10 """Check State Machine Definition"""
11 id = 'E3021'
12 shortdesc = 'Check Events Rule Targets are less than or equal to 5'
13 description = 'CloudWatch Events Rule can only support up to 5 targets'
14 source_url = 'https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/cloudwatch_limits_cwe.html'
15 tags = ['resources', 'events']
16 max_count = 5
17
18 def __init__(self):
19 """Init"""
20 super(RuleTargetsLimit, self).__init__()
21 self.resource_property_types = ['AWS::Events::Rule']
22 self.limits = {}
23
24 # pylint: disable=W0613
25 def check_value(self, value, path):
26 """Count them up """
27
28 resource_name = path[1]
29 if len(path) > 4:
30 if path[4] == 'Fn::If':
31 resource_name = '%s.%s' % (path[1], path[5])
32
33 if resource_name not in self.limits:
34 self.limits[resource_name] = {
35 'count': 0,
36 'path': path[:-1]
37 }
38
39 self.limits[resource_name]['count'] += 1
40 return []
41
42 def match_resource_properties(self, properties, _, path, cfn):
43 """Check CloudFormation Properties"""
44 matches = []
45 matches.extend(
46 cfn.check_value(
47 obj=properties, key='Targets',
48 path=path[:],
49 check_value=self.check_value
50 ))
51
52 for _, limit in self.limits.items():
53 if limit['count'] > self.max_count:
54 message = 'An Events Rule can have up to {0} Targets'
55 matches.append(RuleMatch(limit['path'], message.format(self.max_count)))
56
57 return matches
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/events/RuleTargetsLimit.py b/src/cfnlint/rules/resources/events/RuleTargetsLimit.py
--- a/src/cfnlint/rules/resources/events/RuleTargetsLimit.py
+++ b/src/cfnlint/rules/resources/events/RuleTargetsLimit.py
@@ -21,6 +21,9 @@
self.resource_property_types = ['AWS::Events::Rule']
self.limits = {}
+ def initialize(self, cfn):
+ self.limits = {}
+
# pylint: disable=W0613
def check_value(self, value, path):
"""Count them up """
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/events/RuleTargetsLimit.py b/src/cfnlint/rules/resources/events/RuleTargetsLimit.py\n--- a/src/cfnlint/rules/resources/events/RuleTargetsLimit.py\n+++ b/src/cfnlint/rules/resources/events/RuleTargetsLimit.py\n@@ -21,6 +21,9 @@\n self.resource_property_types = ['AWS::Events::Rule']\n self.limits = {}\n \n+ def initialize(self, cfn):\n+ self.limits = {}\n+\n # pylint: disable=W0613\n def check_value(self, value, path):\n \"\"\"Count them up \"\"\"\n", "issue": "E3021 after upgrading to cfn_lint-0.44.0\ncfn-lint version: cfn_lint-0.44.0\r\n\r\nHi! \r\n\r\nAfter upgrading to version cfn_lint 0.44.0 don't validate AWS::Events::Rule\r\n\r\nThis was reported valid since yesterday:\r\n\r\n```yaml\r\n TaskSchedule:\r\n Type: AWS::Events::Rule\r\n Properties:\r\n Description: xxxxxxxxx\r\n Name: !Join [\"-\", [!Ref EnvironmentName, scheduled, !Ref App]]\r\n ScheduleExpression: \"cron(15 9 * * ? *)\"\r\n State: \"ENABLED\"\r\n Targets:\r\n - Arn:\r\n Fn::ImportValue: !Sub ${EnvironmentName}-microservice-cluster-arn\r\n RoleArn: !GetAtt ServiceRole.Arn\r\n Id: my-task\r\n EcsParameters:\r\n TaskCount: 1\r\n TaskDefinitionArn: !Ref TaskDefinition\r\n```\r\n\r\nThe error reported is: `E3021 An Events Rule can have up to 5 Targets`\r\n\r\nThere is one Target so should still be valid \r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass RuleTargetsLimit(CloudFormationLintRule):\n \"\"\"Check State Machine Definition\"\"\"\n id = 'E3021'\n shortdesc = 'Check Events Rule Targets are less than or equal to 5'\n description = 'CloudWatch Events Rule can only support up to 5 targets'\n source_url = 'https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/cloudwatch_limits_cwe.html'\n tags = ['resources', 'events']\n max_count = 5\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(RuleTargetsLimit, self).__init__()\n self.resource_property_types = ['AWS::Events::Rule']\n self.limits = {}\n\n # pylint: disable=W0613\n def check_value(self, value, path):\n \"\"\"Count them up \"\"\"\n\n resource_name = path[1]\n if len(path) > 4:\n if path[4] == 'Fn::If':\n resource_name = '%s.%s' % (path[1], path[5])\n\n if resource_name not in self.limits:\n self.limits[resource_name] = {\n 'count': 0,\n 'path': path[:-1]\n }\n\n self.limits[resource_name]['count'] += 1\n return []\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n matches.extend(\n cfn.check_value(\n obj=properties, key='Targets',\n path=path[:],\n check_value=self.check_value\n ))\n\n for _, limit in self.limits.items():\n if limit['count'] > self.max_count:\n message = 'An Events Rule can have up to {0} Targets'\n matches.append(RuleMatch(limit['path'], message.format(self.max_count)))\n\n return matches\n", "path": "src/cfnlint/rules/resources/events/RuleTargetsLimit.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass RuleTargetsLimit(CloudFormationLintRule):\n \"\"\"Check State Machine Definition\"\"\"\n id = 'E3021'\n shortdesc = 'Check Events Rule Targets are less than or equal to 5'\n description = 'CloudWatch Events Rule can only support up to 5 targets'\n source_url = 'https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/cloudwatch_limits_cwe.html'\n tags = ['resources', 'events']\n max_count = 5\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(RuleTargetsLimit, self).__init__()\n self.resource_property_types = ['AWS::Events::Rule']\n self.limits = {}\n\n def initialize(self, cfn):\n self.limits = {}\n\n # pylint: disable=W0613\n def check_value(self, value, path):\n \"\"\"Count them up \"\"\"\n\n resource_name = path[1]\n if len(path) > 4:\n if path[4] == 'Fn::If':\n resource_name = '%s.%s' % (path[1], path[5])\n\n if resource_name not in self.limits:\n self.limits[resource_name] = {\n 'count': 0,\n 'path': path[:-1]\n }\n\n self.limits[resource_name]['count'] += 1\n return []\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n matches.extend(\n cfn.check_value(\n obj=properties, key='Targets',\n path=path[:],\n check_value=self.check_value\n ))\n\n for _, limit in self.limits.items():\n if limit['count'] > self.max_count:\n message = 'An Events Rule can have up to {0} Targets'\n matches.append(RuleMatch(limit['path'], message.format(self.max_count)))\n\n return matches\n", "path": "src/cfnlint/rules/resources/events/RuleTargetsLimit.py"}]} | 1,058 | 136 |
gh_patches_debug_32851 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-447 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Initiator dashboard: too many notifications
I get notifications for all projects of my organization, even if they are not my projects. I'm not sure I want to see all changes in all projects of my organization; I think it will be too much.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/notifications/signals.py`
Content:
```
1 from django.db.models import signals
2 from django.dispatch import receiver
3
4 from adhocracy4.actions.models import Action
5 from adhocracy4.actions.verbs import Verbs
6 from adhocracy4.follows.models import Follow
7 from adhocracy4.phases.models import Phase
8 from adhocracy4.projects.models import Project
9 from apps.organisations.models import Organisation
10 from . import emails
11
12
13 @receiver(signals.post_save, sender=Action)
14 def send_notifications(instance, created, **kwargs):
15 action = instance
16 verb = Verbs(action.verb)
17
18 if verb == Verbs.CREATE or verb == Verbs.ADD:
19 emails.NotifyCreatorEmail.send(action)
20
21 if action.project:
22 emails.NotifyModeratorsEmail.send(action)
23 emails.NotifyFollowersOnNewItemCreated.send(action)
24
25 elif verb == Verbs.SCHEDULE:
26 if isinstance(action.obj, Phase):
27 emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)
28
29
30 @receiver(signals.m2m_changed, sender=Project.moderators.through)
31 def autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs):
32 if action == 'post_add':
33 if not reverse:
34 project = instance
35 users_pks = pk_set
36
37 for user_pk in users_pks:
38 Follow.objects.update_or_create(
39 project=project,
40 creator_id=user_pk,
41 defaults={
42 'enabled': True
43 }
44 )
45 else:
46 user = instance
47 project_pks = pk_set
48
49 for project_pk in project_pks:
50 Follow.objects.update_or_create(
51 project_id=project_pk,
52 creator_id=user,
53 defaults={
54 'enabled': True
55 }
56 )
57
58
59 @receiver(signals.m2m_changed, sender=Organisation.initiators.through)
60 def autofollow_organisation_initiators(instance, action, pk_set, reverse,
61 **kwargs):
62 if action == 'post_add':
63 if not reverse:
64 organisation = instance
65 users_pks = pk_set
66
67 for project in Project.objects.filter(organisation=organisation):
68 for user_pk in users_pks:
69 Follow.objects.update_or_create(
70 project=project,
71 creator_id=user_pk,
72 defaults={
73 'enabled': True
74 }
75 )
76 else:
77 user = instance
78 organisation_pk_set = pk_set
79
80 for project in Project.objects.filter(
81 organisation_id__in=organisation_pk_set):
82 Follow.objects.update_or_create(
83 project=project,
84 creator=user,
85 defaults={
86 'enabled': True
87 }
88 )
89
90
91 @receiver(signals.post_save)
92 def autofollow_organisation_initiators_new_projects(sender, instance, created,
93 **kwargs):
94 if issubclass(sender, Project):
95 # we have to check if the senders inherits from Project to catch
96 # signals from external projects and bplans
97 project = instance
98 if created:
99 for user in project.organisation.initiators.all():
100 Follow.objects.update_or_create(
101 project=project,
102 creator=user,
103 defaults={
104 'enabled': True
105 }
106 )
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/notifications/signals.py b/apps/notifications/signals.py
--- a/apps/notifications/signals.py
+++ b/apps/notifications/signals.py
@@ -6,7 +6,6 @@
from adhocracy4.follows.models import Follow
from adhocracy4.phases.models import Phase
from adhocracy4.projects.models import Project
-from apps.organisations.models import Organisation
from . import emails
@@ -54,53 +53,3 @@
'enabled': True
}
)
-
-
-@receiver(signals.m2m_changed, sender=Organisation.initiators.through)
-def autofollow_organisation_initiators(instance, action, pk_set, reverse,
- **kwargs):
- if action == 'post_add':
- if not reverse:
- organisation = instance
- users_pks = pk_set
-
- for project in Project.objects.filter(organisation=organisation):
- for user_pk in users_pks:
- Follow.objects.update_or_create(
- project=project,
- creator_id=user_pk,
- defaults={
- 'enabled': True
- }
- )
- else:
- user = instance
- organisation_pk_set = pk_set
-
- for project in Project.objects.filter(
- organisation_id__in=organisation_pk_set):
- Follow.objects.update_or_create(
- project=project,
- creator=user,
- defaults={
- 'enabled': True
- }
- )
-
-
-@receiver(signals.post_save)
-def autofollow_organisation_initiators_new_projects(sender, instance, created,
- **kwargs):
- if issubclass(sender, Project):
- # we have to check if the senders inherits from Project to catch
- # signals from external projects and bplans
- project = instance
- if created:
- for user in project.organisation.initiators.all():
- Follow.objects.update_or_create(
- project=project,
- creator=user,
- defaults={
- 'enabled': True
- }
- )
| {"golden_diff": "diff --git a/apps/notifications/signals.py b/apps/notifications/signals.py\n--- a/apps/notifications/signals.py\n+++ b/apps/notifications/signals.py\n@@ -6,7 +6,6 @@\n from adhocracy4.follows.models import Follow\n from adhocracy4.phases.models import Phase\n from adhocracy4.projects.models import Project\n-from apps.organisations.models import Organisation\n from . import emails\n \n \n@@ -54,53 +53,3 @@\n 'enabled': True\n }\n )\n-\n-\n-@receiver(signals.m2m_changed, sender=Organisation.initiators.through)\n-def autofollow_organisation_initiators(instance, action, pk_set, reverse,\n- **kwargs):\n- if action == 'post_add':\n- if not reverse:\n- organisation = instance\n- users_pks = pk_set\n-\n- for project in Project.objects.filter(organisation=organisation):\n- for user_pk in users_pks:\n- Follow.objects.update_or_create(\n- project=project,\n- creator_id=user_pk,\n- defaults={\n- 'enabled': True\n- }\n- )\n- else:\n- user = instance\n- organisation_pk_set = pk_set\n-\n- for project in Project.objects.filter(\n- organisation_id__in=organisation_pk_set):\n- Follow.objects.update_or_create(\n- project=project,\n- creator=user,\n- defaults={\n- 'enabled': True\n- }\n- )\n-\n-\n-@receiver(signals.post_save)\n-def autofollow_organisation_initiators_new_projects(sender, instance, created,\n- **kwargs):\n- if issubclass(sender, Project):\n- # we have to check if the senders inherits from Project to catch\n- # signals from external projects and bplans\n- project = instance\n- if created:\n- for user in project.organisation.initiators.all():\n- Follow.objects.update_or_create(\n- project=project,\n- creator=user,\n- defaults={\n- 'enabled': True\n- }\n- )\n", "issue": "Initiator dashboard: too many notification\nI get notifications of any projects of my organization, even it's not my project. I'm not sure if I want to see all changes in all projects of my organization. I think it will be too much.\n", "before_files": [{"content": "from django.db.models import signals\nfrom django.dispatch import receiver\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.actions.verbs import Verbs\nfrom adhocracy4.follows.models import Follow\nfrom adhocracy4.phases.models import Phase\nfrom adhocracy4.projects.models import Project\nfrom apps.organisations.models import Organisation\nfrom . 
import emails\n\n\n@receiver(signals.post_save, sender=Action)\ndef send_notifications(instance, created, **kwargs):\n action = instance\n verb = Verbs(action.verb)\n\n if verb == Verbs.CREATE or verb == Verbs.ADD:\n emails.NotifyCreatorEmail.send(action)\n\n if action.project:\n emails.NotifyModeratorsEmail.send(action)\n emails.NotifyFollowersOnNewItemCreated.send(action)\n\n elif verb == Verbs.SCHEDULE:\n if isinstance(action.obj, Phase):\n emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)\n\n\n@receiver(signals.m2m_changed, sender=Project.moderators.through)\ndef autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs):\n if action == 'post_add':\n if not reverse:\n project = instance\n users_pks = pk_set\n\n for user_pk in users_pks:\n Follow.objects.update_or_create(\n project=project,\n creator_id=user_pk,\n defaults={\n 'enabled': True\n }\n )\n else:\n user = instance\n project_pks = pk_set\n\n for project_pk in project_pks:\n Follow.objects.update_or_create(\n project_id=project_pk,\n creator_id=user,\n defaults={\n 'enabled': True\n }\n )\n\n\n@receiver(signals.m2m_changed, sender=Organisation.initiators.through)\ndef autofollow_organisation_initiators(instance, action, pk_set, reverse,\n **kwargs):\n if action == 'post_add':\n if not reverse:\n organisation = instance\n users_pks = pk_set\n\n for project in Project.objects.filter(organisation=organisation):\n for user_pk in users_pks:\n Follow.objects.update_or_create(\n project=project,\n creator_id=user_pk,\n defaults={\n 'enabled': True\n }\n )\n else:\n user = instance\n organisation_pk_set = pk_set\n\n for project in Project.objects.filter(\n organisation_id__in=organisation_pk_set):\n Follow.objects.update_or_create(\n project=project,\n creator=user,\n defaults={\n 'enabled': True\n }\n )\n\n\n@receiver(signals.post_save)\ndef autofollow_organisation_initiators_new_projects(sender, instance, created,\n **kwargs):\n if issubclass(sender, Project):\n # we have to check if the senders inherits from Project to catch\n # signals from external projects and bplans\n project = instance\n if created:\n for user in project.organisation.initiators.all():\n Follow.objects.update_or_create(\n project=project,\n creator=user,\n defaults={\n 'enabled': True\n }\n )\n", "path": "apps/notifications/signals.py"}], "after_files": [{"content": "from django.db.models import signals\nfrom django.dispatch import receiver\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.actions.verbs import Verbs\nfrom adhocracy4.follows.models import Follow\nfrom adhocracy4.phases.models import Phase\nfrom adhocracy4.projects.models import Project\nfrom . 
import emails\n\n\n@receiver(signals.post_save, sender=Action)\ndef send_notifications(instance, created, **kwargs):\n action = instance\n verb = Verbs(action.verb)\n\n if verb == Verbs.CREATE or verb == Verbs.ADD:\n emails.NotifyCreatorEmail.send(action)\n\n if action.project:\n emails.NotifyModeratorsEmail.send(action)\n emails.NotifyFollowersOnNewItemCreated.send(action)\n\n elif verb == Verbs.SCHEDULE:\n if isinstance(action.obj, Phase):\n emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)\n\n\n@receiver(signals.m2m_changed, sender=Project.moderators.through)\ndef autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs):\n if action == 'post_add':\n if not reverse:\n project = instance\n users_pks = pk_set\n\n for user_pk in users_pks:\n Follow.objects.update_or_create(\n project=project,\n creator_id=user_pk,\n defaults={\n 'enabled': True\n }\n )\n else:\n user = instance\n project_pks = pk_set\n\n for project_pk in project_pks:\n Follow.objects.update_or_create(\n project_id=project_pk,\n creator_id=user,\n defaults={\n 'enabled': True\n }\n )\n", "path": "apps/notifications/signals.py"}]} | 1,181 | 451 |
gh_patches_debug_25520 | rasdani/github-patches | git_diff | pyca__cryptography-4427 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Decide when to deprecate 1.0.1
At some point we should start warning users with OpenSSL 1.0.1, as we did with 0.9.8 and 1.0.0, now that upstream is no longer supporting it. We don't have to decide when to actually drop it, but at some point we should be warning on it.
However, I don't think we should do it right now, as it'd be too easy to just contribute to warning fatigue: 71% of our downloads are currently using 1.0.1. Here's the last 11 months of progress:

And the SQL I used to generate that:
```sql
SELECT
STRFTIME_UTC_USEC(timestamp, "%Y-%m") AS yyyymm,
ROUND(100 * SUM(CASE WHEN SUBSTR(REGEXP_EXTRACT(details.openssl_version, r"^OpenSSL ([^ ]+) "), 0, 5) = "0.9.8" THEN 1 ELSE 0 END) / COUNT(*), 1) AS percent_098,
ROUND(100 * SUM(CASE WHEN SUBSTR(REGEXP_EXTRACT(details.openssl_version, r"^OpenSSL ([^ ]+) "), 0, 5) = "1.0.0" THEN 1 ELSE 0 END) / COUNT(*), 1) AS percent_100,
ROUND(100 * SUM(CASE WHEN SUBSTR(REGEXP_EXTRACT(details.openssl_version, r"^OpenSSL ([^ ]+) "), 0, 5) = "1.0.1" THEN 1 ELSE 0 END) / COUNT(*), 1) AS percent_101,
ROUND(100 * SUM(CASE WHEN SUBSTR(REGEXP_EXTRACT(details.openssl_version, r"^OpenSSL ([^ ]+) "), 0, 5) = "1.0.2" THEN 1 ELSE 0 END) / COUNT(*), 1) AS percent_102,
ROUND(100 * SUM(CASE WHEN SUBSTR(REGEXP_EXTRACT(details.openssl_version, r"^OpenSSL ([^ ]+) "), 0, 5) = "1.1.0" THEN 1 ELSE 0 END) / COUNT(*), 1) AS percent_110,
COUNT(*) as download_count
FROM
TABLE_DATE_RANGE(
[the-psf:pypi.downloads],
DATE_ADD(CURRENT_TIMESTAMP(), -1, "year"),
CURRENT_TIMESTAMP()
)
WHERE
details.openssl_version IS NOT NULL AND file.project = 'cryptography' and details.system.name NOT IN ("Windows", "Darwin")
GROUP BY
yyyymm
ORDER BY
yyyymm DESC
LIMIT 100
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/hazmat/bindings/openssl/binding.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import collections
8 import threading
9 import types
10
11 from cryptography import utils
12 from cryptography.exceptions import InternalError
13 from cryptography.hazmat.bindings._openssl import ffi, lib
14 from cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES
15
16 _OpenSSLErrorWithText = collections.namedtuple(
17 "_OpenSSLErrorWithText", ["code", "lib", "func", "reason", "reason_text"]
18 )
19
20
21 class _OpenSSLError(object):
22 def __init__(self, code, lib, func, reason):
23 self._code = code
24 self._lib = lib
25 self._func = func
26 self._reason = reason
27
28 def _lib_reason_match(self, lib, reason):
29 return lib == self.lib and reason == self.reason
30
31 code = utils.read_only_property("_code")
32 lib = utils.read_only_property("_lib")
33 func = utils.read_only_property("_func")
34 reason = utils.read_only_property("_reason")
35
36
37 def _consume_errors(lib):
38 errors = []
39 while True:
40 code = lib.ERR_get_error()
41 if code == 0:
42 break
43
44 err_lib = lib.ERR_GET_LIB(code)
45 err_func = lib.ERR_GET_FUNC(code)
46 err_reason = lib.ERR_GET_REASON(code)
47
48 errors.append(_OpenSSLError(code, err_lib, err_func, err_reason))
49
50 return errors
51
52
53 def _openssl_assert(lib, ok):
54 if not ok:
55 errors = _consume_errors(lib)
56 errors_with_text = []
57 for err in errors:
58 buf = ffi.new("char[]", 256)
59 lib.ERR_error_string_n(err.code, buf, len(buf))
60 err_text_reason = ffi.string(buf)
61
62 errors_with_text.append(
63 _OpenSSLErrorWithText(
64 err.code, err.lib, err.func, err.reason, err_text_reason
65 )
66 )
67
68 raise InternalError(
69 "Unknown OpenSSL error. This error is commonly encountered when "
70 "another library is not cleaning up the OpenSSL error stack. If "
71 "you are using cryptography with another library that uses "
72 "OpenSSL try disabling it before reporting a bug. Otherwise "
73 "please file an issue at https://github.com/pyca/cryptography/"
74 "issues with information on how to reproduce "
75 "this. ({0!r})".format(errors_with_text),
76 errors_with_text
77 )
78
79
80 def build_conditional_library(lib, conditional_names):
81 conditional_lib = types.ModuleType("lib")
82 conditional_lib._original_lib = lib
83 excluded_names = set()
84 for condition, names_cb in conditional_names.items():
85 if not getattr(lib, condition):
86 excluded_names.update(names_cb())
87
88 for attr in dir(lib):
89 if attr not in excluded_names:
90 setattr(conditional_lib, attr, getattr(lib, attr))
91
92 return conditional_lib
93
94
95 class Binding(object):
96 """
97 OpenSSL API wrapper.
98 """
99 lib = None
100 ffi = ffi
101 _lib_loaded = False
102 _init_lock = threading.Lock()
103 _lock_init_lock = threading.Lock()
104
105 def __init__(self):
106 self._ensure_ffi_initialized()
107
108 @classmethod
109 def _register_osrandom_engine(cls):
110 # Clear any errors extant in the queue before we start. In many
111 # scenarios other things may be interacting with OpenSSL in the same
112 # process space and it has proven untenable to assume that they will
113 # reliably clear the error queue. Once we clear it here we will
114 # error on any subsequent unexpected item in the stack.
115 cls.lib.ERR_clear_error()
116 cls._osrandom_engine_id = cls.lib.Cryptography_osrandom_engine_id
117 cls._osrandom_engine_name = cls.lib.Cryptography_osrandom_engine_name
118 result = cls.lib.Cryptography_add_osrandom_engine()
119 _openssl_assert(cls.lib, result in (1, 2))
120
121 @classmethod
122 def _ensure_ffi_initialized(cls):
123 with cls._init_lock:
124 if not cls._lib_loaded:
125 cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)
126 cls._lib_loaded = True
127 # initialize the SSL library
128 cls.lib.SSL_library_init()
129 # adds all ciphers/digests for EVP
130 cls.lib.OpenSSL_add_all_algorithms()
131 # loads error strings for libcrypto and libssl functions
132 cls.lib.SSL_load_error_strings()
133 cls._register_osrandom_engine()
134
135 @classmethod
136 def init_static_locks(cls):
137 with cls._lock_init_lock:
138 cls._ensure_ffi_initialized()
139 # Use Python's implementation if available, importing _ssl triggers
140 # the setup for this.
141 __import__("_ssl")
142
143 if (not cls.lib.Cryptography_HAS_LOCKING_CALLBACKS or
144 cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL):
145 return
146
147 # If nothing else has setup a locking callback already, we set up
148 # our own
149 res = lib.Cryptography_setup_ssl_threads()
150 _openssl_assert(cls.lib, res == 1)
151
152
153 # OpenSSL is not thread safe until the locks are initialized. We call this
154 # method in module scope so that it executes with the import lock. On
155 # Pythons < 3.4 this import lock is a global lock, which can prevent a race
156 # condition registering the OpenSSL locks. On Python 3.4+ the import lock
157 # is per module so this approach will not work.
158 Binding.init_static_locks()
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/hazmat/bindings/openssl/binding.py b/src/cryptography/hazmat/bindings/openssl/binding.py
--- a/src/cryptography/hazmat/bindings/openssl/binding.py
+++ b/src/cryptography/hazmat/bindings/openssl/binding.py
@@ -7,6 +7,7 @@
import collections
import threading
import types
+import warnings
from cryptography import utils
from cryptography.exceptions import InternalError
@@ -150,9 +151,24 @@
_openssl_assert(cls.lib, res == 1)
+def _verify_openssl_version(lib):
+ if (
+ lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and
+ not lib.CRYPTOGRAPHY_IS_LIBRESSL
+ ):
+ warnings.warn(
+ "OpenSSL version 1.0.1 is no longer supported by the OpenSSL "
+ "project, please upgrade. A future version of cryptography will "
+ "drop support for it.",
+ DeprecationWarning
+ )
+
+
# OpenSSL is not thread safe until the locks are initialized. We call this
# method in module scope so that it executes with the import lock. On
# Pythons < 3.4 this import lock is a global lock, which can prevent a race
# condition registering the OpenSSL locks. On Python 3.4+ the import lock
# is per module so this approach will not work.
Binding.init_static_locks()
+
+_verify_openssl_version(Binding.lib)
| {"golden_diff": "diff --git a/src/cryptography/hazmat/bindings/openssl/binding.py b/src/cryptography/hazmat/bindings/openssl/binding.py\n--- a/src/cryptography/hazmat/bindings/openssl/binding.py\n+++ b/src/cryptography/hazmat/bindings/openssl/binding.py\n@@ -7,6 +7,7 @@\n import collections\n import threading\n import types\n+import warnings\n \n from cryptography import utils\n from cryptography.exceptions import InternalError\n@@ -150,9 +151,24 @@\n _openssl_assert(cls.lib, res == 1)\n \n \n+def _verify_openssl_version(lib):\n+ if (\n+ lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and\n+ not lib.CRYPTOGRAPHY_IS_LIBRESSL\n+ ):\n+ warnings.warn(\n+ \"OpenSSL version 1.0.1 is no longer supported by the OpenSSL \"\n+ \"project, please upgrade. A future version of cryptography will \"\n+ \"drop support for it.\",\n+ DeprecationWarning\n+ )\n+\n+\n # OpenSSL is not thread safe until the locks are initialized. We call this\n # method in module scope so that it executes with the import lock. On\n # Pythons < 3.4 this import lock is a global lock, which can prevent a race\n # condition registering the OpenSSL locks. On Python 3.4+ the import lock\n # is per module so this approach will not work.\n Binding.init_static_locks()\n+\n+_verify_openssl_version(Binding.lib)\n", "issue": "Decide when to deprecate 1.0.1\nAt some point we should start warning about users with OpenSSL 1.0.1, as we did with 0.9.8 and 1.0.0, now that upstream is no longer supporting it. We don't have to make a decision about when to actually drop it, but at some point we should be warning on it.\r\n\r\nHowever, I don't think we should do it right now, as it'd be too easy to just contribute to warning fatigue, 71% of our downloads are currently using 1.0.1. Here's the last 11 months of progress: \r\n\r\n\r\n\r\nAnd the SQL I used to generate that:\r\n\r\n```sql\r\nSELECT\r\n STRFTIME_UTC_USEC(timestamp, \"%Y-%m\") AS yyyymm,\r\n ROUND(100 * SUM(CASE WHEN SUBSTR(REGEXP_EXTRACT(details.openssl_version, r\"^OpenSSL ([^ ]+) \"), 0, 5) = \"0.9.8\" THEN 1 ELSE 0 END) / COUNT(*), 1) AS percent_098,\r\n ROUND(100 * SUM(CASE WHEN SUBSTR(REGEXP_EXTRACT(details.openssl_version, r\"^OpenSSL ([^ ]+) \"), 0, 5) = \"1.0.0\" THEN 1 ELSE 0 END) / COUNT(*), 1) AS percent_100,\r\n ROUND(100 * SUM(CASE WHEN SUBSTR(REGEXP_EXTRACT(details.openssl_version, r\"^OpenSSL ([^ ]+) \"), 0, 5) = \"1.0.1\" THEN 1 ELSE 0 END) / COUNT(*), 1) AS percent_101,\r\n ROUND(100 * SUM(CASE WHEN SUBSTR(REGEXP_EXTRACT(details.openssl_version, r\"^OpenSSL ([^ ]+) \"), 0, 5) = \"1.0.2\" THEN 1 ELSE 0 END) / COUNT(*), 1) AS percent_102,\r\n ROUND(100 * SUM(CASE WHEN SUBSTR(REGEXP_EXTRACT(details.openssl_version, r\"^OpenSSL ([^ ]+) \"), 0, 5) = \"1.1.0\" THEN 1 ELSE 0 END) / COUNT(*), 1) AS percent_110,\r\n COUNT(*) as download_count\r\nFROM\r\n TABLE_DATE_RANGE(\r\n [the-psf:pypi.downloads],\r\n DATE_ADD(CURRENT_TIMESTAMP(), -1, \"year\"),\r\n CURRENT_TIMESTAMP()\r\n )\r\nWHERE\r\n details.openssl_version IS NOT NULL AND file.project = 'cryptography' and details.system.name NOT IN (\"Windows\", \"Darwin\")\r\nGROUP BY\r\n yyyymm\r\nORDER BY\r\n yyyymm DESC\r\nLIMIT 100\r\n```\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport collections\nimport threading\nimport types\n\nfrom cryptography import utils\nfrom cryptography.exceptions import InternalError\nfrom cryptography.hazmat.bindings._openssl import ffi, lib\nfrom cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES\n\n_OpenSSLErrorWithText = collections.namedtuple(\n \"_OpenSSLErrorWithText\", [\"code\", \"lib\", \"func\", \"reason\", \"reason_text\"]\n)\n\n\nclass _OpenSSLError(object):\n def __init__(self, code, lib, func, reason):\n self._code = code\n self._lib = lib\n self._func = func\n self._reason = reason\n\n def _lib_reason_match(self, lib, reason):\n return lib == self.lib and reason == self.reason\n\n code = utils.read_only_property(\"_code\")\n lib = utils.read_only_property(\"_lib\")\n func = utils.read_only_property(\"_func\")\n reason = utils.read_only_property(\"_reason\")\n\n\ndef _consume_errors(lib):\n errors = []\n while True:\n code = lib.ERR_get_error()\n if code == 0:\n break\n\n err_lib = lib.ERR_GET_LIB(code)\n err_func = lib.ERR_GET_FUNC(code)\n err_reason = lib.ERR_GET_REASON(code)\n\n errors.append(_OpenSSLError(code, err_lib, err_func, err_reason))\n\n return errors\n\n\ndef _openssl_assert(lib, ok):\n if not ok:\n errors = _consume_errors(lib)\n errors_with_text = []\n for err in errors:\n buf = ffi.new(\"char[]\", 256)\n lib.ERR_error_string_n(err.code, buf, len(buf))\n err_text_reason = ffi.string(buf)\n\n errors_with_text.append(\n _OpenSSLErrorWithText(\n err.code, err.lib, err.func, err.reason, err_text_reason\n )\n )\n\n raise InternalError(\n \"Unknown OpenSSL error. This error is commonly encountered when \"\n \"another library is not cleaning up the OpenSSL error stack. If \"\n \"you are using cryptography with another library that uses \"\n \"OpenSSL try disabling it before reporting a bug. Otherwise \"\n \"please file an issue at https://github.com/pyca/cryptography/\"\n \"issues with information on how to reproduce \"\n \"this. ({0!r})\".format(errors_with_text),\n errors_with_text\n )\n\n\ndef build_conditional_library(lib, conditional_names):\n conditional_lib = types.ModuleType(\"lib\")\n conditional_lib._original_lib = lib\n excluded_names = set()\n for condition, names_cb in conditional_names.items():\n if not getattr(lib, condition):\n excluded_names.update(names_cb())\n\n for attr in dir(lib):\n if attr not in excluded_names:\n setattr(conditional_lib, attr, getattr(lib, attr))\n\n return conditional_lib\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n lib = None\n ffi = ffi\n _lib_loaded = False\n _init_lock = threading.Lock()\n _lock_init_lock = threading.Lock()\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _register_osrandom_engine(cls):\n # Clear any errors extant in the queue before we start. In many\n # scenarios other things may be interacting with OpenSSL in the same\n # process space and it has proven untenable to assume that they will\n # reliably clear the error queue. 
Once we clear it here we will\n # error on any subsequent unexpected item in the stack.\n cls.lib.ERR_clear_error()\n cls._osrandom_engine_id = cls.lib.Cryptography_osrandom_engine_id\n cls._osrandom_engine_name = cls.lib.Cryptography_osrandom_engine_name\n result = cls.lib.Cryptography_add_osrandom_engine()\n _openssl_assert(cls.lib, result in (1, 2))\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n with cls._init_lock:\n if not cls._lib_loaded:\n cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)\n cls._lib_loaded = True\n # initialize the SSL library\n cls.lib.SSL_library_init()\n # adds all ciphers/digests for EVP\n cls.lib.OpenSSL_add_all_algorithms()\n # loads error strings for libcrypto and libssl functions\n cls.lib.SSL_load_error_strings()\n cls._register_osrandom_engine()\n\n @classmethod\n def init_static_locks(cls):\n with cls._lock_init_lock:\n cls._ensure_ffi_initialized()\n # Use Python's implementation if available, importing _ssl triggers\n # the setup for this.\n __import__(\"_ssl\")\n\n if (not cls.lib.Cryptography_HAS_LOCKING_CALLBACKS or\n cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL):\n return\n\n # If nothing else has setup a locking callback already, we set up\n # our own\n res = lib.Cryptography_setup_ssl_threads()\n _openssl_assert(cls.lib, res == 1)\n\n\n# OpenSSL is not thread safe until the locks are initialized. We call this\n# method in module scope so that it executes with the import lock. On\n# Pythons < 3.4 this import lock is a global lock, which can prevent a race\n# condition registering the OpenSSL locks. On Python 3.4+ the import lock\n# is per module so this approach will not work.\nBinding.init_static_locks()\n", "path": "src/cryptography/hazmat/bindings/openssl/binding.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport collections\nimport threading\nimport types\nimport warnings\n\nfrom cryptography import utils\nfrom cryptography.exceptions import InternalError\nfrom cryptography.hazmat.bindings._openssl import ffi, lib\nfrom cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES\n\n_OpenSSLErrorWithText = collections.namedtuple(\n \"_OpenSSLErrorWithText\", [\"code\", \"lib\", \"func\", \"reason\", \"reason_text\"]\n)\n\n\nclass _OpenSSLError(object):\n def __init__(self, code, lib, func, reason):\n self._code = code\n self._lib = lib\n self._func = func\n self._reason = reason\n\n def _lib_reason_match(self, lib, reason):\n return lib == self.lib and reason == self.reason\n\n code = utils.read_only_property(\"_code\")\n lib = utils.read_only_property(\"_lib\")\n func = utils.read_only_property(\"_func\")\n reason = utils.read_only_property(\"_reason\")\n\n\ndef _consume_errors(lib):\n errors = []\n while True:\n code = lib.ERR_get_error()\n if code == 0:\n break\n\n err_lib = lib.ERR_GET_LIB(code)\n err_func = lib.ERR_GET_FUNC(code)\n err_reason = lib.ERR_GET_REASON(code)\n\n errors.append(_OpenSSLError(code, err_lib, err_func, err_reason))\n\n return errors\n\n\ndef _openssl_assert(lib, ok):\n if not ok:\n errors = _consume_errors(lib)\n errors_with_text = []\n for err in errors:\n buf = ffi.new(\"char[]\", 256)\n lib.ERR_error_string_n(err.code, buf, len(buf))\n err_text_reason = ffi.string(buf)\n\n errors_with_text.append(\n _OpenSSLErrorWithText(\n err.code, err.lib, err.func, err.reason, err_text_reason\n )\n )\n\n raise InternalError(\n \"Unknown OpenSSL error. This error is commonly encountered when \"\n \"another library is not cleaning up the OpenSSL error stack. If \"\n \"you are using cryptography with another library that uses \"\n \"OpenSSL try disabling it before reporting a bug. Otherwise \"\n \"please file an issue at https://github.com/pyca/cryptography/\"\n \"issues with information on how to reproduce \"\n \"this. ({0!r})\".format(errors_with_text),\n errors_with_text\n )\n\n\ndef build_conditional_library(lib, conditional_names):\n conditional_lib = types.ModuleType(\"lib\")\n conditional_lib._original_lib = lib\n excluded_names = set()\n for condition, names_cb in conditional_names.items():\n if not getattr(lib, condition):\n excluded_names.update(names_cb())\n\n for attr in dir(lib):\n if attr not in excluded_names:\n setattr(conditional_lib, attr, getattr(lib, attr))\n\n return conditional_lib\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n lib = None\n ffi = ffi\n _lib_loaded = False\n _init_lock = threading.Lock()\n _lock_init_lock = threading.Lock()\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _register_osrandom_engine(cls):\n # Clear any errors extant in the queue before we start. In many\n # scenarios other things may be interacting with OpenSSL in the same\n # process space and it has proven untenable to assume that they will\n # reliably clear the error queue. 
Once we clear it here we will\n # error on any subsequent unexpected item in the stack.\n cls.lib.ERR_clear_error()\n cls._osrandom_engine_id = cls.lib.Cryptography_osrandom_engine_id\n cls._osrandom_engine_name = cls.lib.Cryptography_osrandom_engine_name\n result = cls.lib.Cryptography_add_osrandom_engine()\n _openssl_assert(cls.lib, result in (1, 2))\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n with cls._init_lock:\n if not cls._lib_loaded:\n cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)\n cls._lib_loaded = True\n # initialize the SSL library\n cls.lib.SSL_library_init()\n # adds all ciphers/digests for EVP\n cls.lib.OpenSSL_add_all_algorithms()\n # loads error strings for libcrypto and libssl functions\n cls.lib.SSL_load_error_strings()\n cls._register_osrandom_engine()\n\n @classmethod\n def init_static_locks(cls):\n with cls._lock_init_lock:\n cls._ensure_ffi_initialized()\n # Use Python's implementation if available, importing _ssl triggers\n # the setup for this.\n __import__(\"_ssl\")\n\n if (not cls.lib.Cryptography_HAS_LOCKING_CALLBACKS or\n cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL):\n return\n\n # If nothing else has setup a locking callback already, we set up\n # our own\n res = lib.Cryptography_setup_ssl_threads()\n _openssl_assert(cls.lib, res == 1)\n\n\ndef _verify_openssl_version(lib):\n if (\n lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and\n not lib.CRYPTOGRAPHY_IS_LIBRESSL\n ):\n warnings.warn(\n \"OpenSSL version 1.0.1 is no longer supported by the OpenSSL \"\n \"project, please upgrade. A future version of cryptography will \"\n \"drop support for it.\",\n DeprecationWarning\n )\n\n\n# OpenSSL is not thread safe until the locks are initialized. We call this\n# method in module scope so that it executes with the import lock. On\n# Pythons < 3.4 this import lock is a global lock, which can prevent a race\n# condition registering the OpenSSL locks. On Python 3.4+ the import lock\n# is per module so this approach will not work.\nBinding.init_static_locks()\n\n_verify_openssl_version(Binding.lib)\n", "path": "src/cryptography/hazmat/bindings/openssl/binding.py"}]} | 2,556 | 335 |
gh_patches_debug_1020 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-1970 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
district, topic and localisation for external projects and bplans
External projects also need district, topic and localisation, just as the other projects do.
The same is true for b-plans, as not all of them come via Imperia.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/bplan/tasks.py`
Content:
```
1 import json
2 import logging
3 import urllib
4
5 from background_task import background
6
7 from adhocracy4.administrative_districts.models import AdministrativeDistrict
8 from meinberlin.apps.bplan.models import Bplan
9
10 logger = logging.getLogger(__name__)
11
12
13 def get_features_from_bplan_api(endpoint):
14 url = 'https://bplan-prod.liqd.net/api/' + endpoint
15 req = urllib.request.Request(url)
16 res = urllib.request.urlopen(req)
17 res_body = res.read()
18 res_json = json.loads(res_body.decode("utf-8"))
19
20 return res_json.get('features')
21
22
23 def get_bplan_point_and_district_pk(bplan_identifier):
24 url_poi = 'bplan/points/' + \
25 '?bplan={}'.format(bplan_identifier.replace(' ', '%20'))
26
27 try:
28 features = get_features_from_bplan_api(url_poi)
29 if features:
30 district_pk = features[0]['properties']['bezirk']
31 point = features[0]
32
33 return point, district_pk
34
35 return None, None
36
37 except UnicodeEncodeError:
38 # catches bplan-identifiers with problematic chars
39 pass
40
41
42 def get_bplan_api_pk_to_a4_admin_district_dict():
43 url_dis = 'bezirke/'
44 features = get_features_from_bplan_api(url_dis)
45 dis_dict = {}
46 if features:
47 for district in features:
48
49 dis_model = AdministrativeDistrict.objects.filter(
50 name=district['properties']['name']
51 )
52 if dis_model:
53 dis_dict[district['properties']['pk']] = \
54 dis_model[0]
55 else:
56 dis_dict[district['properties']['pk']] = None
57
58 return dis_dict
59
60
61 @background(schedule=0)
62 def get_location_information(bplan_id):
63 bplan = Bplan.objects.get(pk=bplan_id)
64 point, district_pk = get_bplan_point_and_district_pk(bplan.identifier)
65 dis_dict = get_bplan_api_pk_to_a4_admin_district_dict()
66
67 if district_pk:
68 bplan.administrative_district = \
69 dis_dict[district_pk]
70 else:
71 logger.error(
72 "The identifier '{}' for bplan '{}' seems to be wrong. "
73 "It doesn't exist on https://bplan-prod.liqd.net/api/"
74 .format(bplan.identifier, bplan)
75 )
76 bplan.point = point
77 bplan.save(update_fields=['point', 'administrative_district'])
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/bplan/tasks.py b/meinberlin/apps/bplan/tasks.py
--- a/meinberlin/apps/bplan/tasks.py
+++ b/meinberlin/apps/bplan/tasks.py
@@ -74,4 +74,5 @@
.format(bplan.identifier, bplan)
)
bplan.point = point
- bplan.save(update_fields=['point', 'administrative_district'])
+ bplan.topics = ['URB']
+ bplan.save(update_fields=['point', 'administrative_district', 'topics'])
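Editor's note (not part of the dataset row): the golden diff above only changes the tail of `get_location_information()` in `meinberlin/apps/bplan/tasks.py`. A minimal sketch of that patched tail follows; the helper name `_finalize_bplan` is hypothetical, the `'URB'` topic code is copied verbatim from the diff, and the assumption that `Bplan.topics` accepts a list of codes comes from the issue text rather than from code shown in this record.

```python
def _finalize_bplan(bplan, point):
    """Hypothetical helper illustrating what the patched code does after the
    district lookup: store the point, tag the default topic, save the fields."""
    bplan.point = point
    bplan.topics = ['URB']  # topic code copied verbatim from the golden diff
    bplan.save(update_fields=['point', 'administrative_district', 'topics'])
```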
| {"golden_diff": "diff --git a/meinberlin/apps/bplan/tasks.py b/meinberlin/apps/bplan/tasks.py\n--- a/meinberlin/apps/bplan/tasks.py\n+++ b/meinberlin/apps/bplan/tasks.py\n@@ -74,4 +74,5 @@\n .format(bplan.identifier, bplan)\n )\n bplan.point = point\n- bplan.save(update_fields=['point', 'administrative_district'])\n+ bplan.topics = ['URB']\n+ bplan.save(update_fields=['point', 'administrative_district', 'topics'])\n", "issue": "district, topic and localisation for external projects and bplans\nexternal projects also need district, topic and localisation as the other projects do. \r\n\r\nsame is true for b-plans as not all of them come via imperia\n", "before_files": [{"content": "import json\nimport logging\nimport urllib\n\nfrom background_task import background\n\nfrom adhocracy4.administrative_districts.models import AdministrativeDistrict\nfrom meinberlin.apps.bplan.models import Bplan\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_features_from_bplan_api(endpoint):\n url = 'https://bplan-prod.liqd.net/api/' + endpoint\n req = urllib.request.Request(url)\n res = urllib.request.urlopen(req)\n res_body = res.read()\n res_json = json.loads(res_body.decode(\"utf-8\"))\n\n return res_json.get('features')\n\n\ndef get_bplan_point_and_district_pk(bplan_identifier):\n url_poi = 'bplan/points/' + \\\n '?bplan={}'.format(bplan_identifier.replace(' ', '%20'))\n\n try:\n features = get_features_from_bplan_api(url_poi)\n if features:\n district_pk = features[0]['properties']['bezirk']\n point = features[0]\n\n return point, district_pk\n\n return None, None\n\n except UnicodeEncodeError:\n # catches bplan-identifiers with problematic chars\n pass\n\n\ndef get_bplan_api_pk_to_a4_admin_district_dict():\n url_dis = 'bezirke/'\n features = get_features_from_bplan_api(url_dis)\n dis_dict = {}\n if features:\n for district in features:\n\n dis_model = AdministrativeDistrict.objects.filter(\n name=district['properties']['name']\n )\n if dis_model:\n dis_dict[district['properties']['pk']] = \\\n dis_model[0]\n else:\n dis_dict[district['properties']['pk']] = None\n\n return dis_dict\n\n\n@background(schedule=0)\ndef get_location_information(bplan_id):\n bplan = Bplan.objects.get(pk=bplan_id)\n point, district_pk = get_bplan_point_and_district_pk(bplan.identifier)\n dis_dict = get_bplan_api_pk_to_a4_admin_district_dict()\n\n if district_pk:\n bplan.administrative_district = \\\n dis_dict[district_pk]\n else:\n logger.error(\n \"The identifier '{}' for bplan '{}' seems to be wrong. 
\"\n \"It doesn't exist on https://bplan-prod.liqd.net/api/\"\n .format(bplan.identifier, bplan)\n )\n bplan.point = point\n bplan.save(update_fields=['point', 'administrative_district'])\n", "path": "meinberlin/apps/bplan/tasks.py"}], "after_files": [{"content": "import json\nimport logging\nimport urllib\n\nfrom background_task import background\n\nfrom adhocracy4.administrative_districts.models import AdministrativeDistrict\nfrom meinberlin.apps.bplan.models import Bplan\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_features_from_bplan_api(endpoint):\n url = 'https://bplan-prod.liqd.net/api/' + endpoint\n req = urllib.request.Request(url)\n res = urllib.request.urlopen(req)\n res_body = res.read()\n res_json = json.loads(res_body.decode(\"utf-8\"))\n\n return res_json.get('features')\n\n\ndef get_bplan_point_and_district_pk(bplan_identifier):\n url_poi = 'bplan/points/' + \\\n '?bplan={}'.format(bplan_identifier.replace(' ', '%20'))\n\n try:\n features = get_features_from_bplan_api(url_poi)\n if features:\n district_pk = features[0]['properties']['bezirk']\n point = features[0]\n\n return point, district_pk\n\n return None, None\n\n except UnicodeEncodeError:\n # catches bplan-identifiers with problematic chars\n pass\n\n\ndef get_bplan_api_pk_to_a4_admin_district_dict():\n url_dis = 'bezirke/'\n features = get_features_from_bplan_api(url_dis)\n dis_dict = {}\n if features:\n for district in features:\n\n dis_model = AdministrativeDistrict.objects.filter(\n name=district['properties']['name']\n )\n if dis_model:\n dis_dict[district['properties']['pk']] = \\\n dis_model[0]\n else:\n dis_dict[district['properties']['pk']] = None\n\n return dis_dict\n\n\n@background(schedule=0)\ndef get_location_information(bplan_id):\n bplan = Bplan.objects.get(pk=bplan_id)\n point, district_pk = get_bplan_point_and_district_pk(bplan.identifier)\n dis_dict = get_bplan_api_pk_to_a4_admin_district_dict()\n\n if district_pk:\n bplan.administrative_district = \\\n dis_dict[district_pk]\n else:\n logger.error(\n \"The identifier '{}' for bplan '{}' seems to be wrong. \"\n \"It doesn't exist on https://bplan-prod.liqd.net/api/\"\n .format(bplan.identifier, bplan)\n )\n bplan.point = point\n bplan.topics = ['URB']\n bplan.save(update_fields=['point', 'administrative_district', 'topics'])\n", "path": "meinberlin/apps/bplan/tasks.py"}]} | 980 | 123 |
gh_patches_debug_3356 | rasdani/github-patches | git_diff | pypa__setuptools-3106 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Entry points are missing when supplied as a string.
### setuptools version
setuptools==60.9.0
### Python version
3.8.12
### OS
Debian
### Additional environment information
_No response_
### Description
When installing a Python package which uses `setuptools` and has at least one `entry_point` directly from Git, the `entry_point` script is not created with `setuptools==60.9.0`.
I have verified that this does work with `setuptools=60.8.2` but not `60.9.0`.
### Expected behavior
The expected `entry_point` script `tap-postgres` is present in `venv/bin/`.
### How to Reproduce
```
python -m venv venv
source venv/bin/activate
pip install git+https://github.com/thread/pipelinewise-tap-postgres.git@ee9bedb4711bb27c9660892f56c8fb27f3770d2a
```
### Output
```console
```
[BUG] Entry points are missing when supplied as a string.
### setuptools version
setuptools==60.9.0
### Python version
3.8.12
### OS
Debian
### Additional environment information
_No response_
### Description
When installing a Python package which uses `setuptools` and has at least one `entry_point` directly from Git, the `entry_point` script is not created with `setuptools==60.9.0`.
I have verified that this does work with `setuptools=60.8.2` but not `60.9.0`.
### Expected behavior
The expected `entry_point` script `tap-postgres` is present in `venv/bin/`.
### How to Reproduce
```
python -m venv venv
source venv/bin/activate
pip install git+https://github.com/thread/pipelinewise-tap-postgres.git@ee9bedb4711bb27c9660892f56c8fb27f3770d2a
```
### Output
```console
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setuptools/_entry_points.py`
Content:
```
1 import functools
2 import operator
3 import itertools
4
5 from .extern.jaraco.text import yield_lines
6 from .extern.jaraco.functools import pass_none
7 from ._importlib import metadata
8 from ._itertools import ensure_unique
9 from .extern.more_itertools import consume
10
11
12 def ensure_valid(ep):
13 """
14 Exercise one of the dynamic properties to trigger
15 the pattern match.
16 """
17 ep.extras
18
19
20 def load_group(value, group):
21 """
22 Given a value of an entry point or series of entry points,
23 return each as an EntryPoint.
24 """
25 # normalize to a single sequence of lines
26 lines = yield_lines(value)
27 text = f'[{group}]\n' + '\n'.join(lines)
28 return metadata.EntryPoints._from_text(text)
29
30
31 def by_group_and_name(ep):
32 return ep.group, ep.name
33
34
35 def validate(eps: metadata.EntryPoints):
36 """
37 Ensure entry points are unique by group and name and validate each.
38 """
39 consume(map(ensure_valid, ensure_unique(eps, key=by_group_and_name)))
40 return eps
41
42
43 @functools.singledispatch
44 def load(eps):
45 """
46 Given a Distribution.entry_points, produce EntryPoints.
47 """
48 groups = itertools.chain.from_iterable(
49 load_group(value, group)
50 for group, value in eps.items())
51 return validate(metadata.EntryPoints(groups))
52
53
54 @load.register(str)
55 def _(eps):
56 return validate(metadata.EntryPoints._from_text(eps))
57
58
59 load.register(type(None), lambda x: x)
60
61
62 @pass_none
63 def render(eps: metadata.EntryPoints):
64 by_group = operator.attrgetter('group')
65 groups = itertools.groupby(sorted(eps, key=by_group), by_group)
66
67 return '\n'.join(
68 f'[{group}]\n{render_items(items)}\n'
69 for group, items in groups
70 )
71
72
73 def render_items(eps):
74 return '\n'.join(
75 f'{ep.name} = {ep.value}'
76 for ep in sorted(eps)
77 )
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setuptools/_entry_points.py b/setuptools/_entry_points.py
--- a/setuptools/_entry_points.py
+++ b/setuptools/_entry_points.py
@@ -53,7 +53,16 @@
@load.register(str)
def _(eps):
- return validate(metadata.EntryPoints._from_text(eps))
+ r"""
+ >>> ep, = load('[console_scripts]\nfoo=bar')
+ >>> ep.group
+ 'console_scripts'
+ >>> ep.name
+ 'foo'
+ >>> ep.value
+ 'bar'
+ """
+ return validate(metadata.EntryPoints(metadata.EntryPoints._from_text(eps)))
load.register(type(None), lambda x: x)
| {"golden_diff": "diff --git a/setuptools/_entry_points.py b/setuptools/_entry_points.py\n--- a/setuptools/_entry_points.py\n+++ b/setuptools/_entry_points.py\n@@ -53,7 +53,16 @@\n \n @load.register(str)\n def _(eps):\n- return validate(metadata.EntryPoints._from_text(eps))\n+ r\"\"\"\n+ >>> ep, = load('[console_scripts]\\nfoo=bar')\n+ >>> ep.group\n+ 'console_scripts'\n+ >>> ep.name\n+ 'foo'\n+ >>> ep.value\n+ 'bar'\n+ \"\"\"\n+ return validate(metadata.EntryPoints(metadata.EntryPoints._from_text(eps)))\n \n \n load.register(type(None), lambda x: x)\n", "issue": "[BUG] Entry points are missing when supplied as a string.\n### setuptools version\r\n\r\nsetuptools==60.9.0\r\n\r\n### Python version\r\n\r\n3.8.12\r\n\r\n### OS\r\n\r\nDebian\r\n\r\n### Additional environment information\r\n\r\n_No response_\r\n\r\n### Description\r\n\r\nWhen installing a Python package which uses `setuptools` and has at least one `entry_point` directly from Git the `entry_point` script is not created with `setuptools==60.9.0`.\r\n\r\nI have verified that this does work with `setuptools=60.8.2` but not `60.9.0`.\r\n\r\n### Expected behavior\r\n\r\nThe expected `entry_point` script `tap-postgres` is present in `venv/bin/`.\r\n\r\n### How to Reproduce\r\n\r\n```\r\npython -m venv venv\r\nsource venv/bin/activate\r\npip install git+https://github.com/thread/pipelinewise-tap-postgres.git@ee9bedb4711bb27c9660892f56c8fb27f3770d2a\r\n```\r\n\r\n### Output\r\n\r\n```console\r\n\r\n```\r\n\n[BUG] Entry points are missing when supplied as a string.\n### setuptools version\r\n\r\nsetuptools==60.9.0\r\n\r\n### Python version\r\n\r\n3.8.12\r\n\r\n### OS\r\n\r\nDebian\r\n\r\n### Additional environment information\r\n\r\n_No response_\r\n\r\n### Description\r\n\r\nWhen installing a Python package which uses `setuptools` and has at least one `entry_point` directly from Git the `entry_point` script is not created with `setuptools==60.9.0`.\r\n\r\nI have verified that this does work with `setuptools=60.8.2` but not `60.9.0`.\r\n\r\n### Expected behavior\r\n\r\nThe expected `entry_point` script `tap-postgres` is present in `venv/bin/`.\r\n\r\n### How to Reproduce\r\n\r\n```\r\npython -m venv venv\r\nsource venv/bin/activate\r\npip install git+https://github.com/thread/pipelinewise-tap-postgres.git@ee9bedb4711bb27c9660892f56c8fb27f3770d2a\r\n```\r\n\r\n### Output\r\n\r\n```console\r\n\r\n```\r\n\n", "before_files": [{"content": "import functools\nimport operator\nimport itertools\n\nfrom .extern.jaraco.text import yield_lines\nfrom .extern.jaraco.functools import pass_none\nfrom ._importlib import metadata\nfrom ._itertools import ensure_unique\nfrom .extern.more_itertools import consume\n\n\ndef ensure_valid(ep):\n \"\"\"\n Exercise one of the dynamic properties to trigger\n the pattern match.\n \"\"\"\n ep.extras\n\n\ndef load_group(value, group):\n \"\"\"\n Given a value of an entry point or series of entry points,\n return each as an EntryPoint.\n \"\"\"\n # normalize to a single sequence of lines\n lines = yield_lines(value)\n text = f'[{group}]\\n' + '\\n'.join(lines)\n return metadata.EntryPoints._from_text(text)\n\n\ndef by_group_and_name(ep):\n return ep.group, ep.name\n\n\ndef validate(eps: metadata.EntryPoints):\n \"\"\"\n Ensure entry points are unique by group and name and validate each.\n \"\"\"\n consume(map(ensure_valid, ensure_unique(eps, key=by_group_and_name)))\n return eps\n\n\[email protected]\ndef load(eps):\n \"\"\"\n Given a Distribution.entry_points, produce EntryPoints.\n \"\"\"\n groups = 
itertools.chain.from_iterable(\n load_group(value, group)\n for group, value in eps.items())\n return validate(metadata.EntryPoints(groups))\n\n\[email protected](str)\ndef _(eps):\n return validate(metadata.EntryPoints._from_text(eps))\n\n\nload.register(type(None), lambda x: x)\n\n\n@pass_none\ndef render(eps: metadata.EntryPoints):\n by_group = operator.attrgetter('group')\n groups = itertools.groupby(sorted(eps, key=by_group), by_group)\n\n return '\\n'.join(\n f'[{group}]\\n{render_items(items)}\\n'\n for group, items in groups\n )\n\n\ndef render_items(eps):\n return '\\n'.join(\n f'{ep.name} = {ep.value}'\n for ep in sorted(eps)\n )\n", "path": "setuptools/_entry_points.py"}], "after_files": [{"content": "import functools\nimport operator\nimport itertools\n\nfrom .extern.jaraco.text import yield_lines\nfrom .extern.jaraco.functools import pass_none\nfrom ._importlib import metadata\nfrom ._itertools import ensure_unique\nfrom .extern.more_itertools import consume\n\n\ndef ensure_valid(ep):\n \"\"\"\n Exercise one of the dynamic properties to trigger\n the pattern match.\n \"\"\"\n ep.extras\n\n\ndef load_group(value, group):\n \"\"\"\n Given a value of an entry point or series of entry points,\n return each as an EntryPoint.\n \"\"\"\n # normalize to a single sequence of lines\n lines = yield_lines(value)\n text = f'[{group}]\\n' + '\\n'.join(lines)\n return metadata.EntryPoints._from_text(text)\n\n\ndef by_group_and_name(ep):\n return ep.group, ep.name\n\n\ndef validate(eps: metadata.EntryPoints):\n \"\"\"\n Ensure entry points are unique by group and name and validate each.\n \"\"\"\n consume(map(ensure_valid, ensure_unique(eps, key=by_group_and_name)))\n return eps\n\n\[email protected]\ndef load(eps):\n \"\"\"\n Given a Distribution.entry_points, produce EntryPoints.\n \"\"\"\n groups = itertools.chain.from_iterable(\n load_group(value, group)\n for group, value in eps.items())\n return validate(metadata.EntryPoints(groups))\n\n\[email protected](str)\ndef _(eps):\n r\"\"\"\n >>> ep, = load('[console_scripts]\\nfoo=bar')\n >>> ep.group\n 'console_scripts'\n >>> ep.name\n 'foo'\n >>> ep.value\n 'bar'\n \"\"\"\n return validate(metadata.EntryPoints(metadata.EntryPoints._from_text(eps)))\n\n\nload.register(type(None), lambda x: x)\n\n\n@pass_none\ndef render(eps: metadata.EntryPoints):\n by_group = operator.attrgetter('group')\n groups = itertools.groupby(sorted(eps, key=by_group), by_group)\n\n return '\\n'.join(\n f'[{group}]\\n{render_items(items)}\\n'\n for group, items in groups\n )\n\n\ndef render_items(eps):\n return '\\n'.join(\n f'{ep.name} = {ep.value}'\n for ep in sorted(eps)\n )\n", "path": "setuptools/_entry_points.py"}]} | 1,343 | 156 |
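Editor's note (not part of the dataset row above): the doctest that the golden diff adds to the `str` overload of `load()` doubles as a small regression check. A stand-alone version is sketched below; it assumes an environment where the patched `setuptools/_entry_points.py` from this record is importable, and it mirrors the doctest rather than adding new behaviour.

```python
# Smoke test mirroring the doctest added by the golden diff: an INI-style
# entry-points string must come back as real EntryPoint objects, otherwise
# console scripts are silently dropped (the bug reported in the issue).
from setuptools._entry_points import load

ep, = load('[console_scripts]\nfoo=bar')
assert ep.group == 'console_scripts'
assert ep.name == 'foo'
assert ep.value == 'bar'
print('entry-point round-trip OK')
```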
gh_patches_debug_1189 | rasdani/github-patches | git_diff | oobabooga__text-generation-webui-4905 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
coqui_tts fails to load as assumes interactive sessions to accept ToS
### Describe the bug
When enabled, coqui_tts prevents textgen from starting, as it expects an interactive session for a user to accept a ToS agreement.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
- Enable coqui_tts
- Restart textgen
- Note that textgen never starts
- Check console logs
```
2023-12-12 22:13:22 INFO:Loading the extension "coqui_tts"...
[XTTS] Loading XTTS...
> You must agree to the terms of service to use this model.
| > Please see the terms of service at https://coqui.ai/cpml.txt
| > "I have read, understood and agreed to the Terms and Conditions." - [y/n]
```
- No way to accept non-interactively
### Screenshot
_No response_
### Logs
```shell
INFO: Started server process [37]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://0.0.0.0:5001 (Press CTRL+C to quit)
2023-12-12 22:13:18 DEBUG:Intercepting all calls to posthog.
2023-12-12 22:13:19 DEBUG:Creating Sentence Embedder...
2023-12-12 22:13:20 WARNING:Using embedded DuckDB without persistence: data will be transient
2023-12-12 22:13:22 DEBUG:Loading hyperparameters...
2023-12-12 22:13:22 INFO:Loading the extension "coqui_tts"...
[XTTS] Loading XTTS...
> You must agree to the terms of service to use this model.
| > Please see the terms of service at https://coqui.ai/cpml.txt
| > "I have read, understood and agreed to the Terms and Conditions." - [y/n]
```
### System Info
```shell
Latest official docker image running on server.
```
Note that a workaround for this is to remove coqui_tts and install "alltalk_tts" instead, which seems to work without issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `extensions/coqui_tts/script.py`
Content:
```
1 import html
2 import json
3 import random
4 import time
5 from pathlib import Path
6
7 import gradio as gr
8
9 from modules import chat, shared, ui_chat
10 from modules.logging_colors import logger
11 from modules.ui import create_refresh_button
12 from modules.utils import gradio
13
14 try:
15 from TTS.api import TTS
16 from TTS.utils.synthesizer import Synthesizer
17 except ModuleNotFoundError:
18 logger.error(
19 "Could not find the TTS module. Make sure to install the requirements for the coqui_tts extension."
20 "\n"
21 "\nLinux / Mac:\npip install -r extensions/coqui_tts/requirements.txt\n"
22 "\nWindows:\npip install -r extensions\\coqui_tts\\requirements.txt\n"
23 "\n"
24 "If you used the one-click installer, paste the command above in the terminal window launched after running the \"cmd_\" script. On Windows, that's \"cmd_windows.bat\"."
25 )
26
27 raise
28
29
30 params = {
31 "activate": True,
32 "autoplay": True,
33 "show_text": False,
34 "remove_trailing_dots": False,
35 "voice": "female_01.wav",
36 "language": "English",
37 "model_name": "tts_models/multilingual/multi-dataset/xtts_v2",
38 "device": "cuda"
39 }
40
41 this_dir = str(Path(__file__).parent.resolve())
42 model = None
43 with open(Path(f"{this_dir}/languages.json"), encoding='utf8') as f:
44 languages = json.load(f)
45
46
47 def get_available_voices():
48 return sorted([voice.name for voice in Path(f"{this_dir}/voices").glob("*.wav")])
49
50
51 def preprocess(raw_input):
52 raw_input = html.unescape(raw_input)
53 # raw_input = raw_input.strip("\"")
54 return raw_input
55
56
57 def new_split_into_sentences(self, text):
58 sentences = self.seg.segment(text)
59 if params['remove_trailing_dots']:
60 sentences_without_dots = []
61 for sentence in sentences:
62 if sentence.endswith('.') and not sentence.endswith('...'):
63 sentence = sentence[:-1]
64
65 sentences_without_dots.append(sentence)
66
67 return sentences_without_dots
68 else:
69 return sentences
70
71
72 Synthesizer.split_into_sentences = new_split_into_sentences
73
74
75 def load_model():
76 model = TTS(params["model_name"]).to(params["device"])
77 return model
78
79
80 def remove_tts_from_history(history):
81 for i, entry in enumerate(history['internal']):
82 history['visible'][i] = [history['visible'][i][0], entry[1]]
83
84 return history
85
86
87 def toggle_text_in_history(history):
88 for i, entry in enumerate(history['visible']):
89 visible_reply = entry[1]
90 if visible_reply.startswith('<audio'):
91 if params['show_text']:
92 reply = history['internal'][i][1]
93 history['visible'][i] = [history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>\n\n{reply}"]
94 else:
95 history['visible'][i] = [history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>"]
96
97 return history
98
99
100 def random_sentence():
101 with open(Path("extensions/coqui_tts/harvard_sentences.txt")) as f:
102 return random.choice(list(f))
103
104
105 def voice_preview(string):
106 string = html.unescape(string) or random_sentence()
107
108 output_file = Path('extensions/coqui_tts/outputs/voice_preview.wav')
109 model.tts_to_file(
110 text=string,
111 file_path=output_file,
112 speaker_wav=[f"{this_dir}/voices/{params['voice']}"],
113 language=languages[params["language"]]
114 )
115
116 return f'<audio src="file/{output_file.as_posix()}?{int(time.time())}" controls autoplay></audio>'
117
118
119 def history_modifier(history):
120 # Remove autoplay from the last reply
121 if len(history['internal']) > 0:
122 history['visible'][-1] = [
123 history['visible'][-1][0],
124 history['visible'][-1][1].replace('controls autoplay>', 'controls>')
125 ]
126
127 return history
128
129
130 def state_modifier(state):
131 if not params['activate']:
132 return state
133
134 state['stream'] = False
135 return state
136
137
138 def input_modifier(string, state):
139 if not params['activate']:
140 return string
141
142 shared.processing_message = "*Is recording a voice message...*"
143 return string
144
145
146 def output_modifier(string, state):
147 if not params['activate']:
148 return string
149
150 original_string = string
151 string = preprocess(html.unescape(string))
152 if string == '':
153 string = '*Empty reply, try regenerating*'
154 else:
155 output_file = Path(f'extensions/coqui_tts/outputs/{state["character_menu"]}_{int(time.time())}.wav')
156 model.tts_to_file(
157 text=string,
158 file_path=output_file,
159 speaker_wav=[f"{this_dir}/voices/{params['voice']}"],
160 language=languages[params["language"]]
161 )
162
163 autoplay = 'autoplay' if params['autoplay'] else ''
164 string = f'<audio src="file/{output_file.as_posix()}" controls {autoplay}></audio>'
165 if params['show_text']:
166 string += f'\n\n{original_string}'
167
168 shared.processing_message = "*Is typing...*"
169 return string
170
171
172 def custom_css():
173 path_to_css = Path(f"{this_dir}/style.css")
174 return open(path_to_css, 'r').read()
175
176
177 def setup():
178 global model
179 print("[XTTS] Loading XTTS...")
180 model = load_model()
181 print("[XTTS] Done!")
182 Path(f"{this_dir}/outputs").mkdir(parents=True, exist_ok=True)
183
184
185 def ui():
186 with gr.Accordion("Coqui TTS (XTTSv2)"):
187 with gr.Row():
188 activate = gr.Checkbox(value=params['activate'], label='Activate TTS')
189 autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically')
190
191 with gr.Row():
192 show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player')
193 remove_trailing_dots = gr.Checkbox(value=params['remove_trailing_dots'], label='Remove trailing "." from text segments before converting to audio')
194
195 with gr.Row():
196 with gr.Row():
197 voice = gr.Dropdown(get_available_voices(), label="Voice wav", value=params["voice"])
198 create_refresh_button(voice, lambda: None, lambda: {'choices': get_available_voices(), 'value': params["voice"]}, 'refresh-button')
199
200 language = gr.Dropdown(languages.keys(), label="Language", value=params["language"])
201
202 with gr.Row():
203 preview_text = gr.Text(show_label=False, placeholder="Preview text", elem_id="silero_preview_text")
204 preview_play = gr.Button("Preview")
205 preview_audio = gr.HTML(visible=False)
206
207 with gr.Row():
208 convert = gr.Button('Permanently replace audios with the message texts')
209 convert_cancel = gr.Button('Cancel', visible=False)
210 convert_confirm = gr.Button('Confirm (cannot be undone)', variant="stop", visible=False)
211
212 # Convert history with confirmation
213 convert_arr = [convert_confirm, convert, convert_cancel]
214 convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)
215 convert_confirm.click(
216 lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr).then(
217 remove_tts_from_history, gradio('history'), gradio('history')).then(
218 chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(
219 chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))
220
221 convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
222
223 # Toggle message text in history
224 show_text.change(
225 lambda x: params.update({"show_text": x}), show_text, None).then(
226 toggle_text_in_history, gradio('history'), gradio('history')).then(
227 chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(
228 chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))
229
230 # Event functions to update the parameters in the backend
231 activate.change(lambda x: params.update({"activate": x}), activate, None)
232 autoplay.change(lambda x: params.update({"autoplay": x}), autoplay, None)
233 remove_trailing_dots.change(lambda x: params.update({"remove_trailing_dots": x}), remove_trailing_dots, None)
234 voice.change(lambda x: params.update({"voice": x}), voice, None)
235 language.change(lambda x: params.update({"language": x}), language, None)
236
237 # Play preview
238 preview_text.submit(voice_preview, preview_text, preview_audio)
239 preview_play.click(voice_preview, preview_text, preview_audio)
240
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/extensions/coqui_tts/script.py b/extensions/coqui_tts/script.py
--- a/extensions/coqui_tts/script.py
+++ b/extensions/coqui_tts/script.py
@@ -1,3 +1,4 @@
+import os
import html
import json
import random
@@ -26,6 +27,7 @@
raise
+os.environ["COQUI_TOS_AGREED"] = "1"
params = {
"activate": True,
| {"golden_diff": "diff --git a/extensions/coqui_tts/script.py b/extensions/coqui_tts/script.py\n--- a/extensions/coqui_tts/script.py\n+++ b/extensions/coqui_tts/script.py\n@@ -1,3 +1,4 @@\n+import os\n import html\n import json\n import random\n@@ -26,6 +27,7 @@\n \n raise\n \n+os.environ[\"COQUI_TOS_AGREED\"] = \"1\"\n \n params = {\n \"activate\": True,\n", "issue": "coqui_tts fails to load as assumes interactive sessions to accept ToS\n### Describe the bug\r\n\r\nWhen enabled coqui_tts prevents textgen from starting as it expects an interactive session for a user to accept a ToS agreement\r\n\r\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Reproduction\r\n\r\n- Enable coqui_tts\r\n- Restart textgen\r\n- Note that textgen never starts\r\n- Check console logs\r\n```\r\n2023-12-12 22:13:22 INFO:Loading the extension \"coqui_tts\"...\r\n[XTTS] Loading XTTS...\r\n > You must agree to the terms of service to use this model.\r\n | > Please see the terms of service at https://coqui.ai/cpml.txt\r\n | > \"I have read, understood and agreed to the Terms and Conditions.\" - [y/n]\r\n```\r\n- No way to accept non-interactively\r\n\r\n### Screenshot\r\n\r\n_No response_\r\n\r\n### Logs\r\n\r\n```shell\r\nINFO: Started server process [37]\r\nINFO: Waiting for application startup.\r\nINFO: Application startup complete.\r\nINFO: Uvicorn running on http://0.0.0.0:5001 (Press CTRL+C to quit)\r\n2023-12-12 22:13:18 DEBUG:Intercepting all calls to posthog.\r\n2023-12-12 22:13:19 DEBUG:Creating Sentence Embedder...\r\n2023-12-12 22:13:20 WARNING:Using embedded DuckDB without persistence: data will be transient\r\n2023-12-12 22:13:22 DEBUG:Loading hyperparameters...\r\n2023-12-12 22:13:22 INFO:Loading the extension \"coqui_tts\"...\r\n[XTTS] Loading XTTS...\r\n > You must agree to the terms of service to use this model.\r\n | > Please see the terms of service at https://coqui.ai/cpml.txt\r\n | > \"I have read, understood and agreed to the Terms and Conditions.\" - [y/n]\r\n```\r\n\r\n\r\n### System Info\r\n\r\n```shell\r\nLatest official docker image running on server.\r\n```\r\n\r\n\r\nNote that a workaround for this is to remove coqui_tts and install \"alltalk_tts\" instead which seems to work without issue.\n", "before_files": [{"content": "import html\nimport json\nimport random\nimport time\nfrom pathlib import Path\n\nimport gradio as gr\n\nfrom modules import chat, shared, ui_chat\nfrom modules.logging_colors import logger\nfrom modules.ui import create_refresh_button\nfrom modules.utils import gradio\n\ntry:\n from TTS.api import TTS\n from TTS.utils.synthesizer import Synthesizer\nexcept ModuleNotFoundError:\n logger.error(\n \"Could not find the TTS module. Make sure to install the requirements for the coqui_tts extension.\"\n \"\\n\"\n \"\\nLinux / Mac:\\npip install -r extensions/coqui_tts/requirements.txt\\n\"\n \"\\nWindows:\\npip install -r extensions\\\\coqui_tts\\\\requirements.txt\\n\"\n \"\\n\"\n \"If you used the one-click installer, paste the command above in the terminal window launched after running the \\\"cmd_\\\" script. 
On Windows, that's \\\"cmd_windows.bat\\\".\"\n )\n\n raise\n\n\nparams = {\n \"activate\": True,\n \"autoplay\": True,\n \"show_text\": False,\n \"remove_trailing_dots\": False,\n \"voice\": \"female_01.wav\",\n \"language\": \"English\",\n \"model_name\": \"tts_models/multilingual/multi-dataset/xtts_v2\",\n \"device\": \"cuda\"\n}\n\nthis_dir = str(Path(__file__).parent.resolve())\nmodel = None\nwith open(Path(f\"{this_dir}/languages.json\"), encoding='utf8') as f:\n languages = json.load(f)\n\n\ndef get_available_voices():\n return sorted([voice.name for voice in Path(f\"{this_dir}/voices\").glob(\"*.wav\")])\n\n\ndef preprocess(raw_input):\n raw_input = html.unescape(raw_input)\n # raw_input = raw_input.strip(\"\\\"\")\n return raw_input\n\n\ndef new_split_into_sentences(self, text):\n sentences = self.seg.segment(text)\n if params['remove_trailing_dots']:\n sentences_without_dots = []\n for sentence in sentences:\n if sentence.endswith('.') and not sentence.endswith('...'):\n sentence = sentence[:-1]\n\n sentences_without_dots.append(sentence)\n\n return sentences_without_dots\n else:\n return sentences\n\n\nSynthesizer.split_into_sentences = new_split_into_sentences\n\n\ndef load_model():\n model = TTS(params[\"model_name\"]).to(params[\"device\"])\n return model\n\n\ndef remove_tts_from_history(history):\n for i, entry in enumerate(history['internal']):\n history['visible'][i] = [history['visible'][i][0], entry[1]]\n\n return history\n\n\ndef toggle_text_in_history(history):\n for i, entry in enumerate(history['visible']):\n visible_reply = entry[1]\n if visible_reply.startswith('<audio'):\n if params['show_text']:\n reply = history['internal'][i][1]\n history['visible'][i] = [history['visible'][i][0], f\"{visible_reply.split('</audio>')[0]}</audio>\\n\\n{reply}\"]\n else:\n history['visible'][i] = [history['visible'][i][0], f\"{visible_reply.split('</audio>')[0]}</audio>\"]\n\n return history\n\n\ndef random_sentence():\n with open(Path(\"extensions/coqui_tts/harvard_sentences.txt\")) as f:\n return random.choice(list(f))\n\n\ndef voice_preview(string):\n string = html.unescape(string) or random_sentence()\n\n output_file = Path('extensions/coqui_tts/outputs/voice_preview.wav')\n model.tts_to_file(\n text=string,\n file_path=output_file,\n speaker_wav=[f\"{this_dir}/voices/{params['voice']}\"],\n language=languages[params[\"language\"]]\n )\n\n return f'<audio src=\"file/{output_file.as_posix()}?{int(time.time())}\" controls autoplay></audio>'\n\n\ndef history_modifier(history):\n # Remove autoplay from the last reply\n if len(history['internal']) > 0:\n history['visible'][-1] = [\n history['visible'][-1][0],\n history['visible'][-1][1].replace('controls autoplay>', 'controls>')\n ]\n\n return history\n\n\ndef state_modifier(state):\n if not params['activate']:\n return state\n\n state['stream'] = False\n return state\n\n\ndef input_modifier(string, state):\n if not params['activate']:\n return string\n\n shared.processing_message = \"*Is recording a voice message...*\"\n return string\n\n\ndef output_modifier(string, state):\n if not params['activate']:\n return string\n\n original_string = string\n string = preprocess(html.unescape(string))\n if string == '':\n string = '*Empty reply, try regenerating*'\n else:\n output_file = Path(f'extensions/coqui_tts/outputs/{state[\"character_menu\"]}_{int(time.time())}.wav')\n model.tts_to_file(\n text=string,\n file_path=output_file,\n speaker_wav=[f\"{this_dir}/voices/{params['voice']}\"],\n language=languages[params[\"language\"]]\n 
)\n\n autoplay = 'autoplay' if params['autoplay'] else ''\n string = f'<audio src=\"file/{output_file.as_posix()}\" controls {autoplay}></audio>'\n if params['show_text']:\n string += f'\\n\\n{original_string}'\n\n shared.processing_message = \"*Is typing...*\"\n return string\n\n\ndef custom_css():\n path_to_css = Path(f\"{this_dir}/style.css\")\n return open(path_to_css, 'r').read()\n\n\ndef setup():\n global model\n print(\"[XTTS] Loading XTTS...\")\n model = load_model()\n print(\"[XTTS] Done!\")\n Path(f\"{this_dir}/outputs\").mkdir(parents=True, exist_ok=True)\n\n\ndef ui():\n with gr.Accordion(\"Coqui TTS (XTTSv2)\"):\n with gr.Row():\n activate = gr.Checkbox(value=params['activate'], label='Activate TTS')\n autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically')\n\n with gr.Row():\n show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player')\n remove_trailing_dots = gr.Checkbox(value=params['remove_trailing_dots'], label='Remove trailing \".\" from text segments before converting to audio')\n\n with gr.Row():\n with gr.Row():\n voice = gr.Dropdown(get_available_voices(), label=\"Voice wav\", value=params[\"voice\"])\n create_refresh_button(voice, lambda: None, lambda: {'choices': get_available_voices(), 'value': params[\"voice\"]}, 'refresh-button')\n\n language = gr.Dropdown(languages.keys(), label=\"Language\", value=params[\"language\"])\n\n with gr.Row():\n preview_text = gr.Text(show_label=False, placeholder=\"Preview text\", elem_id=\"silero_preview_text\")\n preview_play = gr.Button(\"Preview\")\n preview_audio = gr.HTML(visible=False)\n\n with gr.Row():\n convert = gr.Button('Permanently replace audios with the message texts')\n convert_cancel = gr.Button('Cancel', visible=False)\n convert_confirm = gr.Button('Confirm (cannot be undone)', variant=\"stop\", visible=False)\n\n # Convert history with confirmation\n convert_arr = [convert_confirm, convert, convert_cancel]\n convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)\n convert_confirm.click(\n lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr).then(\n remove_tts_from_history, gradio('history'), gradio('history')).then(\n chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(\n chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))\n\n convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)\n\n # Toggle message text in history\n show_text.change(\n lambda x: params.update({\"show_text\": x}), show_text, None).then(\n toggle_text_in_history, gradio('history'), gradio('history')).then(\n chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(\n chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))\n\n # Event functions to update the parameters in the backend\n activate.change(lambda x: params.update({\"activate\": x}), activate, None)\n autoplay.change(lambda x: params.update({\"autoplay\": x}), autoplay, None)\n remove_trailing_dots.change(lambda x: params.update({\"remove_trailing_dots\": x}), remove_trailing_dots, None)\n voice.change(lambda x: params.update({\"voice\": x}), voice, None)\n language.change(lambda x: params.update({\"language\": x}), language, None)\n\n # Play preview\n preview_text.submit(voice_preview, preview_text, preview_audio)\n preview_play.click(voice_preview, 
preview_text, preview_audio)\n", "path": "extensions/coqui_tts/script.py"}], "after_files": [{"content": "import os\nimport html\nimport json\nimport random\nimport time\nfrom pathlib import Path\n\nimport gradio as gr\n\nfrom modules import chat, shared, ui_chat\nfrom modules.logging_colors import logger\nfrom modules.ui import create_refresh_button\nfrom modules.utils import gradio\n\ntry:\n from TTS.api import TTS\n from TTS.utils.synthesizer import Synthesizer\nexcept ModuleNotFoundError:\n logger.error(\n \"Could not find the TTS module. Make sure to install the requirements for the coqui_tts extension.\"\n \"\\n\"\n \"\\nLinux / Mac:\\npip install -r extensions/coqui_tts/requirements.txt\\n\"\n \"\\nWindows:\\npip install -r extensions\\\\coqui_tts\\\\requirements.txt\\n\"\n \"\\n\"\n \"If you used the one-click installer, paste the command above in the terminal window launched after running the \\\"cmd_\\\" script. On Windows, that's \\\"cmd_windows.bat\\\".\"\n )\n\n raise\n\nos.environ[\"COQUI_TOS_AGREED\"] = \"1\"\n\nparams = {\n \"activate\": True,\n \"autoplay\": True,\n \"show_text\": False,\n \"remove_trailing_dots\": False,\n \"voice\": \"female_01.wav\",\n \"language\": \"English\",\n \"model_name\": \"tts_models/multilingual/multi-dataset/xtts_v2\",\n \"device\": \"cuda\"\n}\n\nthis_dir = str(Path(__file__).parent.resolve())\nmodel = None\nwith open(Path(f\"{this_dir}/languages.json\"), encoding='utf8') as f:\n languages = json.load(f)\n\n\ndef get_available_voices():\n return sorted([voice.name for voice in Path(f\"{this_dir}/voices\").glob(\"*.wav\")])\n\n\ndef preprocess(raw_input):\n raw_input = html.unescape(raw_input)\n # raw_input = raw_input.strip(\"\\\"\")\n return raw_input\n\n\ndef new_split_into_sentences(self, text):\n sentences = self.seg.segment(text)\n if params['remove_trailing_dots']:\n sentences_without_dots = []\n for sentence in sentences:\n if sentence.endswith('.') and not sentence.endswith('...'):\n sentence = sentence[:-1]\n\n sentences_without_dots.append(sentence)\n\n return sentences_without_dots\n else:\n return sentences\n\n\nSynthesizer.split_into_sentences = new_split_into_sentences\n\n\ndef load_model():\n model = TTS(params[\"model_name\"]).to(params[\"device\"])\n return model\n\n\ndef remove_tts_from_history(history):\n for i, entry in enumerate(history['internal']):\n history['visible'][i] = [history['visible'][i][0], entry[1]]\n\n return history\n\n\ndef toggle_text_in_history(history):\n for i, entry in enumerate(history['visible']):\n visible_reply = entry[1]\n if visible_reply.startswith('<audio'):\n if params['show_text']:\n reply = history['internal'][i][1]\n history['visible'][i] = [history['visible'][i][0], f\"{visible_reply.split('</audio>')[0]}</audio>\\n\\n{reply}\"]\n else:\n history['visible'][i] = [history['visible'][i][0], f\"{visible_reply.split('</audio>')[0]}</audio>\"]\n\n return history\n\n\ndef random_sentence():\n with open(Path(\"extensions/coqui_tts/harvard_sentences.txt\")) as f:\n return random.choice(list(f))\n\n\ndef voice_preview(string):\n string = html.unescape(string) or random_sentence()\n\n output_file = Path('extensions/coqui_tts/outputs/voice_preview.wav')\n model.tts_to_file(\n text=string,\n file_path=output_file,\n speaker_wav=[f\"{this_dir}/voices/{params['voice']}\"],\n language=languages[params[\"language\"]]\n )\n\n return f'<audio src=\"file/{output_file.as_posix()}?{int(time.time())}\" controls autoplay></audio>'\n\n\ndef history_modifier(history):\n # Remove autoplay from the last reply\n if 
len(history['internal']) > 0:\n history['visible'][-1] = [\n history['visible'][-1][0],\n history['visible'][-1][1].replace('controls autoplay>', 'controls>')\n ]\n\n return history\n\n\ndef state_modifier(state):\n if not params['activate']:\n return state\n\n state['stream'] = False\n return state\n\n\ndef input_modifier(string, state):\n if not params['activate']:\n return string\n\n shared.processing_message = \"*Is recording a voice message...*\"\n return string\n\n\ndef output_modifier(string, state):\n if not params['activate']:\n return string\n\n original_string = string\n string = preprocess(html.unescape(string))\n if string == '':\n string = '*Empty reply, try regenerating*'\n else:\n output_file = Path(f'extensions/coqui_tts/outputs/{state[\"character_menu\"]}_{int(time.time())}.wav')\n model.tts_to_file(\n text=string,\n file_path=output_file,\n speaker_wav=[f\"{this_dir}/voices/{params['voice']}\"],\n language=languages[params[\"language\"]]\n )\n\n autoplay = 'autoplay' if params['autoplay'] else ''\n string = f'<audio src=\"file/{output_file.as_posix()}\" controls {autoplay}></audio>'\n if params['show_text']:\n string += f'\\n\\n{original_string}'\n\n shared.processing_message = \"*Is typing...*\"\n return string\n\n\ndef custom_css():\n path_to_css = Path(f\"{this_dir}/style.css\")\n return open(path_to_css, 'r').read()\n\n\ndef setup():\n global model\n print(\"[XTTS] Loading XTTS...\")\n model = load_model()\n print(\"[XTTS] Done!\")\n Path(f\"{this_dir}/outputs\").mkdir(parents=True, exist_ok=True)\n\n\ndef ui():\n with gr.Accordion(\"Coqui TTS (XTTSv2)\"):\n with gr.Row():\n activate = gr.Checkbox(value=params['activate'], label='Activate TTS')\n autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically')\n\n with gr.Row():\n show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player')\n remove_trailing_dots = gr.Checkbox(value=params['remove_trailing_dots'], label='Remove trailing \".\" from text segments before converting to audio')\n\n with gr.Row():\n with gr.Row():\n voice = gr.Dropdown(get_available_voices(), label=\"Voice wav\", value=params[\"voice\"])\n create_refresh_button(voice, lambda: None, lambda: {'choices': get_available_voices(), 'value': params[\"voice\"]}, 'refresh-button')\n\n language = gr.Dropdown(languages.keys(), label=\"Language\", value=params[\"language\"])\n\n with gr.Row():\n preview_text = gr.Text(show_label=False, placeholder=\"Preview text\", elem_id=\"silero_preview_text\")\n preview_play = gr.Button(\"Preview\")\n preview_audio = gr.HTML(visible=False)\n\n with gr.Row():\n convert = gr.Button('Permanently replace audios with the message texts')\n convert_cancel = gr.Button('Cancel', visible=False)\n convert_confirm = gr.Button('Confirm (cannot be undone)', variant=\"stop\", visible=False)\n\n # Convert history with confirmation\n convert_arr = [convert_confirm, convert, convert_cancel]\n convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)\n convert_confirm.click(\n lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr).then(\n remove_tts_from_history, gradio('history'), gradio('history')).then(\n chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(\n chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))\n\n convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], 
None, convert_arr)\n\n # Toggle message text in history\n show_text.change(\n lambda x: params.update({\"show_text\": x}), show_text, None).then(\n toggle_text_in_history, gradio('history'), gradio('history')).then(\n chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(\n chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))\n\n # Event functions to update the parameters in the backend\n activate.change(lambda x: params.update({\"activate\": x}), activate, None)\n autoplay.change(lambda x: params.update({\"autoplay\": x}), autoplay, None)\n remove_trailing_dots.change(lambda x: params.update({\"remove_trailing_dots\": x}), remove_trailing_dots, None)\n voice.change(lambda x: params.update({\"voice\": x}), voice, None)\n language.change(lambda x: params.update({\"language\": x}), language, None)\n\n # Play preview\n preview_text.submit(voice_preview, preview_text, preview_audio)\n preview_play.click(voice_preview, preview_text, preview_audio)\n", "path": "extensions/coqui_tts/script.py"}]} | 3,387 | 105 |
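Editor's note (not part of the dataset row above): the entire fix in this record is the `COQUI_TOS_AGREED` environment variable being set before the model object is built. A minimal non-interactive load, reconstructed from the golden diff and the `params` defaults shown in the record, might look like the sketch below; that the TTS package honours this variable is asserted by the diff itself and is not independently verified here.

```python
# Non-interactive XTTS load reconstructed from the golden diff and the record's
# params dict. The env var must be set before TTS() is constructed, otherwise
# the interactive ToS [y/n] prompt blocks a headless start.
import os

os.environ["COQUI_TOS_AGREED"] = "1"

from TTS.api import TTS  # imported after the env var on purpose

model = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to("cuda")
```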
gh_patches_debug_14850 | rasdani/github-patches | git_diff | liqd__adhocracy4-893 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
django-admin: place comment's is_blocked next to is_censored
**URL:**
**user:**
**expected behaviour:** As is_removed, is_censored and is_blocked have very similar behaviour, the is_blocked field should be next to the first two fields, in the detail view as well as in the list view.
**behaviour:**
**important screensize:**
**device & browser:**
**Comment/Question:**
Screenshot?


--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `adhocracy4/comments/admin.py`
Content:
```
1 from django.contrib import admin
2
3 from .models import Comment
4
5
6 @admin.register(Comment)
7 class CommentAdmin(admin.ModelAdmin):
8 fields = (
9 'content_type', 'content_object', 'comment', 'is_removed',
10 'is_censored', 'is_moderator_marked', 'creator', 'comment_categories',
11 'is_blocked'
12 )
13 readonly_fields = ('creator', 'content_type', 'content_object')
14 list_display = (
15 '__str__', 'creator', 'is_removed', 'is_censored', 'created',
16 'is_blocked'
17 )
18 search_fields = ('comment',)
19 date_hierarchy = 'created'
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/adhocracy4/comments/admin.py b/adhocracy4/comments/admin.py
--- a/adhocracy4/comments/admin.py
+++ b/adhocracy4/comments/admin.py
@@ -7,13 +7,13 @@
class CommentAdmin(admin.ModelAdmin):
fields = (
'content_type', 'content_object', 'comment', 'is_removed',
- 'is_censored', 'is_moderator_marked', 'creator', 'comment_categories',
- 'is_blocked'
+ 'is_censored', 'is_blocked', 'is_moderator_marked',
+ 'creator', 'comment_categories'
)
readonly_fields = ('creator', 'content_type', 'content_object')
list_display = (
- '__str__', 'creator', 'is_removed', 'is_censored', 'created',
- 'is_blocked'
+ '__str__', 'creator', 'is_removed', 'is_censored',
+ 'is_blocked', 'created'
)
search_fields = ('comment',)
date_hierarchy = 'created'
| {"golden_diff": "diff --git a/adhocracy4/comments/admin.py b/adhocracy4/comments/admin.py\n--- a/adhocracy4/comments/admin.py\n+++ b/adhocracy4/comments/admin.py\n@@ -7,13 +7,13 @@\n class CommentAdmin(admin.ModelAdmin):\n fields = (\n 'content_type', 'content_object', 'comment', 'is_removed',\n- 'is_censored', 'is_moderator_marked', 'creator', 'comment_categories',\n- 'is_blocked'\n+ 'is_censored', 'is_blocked', 'is_moderator_marked',\n+ 'creator', 'comment_categories'\n )\n readonly_fields = ('creator', 'content_type', 'content_object')\n list_display = (\n- '__str__', 'creator', 'is_removed', 'is_censored', 'created',\n- 'is_blocked'\n+ '__str__', 'creator', 'is_removed', 'is_censored',\n+ 'is_blocked', 'created'\n )\n search_fields = ('comment',)\n date_hierarchy = 'created'\n", "issue": "django-admin: place comment's is_blocked next to is_censored\n**URL:** \r\n**user:** \r\n**expected behaviour:** As is_removed, is_censored and is_blocked have a very similar behaviour, the is_blocked field should be next to the first two fields. In the detail as well as in the list view\r\n**behaviour:** \r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** \r\n\r\nScreenshot?\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom .models import Comment\n\n\[email protected](Comment)\nclass CommentAdmin(admin.ModelAdmin):\n fields = (\n 'content_type', 'content_object', 'comment', 'is_removed',\n 'is_censored', 'is_moderator_marked', 'creator', 'comment_categories',\n 'is_blocked'\n )\n readonly_fields = ('creator', 'content_type', 'content_object')\n list_display = (\n '__str__', 'creator', 'is_removed', 'is_censored', 'created',\n 'is_blocked'\n )\n search_fields = ('comment',)\n date_hierarchy = 'created'\n", "path": "adhocracy4/comments/admin.py"}], "after_files": [{"content": "from django.contrib import admin\n\nfrom .models import Comment\n\n\[email protected](Comment)\nclass CommentAdmin(admin.ModelAdmin):\n fields = (\n 'content_type', 'content_object', 'comment', 'is_removed',\n 'is_censored', 'is_blocked', 'is_moderator_marked',\n 'creator', 'comment_categories'\n )\n readonly_fields = ('creator', 'content_type', 'content_object')\n list_display = (\n '__str__', 'creator', 'is_removed', 'is_censored',\n 'is_blocked', 'created'\n )\n search_fields = ('comment',)\n date_hierarchy = 'created'\n", "path": "adhocracy4/comments/admin.py"}]} | 679 | 224 |
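Editor's note (not part of the dataset row above): because the verification cell stores the patched file only as an escaped string, the resulting `CommentAdmin` is reproduced below in readable form. It is reconstructed by applying the golden diff to the `admin.py` shown in the record; only the ordering of the flag fields changes, not any behaviour.

```python
# adhocracy4/comments/admin.py after the golden diff is applied.
from django.contrib import admin

from .models import Comment


@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    fields = (
        'content_type', 'content_object', 'comment', 'is_removed',
        'is_censored', 'is_blocked', 'is_moderator_marked',
        'creator', 'comment_categories'
    )
    readonly_fields = ('creator', 'content_type', 'content_object')
    list_display = (
        '__str__', 'creator', 'is_removed', 'is_censored',
        'is_blocked', 'created'
    )
    search_fields = ('comment',)
    date_hierarchy = 'created'
```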
gh_patches_debug_14622 | rasdani/github-patches | git_diff | mlflow__mlflow-3976 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] await_registration_for isn't always overridable when registering model version
### Willingness to contribute
The MLflow Community encourages bug fix contributions. Would you or another member of your organization be willing to contribute a fix for this bug to the MLflow code base?
- [ ] Yes. I can contribute a fix for this bug independently.
- [x] Yes. I would be willing to contribute a fix for this bug with guidance from the MLflow community.
- [ ] No. I cannot contribute a bug fix at this time.
### System information
- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: No
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Databricks
- **MLflow installed from (source or binary)**: NA
- **MLflow version (run ``mlflow --version``)**: 1.13.1
- **Python version**: 3.6
- **npm version, if running the dev UI**: NA
- **Exact command to reproduce**: mlflow.spark.log_model(spark_model=model, registered_model_name = "test-model", artifact_path="test-model", await_registration_for=600)
### Describe the problem
Apologies if this is desired behaviour, but passing await_registration_for didn't seem to have an effect in the case above for us - the default wait of 300 seconds was always used. Looking into mlflow/tracking/_model_registry/fluent.py, it seems like [L69 doesn't pass this forward to client.create_model down a certain route](https://github.com/mlflow/mlflow/blob/75c4bfa1c0dda546c491adfed670f8a80b9f3ccf/mlflow/tracking/_model_registry/fluent.py#L69).
If there is a case where this shouldn't be allowed, it isn't called out in the documentation.
### Code to reproduce issue
mlflow.start_run()
mlflow.spark.log_model(spark_model=model, registered_model_name = "test-model", artifact_path="test-model", await_registration_for=600)
### Other info / logs
2021/01/12 16:43:18 INFO mlflow.tracking._model_registry.client: Waiting up to 300 seconds for model version to finish creation. Model name: test-model, version 1
### What component(s), interfaces, languages, and integrations does this bug affect?
Components
- [ ] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [ ] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [x] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs
- [ ] `area/server-infra`: MLflow server, JavaScript dev server
- [x] `area/tracking`: Tracking Service, tracking client APIs, autologging
Interface
- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
Language
- [ ] `language/r`: R APIs and clients
- [ ] `language/java`: Java APIs and clients
- [ ] `language/new`: Proposals for new client languages
Integrations
- [ ] `integrations/azure`: Azure and Azure ML integrations
- [ ] `integrations/sagemaker`: SageMaker integrations
- [ ] `integrations/databricks`: Databricks integrations
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/tracking/_model_registry/fluent.py`
Content:
```
1 from mlflow.exceptions import MlflowException
2 from mlflow.protos.databricks_pb2 import RESOURCE_ALREADY_EXISTS, ErrorCode
3 from mlflow.store.artifact.runs_artifact_repo import RunsArtifactRepository
4 from mlflow.tracking import MlflowClient
5 from mlflow.utils.logging_utils import eprint
6 from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
7
8
9 def register_model(model_uri, name, await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS):
10 """
11 Create a new model version in model registry for the model files specified by ``model_uri``.
12 Note that this method assumes the model registry backend URI is the same as that of the
13 tracking backend.
14
15 :param model_uri: URI referring to the MLmodel directory. Use a ``runs:/`` URI if you want to
16 record the run ID with the model in model registry. ``models:/`` URIs are
17 currently not supported.
18 :param name: Name of the registered model under which to create a new model version. If a
19 registered model with the given name does not exist, it will be created
20 automatically.
21 :param await_registration_for: Number of seconds to wait for the model version to finish
22 being created and is in ``READY`` status. By default, the function
23 waits for five minutes. Specify 0 or None to skip waiting.
24 :return: Single :py:class:`mlflow.entities.model_registry.ModelVersion` object created by
25 backend.
26
27 .. code-block:: python
28 :caption: Example
29
30 import mlflow.sklearn
31 from sklearn.ensemble import RandomForestRegressor
32
33 mlflow.set_tracking_uri("sqlite:////tmp/mlruns.db")
34 params = {"n_estimators": 3, "random_state": 42}
35
36 # Log MLflow entities
37 with mlflow.start_run() as run:
38 rfr = RandomForestRegressor(**params).fit([[0, 1]], [1])
39 mlflow.log_params(params)
40 mlflow.sklearn.log_model(rfr, artifact_path="sklearn-model")
41
42 model_uri = "runs:/{}/sklearn-model".format(run.info.run_id)
43 mv = mlflow.register_model(model_uri, "RandomForestRegressionModel")
44 print("Name: {}".format(mv.name))
45 print("Version: {}".format(mv.version))
46
47 .. code-block:: text
48 :caption: Output
49
50 Name: RandomForestRegressionModel
51 Version: 1
52 """
53 client = MlflowClient()
54 try:
55 create_model_response = client.create_registered_model(name)
56 eprint("Successfully registered model '%s'." % create_model_response.name)
57 except MlflowException as e:
58 if e.error_code == ErrorCode.Name(RESOURCE_ALREADY_EXISTS):
59 eprint(
60 "Registered model '%s' already exists. Creating a new version of this model..."
61 % name
62 )
63 else:
64 raise e
65
66 if RunsArtifactRepository.is_runs_uri(model_uri):
67 source = RunsArtifactRepository.get_underlying_uri(model_uri)
68 (run_id, _) = RunsArtifactRepository.parse_runs_uri(model_uri)
69 create_version_response = client.create_model_version(name, source, run_id)
70 else:
71 create_version_response = client.create_model_version(
72 name, source=model_uri, run_id=None, await_creation_for=await_registration_for
73 )
74 eprint(
75 "Created version '{version}' of model '{model_name}'.".format(
76 version=create_version_response.version, model_name=create_version_response.name
77 )
78 )
79 return create_version_response
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlflow/tracking/_model_registry/fluent.py b/mlflow/tracking/_model_registry/fluent.py
--- a/mlflow/tracking/_model_registry/fluent.py
+++ b/mlflow/tracking/_model_registry/fluent.py
@@ -66,7 +66,9 @@
if RunsArtifactRepository.is_runs_uri(model_uri):
source = RunsArtifactRepository.get_underlying_uri(model_uri)
(run_id, _) = RunsArtifactRepository.parse_runs_uri(model_uri)
- create_version_response = client.create_model_version(name, source, run_id)
+ create_version_response = client.create_model_version(
+ name, source, run_id, await_creation_for=await_registration_for
+ )
else:
create_version_response = client.create_model_version(
name, source=model_uri, run_id=None, await_creation_for=await_registration_for
| {"golden_diff": "diff --git a/mlflow/tracking/_model_registry/fluent.py b/mlflow/tracking/_model_registry/fluent.py\n--- a/mlflow/tracking/_model_registry/fluent.py\n+++ b/mlflow/tracking/_model_registry/fluent.py\n@@ -66,7 +66,9 @@\n if RunsArtifactRepository.is_runs_uri(model_uri):\n source = RunsArtifactRepository.get_underlying_uri(model_uri)\n (run_id, _) = RunsArtifactRepository.parse_runs_uri(model_uri)\n- create_version_response = client.create_model_version(name, source, run_id)\n+ create_version_response = client.create_model_version(\n+ name, source, run_id, await_creation_for=await_registration_for\n+ )\n else:\n create_version_response = client.create_model_version(\n name, source=model_uri, run_id=None, await_creation_for=await_registration_for\n", "issue": "[BUG] await_registration_for isn't always overridable when registering model version\n### Willingness to contribute\r\nThe MLflow Community encourages bug fix contributions. Would you or another member of your organization be willing to contribute a fix for this bug to the MLflow code base?\r\n\r\n- [ ] Yes. I can contribute a fix for this bug independently.\r\n- [x] Yes. I would be willing to contribute a fix for this bug with guidance from the MLflow community.\r\n- [ ] No. I cannot contribute a bug fix at this time.\r\n\r\n### System information\r\n- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: No\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Databricks\r\n- **MLflow installed from (source or binary)**: NA\r\n- **MLflow version (run ``mlflow --version``)**: 1.13.1\r\n- **Python version**: 3.6\r\n- **npm version, if running the dev UI**: NA\r\n- **Exact command to reproduce**: mlflow.spark.log_model(spark_model=model, registered_model_name = \"test-model\", artifact_path=\"test-model\", await_registration_for=600)\r\n\r\n### Describe the problem\r\nApologies if this is desired behaviour, but passing await_registration_for didn't seem to have an effect in the case above for us - the default wait of 300 seconds was always used. Looking in to the mlflow/tracking/_model_registry/fluent.py it seems like [L69 doesn't pass this forward to client.create_model down a certain route](https://github.com/mlflow/mlflow/blob/75c4bfa1c0dda546c491adfed670f8a80b9f3ccf/mlflow/tracking/_model_registry/fluent.py#L69).\r\n\r\nIf there is a case where this shouldn't be allowed it isn't called out in the documentation.\r\n\r\n### Code to reproduce issue\r\nmlflow.start_run()\r\nmlflow.spark.log_model(spark_model=model, registered_model_name = \"test-model\", artifact_path=\"test-model\", await_registration_for=600)\r\n\r\n### Other info / logs\r\n2021/01/12 16:43:18 INFO mlflow.tracking._model_registry.client: Waiting up to 300 seconds for model version to finish creation. 
Model name: test-model, version 1\r\n\r\n### What component(s), interfaces, languages, and integrations does this bug affect?\r\nComponents \r\n- [ ] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [x] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs\r\n- [ ] `area/server-infra`: MLflow server, JavaScript dev server\r\n- [x] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\nInterface \r\n- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\nLanguage \r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\nIntegrations\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\r\n\n", "before_files": [{"content": "from mlflow.exceptions import MlflowException\nfrom mlflow.protos.databricks_pb2 import RESOURCE_ALREADY_EXISTS, ErrorCode\nfrom mlflow.store.artifact.runs_artifact_repo import RunsArtifactRepository\nfrom mlflow.tracking import MlflowClient\nfrom mlflow.utils.logging_utils import eprint\nfrom mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS\n\n\ndef register_model(model_uri, name, await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS):\n \"\"\"\n Create a new model version in model registry for the model files specified by ``model_uri``.\n Note that this method assumes the model registry backend URI is the same as that of the\n tracking backend.\n\n :param model_uri: URI referring to the MLmodel directory. Use a ``runs:/`` URI if you want to\n record the run ID with the model in model registry. ``models:/`` URIs are\n currently not supported.\n :param name: Name of the registered model under which to create a new model version. If a\n registered model with the given name does not exist, it will be created\n automatically.\n :param await_registration_for: Number of seconds to wait for the model version to finish\n being created and is in ``READY`` status. By default, the function\n waits for five minutes. Specify 0 or None to skip waiting.\n :return: Single :py:class:`mlflow.entities.model_registry.ModelVersion` object created by\n backend.\n\n .. 
code-block:: python\n :caption: Example\n\n import mlflow.sklearn\n from sklearn.ensemble import RandomForestRegressor\n\n mlflow.set_tracking_uri(\"sqlite:////tmp/mlruns.db\")\n params = {\"n_estimators\": 3, \"random_state\": 42}\n\n # Log MLflow entities\n with mlflow.start_run() as run:\n rfr = RandomForestRegressor(**params).fit([[0, 1]], [1])\n mlflow.log_params(params)\n mlflow.sklearn.log_model(rfr, artifact_path=\"sklearn-model\")\n\n model_uri = \"runs:/{}/sklearn-model\".format(run.info.run_id)\n mv = mlflow.register_model(model_uri, \"RandomForestRegressionModel\")\n print(\"Name: {}\".format(mv.name))\n print(\"Version: {}\".format(mv.version))\n\n .. code-block:: text\n :caption: Output\n\n Name: RandomForestRegressionModel\n Version: 1\n \"\"\"\n client = MlflowClient()\n try:\n create_model_response = client.create_registered_model(name)\n eprint(\"Successfully registered model '%s'.\" % create_model_response.name)\n except MlflowException as e:\n if e.error_code == ErrorCode.Name(RESOURCE_ALREADY_EXISTS):\n eprint(\n \"Registered model '%s' already exists. Creating a new version of this model...\"\n % name\n )\n else:\n raise e\n\n if RunsArtifactRepository.is_runs_uri(model_uri):\n source = RunsArtifactRepository.get_underlying_uri(model_uri)\n (run_id, _) = RunsArtifactRepository.parse_runs_uri(model_uri)\n create_version_response = client.create_model_version(name, source, run_id)\n else:\n create_version_response = client.create_model_version(\n name, source=model_uri, run_id=None, await_creation_for=await_registration_for\n )\n eprint(\n \"Created version '{version}' of model '{model_name}'.\".format(\n version=create_version_response.version, model_name=create_version_response.name\n )\n )\n return create_version_response\n", "path": "mlflow/tracking/_model_registry/fluent.py"}], "after_files": [{"content": "from mlflow.exceptions import MlflowException\nfrom mlflow.protos.databricks_pb2 import RESOURCE_ALREADY_EXISTS, ErrorCode\nfrom mlflow.store.artifact.runs_artifact_repo import RunsArtifactRepository\nfrom mlflow.tracking import MlflowClient\nfrom mlflow.utils.logging_utils import eprint\nfrom mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS\n\n\ndef register_model(model_uri, name, await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS):\n \"\"\"\n Create a new model version in model registry for the model files specified by ``model_uri``.\n Note that this method assumes the model registry backend URI is the same as that of the\n tracking backend.\n\n :param model_uri: URI referring to the MLmodel directory. Use a ``runs:/`` URI if you want to\n record the run ID with the model in model registry. ``models:/`` URIs are\n currently not supported.\n :param name: Name of the registered model under which to create a new model version. If a\n registered model with the given name does not exist, it will be created\n automatically.\n :param await_registration_for: Number of seconds to wait for the model version to finish\n being created and is in ``READY`` status. By default, the function\n waits for five minutes. Specify 0 or None to skip waiting.\n :return: Single :py:class:`mlflow.entities.model_registry.ModelVersion` object created by\n backend.\n\n .. 
code-block:: python\n :caption: Example\n\n import mlflow.sklearn\n from sklearn.ensemble import RandomForestRegressor\n\n mlflow.set_tracking_uri(\"sqlite:////tmp/mlruns.db\")\n params = {\"n_estimators\": 3, \"random_state\": 42}\n\n # Log MLflow entities\n with mlflow.start_run() as run:\n rfr = RandomForestRegressor(**params).fit([[0, 1]], [1])\n mlflow.log_params(params)\n mlflow.sklearn.log_model(rfr, artifact_path=\"sklearn-model\")\n\n model_uri = \"runs:/{}/sklearn-model\".format(run.info.run_id)\n mv = mlflow.register_model(model_uri, \"RandomForestRegressionModel\")\n print(\"Name: {}\".format(mv.name))\n print(\"Version: {}\".format(mv.version))\n\n .. code-block:: text\n :caption: Output\n\n Name: RandomForestRegressionModel\n Version: 1\n \"\"\"\n client = MlflowClient()\n try:\n create_model_response = client.create_registered_model(name)\n eprint(\"Successfully registered model '%s'.\" % create_model_response.name)\n except MlflowException as e:\n if e.error_code == ErrorCode.Name(RESOURCE_ALREADY_EXISTS):\n eprint(\n \"Registered model '%s' already exists. Creating a new version of this model...\"\n % name\n )\n else:\n raise e\n\n if RunsArtifactRepository.is_runs_uri(model_uri):\n source = RunsArtifactRepository.get_underlying_uri(model_uri)\n (run_id, _) = RunsArtifactRepository.parse_runs_uri(model_uri)\n create_version_response = client.create_model_version(\n name, source, run_id, await_creation_for=await_registration_for\n )\n else:\n create_version_response = client.create_model_version(\n name, source=model_uri, run_id=None, await_creation_for=await_registration_for\n )\n eprint(\n \"Created version '{version}' of model '{model_name}'.\".format(\n version=create_version_response.version, model_name=create_version_response.name\n )\n )\n return create_version_response\n", "path": "mlflow/tracking/_model_registry/fluent.py"}]} | 2,052 | 184 |
gh_patches_debug_512 | rasdani/github-patches | git_diff | Textualize__rich-1426 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] reconfiguring rich via pytest at runtime
**Describe the bug**
Trying to reconfigure rich when my code is run via pytest, using the following pytest hook:
```python
# conftest.py
def pytest_configure():
try:
import rich
except ImportError:
pass
else:
rich.reconfigure(soft_wrap=False)
```
and invoking my tests with
```shell
pytest
```
I got the following error:
```
INTERNALERROR> Traceback (most recent call last):
INTERNALERROR> File "/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/_pytest/main.py", line 265, in wrap_session
INTERNALERROR> config._do_configure()
INTERNALERROR> File "/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/_pytest/config/__init__.py", line 982, in _do_configure
INTERNALERROR> self.hook.pytest_configure.call_historic(kwargs=dict(config=self))
INTERNALERROR> File "/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/pluggy/hooks.py", line 308, in call_historic
INTERNALERROR> res = self._hookexec(self, self.get_hookimpls(), kwargs)
INTERNALERROR> File "/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/pluggy/manager.py", line 93, in _hookexec
INTERNALERROR> return self._inner_hookexec(hook, methods, kwargs)
INTERNALERROR> File "/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/pluggy/manager.py", line 84, in <lambda>
INTERNALERROR> self._inner_hookexec = lambda hook, methods, kwargs: hook.multicall(
INTERNALERROR> File "/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/pluggy/callers.py", line 208, in _multicall
INTERNALERROR> return outcome.get_result()
INTERNALERROR> File "/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/pluggy/callers.py", line 80, in get_result
INTERNALERROR> raise ex[1].with_traceback(ex[2])
INTERNALERROR> File "/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/pluggy/callers.py", line 187, in _multicall
INTERNALERROR> res = hook_impl.function(*args)
INTERNALERROR> File "/Users/robcleme/dev/my/wxc/tests/conftest.py", line 33, in pytest_configure
INTERNALERROR> rich.reconfigure(soft_wrap=False)
INTERNALERROR> File "/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/rich/__init__.py", line 45, in reconfigure
INTERNALERROR> _console.__dict__ = new_console.__dict__
INTERNALERROR> AttributeError: 'NoneType' object has no attribute '__dict__'
```
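A possible call-site workaround, sketched on the assumption that the crash comes from the module-level `_console` still being `None` when `reconfigure` is called: force the global console to be created first.

```python
# conftest.py - illustrative workaround only, not a fix for rich itself
def pytest_configure():
    try:
        import rich
    except ImportError:
        pass
    else:
        rich.get_console()  # lazily creates the module-level Console if it doesn't exist yet
        rich.reconfigure(soft_wrap=False)
```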
**Platform**
OsX
**Diagnose**
```
python -m rich.diagnose
python -m rich._windows
pip freeze | grep rich
```
```
╭───────────────────────── <class 'rich.console.Console'> ─────────────────────────╮
│ A high level console interface. │
│ │
│ ╭──────────────────────────────────────────────────────────────────────────────╮ │
│ │ <console width=185 ColorSystem.TRUECOLOR> │ │
│ ╰──────────────────────────────────────────────────────────────────────────────╯ │
│ │
│ color_system = 'truecolor' │
│ encoding = 'utf-8' │
│ file = <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'> │
│ height = 52 │
│ is_alt_screen = False │
│ is_dumb_terminal = False │
│ is_interactive = True │
│ is_jupyter = False │
│ is_terminal = True │
│ legacy_windows = False │
│ no_color = False │
│ options = ConsoleOptions( │
│ size=ConsoleDimensions(width=185, height=52), │
│ legacy_windows=False, │
│ min_width=1, │
│ max_width=185, │
│ is_terminal=True, │
│ encoding='utf-8', │
│ max_height=52, │
│ justify=None, │
│ overflow=None, │
│ no_wrap=False, │
│ highlight=None, │
│ markup=None, │
│ height=None │
│ ) │
│ quiet = False │
│ record = False │
│ safe_box = True │
│ size = ConsoleDimensions(width=185, height=52) │
│ soft_wrap = False │
│ stderr = False │
│ style = None │
│ tab_size = 8 │
│ width = 185 │
╰──────────────────────────────────────────────────────────────────────────────────╯
platform="Darwin"
WindowsConsoleFeatures(vt=False, truecolor=False)
rich==10.7.0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rich/__init__.py`
Content:
```
1 """Rich text and beautiful formatting in the terminal."""
2
3 import os
4 from typing import IO, TYPE_CHECKING, Any, Optional
5
6 from ._extension import load_ipython_extension
7
8 __all__ = ["get_console", "reconfigure", "print", "inspect"]
9
10 if TYPE_CHECKING:
11 from .console import Console
12
13 # Global console used by alternative print
14 _console: Optional["Console"] = None
15
16 _IMPORT_CWD = os.path.abspath(os.getcwd())
17
18
19 def get_console() -> "Console":
20 """Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console,
21 and hasn't been explicitly given one.
22
23 Returns:
24 Console: A console instance.
25 """
26 global _console
27 if _console is None:
28 from .console import Console
29
30 _console = Console()
31
32 return _console
33
34
35 def reconfigure(*args: Any, **kwargs: Any) -> None:
36 """Reconfigures the global console by replacing it with another.
37
38 Args:
39 console (Console): Replacement console instance.
40 """
41 from rich.console import Console
42
43 new_console = Console(*args, **kwargs)
44 _console.__dict__ = new_console.__dict__
45
46
47 def print(
48 *objects: Any,
49 sep: str = " ",
50 end: str = "\n",
51 file: Optional[IO[str]] = None,
52 flush: bool = False
53 ) -> None:
54 r"""Print object(s) supplied via positional arguments.
55 This function has an identical signature to the built-in print.
56 For more advanced features, see the :class:`~rich.console.Console` class.
57
58 Args:
59 sep (str, optional): Separator between printed objects. Defaults to " ".
60 end (str, optional): Character to write at end of output. Defaults to "\\n".
61 file (IO[str], optional): File to write to, or None for stdout. Defaults to None.
62 flush (bool, optional): Has no effect as Rich always flushes output. Defaults to False.
63
64 """
65 from .console import Console
66
67 write_console = get_console() if file is None else Console(file=file)
68 return write_console.print(*objects, sep=sep, end=end)
69
70
71 def inspect(
72 obj: Any,
73 *,
74 console: Optional["Console"] = None,
75 title: Optional[str] = None,
76 help: bool = False,
77 methods: bool = False,
78 docs: bool = True,
79 private: bool = False,
80 dunder: bool = False,
81 sort: bool = True,
82 all: bool = False,
83 value: bool = True
84 ) -> None:
85 """Inspect any Python object.
86
87 * inspect(<OBJECT>) to see summarized info.
88 * inspect(<OBJECT>, methods=True) to see methods.
89 * inspect(<OBJECT>, help=True) to see full (non-abbreviated) help.
90 * inspect(<OBJECT>, private=True) to see private attributes (single underscore).
91 * inspect(<OBJECT>, dunder=True) to see attributes beginning with double underscore.
92 * inspect(<OBJECT>, all=True) to see all attributes.
93
94 Args:
95 obj (Any): An object to inspect.
96 title (str, optional): Title to display over inspect result, or None use type. Defaults to None.
97 help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.
98 methods (bool, optional): Enable inspection of callables. Defaults to False.
99 docs (bool, optional): Also render doc strings. Defaults to True.
100 private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.
101 dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.
102 sort (bool, optional): Sort attributes alphabetically. Defaults to True.
103 all (bool, optional): Show all attributes. Defaults to False.
104 value (bool, optional): Pretty print value. Defaults to True.
105 """
106 _console = console or get_console()
107 from rich._inspect import Inspect
108
109 # Special case for inspect(inspect)
110 is_inspect = obj is inspect
111
112 _inspect = Inspect(
113 obj,
114 title=title,
115 help=is_inspect or help,
116 methods=is_inspect or methods,
117 docs=is_inspect or docs,
118 private=private,
119 dunder=dunder,
120 sort=sort,
121 all=all,
122 value=value,
123 )
124 _console.print(_inspect)
125
126
127 if __name__ == "__main__": # pragma: no cover
128 print("Hello, **World**")
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rich/__init__.py b/rich/__init__.py
--- a/rich/__init__.py
+++ b/rich/__init__.py
@@ -41,6 +41,7 @@
from rich.console import Console
new_console = Console(*args, **kwargs)
+ _console = get_console()
_console.__dict__ = new_console.__dict__
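Assuming the added `_console = get_console()` line above, `reconfigure` now initialises the global console itself, so a sketch like the following should no longer raise on a fresh interpreter:

```python
import rich

rich.reconfigure(soft_wrap=False)          # previously raised AttributeError before any console existed
rich.print(rich.get_console().soft_wrap)   # expected output: False
```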
| {"golden_diff": "diff --git a/rich/__init__.py b/rich/__init__.py\n--- a/rich/__init__.py\n+++ b/rich/__init__.py\n@@ -41,6 +41,7 @@\n from rich.console import Console\n \n new_console = Console(*args, **kwargs)\n+ _console = get_console()\n _console.__dict__ = new_console.__dict__\n", "issue": "[BUG] reconfiguring rich via pytest at runtime\n**Describe the bug**\r\n\r\nTrying to reconfigure rich when my code is run via pytest, using the following pytest hook:\r\n\r\n```python\r\n# conftest.py\r\n\r\ndef pytest_configure():\r\n try:\r\n import rich\r\n except ImportError:\r\n pass\r\n else:\r\n rich.reconfigure(soft_wrap=False)\r\n```\r\n\r\nand invoking my tests with\r\n```shell\r\npytest\r\n```\r\n\r\nI got the following error:\r\n\r\n```\r\nINTERNALERROR> Traceback (most recent call last):\r\nINTERNALERROR> File \"/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/_pytest/main.py\", line 265, in wrap_session\r\nINTERNALERROR> config._do_configure()\r\nINTERNALERROR> File \"/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/_pytest/config/__init__.py\", line 982, in _do_configure\r\nINTERNALERROR> self.hook.pytest_configure.call_historic(kwargs=dict(config=self))\r\nINTERNALERROR> File \"/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/pluggy/hooks.py\", line 308, in call_historic\r\nINTERNALERROR> res = self._hookexec(self, self.get_hookimpls(), kwargs)\r\nINTERNALERROR> File \"/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/pluggy/manager.py\", line 93, in _hookexec\r\nINTERNALERROR> return self._inner_hookexec(hook, methods, kwargs)\r\nINTERNALERROR> File \"/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/pluggy/manager.py\", line 84, in <lambda>\r\nINTERNALERROR> self._inner_hookexec = lambda hook, methods, kwargs: hook.multicall(\r\nINTERNALERROR> File \"/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/pluggy/callers.py\", line 208, in _multicall\r\nINTERNALERROR> return outcome.get_result()\r\nINTERNALERROR> File \"/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/pluggy/callers.py\", line 80, in get_result\r\nINTERNALERROR> raise ex[1].with_traceback(ex[2])\r\nINTERNALERROR> File \"/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/pluggy/callers.py\", line 187, in _multicall\r\nINTERNALERROR> res = hook_impl.function(*args)\r\nINTERNALERROR> File \"/Users/robcleme/dev/my/wxc/tests/conftest.py\", line 33, in pytest_configure\r\nINTERNALERROR> rich.reconfigure(soft_wrap=False)\r\nINTERNALERROR> File \"/Users/robcleme/.pyenv/versions/wxc-dev/lib/python3.9/site-packages/rich/__init__.py\", line 45, in reconfigure\r\nINTERNALERROR> _console.__dict__ = new_console.__dict__\r\nINTERNALERROR> AttributeError: 'NoneType' object has no attribute '__dict__'\r\n```\r\n\r\n**Platform**\r\nOsX\r\n\r\n**Diagnose**\r\n\r\n```\r\npython -m rich.diagnose\r\npython -m rich._windows\r\npip freeze | grep rich\r\n```\r\n```\r\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 <class 'rich.console.Console'> \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\r\n\u2502 A high level console interface. 
\u2502\r\n\u2502 \u2502\r\n\u2502 \u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e \u2502\r\n\u2502 \u2502 <console width=185 ColorSystem.TRUECOLOR> \u2502 \u2502\r\n\u2502 \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2502\r\n\u2502 \u2502\r\n\u2502 color_system = 'truecolor' \u2502\r\n\u2502 encoding = 'utf-8' \u2502\r\n\u2502 file = <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'> \u2502\r\n\u2502 height = 52 \u2502\r\n\u2502 is_alt_screen = False \u2502\r\n\u2502 is_dumb_terminal = False \u2502\r\n\u2502 is_interactive = True \u2502\r\n\u2502 is_jupyter = False \u2502\r\n\u2502 is_terminal = True \u2502\r\n\u2502 legacy_windows = False \u2502\r\n\u2502 no_color = False \u2502\r\n\u2502 options = ConsoleOptions( \u2502\r\n\u2502 size=ConsoleDimensions(width=185, height=52), \u2502\r\n\u2502 legacy_windows=False, \u2502\r\n\u2502 min_width=1, \u2502\r\n\u2502 max_width=185, \u2502\r\n\u2502 is_terminal=True, \u2502\r\n\u2502 encoding='utf-8', \u2502\r\n\u2502 max_height=52, \u2502\r\n\u2502 justify=None, \u2502\r\n\u2502 overflow=None, \u2502\r\n\u2502 no_wrap=False, \u2502\r\n\u2502 highlight=None, \u2502\r\n\u2502 markup=None, \u2502\r\n\u2502 height=None \u2502\r\n\u2502 ) \u2502\r\n\u2502 quiet = False \u2502\r\n\u2502 record = False \u2502\r\n\u2502 safe_box = True \u2502\r\n\u2502 size = ConsoleDimensions(width=185, height=52) \u2502\r\n\u2502 soft_wrap = False \u2502\r\n\u2502 stderr = False \u2502\r\n\u2502 style = None \u2502\r\n\u2502 tab_size = 8 \u2502\r\n\u2502 width = 185 \u2502\r\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\r\nplatform=\"Darwin\"\r\nWindowsConsoleFeatures(vt=False, truecolor=False)\r\nrich==10.7.0\r\n```\r\n\r\n\n", "before_files": [{"content": "\"\"\"Rich text and beautiful formatting in the terminal.\"\"\"\n\nimport os\nfrom typing import IO, TYPE_CHECKING, Any, Optional\n\nfrom ._extension import load_ipython_extension\n\n__all__ = [\"get_console\", \"reconfigure\", \"print\", \"inspect\"]\n\nif TYPE_CHECKING:\n from .console import Console\n\n# Global console used by alternative print\n_console: Optional[\"Console\"] = None\n\n_IMPORT_CWD = os.path.abspath(os.getcwd())\n\n\ndef get_console() -> \"Console\":\n \"\"\"Get a global 
:class:`~rich.console.Console` instance. This function is used when Rich requires a Console,\n and hasn't been explicitly given one.\n\n Returns:\n Console: A console instance.\n \"\"\"\n global _console\n if _console is None:\n from .console import Console\n\n _console = Console()\n\n return _console\n\n\ndef reconfigure(*args: Any, **kwargs: Any) -> None:\n \"\"\"Reconfigures the global console by replacing it with another.\n\n Args:\n console (Console): Replacement console instance.\n \"\"\"\n from rich.console import Console\n\n new_console = Console(*args, **kwargs)\n _console.__dict__ = new_console.__dict__\n\n\ndef print(\n *objects: Any,\n sep: str = \" \",\n end: str = \"\\n\",\n file: Optional[IO[str]] = None,\n flush: bool = False\n) -> None:\n r\"\"\"Print object(s) supplied via positional arguments.\n This function has an identical signature to the built-in print.\n For more advanced features, see the :class:`~rich.console.Console` class.\n\n Args:\n sep (str, optional): Separator between printed objects. Defaults to \" \".\n end (str, optional): Character to write at end of output. Defaults to \"\\\\n\".\n file (IO[str], optional): File to write to, or None for stdout. Defaults to None.\n flush (bool, optional): Has no effect as Rich always flushes output. Defaults to False.\n\n \"\"\"\n from .console import Console\n\n write_console = get_console() if file is None else Console(file=file)\n return write_console.print(*objects, sep=sep, end=end)\n\n\ndef inspect(\n obj: Any,\n *,\n console: Optional[\"Console\"] = None,\n title: Optional[str] = None,\n help: bool = False,\n methods: bool = False,\n docs: bool = True,\n private: bool = False,\n dunder: bool = False,\n sort: bool = True,\n all: bool = False,\n value: bool = True\n) -> None:\n \"\"\"Inspect any Python object.\n\n * inspect(<OBJECT>) to see summarized info.\n * inspect(<OBJECT>, methods=True) to see methods.\n * inspect(<OBJECT>, help=True) to see full (non-abbreviated) help.\n * inspect(<OBJECT>, private=True) to see private attributes (single underscore).\n * inspect(<OBJECT>, dunder=True) to see attributes beginning with double underscore.\n * inspect(<OBJECT>, all=True) to see all attributes.\n\n Args:\n obj (Any): An object to inspect.\n title (str, optional): Title to display over inspect result, or None use type. Defaults to None.\n help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.\n methods (bool, optional): Enable inspection of callables. Defaults to False.\n docs (bool, optional): Also render doc strings. Defaults to True.\n private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.\n dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.\n sort (bool, optional): Sort attributes alphabetically. Defaults to True.\n all (bool, optional): Show all attributes. Defaults to False.\n value (bool, optional): Pretty print value. 
Defaults to True.\n \"\"\"\n _console = console or get_console()\n from rich._inspect import Inspect\n\n # Special case for inspect(inspect)\n is_inspect = obj is inspect\n\n _inspect = Inspect(\n obj,\n title=title,\n help=is_inspect or help,\n methods=is_inspect or methods,\n docs=is_inspect or docs,\n private=private,\n dunder=dunder,\n sort=sort,\n all=all,\n value=value,\n )\n _console.print(_inspect)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n print(\"Hello, **World**\")\n", "path": "rich/__init__.py"}], "after_files": [{"content": "\"\"\"Rich text and beautiful formatting in the terminal.\"\"\"\n\nimport os\nfrom typing import IO, TYPE_CHECKING, Any, Optional\n\nfrom ._extension import load_ipython_extension\n\n__all__ = [\"get_console\", \"reconfigure\", \"print\", \"inspect\"]\n\nif TYPE_CHECKING:\n from .console import Console\n\n# Global console used by alternative print\n_console: Optional[\"Console\"] = None\n\n_IMPORT_CWD = os.path.abspath(os.getcwd())\n\n\ndef get_console() -> \"Console\":\n \"\"\"Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console,\n and hasn't been explicitly given one.\n\n Returns:\n Console: A console instance.\n \"\"\"\n global _console\n if _console is None:\n from .console import Console\n\n _console = Console()\n\n return _console\n\n\ndef reconfigure(*args: Any, **kwargs: Any) -> None:\n \"\"\"Reconfigures the global console by replacing it with another.\n\n Args:\n console (Console): Replacement console instance.\n \"\"\"\n from rich.console import Console\n\n new_console = Console(*args, **kwargs)\n _console = get_console()\n _console.__dict__ = new_console.__dict__\n\n\ndef print(\n *objects: Any,\n sep: str = \" \",\n end: str = \"\\n\",\n file: Optional[IO[str]] = None,\n flush: bool = False\n) -> None:\n r\"\"\"Print object(s) supplied via positional arguments.\n This function has an identical signature to the built-in print.\n For more advanced features, see the :class:`~rich.console.Console` class.\n\n Args:\n sep (str, optional): Separator between printed objects. Defaults to \" \".\n end (str, optional): Character to write at end of output. Defaults to \"\\\\n\".\n file (IO[str], optional): File to write to, or None for stdout. Defaults to None.\n flush (bool, optional): Has no effect as Rich always flushes output. Defaults to False.\n\n \"\"\"\n from .console import Console\n\n write_console = get_console() if file is None else Console(file=file)\n return write_console.print(*objects, sep=sep, end=end)\n\n\ndef inspect(\n obj: Any,\n *,\n console: Optional[\"Console\"] = None,\n title: Optional[str] = None,\n help: bool = False,\n methods: bool = False,\n docs: bool = True,\n private: bool = False,\n dunder: bool = False,\n sort: bool = True,\n all: bool = False,\n value: bool = True\n) -> None:\n \"\"\"Inspect any Python object.\n\n * inspect(<OBJECT>) to see summarized info.\n * inspect(<OBJECT>, methods=True) to see methods.\n * inspect(<OBJECT>, help=True) to see full (non-abbreviated) help.\n * inspect(<OBJECT>, private=True) to see private attributes (single underscore).\n * inspect(<OBJECT>, dunder=True) to see attributes beginning with double underscore.\n * inspect(<OBJECT>, all=True) to see all attributes.\n\n Args:\n obj (Any): An object to inspect.\n title (str, optional): Title to display over inspect result, or None use type. Defaults to None.\n help (bool, optional): Show full help text rather than just first paragraph. 
Defaults to False.\n methods (bool, optional): Enable inspection of callables. Defaults to False.\n docs (bool, optional): Also render doc strings. Defaults to True.\n private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.\n dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.\n sort (bool, optional): Sort attributes alphabetically. Defaults to True.\n all (bool, optional): Show all attributes. Defaults to False.\n value (bool, optional): Pretty print value. Defaults to True.\n \"\"\"\n _console = console or get_console()\n from rich._inspect import Inspect\n\n # Special case for inspect(inspect)\n is_inspect = obj is inspect\n\n _inspect = Inspect(\n obj,\n title=title,\n help=is_inspect or help,\n methods=is_inspect or methods,\n docs=is_inspect or docs,\n private=private,\n dunder=dunder,\n sort=sort,\n all=all,\n value=value,\n )\n _console.print(_inspect)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n print(\"Hello, **World**\")\n", "path": "rich/__init__.py"}]} | 2,769 | 86 |
gh_patches_debug_16281 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1684 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
is_skill_claim validation is not working for Anonymous authentication
## Version
4.13.0
## Describe the bug
When the skill bot doesn't have credentials set, the [is_skill_claim validation](https://github.com/microsoft/botbuilder-python/blob/main/libraries/botframework-connector/botframework/connector/auth/skill_validation.py#L62) returns false because it checks the `version_claim` before checking the `anonymous_skill_app_id`.
In [.NET](https://github.com/microsoft/botbuilder-dotnet/blob/main/libraries/Microsoft.Bot.Connector/Authentication/SkillValidation.cs#L87) and [JS](https://github.com/microsoft/botbuilder-js/blob/main/libraries/botframework-connector/src/auth/skillValidation.ts#L89) SDKs, the order of the validation is the opposite.

This causes the EndOfConversation activities not to be sent when returning `DialogTurnResult(DialogTurnStatus.Complete)` from a dialog.
This issue affects local testing when no credentials are provided for the bot.
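A small sketch that exercises the difference, using the module paths shown in the file below (an anonymous skill presents only an app id claim and no `ver` claim, so the check order decides the outcome):

```python
from botframework.connector.auth.authentication_constants import AuthenticationConstants
from botframework.connector.auth.skill_validation import SkillValidation

# Claims as produced for an anonymous (credential-less) skill: app id only, no version claim.
anonymous_claims = {
    AuthenticationConstants.APP_ID_CLAIM: AuthenticationConstants.ANONYMOUS_SKILL_APP_ID
}

# Expected True (as in the .NET/JS SDKs); 4.13.0 returns False because the
# version-claim check runs before the anonymous app id check.
print(SkillValidation.is_skill_claim(anonymous_claims))
```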
## To Reproduce
Steps to reproduce the behavior:
1. Using the following bots: [WaterfallHostBotDotNet](https://github.com/microsoft/BotFramework-FunctionalTests/tree/main/Bots/DotNet/Consumers/CodeFirst/WaterfallHostBot) and [WaterfallSkillBotPython](https://github.com/microsoft/BotFramework-FunctionalTests/tree/main/Bots/Python/Skills/CodeFirst/WaterfallSkillBot):
2. Run the bots.
3. Open BotFramework Emulator and connect to your host bot.
4. Follow the dialog selecting: `normal` delivery mode, `Waterfall` skills, `3. WaterfallSkillBotPython`, `1. Cards` skill action and `end` option.
5. See how the bots stop responding.

## Expected behavior
When selecting `end` (returning `DialogTurnResult(DialogTurnStatus.Complete)`), the skill bot must end the dialog by sending an EOC activity to the host bot so that it can continue the dialog flow.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botframework-connector/botframework/connector/auth/skill_validation.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from datetime import timedelta
5 from typing import Dict, Union
6
7 import jwt
8
9 from .authentication_configuration import AuthenticationConfiguration
10 from .authentication_constants import AuthenticationConstants
11 from .claims_identity import ClaimsIdentity
12 from .credential_provider import CredentialProvider
13 from .government_constants import GovernmentConstants
14 from .verify_options import VerifyOptions
15 from .jwt_token_extractor import JwtTokenExtractor
16 from .channel_provider import ChannelProvider
17
18
19 class SkillValidation:
20 # TODO: Remove circular dependcies after C# refactor
21 # pylint: disable=import-outside-toplevel
22
23 """
24 Validates JWT tokens sent to and from a Skill.
25 """
26
27 _token_validation_parameters = VerifyOptions(
28 issuer=[
29 "https://sts.windows.net/d6d49420-f39b-4df7-a1dc-d59a935871db/", # Auth v3.1, 1.0 token
30 "https://login.microsoftonline.com/d6d49420-f39b-4df7-a1dc-d59a935871db/v2.0", # Auth v3.1, 2.0 token
31 "https://sts.windows.net/f8cdef31-a31e-4b4a-93e4-5f571e91255a/", # Auth v3.2, 1.0 token
32 "https://login.microsoftonline.com/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0", # Auth v3.2, 2.0 token
33 "https://sts.windows.net/cab8a31a-1906-4287-a0d8-4eef66b95f6e/", # Auth for US Gov, 1.0 token
34 "https://login.microsoftonline.us/cab8a31a-1906-4287-a0d8-4eef66b95f6e/v2.0", # Auth for US Gov, 2.0 token
35 "https://login.microsoftonline.us/f8cdef31-a31e-4b4a-93e4-5f571e91255a/", # Auth for US Gov, 1.0 token
36 "https://login.microsoftonline.us/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0", # Auth for US Gov, 2.0 token
37 ],
38 audience=None,
39 clock_tolerance=timedelta(minutes=5),
40 ignore_expiration=False,
41 )
42
43 @staticmethod
44 def is_skill_token(auth_header: str) -> bool:
45 """
46 Determines if a given Auth header is from from a skill to bot or bot to skill request.
47 :param auth_header: Bearer Token, in the "Bearer [Long String]" Format.
48 :return bool:
49 """
50 from .jwt_token_validation import JwtTokenValidation
51
52 if not JwtTokenValidation.is_valid_token_format(auth_header):
53 return False
54
55 bearer_token = auth_header.split(" ")[1]
56
57 # Parse the Big Long String into an actual token.
58 token = jwt.decode(bearer_token, verify=False)
59 return SkillValidation.is_skill_claim(token)
60
61 @staticmethod
62 def is_skill_claim(claims: Dict[str, object]) -> bool:
63 """
64 Checks if the given list of claims represents a skill.
65 :param claims: A dict of claims.
66 :return bool:
67 """
68 if AuthenticationConstants.VERSION_CLAIM not in claims:
69 return False
70
71 if (
72 claims.get(AuthenticationConstants.APP_ID_CLAIM, None)
73 == AuthenticationConstants.ANONYMOUS_SKILL_APP_ID
74 ):
75 return True
76
77 audience = claims.get(AuthenticationConstants.AUDIENCE_CLAIM)
78
79 # The audience is https://api.botframework.com and not an appId.
80 if (
81 not audience
82 or audience == AuthenticationConstants.TO_BOT_FROM_CHANNEL_TOKEN_ISSUER
83 ):
84 return False
85
86 from .jwt_token_validation import JwtTokenValidation
87
88 app_id = JwtTokenValidation.get_app_id_from_claims(claims)
89
90 if not app_id:
91 return False
92
93 # Skill claims must contain and app ID and the AppID must be different than the audience.
94 return app_id != audience
95
96 @staticmethod
97 async def authenticate_channel_token(
98 auth_header: str,
99 credentials: CredentialProvider,
100 channel_service_or_provider: Union[str, ChannelProvider],
101 channel_id: str,
102 auth_configuration: AuthenticationConfiguration,
103 ) -> ClaimsIdentity:
104 if auth_configuration is None:
105 raise Exception(
106 "auth_configuration cannot be None in SkillValidation.authenticate_channel_token"
107 )
108
109 from .jwt_token_validation import JwtTokenValidation
110
111 if isinstance(channel_service_or_provider, ChannelProvider):
112 is_gov = channel_service_or_provider.is_government()
113 else:
114 is_gov = JwtTokenValidation.is_government(channel_service_or_provider)
115
116 open_id_metadata_url = (
117 GovernmentConstants.TO_BOT_FROM_EMULATOR_OPEN_ID_METADATA_URL
118 if is_gov
119 else AuthenticationConstants.TO_BOT_FROM_EMULATOR_OPEN_ID_METADATA_URL
120 )
121
122 token_extractor = JwtTokenExtractor(
123 SkillValidation._token_validation_parameters,
124 open_id_metadata_url,
125 AuthenticationConstants.ALLOWED_SIGNING_ALGORITHMS,
126 )
127
128 identity = await token_extractor.get_identity_from_auth_header(
129 auth_header, channel_id, auth_configuration.required_endorsements
130 )
131 await SkillValidation._validate_identity(identity, credentials)
132
133 return identity
134
135 @staticmethod
136 def create_anonymous_skill_claim():
137 """
138 Creates a ClaimsIdentity for an anonymous (unauthenticated) skill.
139 :return ClaimsIdentity:
140 """
141 return ClaimsIdentity(
142 {
143 AuthenticationConstants.APP_ID_CLAIM: AuthenticationConstants.ANONYMOUS_SKILL_APP_ID
144 },
145 True,
146 AuthenticationConstants.ANONYMOUS_AUTH_TYPE,
147 )
148
149 @staticmethod
150 async def _validate_identity(
151 identity: ClaimsIdentity, credentials: CredentialProvider
152 ):
153 if not identity:
154 # No valid identity. Not Authorized.
155 raise PermissionError("Invalid Identity")
156
157 if not identity.is_authenticated:
158 # The token is in some way invalid. Not Authorized.
159 raise PermissionError("Token Not Authenticated")
160
161 version_claim = identity.claims.get(AuthenticationConstants.VERSION_CLAIM)
162 if not version_claim:
163 # No version claim
164 raise PermissionError(
165 f"'{AuthenticationConstants.VERSION_CLAIM}' claim is required on skill Tokens."
166 )
167
168 # Look for the "aud" claim, but only if issued from the Bot Framework
169 audience_claim = identity.claims.get(AuthenticationConstants.AUDIENCE_CLAIM)
170 if not audience_claim:
171 # Claim is not present or doesn't have a value. Not Authorized.
172 raise PermissionError(
173 f"'{AuthenticationConstants.AUDIENCE_CLAIM}' claim is required on skill Tokens."
174 )
175
176 if not await credentials.is_valid_appid(audience_claim):
177 # The AppId is not valid. Not Authorized.
178 raise PermissionError("Invalid audience.")
179
180 from .jwt_token_validation import JwtTokenValidation
181
182 app_id = JwtTokenValidation.get_app_id_from_claims(identity.claims)
183 if not app_id:
184 # Invalid AppId
185 raise PermissionError("Invalid app_id.")
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botframework-connector/botframework/connector/auth/skill_validation.py b/libraries/botframework-connector/botframework/connector/auth/skill_validation.py
--- a/libraries/botframework-connector/botframework/connector/auth/skill_validation.py
+++ b/libraries/botframework-connector/botframework/connector/auth/skill_validation.py
@@ -65,15 +65,15 @@
:param claims: A dict of claims.
:return bool:
"""
- if AuthenticationConstants.VERSION_CLAIM not in claims:
- return False
-
if (
claims.get(AuthenticationConstants.APP_ID_CLAIM, None)
== AuthenticationConstants.ANONYMOUS_SKILL_APP_ID
):
return True
+ if AuthenticationConstants.VERSION_CLAIM not in claims:
+ return False
+
audience = claims.get(AuthenticationConstants.AUDIENCE_CLAIM)
# The audience is https://api.botframework.com and not an appId.
| {"golden_diff": "diff --git a/libraries/botframework-connector/botframework/connector/auth/skill_validation.py b/libraries/botframework-connector/botframework/connector/auth/skill_validation.py\n--- a/libraries/botframework-connector/botframework/connector/auth/skill_validation.py\n+++ b/libraries/botframework-connector/botframework/connector/auth/skill_validation.py\n@@ -65,15 +65,15 @@\n :param claims: A dict of claims.\n :return bool:\n \"\"\"\n- if AuthenticationConstants.VERSION_CLAIM not in claims:\n- return False\n-\n if (\n claims.get(AuthenticationConstants.APP_ID_CLAIM, None)\n == AuthenticationConstants.ANONYMOUS_SKILL_APP_ID\n ):\n return True\n \n+ if AuthenticationConstants.VERSION_CLAIM not in claims:\n+ return False\n+\n audience = claims.get(AuthenticationConstants.AUDIENCE_CLAIM)\n \n # The audience is https://api.botframework.com and not an appId.\n", "issue": "is_skill_claim validation is not working for Anonymous authentication\n## Version\r\n4.13.0\r\n\r\n## Describe the bug\r\nWhen the skill bot doesn't have credentials set, the [is_skill_claim validation](https://github.com/microsoft/botbuilder-python/blob/main/libraries/botframework-connector/botframework/connector/auth/skill_validation.py#L62) returns false because it checks the `version_claim` before checking the `anonymous_skill_app_id`.\r\nIn [.NET](https://github.com/microsoft/botbuilder-dotnet/blob/main/libraries/Microsoft.Bot.Connector/Authentication/SkillValidation.cs#L87) and [JS](https://github.com/microsoft/botbuilder-js/blob/main/libraries/botframework-connector/src/auth/skillValidation.ts#L89) SDKs, the order of the validation is the opposite.\r\n\r\n\r\n\r\n\r\nThis is causing that the EndOfConversation activities are not sent when returning `DialogTurnResult(DialogTurnStatus.Complete)` from a dialog.\r\nThis issue affects local testing when no credentials are provided for the bot.\r\n\r\n\r\n## To Reproduce\r\nSteps to reproduce the behavior:\r\n1. Using the following bots: [WaterfallHostBotDotNet](https://github.com/microsoft/BotFramework-FunctionalTests/tree/main/Bots/DotNet/Consumers/CodeFirst/WaterfallHostBot) and [WaterfallSkillBotPython](https://github.com/microsoft/BotFramework-FunctionalTests/tree/main/Bots/Python/Skills/CodeFirst/WaterfallSkillBot):\r\n2. Run the bots.\r\n3. Open BotFramework Emulator and connect to your host bot.\r\n4. Follow the dialog selecting: `normal` delivery mode, `Waterfall` skills, `3. WaterfallSkillBotPython`, `1. Cards` skill action and `end` option.\r\n5. See how the bots stop responding.\r\n\r\n\r\n\r\n\r\n## Expected behavior\r\nWhen selecting `end` (return `DialogTurnResult(DialogTurnStatus.Complete))` the skill bot must end the dialog sending an EOC activity to the host bot so this one can continue the dialog flow.\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom datetime import timedelta\nfrom typing import Dict, Union\n\nimport jwt\n\nfrom .authentication_configuration import AuthenticationConfiguration\nfrom .authentication_constants import AuthenticationConstants\nfrom .claims_identity import ClaimsIdentity\nfrom .credential_provider import CredentialProvider\nfrom .government_constants import GovernmentConstants\nfrom .verify_options import VerifyOptions\nfrom .jwt_token_extractor import JwtTokenExtractor\nfrom .channel_provider import ChannelProvider\n\n\nclass SkillValidation:\n # TODO: Remove circular dependcies after C# refactor\n # pylint: disable=import-outside-toplevel\n\n \"\"\"\n Validates JWT tokens sent to and from a Skill.\n \"\"\"\n\n _token_validation_parameters = VerifyOptions(\n issuer=[\n \"https://sts.windows.net/d6d49420-f39b-4df7-a1dc-d59a935871db/\", # Auth v3.1, 1.0 token\n \"https://login.microsoftonline.com/d6d49420-f39b-4df7-a1dc-d59a935871db/v2.0\", # Auth v3.1, 2.0 token\n \"https://sts.windows.net/f8cdef31-a31e-4b4a-93e4-5f571e91255a/\", # Auth v3.2, 1.0 token\n \"https://login.microsoftonline.com/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0\", # Auth v3.2, 2.0 token\n \"https://sts.windows.net/cab8a31a-1906-4287-a0d8-4eef66b95f6e/\", # Auth for US Gov, 1.0 token\n \"https://login.microsoftonline.us/cab8a31a-1906-4287-a0d8-4eef66b95f6e/v2.0\", # Auth for US Gov, 2.0 token\n \"https://login.microsoftonline.us/f8cdef31-a31e-4b4a-93e4-5f571e91255a/\", # Auth for US Gov, 1.0 token\n \"https://login.microsoftonline.us/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0\", # Auth for US Gov, 2.0 token\n ],\n audience=None,\n clock_tolerance=timedelta(minutes=5),\n ignore_expiration=False,\n )\n\n @staticmethod\n def is_skill_token(auth_header: str) -> bool:\n \"\"\"\n Determines if a given Auth header is from from a skill to bot or bot to skill request.\n :param auth_header: Bearer Token, in the \"Bearer [Long String]\" Format.\n :return bool:\n \"\"\"\n from .jwt_token_validation import JwtTokenValidation\n\n if not JwtTokenValidation.is_valid_token_format(auth_header):\n return False\n\n bearer_token = auth_header.split(\" \")[1]\n\n # Parse the Big Long String into an actual token.\n token = jwt.decode(bearer_token, verify=False)\n return SkillValidation.is_skill_claim(token)\n\n @staticmethod\n def is_skill_claim(claims: Dict[str, object]) -> bool:\n \"\"\"\n Checks if the given list of claims represents a skill.\n :param claims: A dict of claims.\n :return bool:\n \"\"\"\n if AuthenticationConstants.VERSION_CLAIM not in claims:\n return False\n\n if (\n claims.get(AuthenticationConstants.APP_ID_CLAIM, None)\n == AuthenticationConstants.ANONYMOUS_SKILL_APP_ID\n ):\n return True\n\n audience = claims.get(AuthenticationConstants.AUDIENCE_CLAIM)\n\n # The audience is https://api.botframework.com and not an appId.\n if (\n not audience\n or audience == AuthenticationConstants.TO_BOT_FROM_CHANNEL_TOKEN_ISSUER\n ):\n return False\n\n from .jwt_token_validation import JwtTokenValidation\n\n app_id = JwtTokenValidation.get_app_id_from_claims(claims)\n\n if not app_id:\n return False\n\n # Skill claims must contain and app ID and the AppID must be different than the audience.\n return app_id != audience\n\n @staticmethod\n async def authenticate_channel_token(\n auth_header: str,\n credentials: CredentialProvider,\n channel_service_or_provider: Union[str, ChannelProvider],\n channel_id: str,\n auth_configuration: AuthenticationConfiguration,\n ) -> ClaimsIdentity:\n if 
auth_configuration is None:\n raise Exception(\n \"auth_configuration cannot be None in SkillValidation.authenticate_channel_token\"\n )\n\n from .jwt_token_validation import JwtTokenValidation\n\n if isinstance(channel_service_or_provider, ChannelProvider):\n is_gov = channel_service_or_provider.is_government()\n else:\n is_gov = JwtTokenValidation.is_government(channel_service_or_provider)\n\n open_id_metadata_url = (\n GovernmentConstants.TO_BOT_FROM_EMULATOR_OPEN_ID_METADATA_URL\n if is_gov\n else AuthenticationConstants.TO_BOT_FROM_EMULATOR_OPEN_ID_METADATA_URL\n )\n\n token_extractor = JwtTokenExtractor(\n SkillValidation._token_validation_parameters,\n open_id_metadata_url,\n AuthenticationConstants.ALLOWED_SIGNING_ALGORITHMS,\n )\n\n identity = await token_extractor.get_identity_from_auth_header(\n auth_header, channel_id, auth_configuration.required_endorsements\n )\n await SkillValidation._validate_identity(identity, credentials)\n\n return identity\n\n @staticmethod\n def create_anonymous_skill_claim():\n \"\"\"\n Creates a ClaimsIdentity for an anonymous (unauthenticated) skill.\n :return ClaimsIdentity:\n \"\"\"\n return ClaimsIdentity(\n {\n AuthenticationConstants.APP_ID_CLAIM: AuthenticationConstants.ANONYMOUS_SKILL_APP_ID\n },\n True,\n AuthenticationConstants.ANONYMOUS_AUTH_TYPE,\n )\n\n @staticmethod\n async def _validate_identity(\n identity: ClaimsIdentity, credentials: CredentialProvider\n ):\n if not identity:\n # No valid identity. Not Authorized.\n raise PermissionError(\"Invalid Identity\")\n\n if not identity.is_authenticated:\n # The token is in some way invalid. Not Authorized.\n raise PermissionError(\"Token Not Authenticated\")\n\n version_claim = identity.claims.get(AuthenticationConstants.VERSION_CLAIM)\n if not version_claim:\n # No version claim\n raise PermissionError(\n f\"'{AuthenticationConstants.VERSION_CLAIM}' claim is required on skill Tokens.\"\n )\n\n # Look for the \"aud\" claim, but only if issued from the Bot Framework\n audience_claim = identity.claims.get(AuthenticationConstants.AUDIENCE_CLAIM)\n if not audience_claim:\n # Claim is not present or doesn't have a value. Not Authorized.\n raise PermissionError(\n f\"'{AuthenticationConstants.AUDIENCE_CLAIM}' claim is required on skill Tokens.\"\n )\n\n if not await credentials.is_valid_appid(audience_claim):\n # The AppId is not valid. Not Authorized.\n raise PermissionError(\"Invalid audience.\")\n\n from .jwt_token_validation import JwtTokenValidation\n\n app_id = JwtTokenValidation.get_app_id_from_claims(identity.claims)\n if not app_id:\n # Invalid AppId\n raise PermissionError(\"Invalid app_id.\")\n", "path": "libraries/botframework-connector/botframework/connector/auth/skill_validation.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom datetime import timedelta\nfrom typing import Dict, Union\n\nimport jwt\n\nfrom .authentication_configuration import AuthenticationConfiguration\nfrom .authentication_constants import AuthenticationConstants\nfrom .claims_identity import ClaimsIdentity\nfrom .credential_provider import CredentialProvider\nfrom .government_constants import GovernmentConstants\nfrom .verify_options import VerifyOptions\nfrom .jwt_token_extractor import JwtTokenExtractor\nfrom .channel_provider import ChannelProvider\n\n\nclass SkillValidation:\n # TODO: Remove circular dependcies after C# refactor\n # pylint: disable=import-outside-toplevel\n\n \"\"\"\n Validates JWT tokens sent to and from a Skill.\n \"\"\"\n\n _token_validation_parameters = VerifyOptions(\n issuer=[\n \"https://sts.windows.net/d6d49420-f39b-4df7-a1dc-d59a935871db/\", # Auth v3.1, 1.0 token\n \"https://login.microsoftonline.com/d6d49420-f39b-4df7-a1dc-d59a935871db/v2.0\", # Auth v3.1, 2.0 token\n \"https://sts.windows.net/f8cdef31-a31e-4b4a-93e4-5f571e91255a/\", # Auth v3.2, 1.0 token\n \"https://login.microsoftonline.com/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0\", # Auth v3.2, 2.0 token\n \"https://sts.windows.net/cab8a31a-1906-4287-a0d8-4eef66b95f6e/\", # Auth for US Gov, 1.0 token\n \"https://login.microsoftonline.us/cab8a31a-1906-4287-a0d8-4eef66b95f6e/v2.0\", # Auth for US Gov, 2.0 token\n \"https://login.microsoftonline.us/f8cdef31-a31e-4b4a-93e4-5f571e91255a/\", # Auth for US Gov, 1.0 token\n \"https://login.microsoftonline.us/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0\", # Auth for US Gov, 2.0 token\n ],\n audience=None,\n clock_tolerance=timedelta(minutes=5),\n ignore_expiration=False,\n )\n\n @staticmethod\n def is_skill_token(auth_header: str) -> bool:\n \"\"\"\n Determines if a given Auth header is from from a skill to bot or bot to skill request.\n :param auth_header: Bearer Token, in the \"Bearer [Long String]\" Format.\n :return bool:\n \"\"\"\n from .jwt_token_validation import JwtTokenValidation\n\n if not JwtTokenValidation.is_valid_token_format(auth_header):\n return False\n\n bearer_token = auth_header.split(\" \")[1]\n\n # Parse the Big Long String into an actual token.\n token = jwt.decode(bearer_token, verify=False)\n return SkillValidation.is_skill_claim(token)\n\n @staticmethod\n def is_skill_claim(claims: Dict[str, object]) -> bool:\n \"\"\"\n Checks if the given list of claims represents a skill.\n :param claims: A dict of claims.\n :return bool:\n \"\"\"\n if (\n claims.get(AuthenticationConstants.APP_ID_CLAIM, None)\n == AuthenticationConstants.ANONYMOUS_SKILL_APP_ID\n ):\n return True\n\n if AuthenticationConstants.VERSION_CLAIM not in claims:\n return False\n\n audience = claims.get(AuthenticationConstants.AUDIENCE_CLAIM)\n\n # The audience is https://api.botframework.com and not an appId.\n if (\n not audience\n or audience == AuthenticationConstants.TO_BOT_FROM_CHANNEL_TOKEN_ISSUER\n ):\n return False\n\n from .jwt_token_validation import JwtTokenValidation\n\n app_id = JwtTokenValidation.get_app_id_from_claims(claims)\n\n if not app_id:\n return False\n\n # Skill claims must contain and app ID and the AppID must be different than the audience.\n return app_id != audience\n\n @staticmethod\n async def authenticate_channel_token(\n auth_header: str,\n credentials: CredentialProvider,\n channel_service_or_provider: Union[str, ChannelProvider],\n channel_id: str,\n auth_configuration: AuthenticationConfiguration,\n ) -> ClaimsIdentity:\n if 
auth_configuration is None:\n raise Exception(\n \"auth_configuration cannot be None in SkillValidation.authenticate_channel_token\"\n )\n\n from .jwt_token_validation import JwtTokenValidation\n\n if isinstance(channel_service_or_provider, ChannelProvider):\n is_gov = channel_service_or_provider.is_government()\n else:\n is_gov = JwtTokenValidation.is_government(channel_service_or_provider)\n\n open_id_metadata_url = (\n GovernmentConstants.TO_BOT_FROM_EMULATOR_OPEN_ID_METADATA_URL\n if is_gov\n else AuthenticationConstants.TO_BOT_FROM_EMULATOR_OPEN_ID_METADATA_URL\n )\n\n token_extractor = JwtTokenExtractor(\n SkillValidation._token_validation_parameters,\n open_id_metadata_url,\n AuthenticationConstants.ALLOWED_SIGNING_ALGORITHMS,\n )\n\n identity = await token_extractor.get_identity_from_auth_header(\n auth_header, channel_id, auth_configuration.required_endorsements\n )\n await SkillValidation._validate_identity(identity, credentials)\n\n return identity\n\n @staticmethod\n def create_anonymous_skill_claim():\n \"\"\"\n Creates a ClaimsIdentity for an anonymous (unauthenticated) skill.\n :return ClaimsIdentity:\n \"\"\"\n return ClaimsIdentity(\n {\n AuthenticationConstants.APP_ID_CLAIM: AuthenticationConstants.ANONYMOUS_SKILL_APP_ID\n },\n True,\n AuthenticationConstants.ANONYMOUS_AUTH_TYPE,\n )\n\n @staticmethod\n async def _validate_identity(\n identity: ClaimsIdentity, credentials: CredentialProvider\n ):\n if not identity:\n # No valid identity. Not Authorized.\n raise PermissionError(\"Invalid Identity\")\n\n if not identity.is_authenticated:\n # The token is in some way invalid. Not Authorized.\n raise PermissionError(\"Token Not Authenticated\")\n\n version_claim = identity.claims.get(AuthenticationConstants.VERSION_CLAIM)\n if not version_claim:\n # No version claim\n raise PermissionError(\n f\"'{AuthenticationConstants.VERSION_CLAIM}' claim is required on skill Tokens.\"\n )\n\n # Look for the \"aud\" claim, but only if issued from the Bot Framework\n audience_claim = identity.claims.get(AuthenticationConstants.AUDIENCE_CLAIM)\n if not audience_claim:\n # Claim is not present or doesn't have a value. Not Authorized.\n raise PermissionError(\n f\"'{AuthenticationConstants.AUDIENCE_CLAIM}' claim is required on skill Tokens.\"\n )\n\n if not await credentials.is_valid_appid(audience_claim):\n # The AppId is not valid. Not Authorized.\n raise PermissionError(\"Invalid audience.\")\n\n from .jwt_token_validation import JwtTokenValidation\n\n app_id = JwtTokenValidation.get_app_id_from_claims(identity.claims)\n if not app_id:\n # Invalid AppId\n raise PermissionError(\"Invalid app_id.\")\n", "path": "libraries/botframework-connector/botframework/connector/auth/skill_validation.py"}]} | 2,935 | 214 |
gh_patches_debug_911 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSpeech-2364 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Added pre-install doc for G2P and TN modules and updated the dependency version of pypinyin
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import contextlib
15 import inspect
16 import io
17 import os
18 import subprocess as sp
19 import sys
20 from pathlib import Path
21
22 from setuptools import Command
23 from setuptools import find_packages
24 from setuptools import setup
25 from setuptools.command.develop import develop
26 from setuptools.command.install import install
27 from setuptools.command.test import test
28
29 HERE = Path(os.path.abspath(os.path.dirname(__file__)))
30
31 VERSION = '0.0.0'
32 COMMITID = 'none'
33
34 base = [
35 "editdistance",
36 "g2p_en",
37 "g2pM",
38 "h5py",
39 "inflect",
40 "jieba",
41 "jsonlines",
42 "kaldiio",
43 "librosa==0.8.1",
44 "loguru",
45 "matplotlib",
46 "nara_wpe",
47 "onnxruntime==1.10.0",
48 "opencc",
49 "pandas",
50 "paddlenlp",
51 "paddlespeech_feat",
52 "Pillow>=9.0.0",
53 "praatio==5.0.0",
54 "protobuf>=3.1.0, <=3.20.0",
55 "pypinyin",
56 "pypinyin-dict",
57 "python-dateutil",
58 "pyworld==0.2.12",
59 "resampy==0.2.2",
60 "sacrebleu",
61 "scipy",
62 "sentencepiece~=0.1.96",
63 "soundfile~=0.10",
64 "textgrid",
65 "timer",
66 "tqdm",
67 "typeguard",
68 "visualdl",
69 "webrtcvad",
70 "yacs~=0.1.8",
71 "prettytable",
72 "zhon",
73 "colorlog",
74 "pathos == 0.2.8",
75 "braceexpand",
76 "pyyaml",
77 "pybind11",
78 ]
79
80 server = ["fastapi", "uvicorn", "pattern_singleton", "websockets"]
81
82 requirements = {
83 "install":
84 base + server,
85 "develop": [
86 "ConfigArgParse",
87 "coverage",
88 "gpustat",
89 "paddlespeech_ctcdecoders",
90 "phkit",
91 "pypi-kenlm",
92 "snakeviz",
93 "sox",
94 "soxbindings",
95 "unidecode",
96 "yq",
97 "pre-commit",
98 ]
99 }
100
101
102 def check_call(cmd: str, shell=False, executable=None):
103 try:
104 sp.check_call(
105 cmd.split(),
106 shell=shell,
107 executable="/bin/bash" if shell else executable)
108 except sp.CalledProcessError as e:
109 print(
110 f"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:",
111 e.output,
112 file=sys.stderr)
113 raise e
114
115
116 def check_output(cmd: str, shell=False):
117 try:
118 out_bytes = sp.check_output(cmd.split())
119 except sp.CalledProcessError as e:
120 out_bytes = e.output # Output generated before error
121 code = e.returncode # Return code
122 print(
123 f"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:",
124 out_bytes,
125 file=sys.stderr)
126 return out_bytes.strip().decode('utf8')
127
128
129 @contextlib.contextmanager
130 def pushd(new_dir):
131 old_dir = os.getcwd()
132 os.chdir(new_dir)
133 print(new_dir)
134 yield
135 os.chdir(old_dir)
136 print(old_dir)
137
138
139 def read(*names, **kwargs):
140 with io.open(
141 os.path.join(os.path.dirname(__file__), *names),
142 encoding=kwargs.get("encoding", "utf8")) as fp:
143 return fp.read()
144
145
146 def _remove(files: str):
147 for f in files:
148 f.unlink()
149
150
151 ################################# Install ##################################
152
153
154 def _post_install(install_lib_dir):
155 # tools/make
156 tool_dir = HERE / "tools"
157 _remove(tool_dir.glob("*.done"))
158 with pushd(tool_dir):
159 check_call("make")
160 print("tools install.")
161
162 # ctcdecoder
163 ctcdecoder_dir = HERE / 'third_party/ctc_decoders'
164 with pushd(ctcdecoder_dir):
165 check_call("bash -e setup.sh")
166 print("ctcdecoder install.")
167
168
169 class DevelopCommand(develop):
170 def run(self):
171 develop.run(self)
172 # must after develop.run, or pkg install by shell will not see
173 self.execute(_post_install, (self.install_lib, ), msg="Post Install...")
174
175
176 class InstallCommand(install):
177 def run(self):
178 install.run(self)
179
180
181 class TestCommand(test):
182 def finalize_options(self):
183 test.finalize_options(self)
184 self.test_args = []
185 self.test_suite = True
186
187 def run_tests(self):
188 # Run nose ensuring that argv simulates running nosetests directly
189 import nose
190 nose.run_exit(argv=['nosetests', '-w', 'tests'])
191
192
193 # cmd: python setup.py upload
194 class UploadCommand(Command):
195 description = "Build and publish the package."
196 user_options = []
197
198 def initialize_options(self):
199 pass
200
201 def finalize_options(self):
202 pass
203
204 def run(self):
205 try:
206 print("Removing previous dist/ ...")
207 shutil.rmtree(str(HERE / "dist"))
208 except OSError:
209 pass
210 print("Building source distribution...")
211 sp.check_call([sys.executable, "setup.py", "sdist"])
212 print("Uploading package to PyPi...")
213 sp.check_call(["twine", "upload", "dist/*"])
214 sys.exit()
215
216
217 ################################# Version ##################################
218 def write_version_py(filename='paddlespeech/__init__.py'):
219 import paddlespeech
220 if hasattr(paddlespeech,
221 "__version__") and paddlespeech.__version__ == VERSION:
222 return
223 with open(filename, "a") as f:
224 out_str = f"\n__version__ = '{VERSION}'\n"
225 print(out_str)
226 f.write(f"\n__version__ = '{VERSION}'\n")
227
228 COMMITID = check_output("git rev-parse HEAD")
229 with open(filename, 'a') as f:
230 out_str = f"\n__commit__ = '{COMMITID}'\n"
231 print(out_str)
232 f.write(f"\n__commit__ = '{COMMITID}'\n")
233
234 print(f"{inspect.currentframe().f_code.co_name} done")
235
236
237 def remove_version_py(filename='paddlespeech/__init__.py'):
238 with open(filename, "r") as f:
239 lines = f.readlines()
240 with open(filename, "w") as f:
241 for line in lines:
242 if "__version__" in line or "__commit__" in line:
243 continue
244 f.write(line)
245 print(f"{inspect.currentframe().f_code.co_name} done")
246
247
248 @contextlib.contextmanager
249 def version_info():
250 write_version_py()
251 yield
252 remove_version_py()
253
254
255 ################################# Steup ##################################
256 setup_info = dict(
257 # Metadata
258 name='paddlespeech',
259 version=VERSION,
260 author='PaddlePaddle Speech and Language Team',
261 author_email='[email protected]',
262 url='https://github.com/PaddlePaddle/PaddleSpeech',
263 license='Apache 2.0',
264 description='Speech tools and models based on Paddlepaddle',
265 long_description=read("README.md"),
266 long_description_content_type="text/markdown",
267 keywords=[
268 "speech",
269 "asr",
270 "tts",
271 "speaker verfication",
272 "speech classfication",
273 "text frontend",
274 "MFA",
275 "paddlepaddle",
276 "beam search",
277 "ctcdecoder",
278 "deepspeech2",
279 "transformer",
280 "conformer",
281 "fastspeech",
282 "vocoder",
283 "pwgan",
284 "gan",
285 ],
286 python_requires='>=3.7',
287 install_requires=requirements["install"],
288 extras_require={
289 'develop':
290 requirements["develop"],
291 'doc': [
292 "sphinx", "sphinx-rtd-theme", "numpydoc", "myst_parser",
293 "recommonmark>=0.5.0", "sphinx-markdown-tables", "sphinx-autobuild"
294 ],
295 'test': ['nose', 'torchaudio==0.10.2'],
296 },
297 cmdclass={
298 'develop': DevelopCommand,
299 'install': InstallCommand,
300 'upload': UploadCommand,
301 'test': TestCommand,
302 },
303
304 # Package info
305 packages=find_packages(include=('paddlespeech*')),
306 zip_safe=True,
307 classifiers=[
308 'Development Status :: 5 - Production/Stable',
309 'Intended Audience :: Developers',
310 'Intended Audience :: Science/Research',
311 'Topic :: Scientific/Engineering :: Artificial Intelligence',
312 'License :: OSI Approved :: Apache Software License',
313 'Programming Language :: Python',
314 'Programming Language :: Python :: 3',
315 'Programming Language :: Python :: 3.7',
316 'Programming Language :: Python :: 3.8',
317 'Programming Language :: Python :: 3.9',
318 ],
319 entry_points={
320 'console_scripts': [
321 'paddlespeech=paddlespeech.cli.entry:_execute',
322 'paddlespeech_server=paddlespeech.server.entry:server_execute',
323 'paddlespeech_client=paddlespeech.server.entry:client_execute'
324 ]
325 })
326
327 with version_info():
328 setup(**setup_info, include_package_data=True)
329
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -52,7 +52,7 @@
"Pillow>=9.0.0",
"praatio==5.0.0",
"protobuf>=3.1.0, <=3.20.0",
- "pypinyin",
+ "pypinyin<=0.44.0",
"pypinyin-dict",
"python-dateutil",
"pyworld==0.2.12",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -52,7 +52,7 @@\n \"Pillow>=9.0.0\",\n \"praatio==5.0.0\",\n \"protobuf>=3.1.0, <=3.20.0\",\n- \"pypinyin\",\n+ \"pypinyin<=0.44.0\",\n \"pypinyin-dict\",\n \"python-dateutil\",\n \"pyworld==0.2.12\",\n", "issue": "Added pre-install doc for G2P and TN modules and updated the dependency version of pypinyin\nAdded pre-install doc for G2P and TN modules and updated the dependency version of pypinyin\n", "before_files": [{"content": "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport contextlib\nimport inspect\nimport io\nimport os\nimport subprocess as sp\nimport sys\nfrom pathlib import Path\n\nfrom setuptools import Command\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\nHERE = Path(os.path.abspath(os.path.dirname(__file__)))\n\nVERSION = '0.0.0'\nCOMMITID = 'none'\n\nbase = [\n \"editdistance\",\n \"g2p_en\",\n \"g2pM\",\n \"h5py\",\n \"inflect\",\n \"jieba\",\n \"jsonlines\",\n \"kaldiio\",\n \"librosa==0.8.1\",\n \"loguru\",\n \"matplotlib\",\n \"nara_wpe\",\n \"onnxruntime==1.10.0\",\n \"opencc\",\n \"pandas\",\n \"paddlenlp\",\n \"paddlespeech_feat\",\n \"Pillow>=9.0.0\",\n \"praatio==5.0.0\",\n \"protobuf>=3.1.0, <=3.20.0\",\n \"pypinyin\",\n \"pypinyin-dict\",\n \"python-dateutil\",\n \"pyworld==0.2.12\",\n \"resampy==0.2.2\",\n \"sacrebleu\",\n \"scipy\",\n \"sentencepiece~=0.1.96\",\n \"soundfile~=0.10\",\n \"textgrid\",\n \"timer\",\n \"tqdm\",\n \"typeguard\",\n \"visualdl\",\n \"webrtcvad\",\n \"yacs~=0.1.8\",\n \"prettytable\",\n \"zhon\",\n \"colorlog\",\n \"pathos == 0.2.8\",\n \"braceexpand\",\n \"pyyaml\",\n \"pybind11\",\n]\n\nserver = [\"fastapi\", \"uvicorn\", \"pattern_singleton\", \"websockets\"]\n\nrequirements = {\n \"install\":\n base + server,\n \"develop\": [\n \"ConfigArgParse\",\n \"coverage\",\n \"gpustat\",\n \"paddlespeech_ctcdecoders\",\n \"phkit\",\n \"pypi-kenlm\",\n \"snakeviz\",\n \"sox\",\n \"soxbindings\",\n \"unidecode\",\n \"yq\",\n \"pre-commit\",\n ]\n}\n\n\ndef check_call(cmd: str, shell=False, executable=None):\n try:\n sp.check_call(\n cmd.split(),\n shell=shell,\n executable=\"/bin/bash\" if shell else executable)\n except sp.CalledProcessError as e:\n print(\n f\"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:\",\n e.output,\n file=sys.stderr)\n raise e\n\n\ndef check_output(cmd: str, shell=False):\n try:\n out_bytes = sp.check_output(cmd.split())\n except sp.CalledProcessError as e:\n out_bytes = e.output # Output generated before error\n code = e.returncode # Return code\n print(\n f\"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:\",\n out_bytes,\n file=sys.stderr)\n return out_bytes.strip().decode('utf8')\n\n\[email protected]\ndef pushd(new_dir):\n old_dir = os.getcwd()\n os.chdir(new_dir)\n 
print(new_dir)\n yield\n os.chdir(old_dir)\n print(old_dir)\n\n\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\")) as fp:\n return fp.read()\n\n\ndef _remove(files: str):\n for f in files:\n f.unlink()\n\n\n################################# Install ##################################\n\n\ndef _post_install(install_lib_dir):\n # tools/make\n tool_dir = HERE / \"tools\"\n _remove(tool_dir.glob(\"*.done\"))\n with pushd(tool_dir):\n check_call(\"make\")\n print(\"tools install.\")\n\n # ctcdecoder\n ctcdecoder_dir = HERE / 'third_party/ctc_decoders'\n with pushd(ctcdecoder_dir):\n check_call(\"bash -e setup.sh\")\n print(\"ctcdecoder install.\")\n\n\nclass DevelopCommand(develop):\n def run(self):\n develop.run(self)\n # must after develop.run, or pkg install by shell will not see\n self.execute(_post_install, (self.install_lib, ), msg=\"Post Install...\")\n\n\nclass InstallCommand(install):\n def run(self):\n install.run(self)\n\n\nclass TestCommand(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n # Run nose ensuring that argv simulates running nosetests directly\n import nose\n nose.run_exit(argv=['nosetests', '-w', 'tests'])\n\n\n# cmd: python setup.py upload\nclass UploadCommand(Command):\n description = \"Build and publish the package.\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n print(\"Removing previous dist/ ...\")\n shutil.rmtree(str(HERE / \"dist\"))\n except OSError:\n pass\n print(\"Building source distribution...\")\n sp.check_call([sys.executable, \"setup.py\", \"sdist\"])\n print(\"Uploading package to PyPi...\")\n sp.check_call([\"twine\", \"upload\", \"dist/*\"])\n sys.exit()\n\n\n################################# Version ##################################\ndef write_version_py(filename='paddlespeech/__init__.py'):\n import paddlespeech\n if hasattr(paddlespeech,\n \"__version__\") and paddlespeech.__version__ == VERSION:\n return\n with open(filename, \"a\") as f:\n out_str = f\"\\n__version__ = '{VERSION}'\\n\"\n print(out_str)\n f.write(f\"\\n__version__ = '{VERSION}'\\n\")\n\n COMMITID = check_output(\"git rev-parse HEAD\")\n with open(filename, 'a') as f:\n out_str = f\"\\n__commit__ = '{COMMITID}'\\n\"\n print(out_str)\n f.write(f\"\\n__commit__ = '{COMMITID}'\\n\")\n\n print(f\"{inspect.currentframe().f_code.co_name} done\")\n\n\ndef remove_version_py(filename='paddlespeech/__init__.py'):\n with open(filename, \"r\") as f:\n lines = f.readlines()\n with open(filename, \"w\") as f:\n for line in lines:\n if \"__version__\" in line or \"__commit__\" in line:\n continue\n f.write(line)\n print(f\"{inspect.currentframe().f_code.co_name} done\")\n\n\[email protected]\ndef version_info():\n write_version_py()\n yield\n remove_version_py()\n\n\n################################# Steup ##################################\nsetup_info = dict(\n # Metadata\n name='paddlespeech',\n version=VERSION,\n author='PaddlePaddle Speech and Language Team',\n author_email='[email protected]',\n url='https://github.com/PaddlePaddle/PaddleSpeech',\n license='Apache 2.0',\n description='Speech tools and models based on Paddlepaddle',\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n keywords=[\n \"speech\",\n \"asr\",\n \"tts\",\n \"speaker verfication\",\n \"speech classfication\",\n 
\"text frontend\",\n \"MFA\",\n \"paddlepaddle\",\n \"beam search\",\n \"ctcdecoder\",\n \"deepspeech2\",\n \"transformer\",\n \"conformer\",\n \"fastspeech\",\n \"vocoder\",\n \"pwgan\",\n \"gan\",\n ],\n python_requires='>=3.7',\n install_requires=requirements[\"install\"],\n extras_require={\n 'develop':\n requirements[\"develop\"],\n 'doc': [\n \"sphinx\", \"sphinx-rtd-theme\", \"numpydoc\", \"myst_parser\",\n \"recommonmark>=0.5.0\", \"sphinx-markdown-tables\", \"sphinx-autobuild\"\n ],\n 'test': ['nose', 'torchaudio==0.10.2'],\n },\n cmdclass={\n 'develop': DevelopCommand,\n 'install': InstallCommand,\n 'upload': UploadCommand,\n 'test': TestCommand,\n },\n\n # Package info\n packages=find_packages(include=('paddlespeech*')),\n zip_safe=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n entry_points={\n 'console_scripts': [\n 'paddlespeech=paddlespeech.cli.entry:_execute',\n 'paddlespeech_server=paddlespeech.server.entry:server_execute',\n 'paddlespeech_client=paddlespeech.server.entry:client_execute'\n ]\n })\n\nwith version_info():\n setup(**setup_info, include_package_data=True)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport contextlib\nimport inspect\nimport io\nimport os\nimport subprocess as sp\nimport sys\nfrom pathlib import Path\n\nfrom setuptools import Command\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\nHERE = Path(os.path.abspath(os.path.dirname(__file__)))\n\nVERSION = '0.0.0'\nCOMMITID = 'none'\n\nbase = [\n \"editdistance\",\n \"g2p_en\",\n \"g2pM\",\n \"h5py\",\n \"inflect\",\n \"jieba\",\n \"jsonlines\",\n \"kaldiio\",\n \"librosa==0.8.1\",\n \"loguru\",\n \"matplotlib\",\n \"nara_wpe\",\n \"onnxruntime==1.10.0\",\n \"opencc\",\n \"pandas\",\n \"paddlenlp\",\n \"paddlespeech_feat\",\n \"Pillow>=9.0.0\",\n \"praatio==5.0.0\",\n \"protobuf>=3.1.0, <=3.20.0\",\n \"pypinyin<=0.44.0\",\n \"pypinyin-dict\",\n \"python-dateutil\",\n \"pyworld==0.2.12\",\n \"resampy==0.2.2\",\n \"sacrebleu\",\n \"scipy\",\n \"sentencepiece~=0.1.96\",\n \"soundfile~=0.10\",\n \"textgrid\",\n \"timer\",\n \"tqdm\",\n \"typeguard\",\n \"visualdl\",\n \"webrtcvad\",\n \"yacs~=0.1.8\",\n \"prettytable\",\n \"zhon\",\n \"colorlog\",\n \"pathos == 0.2.8\",\n \"braceexpand\",\n \"pyyaml\",\n \"pybind11\",\n]\n\nserver = [\"fastapi\", \"uvicorn\", \"pattern_singleton\", 
\"websockets\"]\n\nrequirements = {\n \"install\":\n base + server,\n \"develop\": [\n \"ConfigArgParse\",\n \"coverage\",\n \"gpustat\",\n \"paddlespeech_ctcdecoders\",\n \"phkit\",\n \"pypi-kenlm\",\n \"snakeviz\",\n \"sox\",\n \"soxbindings\",\n \"unidecode\",\n \"yq\",\n \"pre-commit\",\n ]\n}\n\n\ndef check_call(cmd: str, shell=False, executable=None):\n try:\n sp.check_call(\n cmd.split(),\n shell=shell,\n executable=\"/bin/bash\" if shell else executable)\n except sp.CalledProcessError as e:\n print(\n f\"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:\",\n e.output,\n file=sys.stderr)\n raise e\n\n\ndef check_output(cmd: str, shell=False):\n try:\n out_bytes = sp.check_output(cmd.split())\n except sp.CalledProcessError as e:\n out_bytes = e.output # Output generated before error\n code = e.returncode # Return code\n print(\n f\"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:\",\n out_bytes,\n file=sys.stderr)\n return out_bytes.strip().decode('utf8')\n\n\[email protected]\ndef pushd(new_dir):\n old_dir = os.getcwd()\n os.chdir(new_dir)\n print(new_dir)\n yield\n os.chdir(old_dir)\n print(old_dir)\n\n\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\")) as fp:\n return fp.read()\n\n\ndef _remove(files: str):\n for f in files:\n f.unlink()\n\n\n################################# Install ##################################\n\n\ndef _post_install(install_lib_dir):\n # tools/make\n tool_dir = HERE / \"tools\"\n _remove(tool_dir.glob(\"*.done\"))\n with pushd(tool_dir):\n check_call(\"make\")\n print(\"tools install.\")\n\n # ctcdecoder\n ctcdecoder_dir = HERE / 'third_party/ctc_decoders'\n with pushd(ctcdecoder_dir):\n check_call(\"bash -e setup.sh\")\n print(\"ctcdecoder install.\")\n\n\nclass DevelopCommand(develop):\n def run(self):\n develop.run(self)\n # must after develop.run, or pkg install by shell will not see\n self.execute(_post_install, (self.install_lib, ), msg=\"Post Install...\")\n\n\nclass InstallCommand(install):\n def run(self):\n install.run(self)\n\n\nclass TestCommand(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n # Run nose ensuring that argv simulates running nosetests directly\n import nose\n nose.run_exit(argv=['nosetests', '-w', 'tests'])\n\n\n# cmd: python setup.py upload\nclass UploadCommand(Command):\n description = \"Build and publish the package.\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n print(\"Removing previous dist/ ...\")\n shutil.rmtree(str(HERE / \"dist\"))\n except OSError:\n pass\n print(\"Building source distribution...\")\n sp.check_call([sys.executable, \"setup.py\", \"sdist\"])\n print(\"Uploading package to PyPi...\")\n sp.check_call([\"twine\", \"upload\", \"dist/*\"])\n sys.exit()\n\n\n################################# Version ##################################\ndef write_version_py(filename='paddlespeech/__init__.py'):\n import paddlespeech\n if hasattr(paddlespeech,\n \"__version__\") and paddlespeech.__version__ == VERSION:\n return\n with open(filename, \"a\") as f:\n out_str = f\"\\n__version__ = '{VERSION}'\\n\"\n print(out_str)\n f.write(f\"\\n__version__ = '{VERSION}'\\n\")\n\n COMMITID = check_output(\"git rev-parse HEAD\")\n with open(filename, 'a') as f:\n out_str = f\"\\n__commit__ = '{COMMITID}'\\n\"\n print(out_str)\n 
f.write(f\"\\n__commit__ = '{COMMITID}'\\n\")\n\n print(f\"{inspect.currentframe().f_code.co_name} done\")\n\n\ndef remove_version_py(filename='paddlespeech/__init__.py'):\n with open(filename, \"r\") as f:\n lines = f.readlines()\n with open(filename, \"w\") as f:\n for line in lines:\n if \"__version__\" in line or \"__commit__\" in line:\n continue\n f.write(line)\n print(f\"{inspect.currentframe().f_code.co_name} done\")\n\n\[email protected]\ndef version_info():\n write_version_py()\n yield\n remove_version_py()\n\n\n################################# Steup ##################################\nsetup_info = dict(\n # Metadata\n name='paddlespeech',\n version=VERSION,\n author='PaddlePaddle Speech and Language Team',\n author_email='[email protected]',\n url='https://github.com/PaddlePaddle/PaddleSpeech',\n license='Apache 2.0',\n description='Speech tools and models based on Paddlepaddle',\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n keywords=[\n \"speech\",\n \"asr\",\n \"tts\",\n \"speaker verfication\",\n \"speech classfication\",\n \"text frontend\",\n \"MFA\",\n \"paddlepaddle\",\n \"beam search\",\n \"ctcdecoder\",\n \"deepspeech2\",\n \"transformer\",\n \"conformer\",\n \"fastspeech\",\n \"vocoder\",\n \"pwgan\",\n \"gan\",\n ],\n python_requires='>=3.7',\n install_requires=requirements[\"install\"],\n extras_require={\n 'develop':\n requirements[\"develop\"],\n 'doc': [\n \"sphinx\", \"sphinx-rtd-theme\", \"numpydoc\", \"myst_parser\",\n \"recommonmark>=0.5.0\", \"sphinx-markdown-tables\", \"sphinx-autobuild\"\n ],\n 'test': ['nose', 'torchaudio==0.10.2'],\n },\n cmdclass={\n 'develop': DevelopCommand,\n 'install': InstallCommand,\n 'upload': UploadCommand,\n 'test': TestCommand,\n },\n\n # Package info\n packages=find_packages(include=('paddlespeech*')),\n zip_safe=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n entry_points={\n 'console_scripts': [\n 'paddlespeech=paddlespeech.cli.entry:_execute',\n 'paddlespeech_server=paddlespeech.server.entry:server_execute',\n 'paddlespeech_client=paddlespeech.server.entry:client_execute'\n ]\n })\n\nwith version_info():\n setup(**setup_info, include_package_data=True)\n", "path": "setup.py"}]} | 3,409 | 119 |
gh_patches_debug_3853 | rasdani/github-patches | git_diff | spack__spack-29252 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix neovim install on M1
With #29228 and #29229 in, the `spack install neovim` fails with:
```
1022 undef: _tigetstr
1023 undef: _cur_term
1024 undef: _setupterm
>> 1025 Undefined symbols for architecture arm64:
1026 "_tigetstr", referenced from:
1027 _try_load_terminfo_key in libtermkey.a(driver-ti.o)
1028 "_cur_term", referenced from:
1029 _load_terminfo in libtermkey.a(driver-ti.o)
1030 "_setupterm", referenced from:
1031 _new_driver in libtermkey.a(driver-ti.o)
1032 _load_terminfo in libtermkey.a(driver-ti.o)
1033 ld: symbol(s) not found for architecture arm64
```
This happens while linking the `nvim` executable. These symbols seem to be coming from `ncurses`, but linking `ncurses` explicitly didn't seem to fix it. However, the current PR fixes it. One must turn off `termlib` in `ncurses` and then explicitly link it; then `nvim` builds just fine. I am opening this PR as a Draft, because the `+termlib` seems hardwired in `lua`, so I don't know how to fix this properly. Also, just adding `ncurses` in the CMake configuration for `neovim` doesn't feel right; one should explicitly depend on `ncurses` and then find it with CMake. I don't have time to work on that, but this PR might be helpful to others who want to finish this work. Either way, neovim seems to work fine now.
--- END ISSUE ---
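The workaround described in the issue (building `ncurses` without `termlib` and linking it explicitly) could be expressed in Spack's Python package DSL. The sketch below is purely illustrative of that idea and is not the change that was merged — the accepted patch further down in this record instead switches `libtermkey` from `ncurses` to `unibilium`:

```python
# Hypothetical excerpt of a Spack recipe for neovim; illustrative stub only,
# showing the workaround from the issue text rather than the actual fix.
from spack import *


class Neovim(CMakePackage):
    """Illustrative stub, not the real recipe."""

    # Ask for an ncurses built with the termlib variant turned off, so the
    # terminfo symbols (_setupterm, _tigetstr, _cur_term) stay in libncurses.
    depends_on("ncurses~termlib")

    def cmake_args(self):
        # Explicitly put the curses libraries on the link line, as the issue suggests.
        return [self.define("CMAKE_EXE_LINKER_FLAGS", self.spec["ncurses"].libs.ld_flags)]
```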
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/libtermkey/package.py`
Content:
```
1 # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 from spack import *
7
8
9 class Libtermkey(Package):
10 """Easy keyboard entry processing for terminal programs"""
11 homepage = "http://www.leonerd.org.uk/code/libtermkey/"
12 url = "http://www.leonerd.org.uk/code/libtermkey/libtermkey-0.18.tar.gz"
13
14 version('0.22', sha256='6945bd3c4aaa83da83d80a045c5563da4edd7d0374c62c0d35aec09eb3014600')
15 version('0.18', sha256='239746de41c845af52bb3c14055558f743292dd6c24ac26c2d6567a5a6093926')
16 version('0.17', sha256='68949364ed5eaad857b3dea10071cde74b00b9f236dfbb702169f246c3cef389')
17 version('0.16', sha256='6c8136efa5d0b3277014a5d4519ea81190079c82656b7db1655a1bd147326a70')
18 version('0.15b', sha256='6825422c6297e4f81b2c48962b4512585ca8a50bf31f24b3234a1be71a9d7a6e')
19 version('0.14', sha256='3d114d4509499b80a583ea39cd35f18268aacf4a7bbf56c142cd032632005c79')
20
21 depends_on('libtool', type='build')
22 depends_on('ncurses')
23 depends_on('pkgconfig')
24
25 def install(self, spec, prefix):
26 make()
27 make("install", "PREFIX=" + prefix)
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/var/spack/repos/builtin/packages/libtermkey/package.py b/var/spack/repos/builtin/packages/libtermkey/package.py
--- a/var/spack/repos/builtin/packages/libtermkey/package.py
+++ b/var/spack/repos/builtin/packages/libtermkey/package.py
@@ -19,7 +19,7 @@
version('0.14', sha256='3d114d4509499b80a583ea39cd35f18268aacf4a7bbf56c142cd032632005c79')
depends_on('libtool', type='build')
- depends_on('ncurses')
+ depends_on('unibilium')
depends_on('pkgconfig')
def install(self, spec, prefix):
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/libtermkey/package.py b/var/spack/repos/builtin/packages/libtermkey/package.py\n--- a/var/spack/repos/builtin/packages/libtermkey/package.py\n+++ b/var/spack/repos/builtin/packages/libtermkey/package.py\n@@ -19,7 +19,7 @@\n version('0.14', sha256='3d114d4509499b80a583ea39cd35f18268aacf4a7bbf56c142cd032632005c79')\n \n depends_on('libtool', type='build')\n- depends_on('ncurses')\n+ depends_on('unibilium')\n depends_on('pkgconfig')\n \n def install(self, spec, prefix):\n", "issue": "Fix neovim install on M1\nWith #29228 and #29229 in, the `spack install neovim` fails with:\r\n```\r\n 1022 undef: _tigetstr\r\n 1023 undef: _cur_term\r\n 1024 undef: _setupterm\r\n >> 1025 Undefined symbols for architecture arm64:\r\n 1026 \"_tigetstr\", referenced from:\r\n 1027 _try_load_terminfo_key in libtermkey.a(driver-ti.o)\r\n 1028 \"_cur_term\", referenced from:\r\n 1029 _load_terminfo in libtermkey.a(driver-ti.o)\r\n 1030 \"_setupterm\", referenced from:\r\n 1031 _new_driver in libtermkey.a(driver-ti.o)\r\n 1032 _load_terminfo in libtermkey.a(driver-ti.o)\r\n 1033 ld: symbol(s) not found for architecture arm64\r\n```\r\nWhile linking the `nvim` executable. These symbols seem to be coming from `ncurses`, but linking `ncurses` explicitly didn't seem to fix it. However, the current PR fixes it. One must turn off `termlib` in `ncurses` and then one must explicitly link it. Then `nvim` builds just fine. I am opening this PR as a Draft, because the `+termlib` seems hardwired in `lua`, so I don't know how to fix this properly. Also just adding `ncurses` in the cmake for `neovim` doesn't feel right, one should explicitly depend on `ncurses` and then find it using cmake. I don't have time to work on that. But this PR might be helpful to others to finish this work. Either way, neovim seems to work fine now.\n", "before_files": [{"content": "# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass Libtermkey(Package):\n \"\"\"Easy keyboard entry processing for terminal programs\"\"\"\n homepage = \"http://www.leonerd.org.uk/code/libtermkey/\"\n url = \"http://www.leonerd.org.uk/code/libtermkey/libtermkey-0.18.tar.gz\"\n\n version('0.22', sha256='6945bd3c4aaa83da83d80a045c5563da4edd7d0374c62c0d35aec09eb3014600')\n version('0.18', sha256='239746de41c845af52bb3c14055558f743292dd6c24ac26c2d6567a5a6093926')\n version('0.17', sha256='68949364ed5eaad857b3dea10071cde74b00b9f236dfbb702169f246c3cef389')\n version('0.16', sha256='6c8136efa5d0b3277014a5d4519ea81190079c82656b7db1655a1bd147326a70')\n version('0.15b', sha256='6825422c6297e4f81b2c48962b4512585ca8a50bf31f24b3234a1be71a9d7a6e')\n version('0.14', sha256='3d114d4509499b80a583ea39cd35f18268aacf4a7bbf56c142cd032632005c79')\n\n depends_on('libtool', type='build')\n depends_on('ncurses')\n depends_on('pkgconfig')\n\n def install(self, spec, prefix):\n make()\n make(\"install\", \"PREFIX=\" + prefix)\n", "path": "var/spack/repos/builtin/packages/libtermkey/package.py"}], "after_files": [{"content": "# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass Libtermkey(Package):\n \"\"\"Easy keyboard entry processing for terminal programs\"\"\"\n homepage = \"http://www.leonerd.org.uk/code/libtermkey/\"\n url = \"http://www.leonerd.org.uk/code/libtermkey/libtermkey-0.18.tar.gz\"\n\n version('0.22', sha256='6945bd3c4aaa83da83d80a045c5563da4edd7d0374c62c0d35aec09eb3014600')\n version('0.18', sha256='239746de41c845af52bb3c14055558f743292dd6c24ac26c2d6567a5a6093926')\n version('0.17', sha256='68949364ed5eaad857b3dea10071cde74b00b9f236dfbb702169f246c3cef389')\n version('0.16', sha256='6c8136efa5d0b3277014a5d4519ea81190079c82656b7db1655a1bd147326a70')\n version('0.15b', sha256='6825422c6297e4f81b2c48962b4512585ca8a50bf31f24b3234a1be71a9d7a6e')\n version('0.14', sha256='3d114d4509499b80a583ea39cd35f18268aacf4a7bbf56c142cd032632005c79')\n\n depends_on('libtool', type='build')\n depends_on('unibilium')\n depends_on('pkgconfig')\n\n def install(self, spec, prefix):\n make()\n make(\"install\", \"PREFIX=\" + prefix)\n", "path": "var/spack/repos/builtin/packages/libtermkey/package.py"}]} | 1,349 | 190 |
gh_patches_debug_4404 | rasdani/github-patches | git_diff | pallets__werkzeug-930 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add an 'extras_require' entry to setup.py for watchdog
The reloader feature has an optional dependency on the watchdog package:
http://werkzeug.pocoo.org/docs/0.11/serving/#reloader
Whilst people can just add 'watchdog' to their requirements files, it's clearer to add a dedicated `extras_require` entry for it to setup.py. That allows people to instead add e.g. `Werkzeug[watchdog]` to their requirements file, which shows more clearly why the dependency is required.
In addition, should Werkzeug ever need a specific version of watchdog in the future (e.g. due to backwards incompatibilities), the `extras_require` entry could be updated with a version range, causing zero breakage for people who'd used the `Werkzeug[watchdog]` form in their requirements file.
I'll open a PR for this shortly :-)
--- END ISSUE ---
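To make the usage pattern from the issue concrete: once Werkzeug publishes a `watchdog` extra, a downstream project can opt into it through an ordinary requirement string. The snippet below is a hypothetical consumer's setup.py (made-up project name), not part of Werkzeug itself:

```python
# Hypothetical downstream project depending on Werkzeug with the watchdog extra.
from setuptools import setup

setup(
    name="my-wsgi-app",        # made-up project name
    version="0.1.0",
    install_requires=[
        # Pulls in Werkzeug plus whatever the 'watchdog' extra declares,
        # documenting *why* watchdog is needed (the auto-reloader).
        "Werkzeug[watchdog]",
    ],
)
```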
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Werkzeug
4 ========
5
6 Werkzeug started as simple collection of various utilities for WSGI
7 applications and has become one of the most advanced WSGI utility
8 modules. It includes a powerful debugger, full featured request and
9 response objects, HTTP utilities to handle entity tags, cache control
10 headers, HTTP dates, cookie handling, file uploads, a powerful URL
11 routing system and a bunch of community contributed addon modules.
12
13 Werkzeug is unicode aware and doesn't enforce a specific template
14 engine, database adapter or anything else. It doesn't even enforce
15 a specific way of handling requests and leaves all that up to the
16 developer. It's most useful for end user applications which should work
17 on as many server environments as possible (such as blogs, wikis,
18 bulletin boards, etc.).
19
20 Details and example applications are available on the
21 `Werkzeug website <http://werkzeug.pocoo.org/>`_.
22
23
24 Features
25 --------
26
27 - unicode awareness
28
29 - request and response objects
30
31 - various utility functions for dealing with HTTP headers such as
32 `Accept` and `Cache-Control` headers.
33
34 - thread local objects with proper cleanup at request end
35
36 - an interactive debugger
37
38 - A simple WSGI server with support for threading and forking
39 with an automatic reloader.
40
41 - a flexible URL routing system with REST support.
42
43 - fully WSGI compatible
44
45
46 Development Version
47 -------------------
48
49 The Werkzeug development version can be installed by cloning the git
50 repository from `github`_::
51
52 git clone [email protected]:pallets/werkzeug.git
53
54 .. _github: http://github.com/pallets/werkzeug
55 """
56 import ast
57 import re
58 try:
59 from setuptools import setup, Command
60 except ImportError:
61 from distutils.core import setup, Command
62
63
64 _version_re = re.compile(r'__version__\s+=\s+(.*)')
65
66 with open('werkzeug/__init__.py', 'rb') as f:
67 version = str(ast.literal_eval(_version_re.search(
68 f.read().decode('utf-8')).group(1)))
69
70
71 class TestCommand(Command):
72 user_options = []
73
74 def initialize_options(self):
75 pass
76
77 def finalize_options(self):
78 pass
79
80 def run(self):
81 import pytest
82 pytest.cmdline.main(args=[])
83
84
85 setup(
86 name='Werkzeug',
87 version=version,
88 url='http://werkzeug.pocoo.org/',
89 license='BSD',
90 author='Armin Ronacher',
91 author_email='[email protected]',
92 description='The Swiss Army knife of Python web development',
93 long_description=__doc__,
94 classifiers=[
95 'Development Status :: 5 - Production/Stable',
96 'Environment :: Web Environment',
97 'Intended Audience :: Developers',
98 'License :: OSI Approved :: BSD License',
99 'Operating System :: OS Independent',
100 'Programming Language :: Python',
101 'Programming Language :: Python :: 2',
102 'Programming Language :: Python :: 2.6',
103 'Programming Language :: Python :: 2.7',
104 'Programming Language :: Python :: 3',
105 'Programming Language :: Python :: 3.3',
106 'Programming Language :: Python :: 3.4',
107 'Programming Language :: Python :: 3.5',
108 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
109 'Topic :: Software Development :: Libraries :: Python Modules'
110 ],
111 packages=['werkzeug', 'werkzeug.debug', 'werkzeug.contrib'],
112 cmdclass=dict(test=TestCommand),
113 include_package_data=True,
114 zip_safe=False,
115 platforms='any'
116 )
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -109,6 +109,9 @@
'Topic :: Software Development :: Libraries :: Python Modules'
],
packages=['werkzeug', 'werkzeug.debug', 'werkzeug.contrib'],
+ extras_require={
+ 'watchdog': ['watchdog'],
+ },
cmdclass=dict(test=TestCommand),
include_package_data=True,
zip_safe=False,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -109,6 +109,9 @@\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n packages=['werkzeug', 'werkzeug.debug', 'werkzeug.contrib'],\n+ extras_require={\n+ 'watchdog': ['watchdog'],\n+ },\n cmdclass=dict(test=TestCommand),\n include_package_data=True,\n zip_safe=False,\n", "issue": "Add an 'extras_require' entry to setup.py for watchdog\nThe reloader feature has an optional dependency on the watchdog package:\nhttp://werkzeug.pocoo.org/docs/0.11/serving/#reloader\n\nWhilst people can just add 'watchdog' to their requirements files, it's clearer to specifically add an `extras_require` entry for it to setup.py, which allows people to instead add eg `Werkzeug[watchdog]` to their requirements file, which more clearly shows why the dependency is required.\n\nIn addition, should Werkzeug ever need a specific version of watchdog in the future (eg due to backwards incompatibilities), then the `extras_require` entry could be updated with a version range, causing zero breakage for people who'd used the `Werkzeug[watchdog]` form in their requirements file.\n\nI'll open a PR for this shortly :-)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nWerkzeug\n========\n\nWerkzeug started as simple collection of various utilities for WSGI\napplications and has become one of the most advanced WSGI utility\nmodules. It includes a powerful debugger, full featured request and\nresponse objects, HTTP utilities to handle entity tags, cache control\nheaders, HTTP dates, cookie handling, file uploads, a powerful URL\nrouting system and a bunch of community contributed addon modules.\n\nWerkzeug is unicode aware and doesn't enforce a specific template\nengine, database adapter or anything else. It doesn't even enforce\na specific way of handling requests and leaves all that up to the\ndeveloper. It's most useful for end user applications which should work\non as many server environments as possible (such as blogs, wikis,\nbulletin boards, etc.).\n\nDetails and example applications are available on the\n`Werkzeug website <http://werkzeug.pocoo.org/>`_.\n\n\nFeatures\n--------\n\n- unicode awareness\n\n- request and response objects\n\n- various utility functions for dealing with HTTP headers such as\n `Accept` and `Cache-Control` headers.\n\n- thread local objects with proper cleanup at request end\n\n- an interactive debugger\n\n- A simple WSGI server with support for threading and forking\n with an automatic reloader.\n\n- a flexible URL routing system with REST support.\n\n- fully WSGI compatible\n\n\nDevelopment Version\n-------------------\n\nThe Werkzeug development version can be installed by cloning the git\nrepository from `github`_::\n\n git clone [email protected]:pallets/werkzeug.git\n\n.. 
_github: http://github.com/pallets/werkzeug\n\"\"\"\nimport ast\nimport re\ntry:\n from setuptools import setup, Command\nexcept ImportError:\n from distutils.core import setup, Command\n\n\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n\nwith open('werkzeug/__init__.py', 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\n\nclass TestCommand(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n import pytest\n pytest.cmdline.main(args=[])\n\n\nsetup(\n name='Werkzeug',\n version=version,\n url='http://werkzeug.pocoo.org/',\n license='BSD',\n author='Armin Ronacher',\n author_email='[email protected]',\n description='The Swiss Army knife of Python web development',\n long_description=__doc__,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n packages=['werkzeug', 'werkzeug.debug', 'werkzeug.contrib'],\n cmdclass=dict(test=TestCommand),\n include_package_data=True,\n zip_safe=False,\n platforms='any'\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nWerkzeug\n========\n\nWerkzeug started as simple collection of various utilities for WSGI\napplications and has become one of the most advanced WSGI utility\nmodules. It includes a powerful debugger, full featured request and\nresponse objects, HTTP utilities to handle entity tags, cache control\nheaders, HTTP dates, cookie handling, file uploads, a powerful URL\nrouting system and a bunch of community contributed addon modules.\n\nWerkzeug is unicode aware and doesn't enforce a specific template\nengine, database adapter or anything else. It doesn't even enforce\na specific way of handling requests and leaves all that up to the\ndeveloper. It's most useful for end user applications which should work\non as many server environments as possible (such as blogs, wikis,\nbulletin boards, etc.).\n\nDetails and example applications are available on the\n`Werkzeug website <http://werkzeug.pocoo.org/>`_.\n\n\nFeatures\n--------\n\n- unicode awareness\n\n- request and response objects\n\n- various utility functions for dealing with HTTP headers such as\n `Accept` and `Cache-Control` headers.\n\n- thread local objects with proper cleanup at request end\n\n- an interactive debugger\n\n- A simple WSGI server with support for threading and forking\n with an automatic reloader.\n\n- a flexible URL routing system with REST support.\n\n- fully WSGI compatible\n\n\nDevelopment Version\n-------------------\n\nThe Werkzeug development version can be installed by cloning the git\nrepository from `github`_::\n\n git clone [email protected]:pallets/werkzeug.git\n\n.. 
_github: http://github.com/pallets/werkzeug\n\"\"\"\nimport ast\nimport re\ntry:\n from setuptools import setup, Command\nexcept ImportError:\n from distutils.core import setup, Command\n\n\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n\nwith open('werkzeug/__init__.py', 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\n\nclass TestCommand(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n import pytest\n pytest.cmdline.main(args=[])\n\n\nsetup(\n name='Werkzeug',\n version=version,\n url='http://werkzeug.pocoo.org/',\n license='BSD',\n author='Armin Ronacher',\n author_email='[email protected]',\n description='The Swiss Army knife of Python web development',\n long_description=__doc__,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n packages=['werkzeug', 'werkzeug.debug', 'werkzeug.contrib'],\n extras_require={\n 'watchdog': ['watchdog'],\n },\n cmdclass=dict(test=TestCommand),\n include_package_data=True,\n zip_safe=False,\n platforms='any'\n)\n", "path": "setup.py"}]} | 1,476 | 102 |
gh_patches_debug_1694 | rasdani/github-patches | git_diff | iterative__dvc-4826 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unexpected error on `dvc diff`
## Bug Report
When running `dvc diff staging`, I got a KeyError, here is the traceback:
```
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/speech-api-EI_ft4iY/lib/python3.7/site-packages/dvc/main.py", line 76, in main
ret = cmd.run()
File "/home/ubuntu/.local/share/virtualenvs/speech-api-EI_ft4iY/lib/python3.7/site-packages/dvc/command/diff.py", line 130, in run
diff = self.repo.diff(self.args.a_rev, self.args.b_rev)
File "/home/ubuntu/.local/share/virtualenvs/speech-api-EI_ft4iY/lib/python3.7/site-packages/dvc/repo/__init__.py", line 54, in wrapper
return f(repo, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/speech-api-EI_ft4iY/lib/python3.7/site-packages/dvc/repo/diff.py", line 43, in diff
missing = sorted(_filter_missing(self, deleted_or_missing))
File "/home/ubuntu/.local/share/virtualenvs/speech-api-EI_ft4iY/lib/python3.7/site-packages/dvc/repo/diff.py", line 125, in _filter_missing
if out.status()[str(out)] == "not in cache":
KeyError: 'data/KPI/KPI_from_dvc/en/post_probs'
```
It only happens when I give a specific revision.
Any ideas? Could it be my data?
### Please provide information about your setup
**Output of `dvc version`:**
```console
$ dvc version
DVC version: 1.9.1 (pip)
---------------------------------
Platform: Python 3.7.3 on Linux-5.4.0-1029-aws-x86_64-with-debian-buster-sid
Supports: http, https, s3
Cache types: hardlink, symlink
Cache directory: ext4 on /dev/nvme0n1p1
Workspace directory: ext4 on /dev/nvme0n1p1
Repo: dvc, git
```
--- END ISSUE ---
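The failing line indexes the dictionary returned by `out.status()` directly, so any output whose name is missing from that dictionary raises `KeyError`. A minimal sketch of the failure mode and the defensive lookup, assuming a plain Python dict like the one `status()` returns:

```python
# Hypothetical status dict: an output with nothing to report may be
# absent entirely, so direct indexing raises KeyError.
status = {}
out_name = "data/KPI/KPI_from_dvc/en/post_probs"

try:
    missing = status[out_name] == "not in cache"   # raises KeyError
except KeyError:
    missing = False

# Defensive lookup: .get() returns None for a missing key instead.
missing = status.get(out_name) == "not in cache"
print(missing)  # False
```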
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/repo/diff.py`
Content:
```
1 import logging
2 import os
3
4 from dvc.repo import locked
5 from dvc.tree.local import LocalTree
6 from dvc.tree.repo import RepoTree
7
8 logger = logging.getLogger(__name__)
9
10
11 @locked
12 def diff(self, a_rev="HEAD", b_rev=None):
13 """
14 By default, it compares the workspace with the last commit's tree.
15
16 This implementation differs from `git diff` since DVC doesn't have
17 the concept of `index`, but it keeps the same interface, thus,
18 `dvc diff` would be the same as `dvc diff HEAD`.
19 """
20
21 if self.scm.no_commits:
22 return {}
23
24 b_rev = b_rev if b_rev else "workspace"
25 results = {}
26 for rev in self.brancher(revs=[a_rev, b_rev]):
27 if rev == "workspace" and rev != b_rev:
28 # brancher always returns workspace, but we only need to compute
29 # workspace paths/checksums if b_rev was None
30 continue
31 results[rev] = _paths_checksums(self)
32
33 old = results[a_rev]
34 new = results[b_rev]
35
36 # Compare paths between the old and new tree.
37 # set() efficiently converts dict keys to a set
38 added = sorted(set(new) - set(old))
39 deleted_or_missing = set(old) - set(new)
40 if b_rev == "workspace":
41 # missing status is only applicable when diffing local workspace
42 # against a commit
43 missing = sorted(_filter_missing(self, deleted_or_missing))
44 else:
45 missing = []
46 deleted = sorted(deleted_or_missing - set(missing))
47 modified = sorted(set(old) & set(new))
48
49 ret = {
50 "added": [{"path": path, "hash": new[path]} for path in added],
51 "deleted": [{"path": path, "hash": old[path]} for path in deleted],
52 "modified": [
53 {"path": path, "hash": {"old": old[path], "new": new[path]}}
54 for path in modified
55 if old[path] != new[path]
56 ],
57 "not in cache": [
58 {"path": path, "hash": old[path]} for path in missing
59 ],
60 }
61
62 return ret if any(ret.values()) else {}
63
64
65 def _paths_checksums(repo):
66 """
67 A dictionary of checksums addressed by relpaths collected from
68 the current tree outputs.
69
70 To help distinguish between a directory and a file output,
71 the former one will come with a trailing slash in the path:
72
73 directory: "data/"
74 file: "data"
75 """
76
77 return dict(_output_paths(repo))
78
79
80 def _output_paths(repo):
81 repo_tree = RepoTree(repo, stream=True)
82 on_working_tree = isinstance(repo.tree, LocalTree)
83
84 def _exists(output):
85 if on_working_tree:
86 return output.exists
87 return True
88
89 def _to_path(output):
90 return (
91 str(output)
92 if not output.is_dir_checksum
93 else os.path.join(str(output), "")
94 )
95
96 def _to_checksum(output):
97 if on_working_tree:
98 return repo.cache.local.tree.get_hash(output.path_info).value
99 return output.hash_info.value
100
101 for stage in repo.stages:
102 for output in stage.outs:
103 if _exists(output):
104 yield _to_path(output), _to_checksum(output)
105 if output.is_dir_checksum:
106 yield from _dir_output_paths(repo_tree, output)
107
108
109 def _dir_output_paths(repo_tree, output):
110 from dvc.config import NoRemoteError
111
112 try:
113 for fname in repo_tree.walk_files(output.path_info):
114 yield str(fname), repo_tree.get_file_hash(fname).value
115 except NoRemoteError:
116 logger.warning("dir cache entry for '%s' is missing", output)
117
118
119 def _filter_missing(repo, paths):
120 repo_tree = RepoTree(repo, stream=True)
121 for path in paths:
122 metadata = repo_tree.metadata(path)
123 if metadata.is_dvc:
124 out = metadata.outs[0]
125 if out.status()[str(out)] == "not in cache":
126 yield path
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/repo/diff.py b/dvc/repo/diff.py
--- a/dvc/repo/diff.py
+++ b/dvc/repo/diff.py
@@ -122,5 +122,5 @@
metadata = repo_tree.metadata(path)
if metadata.is_dvc:
out = metadata.outs[0]
- if out.status()[str(out)] == "not in cache":
+ if out.status().get(str(out)) == "not in cache":
yield path
| {"golden_diff": "diff --git a/dvc/repo/diff.py b/dvc/repo/diff.py\n--- a/dvc/repo/diff.py\n+++ b/dvc/repo/diff.py\n@@ -122,5 +122,5 @@\n metadata = repo_tree.metadata(path)\n if metadata.is_dvc:\n out = metadata.outs[0]\n- if out.status()[str(out)] == \"not in cache\":\n+ if out.status().get(str(out)) == \"not in cache\":\n yield path\n", "issue": "Unexpected error on `dvc diff`\n## Bug Report\r\nWhen running `dvc diff staging`, I got a KeyError, here is the traceback:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/ubuntu/.local/share/virtualenvs/speech-api-EI_ft4iY/lib/python3.7/site-packages/dvc/main.py\", line 76, in main\r\n ret = cmd.run()\r\n File \"/home/ubuntu/.local/share/virtualenvs/speech-api-EI_ft4iY/lib/python3.7/site-packages/dvc/command/diff.py\", line 130, in run\r\n diff = self.repo.diff(self.args.a_rev, self.args.b_rev)\r\n File \"/home/ubuntu/.local/share/virtualenvs/speech-api-EI_ft4iY/lib/python3.7/site-packages/dvc/repo/__init__.py\", line 54, in wrapper\r\n return f(repo, *args, **kwargs)\r\n File \"/home/ubuntu/.local/share/virtualenvs/speech-api-EI_ft4iY/lib/python3.7/site-packages/dvc/repo/diff.py\", line 43, in diff\r\n missing = sorted(_filter_missing(self, deleted_or_missing))\r\n File \"/home/ubuntu/.local/share/virtualenvs/speech-api-EI_ft4iY/lib/python3.7/site-packages/dvc/repo/diff.py\", line 125, in _filter_missing\r\n if out.status()[str(out)] == \"not in cache\":\r\nKeyError: 'data/KPI/KPI_from_dvc/en/post_probs'\r\n```\r\n\r\nIt only happens when I give a specific revision.\r\n\r\nAny ideas? Could it be my data?\r\n\r\n### Please provide information about your setup\r\n\r\n**Output of `dvc version`:**\r\n\r\n```console\r\n$ dvc version\r\nDVC version: 1.9.1 (pip)\r\n---------------------------------\r\nPlatform: Python 3.7.3 on Linux-5.4.0-1029-aws-x86_64-with-debian-buster-sid\r\nSupports: http, https, s3\r\nCache types: hardlink, symlink\r\nCache directory: ext4 on /dev/nvme0n1p1\r\nWorkspace directory: ext4 on /dev/nvme0n1p1\r\nRepo: dvc, git\r\n```\n", "before_files": [{"content": "import logging\nimport os\n\nfrom dvc.repo import locked\nfrom dvc.tree.local import LocalTree\nfrom dvc.tree.repo import RepoTree\n\nlogger = logging.getLogger(__name__)\n\n\n@locked\ndef diff(self, a_rev=\"HEAD\", b_rev=None):\n \"\"\"\n By default, it compares the workspace with the last commit's tree.\n\n This implementation differs from `git diff` since DVC doesn't have\n the concept of `index`, but it keeps the same interface, thus,\n `dvc diff` would be the same as `dvc diff HEAD`.\n \"\"\"\n\n if self.scm.no_commits:\n return {}\n\n b_rev = b_rev if b_rev else \"workspace\"\n results = {}\n for rev in self.brancher(revs=[a_rev, b_rev]):\n if rev == \"workspace\" and rev != b_rev:\n # brancher always returns workspace, but we only need to compute\n # workspace paths/checksums if b_rev was None\n continue\n results[rev] = _paths_checksums(self)\n\n old = results[a_rev]\n new = results[b_rev]\n\n # Compare paths between the old and new tree.\n # set() efficiently converts dict keys to a set\n added = sorted(set(new) - set(old))\n deleted_or_missing = set(old) - set(new)\n if b_rev == \"workspace\":\n # missing status is only applicable when diffing local workspace\n # against a commit\n missing = sorted(_filter_missing(self, deleted_or_missing))\n else:\n missing = []\n deleted = sorted(deleted_or_missing - set(missing))\n modified = sorted(set(old) & set(new))\n\n ret = {\n \"added\": [{\"path\": path, \"hash\": new[path]} for path in added],\n 
\"deleted\": [{\"path\": path, \"hash\": old[path]} for path in deleted],\n \"modified\": [\n {\"path\": path, \"hash\": {\"old\": old[path], \"new\": new[path]}}\n for path in modified\n if old[path] != new[path]\n ],\n \"not in cache\": [\n {\"path\": path, \"hash\": old[path]} for path in missing\n ],\n }\n\n return ret if any(ret.values()) else {}\n\n\ndef _paths_checksums(repo):\n \"\"\"\n A dictionary of checksums addressed by relpaths collected from\n the current tree outputs.\n\n To help distinguish between a directory and a file output,\n the former one will come with a trailing slash in the path:\n\n directory: \"data/\"\n file: \"data\"\n \"\"\"\n\n return dict(_output_paths(repo))\n\n\ndef _output_paths(repo):\n repo_tree = RepoTree(repo, stream=True)\n on_working_tree = isinstance(repo.tree, LocalTree)\n\n def _exists(output):\n if on_working_tree:\n return output.exists\n return True\n\n def _to_path(output):\n return (\n str(output)\n if not output.is_dir_checksum\n else os.path.join(str(output), \"\")\n )\n\n def _to_checksum(output):\n if on_working_tree:\n return repo.cache.local.tree.get_hash(output.path_info).value\n return output.hash_info.value\n\n for stage in repo.stages:\n for output in stage.outs:\n if _exists(output):\n yield _to_path(output), _to_checksum(output)\n if output.is_dir_checksum:\n yield from _dir_output_paths(repo_tree, output)\n\n\ndef _dir_output_paths(repo_tree, output):\n from dvc.config import NoRemoteError\n\n try:\n for fname in repo_tree.walk_files(output.path_info):\n yield str(fname), repo_tree.get_file_hash(fname).value\n except NoRemoteError:\n logger.warning(\"dir cache entry for '%s' is missing\", output)\n\n\ndef _filter_missing(repo, paths):\n repo_tree = RepoTree(repo, stream=True)\n for path in paths:\n metadata = repo_tree.metadata(path)\n if metadata.is_dvc:\n out = metadata.outs[0]\n if out.status()[str(out)] == \"not in cache\":\n yield path\n", "path": "dvc/repo/diff.py"}], "after_files": [{"content": "import logging\nimport os\n\nfrom dvc.repo import locked\nfrom dvc.tree.local import LocalTree\nfrom dvc.tree.repo import RepoTree\n\nlogger = logging.getLogger(__name__)\n\n\n@locked\ndef diff(self, a_rev=\"HEAD\", b_rev=None):\n \"\"\"\n By default, it compares the workspace with the last commit's tree.\n\n This implementation differs from `git diff` since DVC doesn't have\n the concept of `index`, but it keeps the same interface, thus,\n `dvc diff` would be the same as `dvc diff HEAD`.\n \"\"\"\n\n if self.scm.no_commits:\n return {}\n\n b_rev = b_rev if b_rev else \"workspace\"\n results = {}\n for rev in self.brancher(revs=[a_rev, b_rev]):\n if rev == \"workspace\" and rev != b_rev:\n # brancher always returns workspace, but we only need to compute\n # workspace paths/checksums if b_rev was None\n continue\n results[rev] = _paths_checksums(self)\n\n old = results[a_rev]\n new = results[b_rev]\n\n # Compare paths between the old and new tree.\n # set() efficiently converts dict keys to a set\n added = sorted(set(new) - set(old))\n deleted_or_missing = set(old) - set(new)\n if b_rev == \"workspace\":\n # missing status is only applicable when diffing local workspace\n # against a commit\n missing = sorted(_filter_missing(self, deleted_or_missing))\n else:\n missing = []\n deleted = sorted(deleted_or_missing - set(missing))\n modified = sorted(set(old) & set(new))\n\n ret = {\n \"added\": [{\"path\": path, \"hash\": new[path]} for path in added],\n \"deleted\": [{\"path\": path, \"hash\": old[path]} for path in deleted],\n 
\"modified\": [\n {\"path\": path, \"hash\": {\"old\": old[path], \"new\": new[path]}}\n for path in modified\n if old[path] != new[path]\n ],\n \"not in cache\": [\n {\"path\": path, \"hash\": old[path]} for path in missing\n ],\n }\n\n return ret if any(ret.values()) else {}\n\n\ndef _paths_checksums(repo):\n \"\"\"\n A dictionary of checksums addressed by relpaths collected from\n the current tree outputs.\n\n To help distinguish between a directory and a file output,\n the former one will come with a trailing slash in the path:\n\n directory: \"data/\"\n file: \"data\"\n \"\"\"\n\n return dict(_output_paths(repo))\n\n\ndef _output_paths(repo):\n repo_tree = RepoTree(repo, stream=True)\n on_working_tree = isinstance(repo.tree, LocalTree)\n\n def _exists(output):\n if on_working_tree:\n return output.exists\n return True\n\n def _to_path(output):\n return (\n str(output)\n if not output.is_dir_checksum\n else os.path.join(str(output), \"\")\n )\n\n def _to_checksum(output):\n if on_working_tree:\n return repo.cache.local.tree.get_hash(output.path_info).value\n return output.hash_info.value\n\n for stage in repo.stages:\n for output in stage.outs:\n if _exists(output):\n yield _to_path(output), _to_checksum(output)\n if output.is_dir_checksum:\n yield from _dir_output_paths(repo_tree, output)\n\n\ndef _dir_output_paths(repo_tree, output):\n from dvc.config import NoRemoteError\n\n try:\n for fname in repo_tree.walk_files(output.path_info):\n yield str(fname), repo_tree.get_file_hash(fname).value\n except NoRemoteError:\n logger.warning(\"dir cache entry for '%s' is missing\", output)\n\n\ndef _filter_missing(repo, paths):\n repo_tree = RepoTree(repo, stream=True)\n for path in paths:\n metadata = repo_tree.metadata(path)\n if metadata.is_dvc:\n out = metadata.outs[0]\n if out.status().get(str(out)) == \"not in cache\":\n yield path\n", "path": "dvc/repo/diff.py"}]} | 1,925 | 113 |
gh_patches_debug_8273 | rasdani/github-patches | git_diff | cocotb__cocotb-1015 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
XGMII interface issue with 8-byte width without interleaving
The XGMII interface is 32 bits with 4 control bits, or 2 such words in a 64-bit word (with 8 control bits). Currently the XGMII monitor doesn't support the case where XGMII_START is offset to the second word, at bit position 32:40. I have made an update which works for me locally.
if ctrl[4] and bytes[4] == _XGMII_START:
    ctrl, bytes = ctrl[5:], bytes[5:]
    while self._add_payload(ctrl, bytes):
        yield clk
        ctrl, bytes = self._get_bytes()
located at line 136 of `cocotb/monitors/xgmii.py`
--- END ISSUE ---
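A self-contained sketch of the lane layout being described, assuming a 64-bit bus made of two 32-bit XGMII lanes: the start control character can legally appear at byte 0 of either lane, so a monitor that only inspects byte 0 misses frames that begin in the upper lane.

```python
_XGMII_START = "\xFB"

def find_start_lane(ctrl, bytes_):
    """Return the byte index where a frame starts, or None.

    For an 8-byte bus the start code may sit at byte 0 (lower lane)
    or byte 4 (upper lane); 4-byte buses only have byte 0.
    """
    candidates = (0, 4) if len(bytes_) == 8 else (0,)
    for lane in candidates:
        if ctrl[lane] and bytes_[lane] == _XGMII_START:
            return lane
    return None

# Example: frame starting in the upper lane of a 64-bit word.
ctrl = [True, False, False, False, True, False, False, False]
data = ["\x07", "\x00", "\x00", "\x00", "\xFB", "\x55", "\x55", "\x55"]
assert find_start_lane(ctrl, data) == 4
```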
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cocotb/monitors/xgmii.py`
Content:
```
1 # Copyright (c) 2013 Potential Ventures Ltd
2 # All rights reserved.
3 #
4 # Redistribution and use in source and binary forms, with or without
5 # modification, are permitted provided that the following conditions are met:
6 # * Redistributions of source code must retain the above copyright
7 # notice, this list of conditions and the following disclaimer.
8 # * Redistributions in binary form must reproduce the above copyright
9 # notice, this list of conditions and the following disclaimer in the
10 # documentation and/or other materials provided with the distribution.
11 # * Neither the name of Potential Ventures Ltd nor the names of its
12 # contributors may be used to endorse or promote products derived from this
13 # software without specific prior written permission.
14 #
15 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
16 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
19 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
26 """Monitor for XGMII (10 Gigabit Media Independent Interface)."""
27
28 # By default cast to scapy packets, otherwise we pass the string of bytes
29 try:
30 from scapy.all import Ether
31 _have_scapy = True
32 except ImportError:
33 _have_scapy = False
34
35 import struct
36 import zlib
37
38 import cocotb
39 from cocotb.utils import hexdump
40 from cocotb.monitors import Monitor
41 from cocotb.triggers import RisingEdge, ReadOnly
42
43 _XGMII_IDLE = "\x07" # noqa
44 _XGMII_START = "\xFB" # noqa
45 _XGMII_TERMINATE = "\xFD" # noqa
46
47 _PREAMBLE_SFD = "\x55\x55\x55\x55\x55\x55\xD5"
48
49
50 class XGMII(Monitor):
51 """XGMII (10 Gigabit Media Independent Interface) Monitor.
52
53 Assumes a single vector, either 4 or 8 bytes plus control bit for each byte.
54
55 If interleaved is ``True`` then the control bits are adjacent to the bytes.
56 """
57
58 def __init__(self, signal, clock, interleaved=True, callback=None,
59 event=None):
60 """Args:
61 signal (SimHandle): The XGMII data bus.
62 clock (SimHandle): The associated clock (assumed to be
63 driven by another coroutine).
64 interleaved (bool, optional): Whether control bits are interleaved
65 with the data bytes or not.
66
67 If interleaved the bus is
68 byte0, byte0_control, byte1, byte1_control, ...
69
70 Otherwise expect
71 byte0, byte1, ..., byte0_control, byte1_control, ...
72 """
73 self.log = signal._log
74 self.clock = clock
75 self.signal = signal
76 self.bytes = len(self.signal) / 9
77 self.interleaved = interleaved
78 Monitor.__init__(self, callback=callback, event=event)
79
80 def _get_bytes(self):
81 """Take a value and extract the individual bytes and control bits.
82
83 Returns a tuple of lists.
84 """
85 value = self.signal.value.integer
86 bytes = []
87 ctrls = []
88 byte_shift = 8
89 ctrl_base = 8 * self.bytes
90 ctrl_inc = 1
91 if self.interleaved:
92 byte_shift += 1
93 ctrl_base = 8
94 ctrl_inc = 9
95
96 for i in range(self.bytes):
97 bytes.append(chr((value >> (i * byte_shift)) & 0xff))
98 ctrls.append(bool(value & (1 << ctrl_base)))
99 ctrl_base += ctrl_inc
100
101 return ctrls, bytes
102
103 def _add_payload(self, ctrl, bytes):
104 """Take the payload and return true if more to come"""
105 for index, byte in enumerate(bytes):
106 if ctrl[index]:
107 if byte != _XGMII_TERMINATE:
108 self.log.error("Got control character in XGMII payload")
109 self.log.info("data = :" +
110 " ".join(["%02X" % ord(b) for b in bytes]))
111 self.log.info("ctrl = :" +
112 " ".join(["%s" % str(c) for c in ctrl]))
113 self._pkt = ""
114 return False
115
116 self._pkt += byte
117 return True
118
119 @cocotb.coroutine
120 def _monitor_recv(self):
121 clk = RisingEdge(self.clock)
122 self._pkt = ""
123
124 while True:
125 yield clk
126 ctrl, bytes = self._get_bytes()
127
128 if ctrl[0] and bytes[0] == _XGMII_START:
129
130 ctrl, bytes = ctrl[1:], bytes[1:]
131
132 while self._add_payload(ctrl, bytes):
133 yield clk
134 ctrl, bytes = self._get_bytes()
135
136 if self._pkt:
137
138 self.log.debug("Received:\n%s" % (hexdump(self._pkt)))
139
140 if len(self._pkt) < 64 + 7:
141 self.log.error("Received a runt frame!")
142 if len(self._pkt) < 12:
143 self.log.error("No data to extract")
144 self._pkt = ""
145 continue
146
147 preamble_sfd = self._pkt[0:7]
148 crc32 = self._pkt[-4:]
149 payload = self._pkt[7:-4]
150
151 if preamble_sfd != _PREAMBLE_SFD:
152 self.log.error("Got a frame with unknown preamble/SFD")
153 self.log.error(hexdump(preamble_sfd))
154 self._pkt = ""
155 continue
156
157 expected_crc = struct.pack("<I",
158 (zlib.crc32(payload) & 0xFFFFFFFF))
159
160 if crc32 != expected_crc:
161 self.log.error("Incorrect CRC on received packet")
162 self.log.info("Expected: %s" % (hexdump(expected_crc)))
163 self.log.info("Received: %s" % (hexdump(crc32)))
164
165 # Use scapy to decode the packet
166 if _have_scapy:
167 p = Ether(payload)
168 self.log.debug("Received decoded packet:\n%s" % p.show2())
169 else:
170 p = payload
171
172 self._recv(p)
173 self._pkt = ""
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cocotb/monitors/xgmii.py b/cocotb/monitors/xgmii.py
--- a/cocotb/monitors/xgmii.py
+++ b/cocotb/monitors/xgmii.py
@@ -133,6 +133,15 @@
yield clk
ctrl, bytes = self._get_bytes()
+ elif self.bytes == 8 :
+ if ctrl[4] and bytes[4] == _XGMII_START:
+
+ ctrl, bytes = ctrl[5:], bytes[5:]
+
+ while self._add_payload(ctrl, bytes):
+ yield clk
+ ctrl, bytes = self._get_bytes()
+
if self._pkt:
self.log.debug("Received:\n%s" % (hexdump(self._pkt)))
| {"golden_diff": "diff --git a/cocotb/monitors/xgmii.py b/cocotb/monitors/xgmii.py\n--- a/cocotb/monitors/xgmii.py\n+++ b/cocotb/monitors/xgmii.py\n@@ -133,6 +133,15 @@\n yield clk\n ctrl, bytes = self._get_bytes()\n \n+ elif self.bytes == 8 :\n+ if ctrl[4] and bytes[4] == _XGMII_START:\n+\n+ ctrl, bytes = ctrl[5:], bytes[5:]\n+\n+ while self._add_payload(ctrl, bytes):\n+ yield clk\n+ ctrl, bytes = self._get_bytes()\n+\n if self._pkt:\n \n self.log.debug(\"Received:\\n%s\" % (hexdump(self._pkt)))\n", "issue": "XGMII interface issue in 8 byte width without interleaving\nXGMII interface is 32 bit with 4 bit control or 2 words in a 64 bit word (and 8 bit control). Currently the XGMII monitor doesnt support if the XGMII_START is offset to the second 32:40 bit position. I have made an update which works for me locally.\r\n\r\n if ctrl[4] and bytes[4] == _XGMII_START:\r\n\r\n ctrl, bytes = ctrl[5:], bytes[5:]\r\n\r\n while self._add_payload(ctrl, bytes):\r\n yield clk\r\n ctrl, bytes = self._get_bytes()\r\n\r\nlocated at line 136 in xgmii.py in monitors\n", "before_files": [{"content": "# Copyright (c) 2013 Potential Ventures Ltd\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Monitor for XGMII (10 Gigabit Media Independent Interface).\"\"\"\n\n# By default cast to scapy packets, otherwise we pass the string of bytes\ntry:\n from scapy.all import Ether\n _have_scapy = True\nexcept ImportError:\n _have_scapy = False\n\nimport struct\nimport zlib\n\nimport cocotb\nfrom cocotb.utils import hexdump\nfrom cocotb.monitors import Monitor\nfrom cocotb.triggers import RisingEdge, ReadOnly\n\n_XGMII_IDLE = \"\\x07\" # noqa\n_XGMII_START = \"\\xFB\" # noqa\n_XGMII_TERMINATE = \"\\xFD\" # noqa\n\n_PREAMBLE_SFD = \"\\x55\\x55\\x55\\x55\\x55\\x55\\xD5\"\n\n\nclass XGMII(Monitor):\n \"\"\"XGMII (10 Gigabit Media Independent Interface) Monitor.\n\n Assumes a single vector, either 4 or 8 bytes plus control bit for each byte.\n\n If interleaved is ``True`` then the control bits are adjacent to the bytes.\n \"\"\"\n\n def __init__(self, signal, clock, interleaved=True, callback=None,\n event=None):\n \"\"\"Args:\n signal (SimHandle): The XGMII data bus.\n clock (SimHandle): The associated clock (assumed to be\n driven by another coroutine).\n interleaved (bool, optional): Whether control bits are interleaved\n with the data bytes or not.\n\n If interleaved the bus is\n byte0, byte0_control, byte1, byte1_control, ...\n\n Otherwise expect\n byte0, byte1, ..., byte0_control, byte1_control, ...\n \"\"\"\n self.log = signal._log\n self.clock = clock\n self.signal = signal\n self.bytes = len(self.signal) / 9\n self.interleaved = interleaved\n Monitor.__init__(self, callback=callback, event=event)\n\n def _get_bytes(self):\n \"\"\"Take a value and extract the individual bytes and control bits.\n\n Returns a tuple of lists.\n \"\"\"\n value = self.signal.value.integer\n bytes = []\n ctrls = []\n byte_shift = 8\n ctrl_base = 8 * self.bytes\n ctrl_inc = 1\n if self.interleaved:\n byte_shift += 1\n ctrl_base = 8\n ctrl_inc = 9\n\n for i in range(self.bytes):\n bytes.append(chr((value >> (i * byte_shift)) & 0xff))\n ctrls.append(bool(value & (1 << ctrl_base)))\n ctrl_base += ctrl_inc\n\n return ctrls, bytes\n\n def _add_payload(self, ctrl, bytes):\n \"\"\"Take the payload and return true if more to come\"\"\"\n for index, byte in enumerate(bytes):\n if ctrl[index]:\n if byte != _XGMII_TERMINATE:\n self.log.error(\"Got control character in XGMII payload\")\n self.log.info(\"data = :\" +\n \" \".join([\"%02X\" % ord(b) for b in bytes]))\n self.log.info(\"ctrl = :\" +\n \" \".join([\"%s\" % str(c) for c in ctrl]))\n self._pkt = \"\"\n return False\n\n self._pkt += byte\n return True\n\n @cocotb.coroutine\n def _monitor_recv(self):\n clk = RisingEdge(self.clock)\n self._pkt = \"\"\n\n while True:\n yield clk\n ctrl, bytes = self._get_bytes()\n\n if ctrl[0] and bytes[0] == _XGMII_START:\n\n ctrl, bytes = ctrl[1:], bytes[1:]\n\n while self._add_payload(ctrl, bytes):\n yield clk\n ctrl, bytes = self._get_bytes()\n\n if self._pkt:\n\n self.log.debug(\"Received:\\n%s\" % (hexdump(self._pkt)))\n\n if len(self._pkt) < 64 + 7:\n self.log.error(\"Received a runt frame!\")\n if 
len(self._pkt) < 12:\n self.log.error(\"No data to extract\")\n self._pkt = \"\"\n continue\n\n preamble_sfd = self._pkt[0:7]\n crc32 = self._pkt[-4:]\n payload = self._pkt[7:-4]\n\n if preamble_sfd != _PREAMBLE_SFD:\n self.log.error(\"Got a frame with unknown preamble/SFD\")\n self.log.error(hexdump(preamble_sfd))\n self._pkt = \"\"\n continue\n\n expected_crc = struct.pack(\"<I\",\n (zlib.crc32(payload) & 0xFFFFFFFF))\n\n if crc32 != expected_crc:\n self.log.error(\"Incorrect CRC on received packet\")\n self.log.info(\"Expected: %s\" % (hexdump(expected_crc)))\n self.log.info(\"Received: %s\" % (hexdump(crc32)))\n\n # Use scapy to decode the packet\n if _have_scapy:\n p = Ether(payload)\n self.log.debug(\"Received decoded packet:\\n%s\" % p.show2())\n else:\n p = payload\n\n self._recv(p)\n self._pkt = \"\"\n", "path": "cocotb/monitors/xgmii.py"}], "after_files": [{"content": "# Copyright (c) 2013 Potential Ventures Ltd\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Monitor for XGMII (10 Gigabit Media Independent Interface).\"\"\"\n\n# By default cast to scapy packets, otherwise we pass the string of bytes\ntry:\n from scapy.all import Ether\n _have_scapy = True\nexcept ImportError:\n _have_scapy = False\n\nimport struct\nimport zlib\n\nimport cocotb\nfrom cocotb.utils import hexdump\nfrom cocotb.monitors import Monitor\nfrom cocotb.triggers import RisingEdge, ReadOnly\n\n_XGMII_IDLE = \"\\x07\" # noqa\n_XGMII_START = \"\\xFB\" # noqa\n_XGMII_TERMINATE = \"\\xFD\" # noqa\n\n_PREAMBLE_SFD = \"\\x55\\x55\\x55\\x55\\x55\\x55\\xD5\"\n\n\nclass XGMII(Monitor):\n \"\"\"XGMII (10 Gigabit Media Independent Interface) Monitor.\n\n Assumes a single vector, either 4 or 8 bytes plus control bit for each byte.\n\n If interleaved is ``True`` then the control bits are adjacent to the bytes.\n \"\"\"\n\n def __init__(self, signal, clock, interleaved=True, callback=None,\n event=None):\n \"\"\"Args:\n signal (SimHandle): The XGMII data bus.\n clock (SimHandle): The associated clock (assumed to be\n driven by another coroutine).\n interleaved (bool, optional): Whether control bits are interleaved\n with the data bytes or not.\n\n If interleaved the bus is\n byte0, byte0_control, byte1, byte1_control, ...\n\n Otherwise expect\n byte0, byte1, ..., byte0_control, byte1_control, ...\n \"\"\"\n self.log = signal._log\n self.clock = clock\n self.signal = signal\n self.bytes = len(self.signal) / 9\n self.interleaved = interleaved\n Monitor.__init__(self, callback=callback, event=event)\n\n def _get_bytes(self):\n \"\"\"Take a value and extract the individual bytes and control bits.\n\n Returns a tuple of lists.\n \"\"\"\n value = self.signal.value.integer\n bytes = []\n ctrls = []\n byte_shift = 8\n ctrl_base = 8 * self.bytes\n ctrl_inc = 1\n if self.interleaved:\n byte_shift += 1\n ctrl_base = 8\n ctrl_inc = 9\n\n for i in range(self.bytes):\n bytes.append(chr((value >> (i * byte_shift)) & 0xff))\n ctrls.append(bool(value & (1 << ctrl_base)))\n ctrl_base += ctrl_inc\n\n return ctrls, bytes\n\n def _add_payload(self, ctrl, bytes):\n \"\"\"Take the payload and return true if more to come\"\"\"\n for index, byte in enumerate(bytes):\n if ctrl[index]:\n if byte != _XGMII_TERMINATE:\n self.log.error(\"Got control character in XGMII payload\")\n self.log.info(\"data = :\" +\n \" \".join([\"%02X\" % ord(b) for b in bytes]))\n self.log.info(\"ctrl = :\" +\n \" \".join([\"%s\" % str(c) for c in ctrl]))\n self._pkt = \"\"\n return False\n\n self._pkt += byte\n return True\n\n @cocotb.coroutine\n def _monitor_recv(self):\n clk = RisingEdge(self.clock)\n self._pkt = \"\"\n\n while True:\n yield clk\n ctrl, bytes = self._get_bytes()\n\n if ctrl[0] and bytes[0] == _XGMII_START:\n\n ctrl, bytes = ctrl[1:], bytes[1:]\n\n while self._add_payload(ctrl, bytes):\n yield clk\n ctrl, bytes = self._get_bytes()\n\n elif self.bytes == 8 :\n if ctrl[4] and bytes[4] == _XGMII_START:\n\n ctrl, bytes = ctrl[5:], bytes[5:]\n\n while self._add_payload(ctrl, bytes):\n yield clk\n ctrl, 
bytes = self._get_bytes()\n\n if self._pkt:\n\n self.log.debug(\"Received:\\n%s\" % (hexdump(self._pkt)))\n\n if len(self._pkt) < 64 + 7:\n self.log.error(\"Received a runt frame!\")\n if len(self._pkt) < 12:\n self.log.error(\"No data to extract\")\n self._pkt = \"\"\n continue\n\n preamble_sfd = self._pkt[0:7]\n crc32 = self._pkt[-4:]\n payload = self._pkt[7:-4]\n\n if preamble_sfd != _PREAMBLE_SFD:\n self.log.error(\"Got a frame with unknown preamble/SFD\")\n self.log.error(hexdump(preamble_sfd))\n self._pkt = \"\"\n continue\n\n expected_crc = struct.pack(\"<I\",\n (zlib.crc32(payload) & 0xFFFFFFFF))\n\n if crc32 != expected_crc:\n self.log.error(\"Incorrect CRC on received packet\")\n self.log.info(\"Expected: %s\" % (hexdump(expected_crc)))\n self.log.info(\"Received: %s\" % (hexdump(crc32)))\n\n # Use scapy to decode the packet\n if _have_scapy:\n p = Ether(payload)\n self.log.debug(\"Received decoded packet:\\n%s\" % p.show2())\n else:\n p = payload\n\n self._recv(p)\n self._pkt = \"\"\n", "path": "cocotb/monitors/xgmii.py"}]} | 2,304 | 183 |
gh_patches_debug_28115 | rasdani/github-patches | git_diff | python__mypy-14035 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
misc/upload-pypi.py errors when uploading wheel for platform emscripten_3_1_14_wasm32
When running `python3 misc/upload-pypi.py 0.990` there was an error when uploading the wheel for platform emscripten_3_1_14_wasm32:
```
Uploading mypy-0.990-cp310-cp310-win_amd64.whl
100% ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 8.7/8.7 MB • 00:00 • 274.7 MB/s
Uploading mypy-0.990-cp310-cp310-emscripten_3_1_14_wasm32.whl
100% ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.3/6.3 MB • 00:00 • 268.6 MB/s
WARNING Error during upload. Retry with the --verbose option for more details.
ERROR HTTPError: 400 Bad Request from https://upload.pypi.org/legacy/
Binary wheel 'mypy-0.990-cp310-cp310-emscripten_3_1_14_wasm32.whl' has an unsupported platform tag 'emscripten_3_1_14_wasm32'.
Traceback (most recent call last):
File "/home/svalentin/src/mypy/misc/upload-pypi.py", line 139, in <module>
main()
File "/home/svalentin/src/mypy/misc/upload-pypi.py", line 135, in main
upload_to_pypi(args.version, args.dry_run)
File "/home/svalentin/src/mypy/misc/upload-pypi.py", line 123, in upload_to_pypi
upload_dist(dist, dry_run)
File "/home/svalentin/src/mypy/misc/upload-pypi.py", line 103, in upload_dist
subprocess.check_call(cmd)
File "/usr/lib/python3.9/subprocess.py", line 373, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '[PosixPath('/tmp/tmp5173rirz/venv/bin/twine'), 'upload', PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990.tar.gz'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-py3-none-any.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp39-cp39-win_amd64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp39-cp39-musllinux_1_1_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp39-cp39-macosx_11_0_arm64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp39-cp39-macosx_10_9_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp39-cp39-macosx_10_9_universal2.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp38-cp38-win_amd64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp38-cp38-musllinux_1_1_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp38-cp38-macosx_11_0_arm64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp38-cp38-macosx_10_9_universal2.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp38-cp38-macosx_10_9_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp37-cp37m-win_amd64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp37-cp37m-musllinux_1_1_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp37-cp37m-macosx_10_9_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp311-cp311-win_amd64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp311-cp311-macosx_10_9_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp310-cp310-win_amd64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp310-cp310-emscripten_3_1_14_wasm32.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp310-cp310-macosx_11_0_arm64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp310-cp310-musllinux_1_1_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp311-cp311-macosx_11_0_arm64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp310-cp310-macosx_10_9_universal2.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp311-cp311-macosx_10_9_universal2.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp311-cp311-musllinux_1_1_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp310-cp310-macosx_10_9_x86_64.whl')]' returned non-zero exit status 1.
```
--- END ISSUE ---
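PyPI rejects wheels whose platform tag it does not recognise, and `emscripten_3_1_14_wasm32` is such a tag, so the upload step has to skip those wheels instead of passing every `.whl` from the release to twine. A minimal sketch of the filename filter, assuming the `wasm32` suffix is the only tag that needs excluding (Python 3.9+ for `str.removesuffix`):

```python
def ok_for_pypi(name: str) -> bool:
    """Keep sdists and wheels, but drop wasm32 wheels that PyPI rejects."""
    if not (name.endswith(".tar.gz") or name.endswith(".whl")):
        return False
    stem = name.removesuffix(".tar.gz").removesuffix(".whl")
    return not stem.endswith("wasm32")

assert ok_for_pypi("mypy-0.990.tar.gz")
assert ok_for_pypi("mypy-0.990-py3-none-any.whl")
assert not ok_for_pypi("mypy-0.990-cp310-cp310-emscripten_3_1_14_wasm32.whl")
```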
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `misc/upload-pypi.py`
Content:
```
1 #!/usr/bin/env python3
2 """Upload mypy packages to PyPI.
3
4 You must first tag the release, use `git push --tags` and wait for the wheel build in CI to complete.
5
6 """
7
8 from __future__ import annotations
9
10 import argparse
11 import contextlib
12 import json
13 import re
14 import shutil
15 import subprocess
16 import tarfile
17 import tempfile
18 import venv
19 from concurrent.futures import ThreadPoolExecutor
20 from pathlib import Path
21 from typing import Any, Iterator
22 from urllib.request import urlopen
23
24 BASE = "https://api.github.com/repos"
25 REPO = "mypyc/mypy_mypyc-wheels"
26
27
28 def is_whl_or_tar(name: str) -> bool:
29 return name.endswith(".tar.gz") or name.endswith(".whl")
30
31
32 def get_release_for_tag(tag: str) -> dict[str, Any]:
33 with urlopen(f"{BASE}/{REPO}/releases/tags/{tag}") as f:
34 data = json.load(f)
35 assert isinstance(data, dict)
36 assert data["tag_name"] == tag
37 return data
38
39
40 def download_asset(asset: dict[str, Any], dst: Path) -> Path:
41 name = asset["name"]
42 assert isinstance(name, str)
43 download_url = asset["browser_download_url"]
44 assert is_whl_or_tar(name)
45 with urlopen(download_url) as src_file:
46 with open(dst / name, "wb") as dst_file:
47 shutil.copyfileobj(src_file, dst_file)
48 return dst / name
49
50
51 def download_all_release_assets(release: dict[str, Any], dst: Path) -> None:
52 print("Downloading assets...")
53 with ThreadPoolExecutor() as e:
54 for asset in e.map(lambda asset: download_asset(asset, dst), release["assets"]):
55 print(f"Downloaded {asset}")
56
57
58 def check_sdist(dist: Path, version: str) -> None:
59 tarfiles = list(dist.glob("*.tar.gz"))
60 assert len(tarfiles) == 1
61 sdist = tarfiles[0]
62 assert version in sdist.name
63 with tarfile.open(sdist) as f:
64 version_py = f.extractfile(f"{sdist.name[:-len('.tar.gz')]}/mypy/version.py")
65 assert version_py is not None
66 version_py_contents = version_py.read().decode("utf-8")
67
68 # strip a git hash from our version, if necessary, since that's not present in version.py
69 match = re.match(r"(.*\+dev).*$", version)
70 hashless_version = match.group(1) if match else version
71
72 assert (
73 f'"{hashless_version}"' in version_py_contents
74 ), "Version does not match version.py in sdist"
75
76
77 def spot_check_dist(dist: Path, version: str) -> None:
78 items = [item for item in dist.iterdir() if is_whl_or_tar(item.name)]
79 assert len(items) > 10
80 assert all(version in item.name for item in items)
81 assert any(item.name.endswith("py3-none-any.whl") for item in items)
82
83
84 @contextlib.contextmanager
85 def tmp_twine() -> Iterator[Path]:
86 with tempfile.TemporaryDirectory() as tmp_dir:
87 tmp_venv_dir = Path(tmp_dir) / "venv"
88 venv.create(tmp_venv_dir, with_pip=True)
89 pip_exe = tmp_venv_dir / "bin" / "pip"
90 subprocess.check_call([pip_exe, "install", "twine"])
91 yield tmp_venv_dir / "bin" / "twine"
92
93
94 def upload_dist(dist: Path, dry_run: bool = True) -> None:
95 with tmp_twine() as twine:
96 files = [item for item in dist.iterdir() if is_whl_or_tar(item.name)]
97 cmd: list[Any] = [twine, "upload"]
98 cmd += files
99 if dry_run:
100 print("[dry run] " + " ".join(map(str, cmd)))
101 else:
102 print(" ".join(map(str, cmd)))
103 subprocess.check_call(cmd)
104
105
106 def upload_to_pypi(version: str, dry_run: bool = True) -> None:
107 assert re.match(r"v?0\.[0-9]{3}(\+\S+)?$", version)
108 if "dev" in version:
109 assert dry_run, "Must use --dry-run with dev versions of mypy"
110 if version.startswith("v"):
111 version = version[1:]
112
113 target_dir = tempfile.mkdtemp()
114 dist = Path(target_dir) / "dist"
115 dist.mkdir()
116 print(f"Temporary target directory: {target_dir}")
117
118 release = get_release_for_tag(f"v{version}")
119 download_all_release_assets(release, dist)
120
121 spot_check_dist(dist, version)
122 check_sdist(dist, version)
123 upload_dist(dist, dry_run)
124 print("<< All done! >>")
125
126
127 def main() -> None:
128 parser = argparse.ArgumentParser(description="PyPI mypy package uploader")
129 parser.add_argument(
130 "--dry-run", action="store_true", default=False, help="Don't actually upload packages"
131 )
132 parser.add_argument("version", help="mypy version to release")
133 args = parser.parse_args()
134
135 upload_to_pypi(args.version, args.dry_run)
136
137
138 if __name__ == "__main__":
139 main()
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/misc/upload-pypi.py b/misc/upload-pypi.py
--- a/misc/upload-pypi.py
+++ b/misc/upload-pypi.py
@@ -29,6 +29,21 @@
return name.endswith(".tar.gz") or name.endswith(".whl")
+def item_ok_for_pypi(name: str) -> bool:
+ if not is_whl_or_tar(name):
+ return False
+
+ if name.endswith(".tar.gz"):
+ name = name[:-7]
+ if name.endswith(".whl"):
+ name = name[:-4]
+
+ if name.endswith("wasm32"):
+ return False
+
+ return True
+
+
def get_release_for_tag(tag: str) -> dict[str, Any]:
with urlopen(f"{BASE}/{REPO}/releases/tags/{tag}") as f:
data = json.load(f)
@@ -75,7 +90,7 @@
def spot_check_dist(dist: Path, version: str) -> None:
- items = [item for item in dist.iterdir() if is_whl_or_tar(item.name)]
+ items = [item for item in dist.iterdir() if item_ok_for_pypi(item.name)]
assert len(items) > 10
assert all(version in item.name for item in items)
assert any(item.name.endswith("py3-none-any.whl") for item in items)
@@ -93,7 +108,7 @@
def upload_dist(dist: Path, dry_run: bool = True) -> None:
with tmp_twine() as twine:
- files = [item for item in dist.iterdir() if is_whl_or_tar(item.name)]
+ files = [item for item in dist.iterdir() if item_ok_for_pypi(item.name)]
cmd: list[Any] = [twine, "upload"]
cmd += files
if dry_run:
| {"golden_diff": "diff --git a/misc/upload-pypi.py b/misc/upload-pypi.py\n--- a/misc/upload-pypi.py\n+++ b/misc/upload-pypi.py\n@@ -29,6 +29,21 @@\n return name.endswith(\".tar.gz\") or name.endswith(\".whl\")\n \n \n+def item_ok_for_pypi(name: str) -> bool:\n+ if not is_whl_or_tar(name):\n+ return False\n+\n+ if name.endswith(\".tar.gz\"):\n+ name = name[:-7]\n+ if name.endswith(\".whl\"):\n+ name = name[:-4]\n+\n+ if name.endswith(\"wasm32\"):\n+ return False\n+\n+ return True\n+\n+\n def get_release_for_tag(tag: str) -> dict[str, Any]:\n with urlopen(f\"{BASE}/{REPO}/releases/tags/{tag}\") as f:\n data = json.load(f)\n@@ -75,7 +90,7 @@\n \n \n def spot_check_dist(dist: Path, version: str) -> None:\n- items = [item for item in dist.iterdir() if is_whl_or_tar(item.name)]\n+ items = [item for item in dist.iterdir() if item_ok_for_pypi(item.name)]\n assert len(items) > 10\n assert all(version in item.name for item in items)\n assert any(item.name.endswith(\"py3-none-any.whl\") for item in items)\n@@ -93,7 +108,7 @@\n \n def upload_dist(dist: Path, dry_run: bool = True) -> None:\n with tmp_twine() as twine:\n- files = [item for item in dist.iterdir() if is_whl_or_tar(item.name)]\n+ files = [item for item in dist.iterdir() if item_ok_for_pypi(item.name)]\n cmd: list[Any] = [twine, \"upload\"]\n cmd += files\n if dry_run:\n", "issue": "misc/upload-pypi.py errors when uploading wheel for platform emscripten_3_1_14_wasm32\nWhen running `python3 misc/upload-pypi.py 0.990` there was an error when uploading wheel for platform emscripten_3_1_14_wasm32\r\n\r\n```\r\nUploading mypy-0.990-cp310-cp310-win_amd64.whl\r\n100% \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 8.7/8.7 MB \u2022 00:00 \u2022 274.7 MB/s\r\nUploading mypy-0.990-cp310-cp310-emscripten_3_1_14_wasm32.whl\r\n100% \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 6.3/6.3 MB \u2022 00:00 \u2022 268.6 MB/s\r\nWARNING Error during upload. 
Retry with the --verbose option for more details.\r\nERROR HTTPError: 400 Bad Request from https://upload.pypi.org/legacy/\r\n Binary wheel 'mypy-0.990-cp310-cp310-emscripten_3_1_14_wasm32.whl' has an unsupported platform tag 'emscripten_3_1_14_wasm32'.\r\nTraceback (most recent call last):\r\n File \"/home/svalentin/src/mypy/misc/upload-pypi.py\", line 139, in <module>\r\n main()\r\n File \"/home/svalentin/src/mypy/misc/upload-pypi.py\", line 135, in main\r\n upload_to_pypi(args.version, args.dry_run)\r\n File \"/home/svalentin/src/mypy/misc/upload-pypi.py\", line 123, in upload_to_pypi\r\n upload_dist(dist, dry_run)\r\n File \"/home/svalentin/src/mypy/misc/upload-pypi.py\", line 103, in upload_dist\r\n subprocess.check_call(cmd)\r\n File \"/usr/lib/python3.9/subprocess.py\", line 373, in check_call\r\n raise CalledProcessError(retcode, cmd)\r\nsubprocess.CalledProcessError: Command '[PosixPath('/tmp/tmp5173rirz/venv/bin/twine'), 'upload', PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990.tar.gz'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-py3-none-any.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp39-cp39-win_amd64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp39-cp39-musllinux_1_1_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp39-cp39-macosx_11_0_arm64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp39-cp39-macosx_10_9_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp39-cp39-macosx_10_9_universal2.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp38-cp38-win_amd64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp38-cp38-musllinux_1_1_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp38-cp38-macosx_11_0_arm64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp38-cp38-macosx_10_9_universal2.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp38-cp38-macosx_10_9_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp37-cp37m-win_amd64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp37-cp37m-musllinux_1_1_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp37-cp37m-macosx_10_9_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp311-cp311-win_amd64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp311-cp311-macosx_10_9_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp310-cp310-win_amd64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp310-cp310-emscripten_3_1_14_wasm32.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp310-cp310-macosx_11_0_arm64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp310-cp310-musllinux_1_1_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp311-cp311-macosx_11_0_arm64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp310-cp310-macosx_10_9_universal2.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp311-cp311-macosx_10_9_universal2.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp311-cp311-musllinux_1_1_x86_64.whl'), PosixPath('/tmp/tmp8362thlv/dist/mypy-0.990-cp310-cp310-macosx_10_9_x86_64.whl')]' returned non-zero exit 
status 1.\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Upload mypy packages to PyPI.\n\nYou must first tag the release, use `git push --tags` and wait for the wheel build in CI to complete.\n\n\"\"\"\n\nfrom __future__ import annotations\n\nimport argparse\nimport contextlib\nimport json\nimport re\nimport shutil\nimport subprocess\nimport tarfile\nimport tempfile\nimport venv\nfrom concurrent.futures import ThreadPoolExecutor\nfrom pathlib import Path\nfrom typing import Any, Iterator\nfrom urllib.request import urlopen\n\nBASE = \"https://api.github.com/repos\"\nREPO = \"mypyc/mypy_mypyc-wheels\"\n\n\ndef is_whl_or_tar(name: str) -> bool:\n return name.endswith(\".tar.gz\") or name.endswith(\".whl\")\n\n\ndef get_release_for_tag(tag: str) -> dict[str, Any]:\n with urlopen(f\"{BASE}/{REPO}/releases/tags/{tag}\") as f:\n data = json.load(f)\n assert isinstance(data, dict)\n assert data[\"tag_name\"] == tag\n return data\n\n\ndef download_asset(asset: dict[str, Any], dst: Path) -> Path:\n name = asset[\"name\"]\n assert isinstance(name, str)\n download_url = asset[\"browser_download_url\"]\n assert is_whl_or_tar(name)\n with urlopen(download_url) as src_file:\n with open(dst / name, \"wb\") as dst_file:\n shutil.copyfileobj(src_file, dst_file)\n return dst / name\n\n\ndef download_all_release_assets(release: dict[str, Any], dst: Path) -> None:\n print(\"Downloading assets...\")\n with ThreadPoolExecutor() as e:\n for asset in e.map(lambda asset: download_asset(asset, dst), release[\"assets\"]):\n print(f\"Downloaded {asset}\")\n\n\ndef check_sdist(dist: Path, version: str) -> None:\n tarfiles = list(dist.glob(\"*.tar.gz\"))\n assert len(tarfiles) == 1\n sdist = tarfiles[0]\n assert version in sdist.name\n with tarfile.open(sdist) as f:\n version_py = f.extractfile(f\"{sdist.name[:-len('.tar.gz')]}/mypy/version.py\")\n assert version_py is not None\n version_py_contents = version_py.read().decode(\"utf-8\")\n\n # strip a git hash from our version, if necessary, since that's not present in version.py\n match = re.match(r\"(.*\\+dev).*$\", version)\n hashless_version = match.group(1) if match else version\n\n assert (\n f'\"{hashless_version}\"' in version_py_contents\n ), \"Version does not match version.py in sdist\"\n\n\ndef spot_check_dist(dist: Path, version: str) -> None:\n items = [item for item in dist.iterdir() if is_whl_or_tar(item.name)]\n assert len(items) > 10\n assert all(version in item.name for item in items)\n assert any(item.name.endswith(\"py3-none-any.whl\") for item in items)\n\n\[email protected]\ndef tmp_twine() -> Iterator[Path]:\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_venv_dir = Path(tmp_dir) / \"venv\"\n venv.create(tmp_venv_dir, with_pip=True)\n pip_exe = tmp_venv_dir / \"bin\" / \"pip\"\n subprocess.check_call([pip_exe, \"install\", \"twine\"])\n yield tmp_venv_dir / \"bin\" / \"twine\"\n\n\ndef upload_dist(dist: Path, dry_run: bool = True) -> None:\n with tmp_twine() as twine:\n files = [item for item in dist.iterdir() if is_whl_or_tar(item.name)]\n cmd: list[Any] = [twine, \"upload\"]\n cmd += files\n if dry_run:\n print(\"[dry run] \" + \" \".join(map(str, cmd)))\n else:\n print(\" \".join(map(str, cmd)))\n subprocess.check_call(cmd)\n\n\ndef upload_to_pypi(version: str, dry_run: bool = True) -> None:\n assert re.match(r\"v?0\\.[0-9]{3}(\\+\\S+)?$\", version)\n if \"dev\" in version:\n assert dry_run, \"Must use --dry-run with dev versions of mypy\"\n if version.startswith(\"v\"):\n version = version[1:]\n\n target_dir = 
tempfile.mkdtemp()\n dist = Path(target_dir) / \"dist\"\n dist.mkdir()\n print(f\"Temporary target directory: {target_dir}\")\n\n release = get_release_for_tag(f\"v{version}\")\n download_all_release_assets(release, dist)\n\n spot_check_dist(dist, version)\n check_sdist(dist, version)\n upload_dist(dist, dry_run)\n print(\"<< All done! >>\")\n\n\ndef main() -> None:\n parser = argparse.ArgumentParser(description=\"PyPI mypy package uploader\")\n parser.add_argument(\n \"--dry-run\", action=\"store_true\", default=False, help=\"Don't actually upload packages\"\n )\n parser.add_argument(\"version\", help=\"mypy version to release\")\n args = parser.parse_args()\n\n upload_to_pypi(args.version, args.dry_run)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "misc/upload-pypi.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Upload mypy packages to PyPI.\n\nYou must first tag the release, use `git push --tags` and wait for the wheel build in CI to complete.\n\n\"\"\"\n\nfrom __future__ import annotations\n\nimport argparse\nimport contextlib\nimport json\nimport re\nimport shutil\nimport subprocess\nimport tarfile\nimport tempfile\nimport venv\nfrom concurrent.futures import ThreadPoolExecutor\nfrom pathlib import Path\nfrom typing import Any, Iterator\nfrom urllib.request import urlopen\n\nBASE = \"https://api.github.com/repos\"\nREPO = \"mypyc/mypy_mypyc-wheels\"\n\n\ndef is_whl_or_tar(name: str) -> bool:\n return name.endswith(\".tar.gz\") or name.endswith(\".whl\")\n\n\ndef item_ok_for_pypi(name: str) -> bool:\n if not is_whl_or_tar(name):\n return False\n\n if name.endswith(\".tar.gz\"):\n name = name[:-7]\n if name.endswith(\".whl\"):\n name = name[:-4]\n\n if name.endswith(\"wasm32\"):\n return False\n\n return True\n\n\ndef get_release_for_tag(tag: str) -> dict[str, Any]:\n with urlopen(f\"{BASE}/{REPO}/releases/tags/{tag}\") as f:\n data = json.load(f)\n assert isinstance(data, dict)\n assert data[\"tag_name\"] == tag\n return data\n\n\ndef download_asset(asset: dict[str, Any], dst: Path) -> Path:\n name = asset[\"name\"]\n assert isinstance(name, str)\n download_url = asset[\"browser_download_url\"]\n assert is_whl_or_tar(name)\n with urlopen(download_url) as src_file:\n with open(dst / name, \"wb\") as dst_file:\n shutil.copyfileobj(src_file, dst_file)\n return dst / name\n\n\ndef download_all_release_assets(release: dict[str, Any], dst: Path) -> None:\n print(\"Downloading assets...\")\n with ThreadPoolExecutor() as e:\n for asset in e.map(lambda asset: download_asset(asset, dst), release[\"assets\"]):\n print(f\"Downloaded {asset}\")\n\n\ndef check_sdist(dist: Path, version: str) -> None:\n tarfiles = list(dist.glob(\"*.tar.gz\"))\n assert len(tarfiles) == 1\n sdist = tarfiles[0]\n assert version in sdist.name\n with tarfile.open(sdist) as f:\n version_py = f.extractfile(f\"{sdist.name[:-len('.tar.gz')]}/mypy/version.py\")\n assert version_py is not None\n version_py_contents = version_py.read().decode(\"utf-8\")\n\n # strip a git hash from our version, if necessary, since that's not present in version.py\n match = re.match(r\"(.*\\+dev).*$\", version)\n hashless_version = match.group(1) if match else version\n\n assert (\n f'\"{hashless_version}\"' in version_py_contents\n ), \"Version does not match version.py in sdist\"\n\n\ndef spot_check_dist(dist: Path, version: str) -> None:\n items = [item for item in dist.iterdir() if item_ok_for_pypi(item.name)]\n assert len(items) > 10\n assert all(version in item.name for item in items)\n assert 
any(item.name.endswith(\"py3-none-any.whl\") for item in items)\n\n\[email protected]\ndef tmp_twine() -> Iterator[Path]:\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_venv_dir = Path(tmp_dir) / \"venv\"\n venv.create(tmp_venv_dir, with_pip=True)\n pip_exe = tmp_venv_dir / \"bin\" / \"pip\"\n subprocess.check_call([pip_exe, \"install\", \"twine\"])\n yield tmp_venv_dir / \"bin\" / \"twine\"\n\n\ndef upload_dist(dist: Path, dry_run: bool = True) -> None:\n with tmp_twine() as twine:\n files = [item for item in dist.iterdir() if item_ok_for_pypi(item.name)]\n cmd: list[Any] = [twine, \"upload\"]\n cmd += files\n if dry_run:\n print(\"[dry run] \" + \" \".join(map(str, cmd)))\n else:\n print(\" \".join(map(str, cmd)))\n subprocess.check_call(cmd)\n\n\ndef upload_to_pypi(version: str, dry_run: bool = True) -> None:\n assert re.match(r\"v?0\\.[0-9]{3}(\\+\\S+)?$\", version)\n if \"dev\" in version:\n assert dry_run, \"Must use --dry-run with dev versions of mypy\"\n if version.startswith(\"v\"):\n version = version[1:]\n\n target_dir = tempfile.mkdtemp()\n dist = Path(target_dir) / \"dist\"\n dist.mkdir()\n print(f\"Temporary target directory: {target_dir}\")\n\n release = get_release_for_tag(f\"v{version}\")\n download_all_release_assets(release, dist)\n\n spot_check_dist(dist, version)\n check_sdist(dist, version)\n upload_dist(dist, dry_run)\n print(\"<< All done! >>\")\n\n\ndef main() -> None:\n parser = argparse.ArgumentParser(description=\"PyPI mypy package uploader\")\n parser.add_argument(\n \"--dry-run\", action=\"store_true\", default=False, help=\"Don't actually upload packages\"\n )\n parser.add_argument(\"version\", help=\"mypy version to release\")\n args = parser.parse_args()\n\n upload_to_pypi(args.version, args.dry_run)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "misc/upload-pypi.py"}]} | 3,699 | 415 |
gh_patches_debug_34090 | rasdani/github-patches | git_diff | dotkom__onlineweb4-584 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Recovery of passwords link is valid multiple times.
The RegisterToken that allows recovery seems to be valid more than once.
I'm guessing it's not getting deleted like it should be.
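For illustration only, here is a minimal sketch (not taken from the codebase) of the one-time-token handling the reporter expects: the token row is deleted the moment it is consumed, so the recovery link cannot be reused. The helper name is hypothetical; `RegisterToken` is the model referenced in the files below.

```python
from apps.authentication.models import RegisterToken

def consume_recovery_token(token_string):
    # Hypothetical helper: resolve a recovery token exactly once.
    tokens = RegisterToken.objects.filter(token=token_string)
    if tokens.count() != 1:
        return None  # unknown, or already consumed by an earlier visit
    rt = tokens[0]
    if not rt.is_valid:
        rt.delete()
        return None
    user = rt.user
    rt.delete()  # invalidate the link as soon as it is used
    return user
```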
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/authentication/views.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import uuid
4 import re
5
6 from django.contrib import auth
7 from django.contrib import messages
8 from django.core.mail import send_mail
9 from django.shortcuts import render, redirect, get_object_or_404
10 from django.http import HttpResponseRedirect
11 from django.utils.translation import ugettext as _
12 from django.views.decorators.debug import sensitive_post_parameters
13
14 from django.conf import settings
15 from apps.authentication.forms import (LoginForm, RegisterForm,
16 RecoveryForm, ChangePasswordForm)
17 from apps.authentication.models import OnlineUser as User, RegisterToken, Email
18
19
20 @sensitive_post_parameters()
21 def login(request):
22 redirect_url = request.REQUEST.get('next', '')
23 if request.method == 'POST':
24 form = LoginForm(request.POST)
25 if form.login(request):
26 messages.success(request, _(u'Du er nå logget inn.'))
27 if redirect_url:
28 return HttpResponseRedirect(redirect_url)
29 return HttpResponseRedirect('/')
30 else: form = LoginForm(request.POST, auto_id=True)
31 else:
32 form = LoginForm()
33
34 response_dict = { 'form' : form, 'next' : redirect_url}
35 return render(request, 'auth/login.html', response_dict)
36
37
38 def logout(request):
39 auth.logout(request)
40 messages.success(request, _(u'Du er nå logget ut.'))
41 return HttpResponseRedirect('/')
42
43
44 @sensitive_post_parameters()
45 def register(request):
46 if request.user.is_authenticated():
47 messages.error(request, _(u'Registrering av ny konto krever at du er logget ut.'))
48 return HttpResponseRedirect('/')
49 else:
50 if request.method == 'POST':
51 form = RegisterForm(request.POST)
52 if form.is_valid():
53 cleaned = form.cleaned_data
54
55 # Create user
56 user = User(
57 username=cleaned['username'].lower(),
58 first_name=cleaned['first_name'].title(),
59 last_name=cleaned['last_name'].title(),
60 )
61 # Set remaining fields
62 user.phone_number=cleaned['phone']
63 user.address=cleaned['address'].title()
64 user.zip_code=cleaned['zip_code']
65 # Store password properly
66 user.set_password(cleaned['password'])
67 # Users need to be manually activated
68 user.is_active = False
69 user.save()
70
71 # Set email address
72 email = Email(
73 user=user,
74 email=cleaned['email'],
75 )
76 email.primary = True
77 email.save()
78
79 # Create the registration token
80 token = uuid.uuid4().hex
81 rt = RegisterToken(user=user, email=cleaned['email'], token=token)
82 rt.save()
83
84 email_message = _(u"""
85 En konto har blitt registrert på online.ntnu.no med denne epostadressen. Dersom du ikke
86 har utført denne handlingen ber vi deg se bort fra denne eposten.
87
88 For å bruke denne kontoen kreves det at du verifiserer epostadressen. Du kan gjøre
89 dette ved å besøke linken under.
90
91 http://%s/auth/verify/%s/
92
93 Denne lenken vil være gyldig i 24 timer. Dersom du behøver å få tilsendt en ny lenke
94 kan dette gjøres med funksjonen for å gjenopprette passord.
95 """) % (request.META['HTTP_HOST'], token)
96
97 send_mail(_(u'Verifiser din konto'), email_message, settings.DEFAULT_FROM_EMAIL, [email.email,])
98
99 messages.success(request, _(u'Registreringen var vellykket. Se tilsendt epost for verifiseringsinstrukser.'))
100
101 return HttpResponseRedirect('/')
102 else:
103 form = RegisterForm(request.POST, auto_id=True)
104 else:
105 form = RegisterForm()
106
107 return render(request, 'auth/register.html', {'form': form, })
108
109
110 def verify(request, token):
111 rt = get_object_or_404(RegisterToken, token=token)
112
113 if rt.is_valid:
114 email = get_object_or_404(Email, email=rt.email)
115 email.verified = True
116 email.save()
117
118 user = getattr(rt, 'user')
119
120 # If it is a stud email, set the ntnu_username for user
121 if re.match(r'[^@][email protected]', rt.email):
122 user.ntnu_username = rt.email.split("@")[0]
123
124 user_activated = False
125 if not user.is_active:
126 user.is_active = True
127 user_activated = True
128
129 user.save()
130 rt.delete()
131
132 if user_activated:
133 messages.success(request, _(u'Bruker %s ble aktivert. Du kan nå logge inn.') % user.username)
134 return redirect('auth_login')
135 else:
136 messages.success(request, _(u'Eposten %s er nå verifisert.') % email)
137 return redirect('profiles')
138 else:
139 messages.error(request, _(u'Denne lenken er utløpt. Bruk gjenopprett passord for å få tilsendt en ny lenke.'))
140 return HttpResponseRedirect('/')
141
142
143 def recover(request):
144 if request.user.is_authenticated():
145 messages.error(request, _(u'Gjenoppretning av passord krever at du er logget ut.'))
146 return HttpResponseRedirect('/')
147 else:
148 if request.method == 'POST':
149 form = RecoveryForm(request.POST)
150 if form.is_valid():
151 email_string = form.cleaned_data['email']
152 emails = Email.objects.filter(email=email_string)
153
154 if len(emails) == 0:
155 messages.error(request, _(u'Denne eposten er ikke registrert i våre systemer.'))
156 return HttpResponseRedirect('/')
157
158 email = emails[0]
159
160 # Create the registration token
161 token = uuid.uuid4().hex
162 rt = RegisterToken(user=email.user, email=email.email, token=token)
163 rt.save()
164
165 email_message = _(u"""
166 Vi har mottat forespørsel om å gjenopprette passordet for kontoen bundet til %s.
167 Dersom du ikke har bedt om denne handlingen ber vi deg se bort fra denne eposten.
168
169 Brukernavn: %s
170
171 Hvis du ønsker å gjennomføre en gjenoppretning av passord, bruk lenken under.
172
173 http://%s/auth/set_password/%s/
174
175 Denne lenken vil være gyldig i 24 timer. Dersom du behøver å få tilsendt en ny lenke
176 kan dette gjøres med funksjonen for å gjenopprette passord.
177 """) % (email.email, email.user.username, request.META['HTTP_HOST'], token)
178
179 send_mail(_(u'Gjenoppretning av passord'), email_message, settings.DEFAULT_FROM_EMAIL, [email.email,])
180
181 messages.success(request, _(u'En lenke for gjenoppretning har blitt sendt til %s.') % email.email)
182
183 return HttpResponseRedirect('/')
184 else:
185 form = RecoveryForm(request.POST, auto_id=True)
186 else:
187 form = RecoveryForm()
188
189 return render(request, 'auth/recover.html', {'form': form})
190
191
192 @sensitive_post_parameters()
193 def set_password(request, token=None):
194 if request.user.is_authenticated():
195 return HttpResponseRedirect('/')
196 else:
197 rt = get_object_or_404(RegisterToken, token=token)
198
199 if rt.is_valid:
200 if request.method == 'POST':
201 form = ChangePasswordForm(request.POST, auto_id=True)
202 if form.is_valid():
203 user = getattr(rt, 'user')
204
205 user.is_active = True
206 user.set_password(form.cleaned_data['new_password'])
207 user.save()
208
209 rt.delete()
210
211 messages.success(request, _(u'Bruker %s har gjennomført vellykket gjenoppretning av passord. Du kan nå logge inn.') % user.username)
212
213 return HttpResponseRedirect('/')
214 else:
215
216 form = ChangePasswordForm()
217
218 messages.success(request, _(u'Lenken er akseptert. Vennligst skriv inn ønsket passord.'))
219
220 return render(request, 'auth/set_password.html', {'form': form, 'token': token})
221
222 else:
223 messages.error(request, _(u'Lenken er utløpt. Vennligst bruk gjenoppretning av passord for å få tilsendt en ny lenke.'))
224 return HttpResponseRedirect('/')
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/authentication/views.py b/apps/authentication/views.py
--- a/apps/authentication/views.py
+++ b/apps/authentication/views.py
@@ -194,31 +194,32 @@
if request.user.is_authenticated():
return HttpResponseRedirect('/')
else:
- rt = get_object_or_404(RegisterToken, token=token)
+ tokens = RegisterToken.objects.filter(token=token)
- if rt.is_valid:
- if request.method == 'POST':
- form = ChangePasswordForm(request.POST, auto_id=True)
- if form.is_valid():
- user = getattr(rt, 'user')
-
- user.is_active = True
- user.set_password(form.cleaned_data['new_password'])
- user.save()
-
- rt.delete()
-
- messages.success(request, _(u'Bruker %s har gjennomført vellykket gjenoppretning av passord. Du kan nå logge inn.') % user.username)
-
- return HttpResponseRedirect('/')
- else:
-
- form = ChangePasswordForm()
-
- messages.success(request, _(u'Lenken er akseptert. Vennligst skriv inn ønsket passord.'))
-
- return render(request, 'auth/set_password.html', {'form': form, 'token': token})
+ if tokens.count() == 1:
+ rt = tokens[0]
+ if rt.is_valid:
+ if request.method == 'POST':
+ form = ChangePasswordForm(request.POST, auto_id=True)
+ if form.is_valid():
+ user = getattr(rt, 'user')
+
+ user.is_active = True
+ user.set_password(form.cleaned_data['new_password'])
+ user.save()
+
+ rt.delete()
+
+ messages.success(request, _(u'Bruker %s har gjennomført vellykket gjenoppretning av passord. Du kan nå logge inn.') % user.username)
+
+ return HttpResponseRedirect('/')
+ else:
+ form = ChangePasswordForm()
+
+ messages.success(request, _(u'Lenken er akseptert. Vennligst skriv inn ønsket passord.'))
+
+ return render(request, 'auth/set_password.html', {'form': form, 'token': token})
else:
- messages.error(request, _(u'Lenken er utløpt. Vennligst bruk gjenoppretning av passord for å få tilsendt en ny lenke.'))
+ messages.error(request, _(u'Lenken er ugyldig. Vennligst bruk gjenoppretning av passord for å få tilsendt en ny lenke.'))
return HttpResponseRedirect('/')
| {"golden_diff": "diff --git a/apps/authentication/views.py b/apps/authentication/views.py\n--- a/apps/authentication/views.py\n+++ b/apps/authentication/views.py\n@@ -194,31 +194,32 @@\n if request.user.is_authenticated():\n return HttpResponseRedirect('/')\n else:\n- rt = get_object_or_404(RegisterToken, token=token)\n+ tokens = RegisterToken.objects.filter(token=token)\n \n- if rt.is_valid:\n- if request.method == 'POST':\n- form = ChangePasswordForm(request.POST, auto_id=True)\n- if form.is_valid():\n- user = getattr(rt, 'user')\n-\n- user.is_active = True\n- user.set_password(form.cleaned_data['new_password'])\n- user.save()\n- \n- rt.delete()\n-\n- messages.success(request, _(u'Bruker %s har gjennomf\u00f8rt vellykket gjenoppretning av passord. Du kan n\u00e5 logge inn.') % user.username)\n- \n- return HttpResponseRedirect('/') \n- else:\n- \n- form = ChangePasswordForm()\n-\n- messages.success(request, _(u'Lenken er akseptert. Vennligst skriv inn \u00f8nsket passord.'))\n-\n- return render(request, 'auth/set_password.html', {'form': form, 'token': token})\n+ if tokens.count() == 1:\n+ rt = tokens[0]\n+ if rt.is_valid:\n+ if request.method == 'POST':\n+ form = ChangePasswordForm(request.POST, auto_id=True)\n+ if form.is_valid():\n+ user = getattr(rt, 'user')\n+\n+ user.is_active = True\n+ user.set_password(form.cleaned_data['new_password'])\n+ user.save()\n+ \n+ rt.delete()\n+\n+ messages.success(request, _(u'Bruker %s har gjennomf\u00f8rt vellykket gjenoppretning av passord. Du kan n\u00e5 logge inn.') % user.username)\n+ \n+ return HttpResponseRedirect('/') \n+ else:\n+ form = ChangePasswordForm()\n+\n+ messages.success(request, _(u'Lenken er akseptert. Vennligst skriv inn \u00f8nsket passord.'))\n+\n+ return render(request, 'auth/set_password.html', {'form': form, 'token': token})\n \n else:\n- messages.error(request, _(u'Lenken er utl\u00f8pt. Vennligst bruk gjenoppretning av passord for \u00e5 f\u00e5 tilsendt en ny lenke.'))\n+ messages.error(request, _(u'Lenken er ugyldig. 
Vennligst bruk gjenoppretning av passord for \u00e5 f\u00e5 tilsendt en ny lenke.'))\n return HttpResponseRedirect('/')\n", "issue": "Recovery of passwords link is valid multiple times.\nThe RegisterToken that allows recovery seems to be valid more than once.\nI'm guessing it's not getting deleted like it should be.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport uuid\nimport re\n\nfrom django.contrib import auth\nfrom django.contrib import messages\nfrom django.core.mail import send_mail\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.debug import sensitive_post_parameters\n\nfrom django.conf import settings\nfrom apps.authentication.forms import (LoginForm, RegisterForm, \n RecoveryForm, ChangePasswordForm)\nfrom apps.authentication.models import OnlineUser as User, RegisterToken, Email\n\n\n@sensitive_post_parameters()\ndef login(request):\n redirect_url = request.REQUEST.get('next', '')\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.login(request):\n messages.success(request, _(u'Du er n\u00e5 logget inn.'))\n if redirect_url:\n return HttpResponseRedirect(redirect_url)\n return HttpResponseRedirect('/')\n else: form = LoginForm(request.POST, auto_id=True)\n else:\n form = LoginForm()\n\n response_dict = { 'form' : form, 'next' : redirect_url}\n return render(request, 'auth/login.html', response_dict)\n\n\ndef logout(request):\n auth.logout(request)\n messages.success(request, _(u'Du er n\u00e5 logget ut.'))\n return HttpResponseRedirect('/')\n\n\n@sensitive_post_parameters()\ndef register(request):\n if request.user.is_authenticated():\n messages.error(request, _(u'Registrering av ny konto krever at du er logget ut.'))\n return HttpResponseRedirect('/')\n else:\n if request.method == 'POST':\n form = RegisterForm(request.POST)\n if form.is_valid():\n cleaned = form.cleaned_data\n\n # Create user\n user = User(\n username=cleaned['username'].lower(), \n first_name=cleaned['first_name'].title(), \n last_name=cleaned['last_name'].title(),\n )\n # Set remaining fields\n user.phone_number=cleaned['phone']\n user.address=cleaned['address'].title()\n user.zip_code=cleaned['zip_code']\n # Store password properly\n user.set_password(cleaned['password'])\n # Users need to be manually activated\n user.is_active = False\n user.save()\n\n # Set email address\n email = Email(\n user=user,\n email=cleaned['email'],\n )\n email.primary = True\n email.save() \n\n # Create the registration token\n token = uuid.uuid4().hex\n rt = RegisterToken(user=user, email=cleaned['email'], token=token)\n rt.save()\n\n email_message = _(u\"\"\"\nEn konto har blitt registrert p\u00e5 online.ntnu.no med denne epostadressen. Dersom du ikke\nhar utf\u00f8rt denne handlingen ber vi deg se bort fra denne eposten.\n\nFor \u00e5 bruke denne kontoen kreves det at du verifiserer epostadressen. Du kan gj\u00f8re\ndette ved \u00e5 bes\u00f8ke linken under.\n\nhttp://%s/auth/verify/%s/\n\nDenne lenken vil v\u00e6re gyldig i 24 timer. Dersom du beh\u00f8ver \u00e5 f\u00e5 tilsendt en ny lenke\nkan dette gj\u00f8res med funksjonen for \u00e5 gjenopprette passord.\n\"\"\") % (request.META['HTTP_HOST'], token)\n\n send_mail(_(u'Verifiser din konto'), email_message, settings.DEFAULT_FROM_EMAIL, [email.email,])\n\n messages.success(request, _(u'Registreringen var vellykket. 
Se tilsendt epost for verifiseringsinstrukser.'))\n\n return HttpResponseRedirect('/') \n else:\n form = RegisterForm(request.POST, auto_id=True)\n else:\n form = RegisterForm()\n\n return render(request, 'auth/register.html', {'form': form, })\n\n\ndef verify(request, token):\n rt = get_object_or_404(RegisterToken, token=token)\n \n if rt.is_valid:\n email = get_object_or_404(Email, email=rt.email)\n email.verified = True\n email.save()\n \n user = getattr(rt, 'user')\n\n # If it is a stud email, set the ntnu_username for user\n if re.match(r'[^@][email protected]', rt.email):\n user.ntnu_username = rt.email.split(\"@\")[0]\n\n user_activated = False\n if not user.is_active:\n user.is_active = True\n user_activated = True\n\n user.save()\n rt.delete()\n\n if user_activated:\n messages.success(request, _(u'Bruker %s ble aktivert. Du kan n\u00e5 logge inn.') % user.username)\n return redirect('auth_login')\n else:\n messages.success(request, _(u'Eposten %s er n\u00e5 verifisert.') % email)\n return redirect('profiles')\n else:\n messages.error(request, _(u'Denne lenken er utl\u00f8pt. Bruk gjenopprett passord for \u00e5 f\u00e5 tilsendt en ny lenke.'))\n return HttpResponseRedirect('/') \n \n\ndef recover(request):\n if request.user.is_authenticated():\n messages.error(request, _(u'Gjenoppretning av passord krever at du er logget ut.'))\n return HttpResponseRedirect('/')\n else:\n if request.method == 'POST':\n form = RecoveryForm(request.POST)\n if form.is_valid():\n email_string = form.cleaned_data['email']\n emails = Email.objects.filter(email=email_string)\n\n if len(emails) == 0:\n messages.error(request, _(u'Denne eposten er ikke registrert i v\u00e5re systemer.'))\n return HttpResponseRedirect('/') \n\n email = emails[0]\n \n # Create the registration token\n token = uuid.uuid4().hex\n rt = RegisterToken(user=email.user, email=email.email, token=token)\n rt.save()\n\n email_message = _(u\"\"\"\nVi har mottat foresp\u00f8rsel om \u00e5 gjenopprette passordet for kontoen bundet til %s.\nDersom du ikke har bedt om denne handlingen ber vi deg se bort fra denne eposten.\n\nBrukernavn: %s\n\nHvis du \u00f8nsker \u00e5 gjennomf\u00f8re en gjenoppretning av passord, bruk lenken under.\n\nhttp://%s/auth/set_password/%s/\n\nDenne lenken vil v\u00e6re gyldig i 24 timer. Dersom du beh\u00f8ver \u00e5 f\u00e5 tilsendt en ny lenke\nkan dette gj\u00f8res med funksjonen for \u00e5 gjenopprette passord.\n\"\"\") % (email.email, email.user.username, request.META['HTTP_HOST'], token)\n\n send_mail(_(u'Gjenoppretning av passord'), email_message, settings.DEFAULT_FROM_EMAIL, [email.email,])\n\n messages.success(request, _(u'En lenke for gjenoppretning har blitt sendt til %s.') % email.email)\n\n return HttpResponseRedirect('/') \n else:\n form = RecoveryForm(request.POST, auto_id=True)\n else:\n form = RecoveryForm()\n\n return render(request, 'auth/recover.html', {'form': form})\n\n\n@sensitive_post_parameters()\ndef set_password(request, token=None): \n if request.user.is_authenticated():\n return HttpResponseRedirect('/')\n else:\n rt = get_object_or_404(RegisterToken, token=token)\n \n if rt.is_valid:\n if request.method == 'POST':\n form = ChangePasswordForm(request.POST, auto_id=True)\n if form.is_valid():\n user = getattr(rt, 'user')\n\n user.is_active = True\n user.set_password(form.cleaned_data['new_password'])\n user.save()\n \n rt.delete()\n\n messages.success(request, _(u'Bruker %s har gjennomf\u00f8rt vellykket gjenoppretning av passord. 
Du kan n\u00e5 logge inn.') % user.username)\n \n return HttpResponseRedirect('/') \n else:\n \n form = ChangePasswordForm()\n\n messages.success(request, _(u'Lenken er akseptert. Vennligst skriv inn \u00f8nsket passord.'))\n\n return render(request, 'auth/set_password.html', {'form': form, 'token': token})\n\n else:\n messages.error(request, _(u'Lenken er utl\u00f8pt. Vennligst bruk gjenoppretning av passord for \u00e5 f\u00e5 tilsendt en ny lenke.'))\n return HttpResponseRedirect('/') \n", "path": "apps/authentication/views.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport uuid\nimport re\n\nfrom django.contrib import auth\nfrom django.contrib import messages\nfrom django.core.mail import send_mail\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.debug import sensitive_post_parameters\n\nfrom django.conf import settings\nfrom apps.authentication.forms import (LoginForm, RegisterForm, \n RecoveryForm, ChangePasswordForm)\nfrom apps.authentication.models import OnlineUser as User, RegisterToken, Email\n\n\n@sensitive_post_parameters()\ndef login(request):\n redirect_url = request.REQUEST.get('next', '')\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.login(request):\n messages.success(request, _(u'Du er n\u00e5 logget inn.'))\n if redirect_url:\n return HttpResponseRedirect(redirect_url)\n return HttpResponseRedirect('/')\n else: form = LoginForm(request.POST, auto_id=True)\n else:\n form = LoginForm()\n\n response_dict = { 'form' : form, 'next' : redirect_url}\n return render(request, 'auth/login.html', response_dict)\n\n\ndef logout(request):\n auth.logout(request)\n messages.success(request, _(u'Du er n\u00e5 logget ut.'))\n return HttpResponseRedirect('/')\n\n\n@sensitive_post_parameters()\ndef register(request):\n if request.user.is_authenticated():\n messages.error(request, _(u'Registrering av ny konto krever at du er logget ut.'))\n return HttpResponseRedirect('/')\n else:\n if request.method == 'POST':\n form = RegisterForm(request.POST)\n if form.is_valid():\n cleaned = form.cleaned_data\n\n # Create user\n user = User(\n username=cleaned['username'].lower(), \n first_name=cleaned['first_name'].title(), \n last_name=cleaned['last_name'].title(),\n )\n # Set remaining fields\n user.phone_number=cleaned['phone']\n user.address=cleaned['address'].title()\n user.zip_code=cleaned['zip_code']\n # Store password properly\n user.set_password(cleaned['password'])\n # Users need to be manually activated\n user.is_active = False\n user.save()\n\n # Set email address\n email = Email(\n user=user,\n email=cleaned['email'],\n )\n email.primary = True\n email.save() \n\n # Create the registration token\n token = uuid.uuid4().hex\n rt = RegisterToken(user=user, email=cleaned['email'], token=token)\n rt.save()\n\n email_message = _(u\"\"\"\nEn konto har blitt registrert p\u00e5 online.ntnu.no med denne epostadressen. Dersom du ikke\nhar utf\u00f8rt denne handlingen ber vi deg se bort fra denne eposten.\n\nFor \u00e5 bruke denne kontoen kreves det at du verifiserer epostadressen. Du kan gj\u00f8re\ndette ved \u00e5 bes\u00f8ke linken under.\n\nhttp://%s/auth/verify/%s/\n\nDenne lenken vil v\u00e6re gyldig i 24 timer. 
Dersom du beh\u00f8ver \u00e5 f\u00e5 tilsendt en ny lenke\nkan dette gj\u00f8res med funksjonen for \u00e5 gjenopprette passord.\n\"\"\") % (request.META['HTTP_HOST'], token)\n\n send_mail(_(u'Verifiser din konto'), email_message, settings.DEFAULT_FROM_EMAIL, [email.email,])\n\n messages.success(request, _(u'Registreringen var vellykket. Se tilsendt epost for verifiseringsinstrukser.'))\n\n return HttpResponseRedirect('/') \n else:\n form = RegisterForm(request.POST, auto_id=True)\n else:\n form = RegisterForm()\n\n return render(request, 'auth/register.html', {'form': form, })\n\n\ndef verify(request, token):\n rt = get_object_or_404(RegisterToken, token=token)\n \n if rt.is_valid:\n email = get_object_or_404(Email, email=rt.email)\n email.verified = True\n email.save()\n \n user = getattr(rt, 'user')\n\n # If it is a stud email, set the ntnu_username for user\n if re.match(r'[^@][email protected]', rt.email):\n user.ntnu_username = rt.email.split(\"@\")[0]\n\n user_activated = False\n if not user.is_active:\n user.is_active = True\n user_activated = True\n\n user.save()\n rt.delete()\n\n if user_activated:\n messages.success(request, _(u'Bruker %s ble aktivert. Du kan n\u00e5 logge inn.') % user.username)\n return redirect('auth_login')\n else:\n messages.success(request, _(u'Eposten %s er n\u00e5 verifisert.') % email)\n return redirect('profiles')\n else:\n messages.error(request, _(u'Denne lenken er utl\u00f8pt. Bruk gjenopprett passord for \u00e5 f\u00e5 tilsendt en ny lenke.'))\n return HttpResponseRedirect('/') \n \n\ndef recover(request):\n if request.user.is_authenticated():\n messages.error(request, _(u'Gjenoppretning av passord krever at du er logget ut.'))\n return HttpResponseRedirect('/')\n else:\n if request.method == 'POST':\n form = RecoveryForm(request.POST)\n if form.is_valid():\n email_string = form.cleaned_data['email']\n emails = Email.objects.filter(email=email_string)\n\n if len(emails) == 0:\n messages.error(request, _(u'Denne eposten er ikke registrert i v\u00e5re systemer.'))\n return HttpResponseRedirect('/') \n\n email = emails[0]\n \n # Create the registration token\n token = uuid.uuid4().hex\n rt = RegisterToken(user=email.user, email=email.email, token=token)\n rt.save()\n\n email_message = _(u\"\"\"\nVi har mottat foresp\u00f8rsel om \u00e5 gjenopprette passordet for kontoen bundet til %s.\nDersom du ikke har bedt om denne handlingen ber vi deg se bort fra denne eposten.\n\nBrukernavn: %s\n\nHvis du \u00f8nsker \u00e5 gjennomf\u00f8re en gjenoppretning av passord, bruk lenken under.\n\nhttp://%s/auth/set_password/%s/\n\nDenne lenken vil v\u00e6re gyldig i 24 timer. 
Dersom du beh\u00f8ver \u00e5 f\u00e5 tilsendt en ny lenke\nkan dette gj\u00f8res med funksjonen for \u00e5 gjenopprette passord.\n\"\"\") % (email.email, email.user.username, request.META['HTTP_HOST'], token)\n\n send_mail(_(u'Gjenoppretning av passord'), email_message, settings.DEFAULT_FROM_EMAIL, [email.email,])\n\n messages.success(request, _(u'En lenke for gjenoppretning har blitt sendt til %s.') % email.email)\n\n return HttpResponseRedirect('/') \n else:\n form = RecoveryForm(request.POST, auto_id=True)\n else:\n form = RecoveryForm()\n\n return render(request, 'auth/recover.html', {'form': form})\n\n\n@sensitive_post_parameters()\ndef set_password(request, token=None): \n if request.user.is_authenticated():\n return HttpResponseRedirect('/')\n else:\n tokens = RegisterToken.objects.filter(token=token)\n \n if tokens.count() == 1:\n rt = tokens[0]\n if rt.is_valid:\n if request.method == 'POST':\n form = ChangePasswordForm(request.POST, auto_id=True)\n if form.is_valid():\n user = getattr(rt, 'user')\n\n user.is_active = True\n user.set_password(form.cleaned_data['new_password'])\n user.save()\n \n rt.delete()\n\n messages.success(request, _(u'Bruker %s har gjennomf\u00f8rt vellykket gjenoppretning av passord. Du kan n\u00e5 logge inn.') % user.username)\n \n return HttpResponseRedirect('/') \n else:\n form = ChangePasswordForm()\n\n messages.success(request, _(u'Lenken er akseptert. Vennligst skriv inn \u00f8nsket passord.'))\n\n return render(request, 'auth/set_password.html', {'form': form, 'token': token})\n\n else:\n messages.error(request, _(u'Lenken er ugyldig. Vennligst bruk gjenoppretning av passord for \u00e5 f\u00e5 tilsendt en ny lenke.'))\n return HttpResponseRedirect('/') \n", "path": "apps/authentication/views.py"}]} | 2,718 | 599 |
gh_patches_debug_39452 | rasdani/github-patches | git_diff | wagtail__wagtail-1375 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SnippetChooserPanel cannot take a model string in its model argument
We should update `SnippetChooserPanel` to use `resolve_model_string` (https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailcore/utils.py#L13-L37). This will make it behave more like `PageChooserPanel`.
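As a rough sketch (an assumption about the intended change, not the final patch), the panel could resolve its `snippet_type` through `resolve_model_string`, accepting either a model class or an `'app_label.model_name'` string and failing loudly otherwise:

```python
from django.core.exceptions import ImproperlyConfigured
from wagtail.wagtailcore.utils import resolve_model_string

def resolve_snippet_type(snippet_type):
    # Accept either a model class or an "app_label.model_name" string.
    try:
        return resolve_model_string(snippet_type)
    except (LookupError, ValueError) as e:
        raise ImproperlyConfigured(
            "snippet_type %r could not be resolved to an installed model: %s"
            % (snippet_type, e))
```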
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailsnippets/edit_handlers.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2
3 from django.template.loader import render_to_string
4 from django.contrib.contenttypes.models import ContentType
5 from django.utils.safestring import mark_safe
6 from django.utils.encoding import force_text
7
8 from wagtail.wagtailadmin.edit_handlers import BaseChooserPanel
9 from .widgets import AdminSnippetChooser
10
11
12 class BaseSnippetChooserPanel(BaseChooserPanel):
13 object_type_name = 'item'
14
15 _content_type = None
16
17 @classmethod
18 def widget_overrides(cls):
19 return {cls.field_name: AdminSnippetChooser(
20 content_type=cls.content_type(), snippet_type_name=cls.snippet_type_name)}
21
22 @classmethod
23 def content_type(cls):
24 if cls._content_type is None:
25 # TODO: infer the content type by introspection on the foreign key rather than having to pass it explicitly
26 cls._content_type = ContentType.objects.get_for_model(cls.snippet_type)
27
28 return cls._content_type
29
30 def render_as_field(self):
31 instance_obj = self.get_chosen_item()
32 return mark_safe(render_to_string(self.field_template, {
33 'field': self.bound_field,
34 self.object_type_name: instance_obj,
35 'snippet_type_name': self.snippet_type_name,
36 }))
37
38
39 class SnippetChooserPanel(object):
40 def __init__(self, field_name, snippet_type):
41 self.field_name = field_name
42 self.snippet_type = snippet_type
43
44 def bind_to_model(self, model):
45 return type(str('_SnippetChooserPanel'), (BaseSnippetChooserPanel,), {
46 'model': model,
47 'field_name': self.field_name,
48 'snippet_type_name': force_text(self.snippet_type._meta.verbose_name),
49 'snippet_type': self.snippet_type,
50 })
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/wagtailsnippets/edit_handlers.py b/wagtail/wagtailsnippets/edit_handlers.py
--- a/wagtail/wagtailsnippets/edit_handlers.py
+++ b/wagtail/wagtailsnippets/edit_handlers.py
@@ -4,28 +4,42 @@
from django.contrib.contenttypes.models import ContentType
from django.utils.safestring import mark_safe
from django.utils.encoding import force_text
+from django.core.exceptions import ImproperlyConfigured
from wagtail.wagtailadmin.edit_handlers import BaseChooserPanel
+from wagtail.wagtailcore.utils import resolve_model_string
from .widgets import AdminSnippetChooser
class BaseSnippetChooserPanel(BaseChooserPanel):
object_type_name = 'item'
- _content_type = None
+ _target_content_type = None
@classmethod
def widget_overrides(cls):
return {cls.field_name: AdminSnippetChooser(
- content_type=cls.content_type(), snippet_type_name=cls.snippet_type_name)}
+ content_type=cls.target_content_type(), snippet_type_name=cls.snippet_type_name)}
@classmethod
- def content_type(cls):
- if cls._content_type is None:
- # TODO: infer the content type by introspection on the foreign key rather than having to pass it explicitly
- cls._content_type = ContentType.objects.get_for_model(cls.snippet_type)
+ def target_content_type(cls):
+ if cls._target_content_type is None:
+ if cls.snippet_type:
+ try:
+ model = resolve_model_string(cls.snippet_type)
+ except LookupError:
+ raise ImproperlyConfigured("{0}.snippet_type must be of the form 'app_label.model_name', given {1!r}".format(
+ cls.__name__, cls.snippet_type))
+ except ValueError:
+ raise ImproperlyConfigured("{0}.snippet_type refers to model {1!r} that has not been installed".format(
+ cls.__name__, cls.snippet_type))
- return cls._content_type
+ cls._target_content_type = ContentType.objects.get_for_model(model)
+ else:
+ target_model = cls.model._meta.get_field(cls.field_name).rel.to
+ cls._target_content_type = ContentType.objects.get_for_model(target_model)
+
+ return cls._target_content_type
def render_as_field(self):
instance_obj = self.get_chosen_item()
@@ -35,6 +49,10 @@
'snippet_type_name': self.snippet_type_name,
}))
+ @property
+ def snippet_type_name(self):
+ return force_text(self.target_content_type()._meta.verbose_name)
+
class SnippetChooserPanel(object):
def __init__(self, field_name, snippet_type):
@@ -45,6 +63,5 @@
return type(str('_SnippetChooserPanel'), (BaseSnippetChooserPanel,), {
'model': model,
'field_name': self.field_name,
- 'snippet_type_name': force_text(self.snippet_type._meta.verbose_name),
'snippet_type': self.snippet_type,
})
| {"golden_diff": "diff --git a/wagtail/wagtailsnippets/edit_handlers.py b/wagtail/wagtailsnippets/edit_handlers.py\n--- a/wagtail/wagtailsnippets/edit_handlers.py\n+++ b/wagtail/wagtailsnippets/edit_handlers.py\n@@ -4,28 +4,42 @@\n from django.contrib.contenttypes.models import ContentType\n from django.utils.safestring import mark_safe\n from django.utils.encoding import force_text\n+from django.core.exceptions import ImproperlyConfigured\n \n from wagtail.wagtailadmin.edit_handlers import BaseChooserPanel\n+from wagtail.wagtailcore.utils import resolve_model_string\n from .widgets import AdminSnippetChooser\n \n \n class BaseSnippetChooserPanel(BaseChooserPanel):\n object_type_name = 'item'\n \n- _content_type = None\n+ _target_content_type = None\n \n @classmethod\n def widget_overrides(cls):\n return {cls.field_name: AdminSnippetChooser(\n- content_type=cls.content_type(), snippet_type_name=cls.snippet_type_name)}\n+ content_type=cls.target_content_type(), snippet_type_name=cls.snippet_type_name)}\n \n @classmethod\n- def content_type(cls):\n- if cls._content_type is None:\n- # TODO: infer the content type by introspection on the foreign key rather than having to pass it explicitly\n- cls._content_type = ContentType.objects.get_for_model(cls.snippet_type)\n+ def target_content_type(cls):\n+ if cls._target_content_type is None:\n+ if cls.snippet_type:\n+ try:\n+ model = resolve_model_string(cls.snippet_type)\n+ except LookupError:\n+ raise ImproperlyConfigured(\"{0}.snippet_type must be of the form 'app_label.model_name', given {1!r}\".format(\n+ cls.__name__, cls.snippet_type))\n+ except ValueError:\n+ raise ImproperlyConfigured(\"{0}.snippet_type refers to model {1!r} that has not been installed\".format(\n+ cls.__name__, cls.snippet_type))\n \n- return cls._content_type\n+ cls._target_content_type = ContentType.objects.get_for_model(model)\n+ else:\n+ target_model = cls.model._meta.get_field(cls.field_name).rel.to\n+ cls._target_content_type = ContentType.objects.get_for_model(target_model)\n+\n+ return cls._target_content_type\n \n def render_as_field(self):\n instance_obj = self.get_chosen_item()\n@@ -35,6 +49,10 @@\n 'snippet_type_name': self.snippet_type_name,\n }))\n \n+ @property\n+ def snippet_type_name(self):\n+ return force_text(self.target_content_type()._meta.verbose_name)\n+\n \n class SnippetChooserPanel(object):\n def __init__(self, field_name, snippet_type):\n@@ -45,6 +63,5 @@\n return type(str('_SnippetChooserPanel'), (BaseSnippetChooserPanel,), {\n 'model': model,\n 'field_name': self.field_name,\n- 'snippet_type_name': force_text(self.snippet_type._meta.verbose_name),\n 'snippet_type': self.snippet_type,\n })\n", "issue": "SnippetChooserPanel cannot take model string in model argument\nWe should update `SnippetChooserPanel` to use `resolve_model_string` (https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailcore/utils.py#L13-L37). 
This will make it behave more like `PageChooserPanel`.\n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django.template.loader import render_to_string\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.safestring import mark_safe\nfrom django.utils.encoding import force_text\n\nfrom wagtail.wagtailadmin.edit_handlers import BaseChooserPanel\nfrom .widgets import AdminSnippetChooser\n\n\nclass BaseSnippetChooserPanel(BaseChooserPanel):\n object_type_name = 'item'\n\n _content_type = None\n\n @classmethod\n def widget_overrides(cls):\n return {cls.field_name: AdminSnippetChooser(\n content_type=cls.content_type(), snippet_type_name=cls.snippet_type_name)}\n\n @classmethod\n def content_type(cls):\n if cls._content_type is None:\n # TODO: infer the content type by introspection on the foreign key rather than having to pass it explicitly\n cls._content_type = ContentType.objects.get_for_model(cls.snippet_type)\n\n return cls._content_type\n\n def render_as_field(self):\n instance_obj = self.get_chosen_item()\n return mark_safe(render_to_string(self.field_template, {\n 'field': self.bound_field,\n self.object_type_name: instance_obj,\n 'snippet_type_name': self.snippet_type_name,\n }))\n\n\nclass SnippetChooserPanel(object):\n def __init__(self, field_name, snippet_type):\n self.field_name = field_name\n self.snippet_type = snippet_type\n\n def bind_to_model(self, model):\n return type(str('_SnippetChooserPanel'), (BaseSnippetChooserPanel,), {\n 'model': model,\n 'field_name': self.field_name,\n 'snippet_type_name': force_text(self.snippet_type._meta.verbose_name),\n 'snippet_type': self.snippet_type,\n })\n", "path": "wagtail/wagtailsnippets/edit_handlers.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django.template.loader import render_to_string\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.safestring import mark_safe\nfrom django.utils.encoding import force_text\nfrom django.core.exceptions import ImproperlyConfigured\n\nfrom wagtail.wagtailadmin.edit_handlers import BaseChooserPanel\nfrom wagtail.wagtailcore.utils import resolve_model_string\nfrom .widgets import AdminSnippetChooser\n\n\nclass BaseSnippetChooserPanel(BaseChooserPanel):\n object_type_name = 'item'\n\n _target_content_type = None\n\n @classmethod\n def widget_overrides(cls):\n return {cls.field_name: AdminSnippetChooser(\n content_type=cls.target_content_type(), snippet_type_name=cls.snippet_type_name)}\n\n @classmethod\n def target_content_type(cls):\n if cls._target_content_type is None:\n if cls.snippet_type:\n try:\n model = resolve_model_string(cls.snippet_type)\n except LookupError:\n raise ImproperlyConfigured(\"{0}.snippet_type must be of the form 'app_label.model_name', given {1!r}\".format(\n cls.__name__, cls.snippet_type))\n except ValueError:\n raise ImproperlyConfigured(\"{0}.snippet_type refers to model {1!r} that has not been installed\".format(\n cls.__name__, cls.snippet_type))\n\n cls._target_content_type = ContentType.objects.get_for_model(model)\n else:\n target_model = cls.model._meta.get_field(cls.field_name).rel.to\n cls._target_content_type = ContentType.objects.get_for_model(target_model)\n\n return cls._target_content_type\n\n def render_as_field(self):\n instance_obj = self.get_chosen_item()\n return mark_safe(render_to_string(self.field_template, {\n 'field': self.bound_field,\n self.object_type_name: instance_obj,\n 
'snippet_type_name': self.snippet_type_name,\n }))\n\n @property\n def snippet_type_name(self):\n return force_text(self.target_content_type()._meta.verbose_name)\n\n\nclass SnippetChooserPanel(object):\n def __init__(self, field_name, snippet_type):\n self.field_name = field_name\n self.snippet_type = snippet_type\n\n def bind_to_model(self, model):\n return type(str('_SnippetChooserPanel'), (BaseSnippetChooserPanel,), {\n 'model': model,\n 'field_name': self.field_name,\n 'snippet_type': self.snippet_type,\n })\n", "path": "wagtail/wagtailsnippets/edit_handlers.py"}]} | 804 | 679 |
gh_patches_debug_27915 | rasdani/github-patches | git_diff | pulp__pulpcore-193 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Verify if domain name has more than 50 chars
fixes: #4976
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/viewsets/upload.py`
Content:
```
1 import re
2 from datetime import datetime
3
4 from gettext import gettext as _
5 from drf_yasg.utils import swagger_auto_schema
6 from drf_yasg.openapi import Parameter
7 from rest_framework import mixins, serializers
8 from rest_framework.decorators import detail_route
9 from rest_framework.response import Response
10
11 from pulpcore.app.models import Upload
12 from pulpcore.app.serializers import UploadChunkSerializer, UploadCommitSerializer, UploadSerializer
13 from pulpcore.app.viewsets.base import NamedModelViewSet
14
15
16 class UploadViewSet(NamedModelViewSet,
17 mixins.CreateModelMixin,
18 mixins.RetrieveModelMixin,
19 mixins.UpdateModelMixin,
20 mixins.ListModelMixin):
21 """View for chunked uploads."""
22 endpoint_name = 'uploads'
23 queryset = Upload.objects.all()
24 serializer_class = UploadSerializer
25 http_method_names = ['get', 'post', 'head', 'put']
26
27 content_range_pattern = r'^bytes (\d+)-(\d+)/(\d+|[*])$'
28 content_range_parameter = \
29 Parameter(name='Content-Range', in_='header', required=True, type='string',
30 pattern=content_range_pattern,
31 description='The Content-Range header specifies the location of the file chunk '
32 'within the file.')
33
34 @swagger_auto_schema(operation_summary="Upload a file chunk",
35 request_body=UploadChunkSerializer,
36 manual_parameters=[content_range_parameter],
37 responses={200: UploadSerializer})
38 def update(self, request, pk=None):
39 """
40 Upload a chunk for an upload.
41 """
42 upload = self.get_object()
43
44 if upload.completed is not None:
45 raise serializers.ValidationError(_("Cannot upload chunk for a completed upload."))
46
47 try:
48 chunk = request.data['file']
49 except KeyError:
50 raise serializers.ValidationError(_("Missing 'file' parameter."))
51
52 content_range = request.META.get('HTTP_CONTENT_RANGE', '')
53 match = re.compile(self.content_range_pattern).match(content_range)
54 if not match:
55 raise serializers.ValidationError(_("Invalid or missing content range header."))
56 start = int(match[1])
57 end = int(match[2])
58
59 if (end - start + 1) != len(chunk):
60 raise serializers.ValidationError(_("Chunk size does not match content range."))
61
62 if end > upload.size - 1:
63 raise serializers.ValidationError(_("End byte is greater than upload size."))
64
65 upload.append(chunk, start)
66
67 serializer = UploadSerializer(upload, context={'request': request})
68 return Response(serializer.data)
69
70 @swagger_auto_schema(operation_summary="Finish an Upload",
71 request_body=UploadCommitSerializer,
72 responses={200: UploadSerializer})
73 @detail_route(methods=('put',))
74 def commit(self, request, pk):
75 """
76 Commit the upload and mark it as completed.
77 """
78 upload = self.get_object()
79
80 try:
81 sha256 = request.data['sha256']
82 except KeyError:
83 raise serializers.ValidationError(_("Checksum not supplied."))
84
85 if sha256 != upload.sha256:
86 raise serializers.ValidationError(_("Checksum does not match upload."))
87
88 if upload.completed is not None:
89 raise serializers.ValidationError(_("Upload is already complete."))
90
91 upload.completed = datetime.now()
92 upload.save()
93
94 serializer = UploadSerializer(upload, context={'request': request})
95 return Response(serializer.data)
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/app/viewsets/upload.py b/pulpcore/app/viewsets/upload.py
--- a/pulpcore/app/viewsets/upload.py
+++ b/pulpcore/app/viewsets/upload.py
@@ -10,19 +10,33 @@
from pulpcore.app.models import Upload
from pulpcore.app.serializers import UploadChunkSerializer, UploadCommitSerializer, UploadSerializer
-from pulpcore.app.viewsets.base import NamedModelViewSet
+from pulpcore.app.viewsets import BaseFilterSet
+from pulpcore.app.viewsets.base import DATETIME_FILTER_OPTIONS, NamedModelViewSet
+from pulpcore.app.viewsets.custom_filters import IsoDateTimeFilter
+
+
+class UploadFilter(BaseFilterSet):
+ completed = IsoDateTimeFilter(field_name='completed')
+
+ class Meta:
+ model = Upload
+ fields = {
+ 'completed': DATETIME_FILTER_OPTIONS + ['isnull']
+ }
class UploadViewSet(NamedModelViewSet,
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
+ mixins.DestroyModelMixin,
mixins.ListModelMixin):
"""View for chunked uploads."""
endpoint_name = 'uploads'
queryset = Upload.objects.all()
serializer_class = UploadSerializer
- http_method_names = ['get', 'post', 'head', 'put']
+ filterset_class = UploadFilter
+ http_method_names = ['get', 'post', 'head', 'put', 'delete'] # remove PATCH
content_range_pattern = r'^bytes (\d+)-(\d+)/(\d+|[*])$'
content_range_parameter = \
| {"golden_diff": "diff --git a/pulpcore/app/viewsets/upload.py b/pulpcore/app/viewsets/upload.py\n--- a/pulpcore/app/viewsets/upload.py\n+++ b/pulpcore/app/viewsets/upload.py\n@@ -10,19 +10,33 @@\n \n from pulpcore.app.models import Upload\n from pulpcore.app.serializers import UploadChunkSerializer, UploadCommitSerializer, UploadSerializer\n-from pulpcore.app.viewsets.base import NamedModelViewSet\n+from pulpcore.app.viewsets import BaseFilterSet\n+from pulpcore.app.viewsets.base import DATETIME_FILTER_OPTIONS, NamedModelViewSet\n+from pulpcore.app.viewsets.custom_filters import IsoDateTimeFilter\n+\n+\n+class UploadFilter(BaseFilterSet):\n+ completed = IsoDateTimeFilter(field_name='completed')\n+\n+ class Meta:\n+ model = Upload\n+ fields = {\n+ 'completed': DATETIME_FILTER_OPTIONS + ['isnull']\n+ }\n \n \n class UploadViewSet(NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin,\n+ mixins.DestroyModelMixin,\n mixins.ListModelMixin):\n \"\"\"View for chunked uploads.\"\"\"\n endpoint_name = 'uploads'\n queryset = Upload.objects.all()\n serializer_class = UploadSerializer\n- http_method_names = ['get', 'post', 'head', 'put']\n+ filterset_class = UploadFilter\n+ http_method_names = ['get', 'post', 'head', 'put', 'delete'] # remove PATCH\n \n content_range_pattern = r'^bytes (\\d+)-(\\d+)/(\\d+|[*])$'\n content_range_parameter = \\\n", "issue": "Verify if domain name has more than 50 chars\nfixes: #4976\n", "before_files": [{"content": "import re\nfrom datetime import datetime\n\nfrom gettext import gettext as _\nfrom drf_yasg.utils import swagger_auto_schema\nfrom drf_yasg.openapi import Parameter\nfrom rest_framework import mixins, serializers\nfrom rest_framework.decorators import detail_route\nfrom rest_framework.response import Response\n\nfrom pulpcore.app.models import Upload\nfrom pulpcore.app.serializers import UploadChunkSerializer, UploadCommitSerializer, UploadSerializer\nfrom pulpcore.app.viewsets.base import NamedModelViewSet\n\n\nclass UploadViewSet(NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin,\n mixins.ListModelMixin):\n \"\"\"View for chunked uploads.\"\"\"\n endpoint_name = 'uploads'\n queryset = Upload.objects.all()\n serializer_class = UploadSerializer\n http_method_names = ['get', 'post', 'head', 'put']\n\n content_range_pattern = r'^bytes (\\d+)-(\\d+)/(\\d+|[*])$'\n content_range_parameter = \\\n Parameter(name='Content-Range', in_='header', required=True, type='string',\n pattern=content_range_pattern,\n description='The Content-Range header specifies the location of the file chunk '\n 'within the file.')\n\n @swagger_auto_schema(operation_summary=\"Upload a file chunk\",\n request_body=UploadChunkSerializer,\n manual_parameters=[content_range_parameter],\n responses={200: UploadSerializer})\n def update(self, request, pk=None):\n \"\"\"\n Upload a chunk for an upload.\n \"\"\"\n upload = self.get_object()\n\n if upload.completed is not None:\n raise serializers.ValidationError(_(\"Cannot upload chunk for a completed upload.\"))\n\n try:\n chunk = request.data['file']\n except KeyError:\n raise serializers.ValidationError(_(\"Missing 'file' parameter.\"))\n\n content_range = request.META.get('HTTP_CONTENT_RANGE', '')\n match = re.compile(self.content_range_pattern).match(content_range)\n if not match:\n raise serializers.ValidationError(_(\"Invalid or missing content range header.\"))\n start = int(match[1])\n end = int(match[2])\n\n if (end - start + 1) != 
len(chunk):\n raise serializers.ValidationError(_(\"Chunk size does not match content range.\"))\n\n if end > upload.size - 1:\n raise serializers.ValidationError(_(\"End byte is greater than upload size.\"))\n\n upload.append(chunk, start)\n\n serializer = UploadSerializer(upload, context={'request': request})\n return Response(serializer.data)\n\n @swagger_auto_schema(operation_summary=\"Finish an Upload\",\n request_body=UploadCommitSerializer,\n responses={200: UploadSerializer})\n @detail_route(methods=('put',))\n def commit(self, request, pk):\n \"\"\"\n Commit the upload and mark it as completed.\n \"\"\"\n upload = self.get_object()\n\n try:\n sha256 = request.data['sha256']\n except KeyError:\n raise serializers.ValidationError(_(\"Checksum not supplied.\"))\n\n if sha256 != upload.sha256:\n raise serializers.ValidationError(_(\"Checksum does not match upload.\"))\n\n if upload.completed is not None:\n raise serializers.ValidationError(_(\"Upload is already complete.\"))\n\n upload.completed = datetime.now()\n upload.save()\n\n serializer = UploadSerializer(upload, context={'request': request})\n return Response(serializer.data)\n", "path": "pulpcore/app/viewsets/upload.py"}], "after_files": [{"content": "import re\nfrom datetime import datetime\n\nfrom gettext import gettext as _\nfrom drf_yasg.utils import swagger_auto_schema\nfrom drf_yasg.openapi import Parameter\nfrom rest_framework import mixins, serializers\nfrom rest_framework.decorators import detail_route\nfrom rest_framework.response import Response\n\nfrom pulpcore.app.models import Upload\nfrom pulpcore.app.serializers import UploadChunkSerializer, UploadCommitSerializer, UploadSerializer\nfrom pulpcore.app.viewsets import BaseFilterSet\nfrom pulpcore.app.viewsets.base import DATETIME_FILTER_OPTIONS, NamedModelViewSet\nfrom pulpcore.app.viewsets.custom_filters import IsoDateTimeFilter\n\n\nclass UploadFilter(BaseFilterSet):\n completed = IsoDateTimeFilter(field_name='completed')\n\n class Meta:\n model = Upload\n fields = {\n 'completed': DATETIME_FILTER_OPTIONS + ['isnull']\n }\n\n\nclass UploadViewSet(NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n mixins.ListModelMixin):\n \"\"\"View for chunked uploads.\"\"\"\n endpoint_name = 'uploads'\n queryset = Upload.objects.all()\n serializer_class = UploadSerializer\n filterset_class = UploadFilter\n http_method_names = ['get', 'post', 'head', 'put', 'delete'] # remove PATCH\n\n content_range_pattern = r'^bytes (\\d+)-(\\d+)/(\\d+|[*])$'\n content_range_parameter = \\\n Parameter(name='Content-Range', in_='header', required=True, type='string',\n pattern=content_range_pattern,\n description='The Content-Range header specifies the location of the file chunk '\n 'within the file.')\n\n @swagger_auto_schema(operation_summary=\"Upload a file chunk\",\n request_body=UploadChunkSerializer,\n manual_parameters=[content_range_parameter],\n responses={200: UploadSerializer})\n def update(self, request, pk=None):\n \"\"\"\n Upload a chunk for an upload.\n \"\"\"\n upload = self.get_object()\n\n if upload.completed is not None:\n raise serializers.ValidationError(_(\"Cannot upload chunk for a completed upload.\"))\n\n try:\n chunk = request.data['file']\n except KeyError:\n raise serializers.ValidationError(_(\"Missing 'file' parameter.\"))\n\n content_range = request.META.get('HTTP_CONTENT_RANGE', '')\n match = re.compile(self.content_range_pattern).match(content_range)\n if not match:\n raise 
serializers.ValidationError(_(\"Invalid or missing content range header.\"))\n start = int(match[1])\n end = int(match[2])\n\n if (end - start + 1) != len(chunk):\n raise serializers.ValidationError(_(\"Chunk size does not match content range.\"))\n\n if end > upload.size - 1:\n raise serializers.ValidationError(_(\"End byte is greater than upload size.\"))\n\n upload.append(chunk, start)\n\n serializer = UploadSerializer(upload, context={'request': request})\n return Response(serializer.data)\n\n @swagger_auto_schema(operation_summary=\"Finish an Upload\",\n request_body=UploadCommitSerializer,\n responses={200: UploadSerializer})\n @detail_route(methods=('put',))\n def commit(self, request, pk):\n \"\"\"\n Commit the upload and mark it as completed.\n \"\"\"\n upload = self.get_object()\n\n try:\n sha256 = request.data['sha256']\n except KeyError:\n raise serializers.ValidationError(_(\"Checksum not supplied.\"))\n\n if sha256 != upload.sha256:\n raise serializers.ValidationError(_(\"Checksum does not match upload.\"))\n\n if upload.completed is not None:\n raise serializers.ValidationError(_(\"Upload is already complete.\"))\n\n upload.completed = datetime.now()\n upload.save()\n\n serializer = UploadSerializer(upload, context={'request': request})\n return Response(serializer.data)\n", "path": "pulpcore/app/viewsets/upload.py"}]} | 1,170 | 349 |
gh_patches_debug_10530 | rasdani/github-patches | git_diff | pytorch__examples-1084 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The GPU load is unbalanced
https://github.com/pytorch/examples/blob/2ee8d43dbe420be152fd5ce0d80b43b419a0e352/distributed/ddp-tutorial-series/multigpu_torchrun.py#L39
When I run the code and resume from an existing .pt file, the memory usage of GPU0 is significantly higher than that of the other GPUs.
This can be solved by adding a `map_location` parameter:
`snapshot = torch.load(snapshot_path, map_location=torch.device('cuda', int(os.environ["LOCAL_RANK"])))`
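For context, a minimal sketch of how `_load_snapshot` could look with the checkpoint mapped onto the calling process's own device (this is an assumed fix based on the line above, not the repository's official patch):

```python
import os
import torch

def _load_snapshot(self, snapshot_path):
    # Map the checkpoint onto this rank's GPU instead of letting it default to cuda:0.
    loc = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
    snapshot = torch.load(snapshot_path, map_location=loc)
    self.model.load_state_dict(snapshot["MODEL_STATE"])
    self.epochs_run = snapshot["EPOCHS_RUN"]
    print(f"Resuming training from snapshot at Epoch {self.epochs_run}")
```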
## My Environment
cudatoolkit 10.2
pytorch 12.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/ddp-tutorial-series/multinode.py`
Content:
```
1 import torch
2 import torch.nn.functional as F
3 from torch.utils.data import Dataset, DataLoader
4 from datautils import MyTrainDataset
5
6 import torch.multiprocessing as mp
7 from torch.utils.data.distributed import DistributedSampler
8 from torch.nn.parallel import DistributedDataParallel as DDP
9 from torch.distributed import init_process_group, destroy_process_group
10 import os
11
12
13 def ddp_setup():
14 init_process_group(backend="nccl")
15
16 class Trainer:
17 def __init__(
18 self,
19 model: torch.nn.Module,
20 train_data: DataLoader,
21 optimizer: torch.optim.Optimizer,
22 save_every: int,
23 snapshot_path: str,
24 ) -> None:
25 self.local_rank = int(os.environ["LOCAL_RANK"])
26 self.global_rank = int(os.environ["RANK"])
27 self.model = model.to(self.local_rank)
28 self.train_data = train_data
29 self.optimizer = optimizer
30 self.save_every = save_every
31 self.epochs_run = 0
32 self.snapshot_path = snapshot_path
33 if os.path.exists(snapshot_path):
34 print("Loading snapshot")
35 self._load_snapshot(snapshot_path)
36
37 self.model = DDP(self.model, device_ids=[self.local_rank])
38
39 def _load_snapshot(self, snapshot_path):
40 snapshot = torch.load(snapshot_path)
41 self.model.load_state_dict(snapshot["MODEL_STATE"])
42 self.epochs_run = snapshot["EPOCHS_RUN"]
43 print(f"Resuming training from snapshot at Epoch {self.epochs_run}")
44
45 def _run_batch(self, source, targets):
46 self.optimizer.zero_grad()
47 output = self.model(source)
48 loss = F.cross_entropy(output, targets)
49 loss.backward()
50 self.optimizer.step()
51
52 def _run_epoch(self, epoch):
53 b_sz = len(next(iter(self.train_data))[0])
54 print(f"[GPU{self.global_rank}] Epoch {epoch} | Batchsize: {b_sz} | Steps: {len(self.train_data)}")
55 self.train_data.sampler.set_epoch(epoch)
56 for source, targets in self.train_data:
57 source = source.to(self.local_rank)
58 targets = targets.to(self.local_rank)
59 self._run_batch(source, targets)
60
61 def _save_snapshot(self, epoch):
62 snapshot = {
63 "MODEL_STATE": self.model.module.state_dict(),
64 "EPOCHS_RUN": epoch,
65 }
66 torch.save(snapshot, self.snapshot_path)
67 print(f"Epoch {epoch} | Training snapshot saved at {self.snapshot_path}")
68
69 def train(self, max_epochs: int):
70 for epoch in range(self.epochs_run, max_epochs):
71 self._run_epoch(epoch)
72 if self.local_rank == 0 and epoch % self.save_every == 0:
73 self._save_snapshot(epoch)
74
75
76 def load_train_objs():
77 train_set = MyTrainDataset(2048) # load your dataset
78 model = torch.nn.Linear(20, 1) # load your model
79 optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
80 return train_set, model, optimizer
81
82
83 def prepare_dataloader(dataset: Dataset, batch_size: int):
84 return DataLoader(
85 dataset,
86 batch_size=batch_size,
87 pin_memory=True,
88 shuffle=False,
89 sampler=DistributedSampler(dataset)
90 )
91
92
93 def main(save_every: int, total_epochs: int, batch_size: int, snapshot_path: str = "snapshot.pt"):
94 ddp_setup()
95 dataset, model, optimizer = load_train_objs()
96 train_data = prepare_dataloader(dataset, batch_size)
97 trainer = Trainer(model, train_data, optimizer, save_every, snapshot_path)
98 trainer.train(total_epochs)
99 destroy_process_group()
100
101
102 if __name__ == "__main__":
103 import argparse
104 parser = argparse.ArgumentParser(description='simple distributed training job')
105 parser.add_argument('total_epochs', type=int, help='Total epochs to train the model')
106 parser.add_argument('save_every', type=int, help='How often to save a snapshot')
107 parser.add_argument('--batch_size', default=32, help='Input batch size on each device (default: 32)')
108 args = parser.parse_args()
109
110 main(args.save_every, args.total_epochs, args.batch_size)
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/distributed/ddp-tutorial-series/multinode.py b/distributed/ddp-tutorial-series/multinode.py
--- a/distributed/ddp-tutorial-series/multinode.py
+++ b/distributed/ddp-tutorial-series/multinode.py
@@ -37,7 +37,8 @@
self.model = DDP(self.model, device_ids=[self.local_rank])
def _load_snapshot(self, snapshot_path):
- snapshot = torch.load(snapshot_path)
+ loc = f"cuda:{self.gpu_id}"
+ snapshot = torch.load(snapshot_path, map_location=loc)
self.model.load_state_dict(snapshot["MODEL_STATE"])
self.epochs_run = snapshot["EPOCHS_RUN"]
print(f"Resuming training from snapshot at Epoch {self.epochs_run}")
| {"golden_diff": "diff --git a/distributed/ddp-tutorial-series/multinode.py b/distributed/ddp-tutorial-series/multinode.py\n--- a/distributed/ddp-tutorial-series/multinode.py\n+++ b/distributed/ddp-tutorial-series/multinode.py\n@@ -37,7 +37,8 @@\n self.model = DDP(self.model, device_ids=[self.local_rank])\n \n def _load_snapshot(self, snapshot_path):\n- snapshot = torch.load(snapshot_path)\n+ loc = f\"cuda:{self.gpu_id}\"\n+ snapshot = torch.load(snapshot_path, map_location=loc)\n self.model.load_state_dict(snapshot[\"MODEL_STATE\"])\n self.epochs_run = snapshot[\"EPOCHS_RUN\"]\n print(f\"Resuming training from snapshot at Epoch {self.epochs_run}\")\n", "issue": "The GPU load is unbalanced\nhttps://github.com/pytorch/examples/blob/2ee8d43dbe420be152fd5ce0d80b43b419a0e352/distributed/ddp-tutorial-series/multigpu_torchrun.py#L39\r\nWhen I run the code and resume from a existed .pt file. The memory usage of GPU0 is significantly higher than other GPUs. \r\nIt can be solved by adding a parameter \"map_location\".\r\n`snapshot = torch.load(snapshot_path, map_location=torch.device('cuda', int(os.environ[\"LOCAL_RANK\"])))`\r\n## My Environment\r\ncudatoolkit 10.2\r\npytorch 12.1\r\n\n", "before_files": [{"content": "import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom datautils import MyTrainDataset\n\nimport torch.multiprocessing as mp\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.distributed import init_process_group, destroy_process_group\nimport os\n\n\ndef ddp_setup():\n init_process_group(backend=\"nccl\")\n\nclass Trainer:\n def __init__(\n self,\n model: torch.nn.Module,\n train_data: DataLoader,\n optimizer: torch.optim.Optimizer,\n save_every: int,\n snapshot_path: str,\n ) -> None:\n self.local_rank = int(os.environ[\"LOCAL_RANK\"])\n self.global_rank = int(os.environ[\"RANK\"])\n self.model = model.to(self.local_rank)\n self.train_data = train_data\n self.optimizer = optimizer\n self.save_every = save_every\n self.epochs_run = 0\n self.snapshot_path = snapshot_path\n if os.path.exists(snapshot_path):\n print(\"Loading snapshot\")\n self._load_snapshot(snapshot_path)\n\n self.model = DDP(self.model, device_ids=[self.local_rank])\n\n def _load_snapshot(self, snapshot_path):\n snapshot = torch.load(snapshot_path)\n self.model.load_state_dict(snapshot[\"MODEL_STATE\"])\n self.epochs_run = snapshot[\"EPOCHS_RUN\"]\n print(f\"Resuming training from snapshot at Epoch {self.epochs_run}\")\n\n def _run_batch(self, source, targets):\n self.optimizer.zero_grad()\n output = self.model(source)\n loss = F.cross_entropy(output, targets)\n loss.backward()\n self.optimizer.step()\n\n def _run_epoch(self, epoch):\n b_sz = len(next(iter(self.train_data))[0])\n print(f\"[GPU{self.global_rank}] Epoch {epoch} | Batchsize: {b_sz} | Steps: {len(self.train_data)}\")\n self.train_data.sampler.set_epoch(epoch)\n for source, targets in self.train_data:\n source = source.to(self.local_rank)\n targets = targets.to(self.local_rank)\n self._run_batch(source, targets)\n\n def _save_snapshot(self, epoch):\n snapshot = {\n \"MODEL_STATE\": self.model.module.state_dict(),\n \"EPOCHS_RUN\": epoch,\n }\n torch.save(snapshot, self.snapshot_path)\n print(f\"Epoch {epoch} | Training snapshot saved at {self.snapshot_path}\")\n\n def train(self, max_epochs: int):\n for epoch in range(self.epochs_run, max_epochs):\n self._run_epoch(epoch)\n if self.local_rank == 0 and epoch % 
self.save_every == 0:\n self._save_snapshot(epoch)\n\n\ndef load_train_objs():\n train_set = MyTrainDataset(2048) # load your dataset\n model = torch.nn.Linear(20, 1) # load your model\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)\n return train_set, model, optimizer\n\n\ndef prepare_dataloader(dataset: Dataset, batch_size: int):\n return DataLoader(\n dataset,\n batch_size=batch_size,\n pin_memory=True,\n shuffle=False,\n sampler=DistributedSampler(dataset)\n )\n\n\ndef main(save_every: int, total_epochs: int, batch_size: int, snapshot_path: str = \"snapshot.pt\"):\n ddp_setup()\n dataset, model, optimizer = load_train_objs()\n train_data = prepare_dataloader(dataset, batch_size)\n trainer = Trainer(model, train_data, optimizer, save_every, snapshot_path)\n trainer.train(total_epochs)\n destroy_process_group()\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(description='simple distributed training job')\n parser.add_argument('total_epochs', type=int, help='Total epochs to train the model')\n parser.add_argument('save_every', type=int, help='How often to save a snapshot')\n parser.add_argument('--batch_size', default=32, help='Input batch size on each device (default: 32)')\n args = parser.parse_args()\n \n main(args.save_every, args.total_epochs, args.batch_size)\n", "path": "distributed/ddp-tutorial-series/multinode.py"}], "after_files": [{"content": "import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom datautils import MyTrainDataset\n\nimport torch.multiprocessing as mp\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.distributed import init_process_group, destroy_process_group\nimport os\n\n\ndef ddp_setup():\n init_process_group(backend=\"nccl\")\n\nclass Trainer:\n def __init__(\n self,\n model: torch.nn.Module,\n train_data: DataLoader,\n optimizer: torch.optim.Optimizer,\n save_every: int,\n snapshot_path: str,\n ) -> None:\n self.local_rank = int(os.environ[\"LOCAL_RANK\"])\n self.global_rank = int(os.environ[\"RANK\"])\n self.model = model.to(self.local_rank)\n self.train_data = train_data\n self.optimizer = optimizer\n self.save_every = save_every\n self.epochs_run = 0\n self.snapshot_path = snapshot_path\n if os.path.exists(snapshot_path):\n print(\"Loading snapshot\")\n self._load_snapshot(snapshot_path)\n\n self.model = DDP(self.model, device_ids=[self.local_rank])\n\n def _load_snapshot(self, snapshot_path):\n loc = f\"cuda:{self.gpu_id}\"\n snapshot = torch.load(snapshot_path, map_location=loc)\n self.model.load_state_dict(snapshot[\"MODEL_STATE\"])\n self.epochs_run = snapshot[\"EPOCHS_RUN\"]\n print(f\"Resuming training from snapshot at Epoch {self.epochs_run}\")\n\n def _run_batch(self, source, targets):\n self.optimizer.zero_grad()\n output = self.model(source)\n loss = F.cross_entropy(output, targets)\n loss.backward()\n self.optimizer.step()\n\n def _run_epoch(self, epoch):\n b_sz = len(next(iter(self.train_data))[0])\n print(f\"[GPU{self.global_rank}] Epoch {epoch} | Batchsize: {b_sz} | Steps: {len(self.train_data)}\")\n self.train_data.sampler.set_epoch(epoch)\n for source, targets in self.train_data:\n source = source.to(self.local_rank)\n targets = targets.to(self.local_rank)\n self._run_batch(source, targets)\n\n def _save_snapshot(self, epoch):\n snapshot = {\n \"MODEL_STATE\": self.model.module.state_dict(),\n \"EPOCHS_RUN\": epoch,\n }\n torch.save(snapshot, 
self.snapshot_path)\n print(f\"Epoch {epoch} | Training snapshot saved at {self.snapshot_path}\")\n\n def train(self, max_epochs: int):\n for epoch in range(self.epochs_run, max_epochs):\n self._run_epoch(epoch)\n if self.local_rank == 0 and epoch % self.save_every == 0:\n self._save_snapshot(epoch)\n\n\ndef load_train_objs():\n train_set = MyTrainDataset(2048) # load your dataset\n model = torch.nn.Linear(20, 1) # load your model\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)\n return train_set, model, optimizer\n\n\ndef prepare_dataloader(dataset: Dataset, batch_size: int):\n return DataLoader(\n dataset,\n batch_size=batch_size,\n pin_memory=True,\n shuffle=False,\n sampler=DistributedSampler(dataset)\n )\n\n\ndef main(save_every: int, total_epochs: int, batch_size: int, snapshot_path: str = \"snapshot.pt\"):\n ddp_setup()\n dataset, model, optimizer = load_train_objs()\n train_data = prepare_dataloader(dataset, batch_size)\n trainer = Trainer(model, train_data, optimizer, save_every, snapshot_path)\n trainer.train(total_epochs)\n destroy_process_group()\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(description='simple distributed training job')\n parser.add_argument('total_epochs', type=int, help='Total epochs to train the model')\n parser.add_argument('save_every', type=int, help='How often to save a snapshot')\n parser.add_argument('--batch_size', default=32, help='Input batch size on each device (default: 32)')\n args = parser.parse_args()\n \n main(args.save_every, args.total_epochs, args.batch_size)\n", "path": "distributed/ddp-tutorial-series/multinode.py"}]} | 1,544 | 171 |
gh_patches_debug_22395 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-1858 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update CONTRIBUTING, README and package metadata
### Description:
- [x] Update `CONTRIBUTING.md` to mention the usage of `nox` instead of `tox`
- [x] Reorganize `README.md`
- [x] Update `setup.py` to add links to the project
CI/CD: Tox -> Nox
We are a Python project, and working with Python files should be easier for any Python developer than working with text configuration in tox.
--- END ISSUE ---
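For context, a minimal, hedged sketch of the nox-based workflow the issue refers to; the session name and the pytest arguments below are illustrative assumptions, not the project's actual configuration (that file is shown further down):

```python
# Minimal noxfile.py sketch: sessions are plain Python functions.
import nox


@nox.session(python="3.10")
def tests(session):
    session.install("pytest")
    session.install("-e", ".")
    # Arguments given after "--" on the command line are forwarded to
    # pytest, e.g. `nox -s tests -- -k smoke`.
    session.run("pytest", *session.posargs)
```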
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `noxfile.py`
Content:
```
1 """Nox tool configuration file.
2
3 Nox is Tox tool replacement.
4 """
5 import shutil
6 from pathlib import Path
7
8 import nox
9
10 nox.options.keywords = "not docs"
11
12
13 def base_install(session):
14 """Create basic environment setup for tests and linting."""
15 session.install("-r", "test_requirements.txt")
16 session.install("-e", ".")
17 return session
18
19
20 @nox.session(python="3.10")
21 def lint(session):
22 """Run linting check locally."""
23 session.install("pre-commit")
24 session.run("pre-commit", "run", "-a")
25
26
27 @nox.session(python=["3.7", "3.8", "3.9", "3.10", "3.11"])
28 def tests(session):
29 """Run test suite with pytest."""
30 session = base_install(session)
31 session.run(
32 "pytest",
33 "--cov-report=html",
34 "--cov-report=xml",
35 "--cov-branch",
36 "--cov-fail-under=100",
37 )
38
39
40 @nox.session(python=["3.7", "3.8", "3.9", "3.10", "3.11"])
41 def safety_tests(session):
42 """Run safety tests."""
43 session = base_install(session)
44 session.run("safety", "check", "--full-report")
45
46
47 @nox.session(python="3.10")
48 def documentation_tests(session):
49 """Run documentation tests."""
50 return docs(session, batch_run=True)
51
52
53 @nox.session(python="3.10")
54 def docs(session, batch_run: bool = False):
55 """Build the documentation or serve documentation interactively."""
56 shutil.rmtree(Path("docs").joinpath("_build"), ignore_errors=True)
57 session.install("-r", "docs/requirements.txt")
58 session.install("-e", ".")
59 session.cd("docs")
60 sphinx_args = ["-b", "html", "-W", ".", "_build/html"]
61
62 if not session.interactive or batch_run:
63 sphinx_cmd = "sphinx-build"
64 else:
65 sphinx_cmd = "sphinx-autobuild"
66 sphinx_args.extend(
67 [
68 "--open-browser",
69 "--port",
70 "9812",
71 "--watch",
72 "../*.md",
73 "--watch",
74 "../*.rst",
75 "--watch",
76 "../*.py",
77 "--watch",
78 "../cookiecutter",
79 ]
80 )
81
82 session.run(sphinx_cmd, *sphinx_args)
83
```
Path: `setup.py`
Content:
```
1 """cookiecutter distutils configuration."""
2 from setuptools import setup
3
4 version = "2.1.2.dev0"
5
6 with open('README.md', encoding='utf-8') as readme_file:
7 readme = readme_file.read()
8
9 requirements = [
10 'binaryornot>=0.4.4',
11 'Jinja2>=2.7,<4.0.0',
12 'click>=7.0,<9.0.0',
13 'pyyaml>=5.3.1',
14 'jinja2-time>=0.2.0',
15 'python-slugify>=4.0.0',
16 'requests>=2.23.0',
17 ]
18
19 setup(
20 name='cookiecutter',
21 version=version,
22 description=(
23 'A command-line utility that creates projects from project '
24 'templates, e.g. creating a Python package project from a '
25 'Python package project template.'
26 ),
27 long_description=readme,
28 long_description_content_type='text/markdown',
29 author='Audrey Feldroy',
30 author_email='[email protected]',
31 url='https://github.com/cookiecutter/cookiecutter',
32 packages=['cookiecutter'],
33 package_dir={'cookiecutter': 'cookiecutter'},
34 entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},
35 include_package_data=True,
36 python_requires='>=3.7',
37 install_requires=requirements,
38 license='BSD',
39 zip_safe=False,
40 classifiers=[
41 "Development Status :: 5 - Production/Stable",
42 "Environment :: Console",
43 "Intended Audience :: Developers",
44 "Natural Language :: English",
45 "License :: OSI Approved :: BSD License",
46 "Programming Language :: Python :: 3 :: Only",
47 "Programming Language :: Python :: 3",
48 "Programming Language :: Python :: 3.7",
49 "Programming Language :: Python :: 3.8",
50 "Programming Language :: Python :: 3.9",
51 "Programming Language :: Python :: 3.10",
52 "Programming Language :: Python :: 3.11",
53 "Programming Language :: Python :: Implementation :: CPython",
54 "Programming Language :: Python :: Implementation :: PyPy",
55 "Programming Language :: Python",
56 "Topic :: Software Development",
57 ],
58 keywords=[
59 "cookiecutter",
60 "Python",
61 "projects",
62 "project templates",
63 "Jinja2",
64 "skeleton",
65 "scaffolding",
66 "project directory",
67 "package",
68 "packaging",
69 ],
70 )
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -28,12 +28,14 @@
def tests(session):
"""Run test suite with pytest."""
session = base_install(session)
+ posargs = session.posargs or ""
session.run(
"pytest",
"--cov-report=html",
"--cov-report=xml",
"--cov-branch",
"--cov-fail-under=100",
+ *posargs,
)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -29,6 +29,11 @@
author='Audrey Feldroy',
author_email='[email protected]',
url='https://github.com/cookiecutter/cookiecutter',
+ project_urls={
+ "Documentation": "https://cookiecutter.readthedocs.io",
+ "Issues": "https://github.com/cookiecutter/cookiecutter/issues",
+ "Discord": "https://discord.gg/9BrxzPKuEW",
+ },
packages=['cookiecutter'],
package_dir={'cookiecutter': 'cookiecutter'},
entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -28,12 +28,14 @@\n def tests(session):\n \"\"\"Run test suite with pytest.\"\"\"\n session = base_install(session)\n+ posargs = session.posargs or \"\"\n session.run(\n \"pytest\",\n \"--cov-report=html\",\n \"--cov-report=xml\",\n \"--cov-branch\",\n \"--cov-fail-under=100\",\n+ *posargs,\n )\n \n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,6 +29,11 @@\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n+ project_urls={\n+ \"Documentation\": \"https://cookiecutter.readthedocs.io\",\n+ \"Issues\": \"https://github.com/cookiecutter/cookiecutter/issues\",\n+ \"Discord\": \"https://discord.gg/9BrxzPKuEW\",\n+ },\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n", "issue": "Update CONTRIBUTING, README and package metadata\n### Description:\r\n\r\n- [x] Update `CONTRIBUTING.md` to mention the usage of `nox` instead of `tox`\r\n- [x] Reorganize `README.md`\r\n- [x] Update `setup.py` to add links to the project\nCI/CD: Tox -> Nox\nWe are python project, and working with python files should be easier for any python developer, over working with text configuration in tox.\n", "before_files": [{"content": "\"\"\"Nox tool configuration file.\n\nNox is Tox tool replacement.\n\"\"\"\nimport shutil\nfrom pathlib import Path\n\nimport nox\n\nnox.options.keywords = \"not docs\"\n\n\ndef base_install(session):\n \"\"\"Create basic environment setup for tests and linting.\"\"\"\n session.install(\"-r\", \"test_requirements.txt\")\n session.install(\"-e\", \".\")\n return session\n\n\[email protected](python=\"3.10\")\ndef lint(session):\n \"\"\"Run linting check locally.\"\"\"\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"-a\")\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\"])\ndef tests(session):\n \"\"\"Run test suite with pytest.\"\"\"\n session = base_install(session)\n session.run(\n \"pytest\",\n \"--cov-report=html\",\n \"--cov-report=xml\",\n \"--cov-branch\",\n \"--cov-fail-under=100\",\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\"])\ndef safety_tests(session):\n \"\"\"Run safety tests.\"\"\"\n session = base_install(session)\n session.run(\"safety\", \"check\", \"--full-report\")\n\n\[email protected](python=\"3.10\")\ndef documentation_tests(session):\n \"\"\"Run documentation tests.\"\"\"\n return docs(session, batch_run=True)\n\n\[email protected](python=\"3.10\")\ndef docs(session, batch_run: bool = False):\n \"\"\"Build the documentation or serve documentation interactively.\"\"\"\n shutil.rmtree(Path(\"docs\").joinpath(\"_build\"), ignore_errors=True)\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\"-e\", \".\")\n session.cd(\"docs\")\n sphinx_args = [\"-b\", \"html\", \"-W\", \".\", \"_build/html\"]\n\n if not session.interactive or batch_run:\n sphinx_cmd = \"sphinx-build\"\n else:\n sphinx_cmd = \"sphinx-autobuild\"\n sphinx_args.extend(\n [\n \"--open-browser\",\n \"--port\",\n \"9812\",\n \"--watch\",\n \"../*.md\",\n \"--watch\",\n \"../*.rst\",\n \"--watch\",\n \"../*.py\",\n \"--watch\",\n \"../cookiecutter\",\n ]\n )\n\n session.run(sphinx_cmd, *sphinx_args)\n", "path": "noxfile.py"}, {"content": "\"\"\"cookiecutter distutils configuration.\"\"\"\nfrom setuptools import 
setup\n\nversion = \"2.1.2.dev0\"\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<9.0.0',\n 'pyyaml>=5.3.1',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n]\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=3.7',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n keywords=[\n \"cookiecutter\",\n \"Python\",\n \"projects\",\n \"project templates\",\n \"Jinja2\",\n \"skeleton\",\n \"scaffolding\",\n \"project directory\",\n \"package\",\n \"packaging\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"Nox tool configuration file.\n\nNox is Tox tool replacement.\n\"\"\"\nimport shutil\nfrom pathlib import Path\n\nimport nox\n\nnox.options.keywords = \"not docs\"\n\n\ndef base_install(session):\n \"\"\"Create basic environment setup for tests and linting.\"\"\"\n session.install(\"-r\", \"test_requirements.txt\")\n session.install(\"-e\", \".\")\n return session\n\n\[email protected](python=\"3.10\")\ndef lint(session):\n \"\"\"Run linting check locally.\"\"\"\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"-a\")\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\"])\ndef tests(session):\n \"\"\"Run test suite with pytest.\"\"\"\n session = base_install(session)\n posargs = session.posargs or \"\"\n session.run(\n \"pytest\",\n \"--cov-report=html\",\n \"--cov-report=xml\",\n \"--cov-branch\",\n \"--cov-fail-under=100\",\n *posargs,\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\"])\ndef safety_tests(session):\n \"\"\"Run safety tests.\"\"\"\n session = base_install(session)\n session.run(\"safety\", \"check\", \"--full-report\")\n\n\[email protected](python=\"3.10\")\ndef documentation_tests(session):\n \"\"\"Run documentation tests.\"\"\"\n return docs(session, batch_run=True)\n\n\[email protected](python=\"3.10\")\ndef docs(session, batch_run: bool = False):\n \"\"\"Build the documentation or serve documentation interactively.\"\"\"\n shutil.rmtree(Path(\"docs\").joinpath(\"_build\"), ignore_errors=True)\n 
session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\"-e\", \".\")\n session.cd(\"docs\")\n sphinx_args = [\"-b\", \"html\", \"-W\", \".\", \"_build/html\"]\n\n if not session.interactive or batch_run:\n sphinx_cmd = \"sphinx-build\"\n else:\n sphinx_cmd = \"sphinx-autobuild\"\n sphinx_args.extend(\n [\n \"--open-browser\",\n \"--port\",\n \"9812\",\n \"--watch\",\n \"../*.md\",\n \"--watch\",\n \"../*.rst\",\n \"--watch\",\n \"../*.py\",\n \"--watch\",\n \"../cookiecutter\",\n ]\n )\n\n session.run(sphinx_cmd, *sphinx_args)\n", "path": "noxfile.py"}, {"content": "\"\"\"cookiecutter distutils configuration.\"\"\"\nfrom setuptools import setup\n\nversion = \"2.1.2.dev0\"\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<9.0.0',\n 'pyyaml>=5.3.1',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n]\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n project_urls={\n \"Documentation\": \"https://cookiecutter.readthedocs.io\",\n \"Issues\": \"https://github.com/cookiecutter/cookiecutter/issues\",\n \"Discord\": \"https://discord.gg/9BrxzPKuEW\",\n },\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=3.7',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n keywords=[\n \"cookiecutter\",\n \"Python\",\n \"projects\",\n \"project templates\",\n \"Jinja2\",\n \"skeleton\",\n \"scaffolding\",\n \"project directory\",\n \"package\",\n \"packaging\",\n ],\n)\n", "path": "setup.py"}]} | 1,737 | 289 |
gh_patches_debug_32557 | rasdani/github-patches | git_diff | google__clusterfuzz-2579 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Google OAuth2 during setup is no longer operable (redirect_uri: urn:ietf:wg:oauth:2.0:oob)
When going through production setup on a new instance, I get to this step:
$ python butler.py create_config --oauth-client-secrets-path=$CLIENT_SECRETS_PATH --firebase-api-key=$FIREBASE_API_KEY --project-id=$CLOUD_PROJECT_ID $CONFIG_DIR
...
Please visit this URL to authorize this application: https://accounts.google.com/o/oauth2/auth?response_type=code&client_id=5xxx.....
Visiting that site generates a 400 error:
| Error 400: invalid_request
| You can't sign in to this app because it doesn't comply with Google's OAuth 2.0 policy for keeping apps secure.
The error appears to be related to this announcement: https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html#instructions-oob, which states that, starting Feb 28th, 2022, new OAuth flows using the OOB redirect will be blocked.
--- END ISSUE ---
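A hedged sketch of the usual replacement for the deprecated console/OOB flow: a loopback redirect via `run_local_server()`. The secrets filename, scope, and port below mirror the issue context but are assumptions, not the project's settings:

```python
from google_auth_oauthlib.flow import InstalledAppFlow

flow = InstalledAppFlow.from_client_secrets_file(
    "client_secret.json",  # placeholder path to the OAuth client secrets
    scopes=["https://www.googleapis.com/auth/siteverification"],
)
# Spins up a temporary local web server and opens the browser,
# avoiding the blocked urn:ietf:wg:oauth:2.0:oob redirect.
credentials = flow.run_local_server(port=8085)
```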
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/local/butler/create_config.py`
Content:
```
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Script for creating a new deployment config."""
15
16 import json
17 import os
18 import shutil
19 import subprocess
20 import sys
21
22 from google_auth_oauthlib.flow import InstalledAppFlow
23 from googleapiclient import discovery
24 import google_auth_httplib2
25 import httplib2
26
27 from local.butler import appengine
28 from local.butler import common
29
30 _REQUIRED_SERVICES = (
31 'appengineflex.googleapis.com',
32 'bigquery-json.googleapis.com',
33 'cloudapis.googleapis.com',
34 'cloudbuild.googleapis.com',
35 'clouddebugger.googleapis.com',
36 'clouderrorreporting.googleapis.com',
37 'cloudprofiler.googleapis.com',
38 'cloudresourcemanager.googleapis.com',
39 'compute.googleapis.com',
40 'containerregistry.googleapis.com',
41 'datastore.googleapis.com',
42 'deploymentmanager.googleapis.com',
43 'file.googleapis.com',
44 'iam.googleapis.com',
45 'iamcredentials.googleapis.com',
46 'logging.googleapis.com',
47 'monitoring.googleapis.com',
48 'oslogin.googleapis.com',
49 'pubsub.googleapis.com',
50 'redis.googleapis.com',
51 'replicapool.googleapis.com',
52 'replicapoolupdater.googleapis.com',
53 'resourceviews.googleapis.com',
54 'siteverification.googleapis.com',
55 'sourcerepo.googleapis.com',
56 'stackdriver.googleapis.com',
57 'storage-api.googleapis.com',
58 'storage-component.googleapis.com',
59 'vpcaccess.googleapis.com',
60 )
61
62 _NUM_RETRIES = 2
63 _ENABLE_SERVICE_BATCH_SIZE = 19
64
65
66 class DomainVerifier(object):
67 """Domain verifier."""
68
69 def __init__(self, oauth_client_secrets_path):
70 flow = InstalledAppFlow.from_client_secrets_file(
71 oauth_client_secrets_path,
72 scopes=['https://www.googleapis.com/auth/siteverification'])
73 credentials = flow.run_console()
74
75 http = google_auth_httplib2.AuthorizedHttp(
76 credentials, http=httplib2.Http())
77
78 self.api = discovery.build('siteVerification', 'v1', http=http)
79
80 def get_domain_verification_tag(self, domain):
81 """Get the domain verification meta tag."""
82 response = self.api.webResource().getToken(
83 body={
84 'verificationMethod': 'FILE',
85 'site': {
86 'identifier': domain,
87 'type': 'SITE',
88 }
89 }).execute(num_retries=_NUM_RETRIES)
90
91 return response['token']
92
93 def verify(self, domain):
94 """Verify the domain verification meta tag."""
95 self.api.webResource().insert(
96 body={
97 'site': {
98 'identifier': domain,
99 'type': 'SITE',
100 }
101 },
102 verificationMethod='FILE').execute(num_retries=_NUM_RETRIES)
103
104 def add_owner(self, domain, email):
105 """Add a new domain owner."""
106 response = self.api.webResource().get(id=domain).execute(
107 num_retries=_NUM_RETRIES)
108
109 if email not in response['owners']:
110 response['owners'].append(email)
111
112 self.api.webResource().update(
113 id=domain, body=response).execute(num_retries=_NUM_RETRIES)
114
115
116 def get_numeric_project_id(gcloud, project_id):
117 """Get the numeric project ID."""
118 project_info = json.loads(
119 gcloud.run('projects', 'describe', project_id, '--format=json'))
120 return project_info['projectNumber']
121
122
123 def app_engine_service_account(project_id):
124 """Get the default App Engine service account."""
125 return project_id + '@appspot.gserviceaccount.com'
126
127
128 def compute_engine_service_account(gcloud, project_id):
129 """Get the default compute engine service account."""
130 return (get_numeric_project_id(gcloud, project_id) +
131 '[email protected]')
132
133
134 def enable_services(gcloud):
135 """Enable required services."""
136 for i in range(0, len(_REQUIRED_SERVICES), _ENABLE_SERVICE_BATCH_SIZE):
137 end = i + _ENABLE_SERVICE_BATCH_SIZE
138 gcloud.run('services', 'enable', *_REQUIRED_SERVICES[i:i + end])
139
140
141 def replace_file_contents(file_path, replacements):
142 """Replace contents of a file."""
143 with open(file_path) as f:
144 old_contents = f.read()
145 contents = old_contents
146 for find, replace in replacements:
147 contents = contents.replace(find, replace)
148
149 if contents == old_contents:
150 return
151
152 with open(file_path, 'w') as f:
153 f.write(contents)
154
155
156 def project_bucket(project_id, bucket_name):
157 """Return a project-specific bucket name."""
158 return '{name}.{project_id}.appspot.com'.format(
159 name=bucket_name, project_id=project_id)
160
161
162 def create_new_config(gcloud, project_id, new_config_dir,
163 domain_verification_tag, bucket_replacements,
164 gae_location, gce_zone, firebase_api_key):
165 """Create a new config directory."""
166 if os.path.exists(new_config_dir):
167 print('Overwriting existing directory.')
168 shutil.rmtree(new_config_dir)
169
170 gae_region = appengine.region_from_location(gae_location)
171 replacements = [
172 ('test-clusterfuzz-service-account-email',
173 compute_engine_service_account(gcloud, project_id)),
174 ('test-clusterfuzz', project_id),
175 ('test-project', project_id),
176 ('domain-verification-tag', domain_verification_tag),
177 ('gae-region', gae_region),
178 ('gce-zone', gce_zone),
179 ('firebase-api-key', firebase_api_key),
180 ]
181 replacements.extend(bucket_replacements)
182
183 shutil.copytree(os.path.join('configs', 'test'), new_config_dir)
184 for root_dir, _, filenames in os.walk(new_config_dir):
185 for filename in filenames:
186 file_path = os.path.join(root_dir, filename)
187 replace_file_contents(file_path, replacements)
188
189
190 def deploy_appengine(gcloud, config_dir, appengine_location):
191 """Deploy to App Engine."""
192 try:
193 gcloud.run('app', 'describe')
194 except common.GcloudError:
195 # Create new App Engine app if it does not exist.
196 gcloud.run('app', 'create', '--region=' + appengine_location)
197
198 subprocess.check_call([
199 'python', 'butler.py', 'deploy', '--force', '--targets', 'appengine',
200 '--prod', '--config-dir', config_dir
201 ])
202
203
204 def deploy_zips(config_dir):
205 """Deploy source zips."""
206 subprocess.check_call([
207 'python', 'butler.py', 'deploy', '--force', '--targets', 'zips', '--prod',
208 '--config-dir', config_dir
209 ])
210
211
212 def create_buckets(project_id, buckets):
213 """Create buckets."""
214 gsutil = common.Gsutil()
215 for bucket in buckets:
216 try:
217 gsutil.run('defstorageclass', 'get', 'gs://' + bucket)
218 except common.GsutilError:
219 # Create the bucket if it does not exist.
220 gsutil.run('mb', '-p', project_id, 'gs://' + bucket)
221
222
223 def set_cors(config_dir, buckets):
224 """Sets cors settings."""
225 gsutil = common.Gsutil()
226 cors_file_path = os.path.join(config_dir, 'gae', 'cors.json')
227 for bucket in buckets:
228 gsutil.run('cors', 'set', cors_file_path, 'gs://' + bucket)
229
230
231 def add_service_account_role(gcloud, project_id, service_account, role):
232 """Add an IAM role to a service account."""
233 gcloud.run('projects', 'add-iam-policy-binding', project_id, '--member',
234 'serviceAccount:' + service_account, '--role', role)
235
236
237 def execute(args):
238 """Create a new config directory and deployment."""
239 # Check this early on, as the deployment at the end would fail otherwise.
240 if common.is_git_dirty():
241 print('Your checkout contains uncommitted changes. Cannot proceed.')
242 sys.exit(1)
243 verifier = DomainVerifier(args.oauth_client_secrets_path)
244
245 gcloud = common.Gcloud(args.project_id)
246 enable_services(gcloud)
247
248 # Get tag for domain verification.
249 appspot_domain = 'https://' + args.project_id + '.appspot.com/'
250 domain_verification_tag = verifier.get_domain_verification_tag(appspot_domain)
251
252 blobs_bucket = project_bucket(args.project_id, 'blobs')
253 deployment_bucket = project_bucket(args.project_id, 'deployment')
254
255 bucket_replacements = (
256 ('test-blobs-bucket', blobs_bucket),
257 ('test-deployment-bucket', deployment_bucket),
258 ('test-bigquery-bucket', project_bucket(args.project_id, 'bigquery')),
259 ('test-backup-bucket', project_bucket(args.project_id, 'backup')),
260 ('test-coverage-bucket', project_bucket(args.project_id, 'coverage')),
261 ('test-fuzzer-logs-bucket', project_bucket(args.project_id,
262 'fuzzer-logs')),
263 ('test-corpus-bucket', project_bucket(args.project_id, 'corpus')),
264 ('test-quarantine-bucket', project_bucket(args.project_id, 'quarantine')),
265 ('test-shared-corpus-bucket',
266 project_bucket(args.project_id, 'shared-corpus')),
267 ('test-fuzz-logs-bucket', project_bucket(args.project_id, 'fuzz-logs')),
268 ('test-mutator-plugins-bucket',
269 project_bucket(args.project_id, 'mutator-plugins')),
270 )
271
272 # Write new configs.
273 create_new_config(gcloud, args.project_id, args.new_config_dir,
274 domain_verification_tag, bucket_replacements,
275 args.appengine_location, args.gce_zone,
276 args.firebase_api_key)
277 prev_dir = os.getcwd()
278 os.chdir(args.new_config_dir)
279
280 # Deploy App Engine and finish verification of domain.
281 os.chdir(prev_dir)
282 deploy_appengine(
283 gcloud, args.new_config_dir, appengine_location=args.appengine_location)
284 verifier.verify(appspot_domain)
285
286 # App Engine service account requires:
287 # - Domain ownership to create domain namespaced GCS buckets
288 # - Datastore export permission for periodic backups.
289 # - Service account signing permission for GCS uploads.
290 service_account = app_engine_service_account(args.project_id)
291 verifier.add_owner(appspot_domain, service_account)
292 add_service_account_role(gcloud, args.project_id, service_account,
293 'roles/datastore.importExportAdmin')
294 add_service_account_role(gcloud, args.project_id, service_account,
295 'roles/iam.serviceAccountTokenCreator')
296
297 # Create buckets now that domain is verified.
298 create_buckets(args.project_id, [bucket for _, bucket in bucket_replacements])
299
300 # Set CORS settings on the buckets.
301 set_cors(args.new_config_dir, [blobs_bucket])
302
303 # Set deployment bucket for the cloud project.
304 gcloud.run('compute', 'project-info', 'add-metadata',
305 '--metadata=deployment-bucket=' + deployment_bucket)
306
307 # Deploy source zips.
308 deploy_zips(args.new_config_dir)
309
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/local/butler/create_config.py b/src/local/butler/create_config.py
--- a/src/local/butler/create_config.py
+++ b/src/local/butler/create_config.py
@@ -19,9 +19,9 @@
import subprocess
import sys
+import google_auth_httplib2
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient import discovery
-import google_auth_httplib2
import httplib2
from local.butler import appengine
@@ -70,7 +70,7 @@
flow = InstalledAppFlow.from_client_secrets_file(
oauth_client_secrets_path,
scopes=['https://www.googleapis.com/auth/siteverification'])
- credentials = flow.run_console()
+ credentials = flow.run_local_server()
http = google_auth_httplib2.AuthorizedHttp(
credentials, http=httplib2.Http())
@@ -140,7 +140,7 @@
def replace_file_contents(file_path, replacements):
"""Replace contents of a file."""
- with open(file_path) as f:
+ with open(file_path, encoding='utf-8') as f:
old_contents = f.read()
contents = old_contents
for find, replace in replacements:
@@ -149,14 +149,13 @@
if contents == old_contents:
return
- with open(file_path, 'w') as f:
+ with open(file_path, 'w', encoding='utf-8') as f:
f.write(contents)
def project_bucket(project_id, bucket_name):
"""Return a project-specific bucket name."""
- return '{name}.{project_id}.appspot.com'.format(
- name=bucket_name, project_id=project_id)
+ return f'{bucket_name}.{project_id}.appspot.com'
def create_new_config(gcloud, project_id, new_config_dir,
| {"golden_diff": "diff --git a/src/local/butler/create_config.py b/src/local/butler/create_config.py\n--- a/src/local/butler/create_config.py\n+++ b/src/local/butler/create_config.py\n@@ -19,9 +19,9 @@\n import subprocess\n import sys\n \n+import google_auth_httplib2\n from google_auth_oauthlib.flow import InstalledAppFlow\n from googleapiclient import discovery\n-import google_auth_httplib2\n import httplib2\n \n from local.butler import appengine\n@@ -70,7 +70,7 @@\n flow = InstalledAppFlow.from_client_secrets_file(\n oauth_client_secrets_path,\n scopes=['https://www.googleapis.com/auth/siteverification'])\n- credentials = flow.run_console()\n+ credentials = flow.run_local_server()\n \n http = google_auth_httplib2.AuthorizedHttp(\n credentials, http=httplib2.Http())\n@@ -140,7 +140,7 @@\n \n def replace_file_contents(file_path, replacements):\n \"\"\"Replace contents of a file.\"\"\"\n- with open(file_path) as f:\n+ with open(file_path, encoding='utf-8') as f:\n old_contents = f.read()\n contents = old_contents\n for find, replace in replacements:\n@@ -149,14 +149,13 @@\n if contents == old_contents:\n return\n \n- with open(file_path, 'w') as f:\n+ with open(file_path, 'w', encoding='utf-8') as f:\n f.write(contents)\n \n \n def project_bucket(project_id, bucket_name):\n \"\"\"Return a project-specific bucket name.\"\"\"\n- return '{name}.{project_id}.appspot.com'.format(\n- name=bucket_name, project_id=project_id)\n+ return f'{bucket_name}.{project_id}.appspot.com'\n \n \n def create_new_config(gcloud, project_id, new_config_dir,\n", "issue": "Google OAuth2 during setup is no longer operable (redirect_uri: urn:ietf:wg:oauth:2.0:oob)\nWhen going through production setup on a new instance, I get to this step:\r\n\r\n$ python butler.py create_config --oauth-client-secrets-path=$CLIENT_SECRETS_PATH --firebase-api-key=$FIREBASE_API_KEY --project-id=$CLOUD_PROJECT_ID $CONFIG_DIR\r\n...\r\nPlease visit this URL to authorize this application: https://accounts.google.com/o/oauth2/auth?response_type=code&client_id=5xxx.....\r\n\r\nVisiting that site generates a 400 error: \r\n\r\n| Error 400: invalid_request\r\n| You can't sign in to this app because it doesn't comply with Google's OAuth 2.0 policy for keeping apps secure.\r\n\r\n\r\nThe error appears related to this, https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html#instructions-oob which shows that from Feb 28th, 2022 and new OAuth flows related to the OOB will be blocked.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Script for creating a new deployment config.\"\"\"\n\nimport json\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom googleapiclient import discovery\nimport google_auth_httplib2\nimport httplib2\n\nfrom local.butler import appengine\nfrom local.butler import common\n\n_REQUIRED_SERVICES = (\n 'appengineflex.googleapis.com',\n 'bigquery-json.googleapis.com',\n 
'cloudapis.googleapis.com',\n 'cloudbuild.googleapis.com',\n 'clouddebugger.googleapis.com',\n 'clouderrorreporting.googleapis.com',\n 'cloudprofiler.googleapis.com',\n 'cloudresourcemanager.googleapis.com',\n 'compute.googleapis.com',\n 'containerregistry.googleapis.com',\n 'datastore.googleapis.com',\n 'deploymentmanager.googleapis.com',\n 'file.googleapis.com',\n 'iam.googleapis.com',\n 'iamcredentials.googleapis.com',\n 'logging.googleapis.com',\n 'monitoring.googleapis.com',\n 'oslogin.googleapis.com',\n 'pubsub.googleapis.com',\n 'redis.googleapis.com',\n 'replicapool.googleapis.com',\n 'replicapoolupdater.googleapis.com',\n 'resourceviews.googleapis.com',\n 'siteverification.googleapis.com',\n 'sourcerepo.googleapis.com',\n 'stackdriver.googleapis.com',\n 'storage-api.googleapis.com',\n 'storage-component.googleapis.com',\n 'vpcaccess.googleapis.com',\n)\n\n_NUM_RETRIES = 2\n_ENABLE_SERVICE_BATCH_SIZE = 19\n\n\nclass DomainVerifier(object):\n \"\"\"Domain verifier.\"\"\"\n\n def __init__(self, oauth_client_secrets_path):\n flow = InstalledAppFlow.from_client_secrets_file(\n oauth_client_secrets_path,\n scopes=['https://www.googleapis.com/auth/siteverification'])\n credentials = flow.run_console()\n\n http = google_auth_httplib2.AuthorizedHttp(\n credentials, http=httplib2.Http())\n\n self.api = discovery.build('siteVerification', 'v1', http=http)\n\n def get_domain_verification_tag(self, domain):\n \"\"\"Get the domain verification meta tag.\"\"\"\n response = self.api.webResource().getToken(\n body={\n 'verificationMethod': 'FILE',\n 'site': {\n 'identifier': domain,\n 'type': 'SITE',\n }\n }).execute(num_retries=_NUM_RETRIES)\n\n return response['token']\n\n def verify(self, domain):\n \"\"\"Verify the domain verification meta tag.\"\"\"\n self.api.webResource().insert(\n body={\n 'site': {\n 'identifier': domain,\n 'type': 'SITE',\n }\n },\n verificationMethod='FILE').execute(num_retries=_NUM_RETRIES)\n\n def add_owner(self, domain, email):\n \"\"\"Add a new domain owner.\"\"\"\n response = self.api.webResource().get(id=domain).execute(\n num_retries=_NUM_RETRIES)\n\n if email not in response['owners']:\n response['owners'].append(email)\n\n self.api.webResource().update(\n id=domain, body=response).execute(num_retries=_NUM_RETRIES)\n\n\ndef get_numeric_project_id(gcloud, project_id):\n \"\"\"Get the numeric project ID.\"\"\"\n project_info = json.loads(\n gcloud.run('projects', 'describe', project_id, '--format=json'))\n return project_info['projectNumber']\n\n\ndef app_engine_service_account(project_id):\n \"\"\"Get the default App Engine service account.\"\"\"\n return project_id + '@appspot.gserviceaccount.com'\n\n\ndef compute_engine_service_account(gcloud, project_id):\n \"\"\"Get the default compute engine service account.\"\"\"\n return (get_numeric_project_id(gcloud, project_id) +\n '[email protected]')\n\n\ndef enable_services(gcloud):\n \"\"\"Enable required services.\"\"\"\n for i in range(0, len(_REQUIRED_SERVICES), _ENABLE_SERVICE_BATCH_SIZE):\n end = i + _ENABLE_SERVICE_BATCH_SIZE\n gcloud.run('services', 'enable', *_REQUIRED_SERVICES[i:i + end])\n\n\ndef replace_file_contents(file_path, replacements):\n \"\"\"Replace contents of a file.\"\"\"\n with open(file_path) as f:\n old_contents = f.read()\n contents = old_contents\n for find, replace in replacements:\n contents = contents.replace(find, replace)\n\n if contents == old_contents:\n return\n\n with open(file_path, 'w') as f:\n f.write(contents)\n\n\ndef project_bucket(project_id, bucket_name):\n \"\"\"Return a 
project-specific bucket name.\"\"\"\n return '{name}.{project_id}.appspot.com'.format(\n name=bucket_name, project_id=project_id)\n\n\ndef create_new_config(gcloud, project_id, new_config_dir,\n domain_verification_tag, bucket_replacements,\n gae_location, gce_zone, firebase_api_key):\n \"\"\"Create a new config directory.\"\"\"\n if os.path.exists(new_config_dir):\n print('Overwriting existing directory.')\n shutil.rmtree(new_config_dir)\n\n gae_region = appengine.region_from_location(gae_location)\n replacements = [\n ('test-clusterfuzz-service-account-email',\n compute_engine_service_account(gcloud, project_id)),\n ('test-clusterfuzz', project_id),\n ('test-project', project_id),\n ('domain-verification-tag', domain_verification_tag),\n ('gae-region', gae_region),\n ('gce-zone', gce_zone),\n ('firebase-api-key', firebase_api_key),\n ]\n replacements.extend(bucket_replacements)\n\n shutil.copytree(os.path.join('configs', 'test'), new_config_dir)\n for root_dir, _, filenames in os.walk(new_config_dir):\n for filename in filenames:\n file_path = os.path.join(root_dir, filename)\n replace_file_contents(file_path, replacements)\n\n\ndef deploy_appengine(gcloud, config_dir, appengine_location):\n \"\"\"Deploy to App Engine.\"\"\"\n try:\n gcloud.run('app', 'describe')\n except common.GcloudError:\n # Create new App Engine app if it does not exist.\n gcloud.run('app', 'create', '--region=' + appengine_location)\n\n subprocess.check_call([\n 'python', 'butler.py', 'deploy', '--force', '--targets', 'appengine',\n '--prod', '--config-dir', config_dir\n ])\n\n\ndef deploy_zips(config_dir):\n \"\"\"Deploy source zips.\"\"\"\n subprocess.check_call([\n 'python', 'butler.py', 'deploy', '--force', '--targets', 'zips', '--prod',\n '--config-dir', config_dir\n ])\n\n\ndef create_buckets(project_id, buckets):\n \"\"\"Create buckets.\"\"\"\n gsutil = common.Gsutil()\n for bucket in buckets:\n try:\n gsutil.run('defstorageclass', 'get', 'gs://' + bucket)\n except common.GsutilError:\n # Create the bucket if it does not exist.\n gsutil.run('mb', '-p', project_id, 'gs://' + bucket)\n\n\ndef set_cors(config_dir, buckets):\n \"\"\"Sets cors settings.\"\"\"\n gsutil = common.Gsutil()\n cors_file_path = os.path.join(config_dir, 'gae', 'cors.json')\n for bucket in buckets:\n gsutil.run('cors', 'set', cors_file_path, 'gs://' + bucket)\n\n\ndef add_service_account_role(gcloud, project_id, service_account, role):\n \"\"\"Add an IAM role to a service account.\"\"\"\n gcloud.run('projects', 'add-iam-policy-binding', project_id, '--member',\n 'serviceAccount:' + service_account, '--role', role)\n\n\ndef execute(args):\n \"\"\"Create a new config directory and deployment.\"\"\"\n # Check this early on, as the deployment at the end would fail otherwise.\n if common.is_git_dirty():\n print('Your checkout contains uncommitted changes. 
Cannot proceed.')\n sys.exit(1)\n verifier = DomainVerifier(args.oauth_client_secrets_path)\n\n gcloud = common.Gcloud(args.project_id)\n enable_services(gcloud)\n\n # Get tag for domain verification.\n appspot_domain = 'https://' + args.project_id + '.appspot.com/'\n domain_verification_tag = verifier.get_domain_verification_tag(appspot_domain)\n\n blobs_bucket = project_bucket(args.project_id, 'blobs')\n deployment_bucket = project_bucket(args.project_id, 'deployment')\n\n bucket_replacements = (\n ('test-blobs-bucket', blobs_bucket),\n ('test-deployment-bucket', deployment_bucket),\n ('test-bigquery-bucket', project_bucket(args.project_id, 'bigquery')),\n ('test-backup-bucket', project_bucket(args.project_id, 'backup')),\n ('test-coverage-bucket', project_bucket(args.project_id, 'coverage')),\n ('test-fuzzer-logs-bucket', project_bucket(args.project_id,\n 'fuzzer-logs')),\n ('test-corpus-bucket', project_bucket(args.project_id, 'corpus')),\n ('test-quarantine-bucket', project_bucket(args.project_id, 'quarantine')),\n ('test-shared-corpus-bucket',\n project_bucket(args.project_id, 'shared-corpus')),\n ('test-fuzz-logs-bucket', project_bucket(args.project_id, 'fuzz-logs')),\n ('test-mutator-plugins-bucket',\n project_bucket(args.project_id, 'mutator-plugins')),\n )\n\n # Write new configs.\n create_new_config(gcloud, args.project_id, args.new_config_dir,\n domain_verification_tag, bucket_replacements,\n args.appengine_location, args.gce_zone,\n args.firebase_api_key)\n prev_dir = os.getcwd()\n os.chdir(args.new_config_dir)\n\n # Deploy App Engine and finish verification of domain.\n os.chdir(prev_dir)\n deploy_appengine(\n gcloud, args.new_config_dir, appengine_location=args.appengine_location)\n verifier.verify(appspot_domain)\n\n # App Engine service account requires:\n # - Domain ownership to create domain namespaced GCS buckets\n # - Datastore export permission for periodic backups.\n # - Service account signing permission for GCS uploads.\n service_account = app_engine_service_account(args.project_id)\n verifier.add_owner(appspot_domain, service_account)\n add_service_account_role(gcloud, args.project_id, service_account,\n 'roles/datastore.importExportAdmin')\n add_service_account_role(gcloud, args.project_id, service_account,\n 'roles/iam.serviceAccountTokenCreator')\n\n # Create buckets now that domain is verified.\n create_buckets(args.project_id, [bucket for _, bucket in bucket_replacements])\n\n # Set CORS settings on the buckets.\n set_cors(args.new_config_dir, [blobs_bucket])\n\n # Set deployment bucket for the cloud project.\n gcloud.run('compute', 'project-info', 'add-metadata',\n '--metadata=deployment-bucket=' + deployment_bucket)\n\n # Deploy source zips.\n deploy_zips(args.new_config_dir)\n", "path": "src/local/butler/create_config.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Script for creating a new deployment config.\"\"\"\n\nimport json\nimport os\nimport shutil\nimport subprocess\nimport 
sys\n\nimport google_auth_httplib2\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom googleapiclient import discovery\nimport httplib2\n\nfrom local.butler import appengine\nfrom local.butler import common\n\n_REQUIRED_SERVICES = (\n 'appengineflex.googleapis.com',\n 'bigquery-json.googleapis.com',\n 'cloudapis.googleapis.com',\n 'cloudbuild.googleapis.com',\n 'clouddebugger.googleapis.com',\n 'clouderrorreporting.googleapis.com',\n 'cloudprofiler.googleapis.com',\n 'cloudresourcemanager.googleapis.com',\n 'compute.googleapis.com',\n 'containerregistry.googleapis.com',\n 'datastore.googleapis.com',\n 'deploymentmanager.googleapis.com',\n 'file.googleapis.com',\n 'iam.googleapis.com',\n 'iamcredentials.googleapis.com',\n 'logging.googleapis.com',\n 'monitoring.googleapis.com',\n 'oslogin.googleapis.com',\n 'pubsub.googleapis.com',\n 'redis.googleapis.com',\n 'replicapool.googleapis.com',\n 'replicapoolupdater.googleapis.com',\n 'resourceviews.googleapis.com',\n 'siteverification.googleapis.com',\n 'sourcerepo.googleapis.com',\n 'stackdriver.googleapis.com',\n 'storage-api.googleapis.com',\n 'storage-component.googleapis.com',\n 'vpcaccess.googleapis.com',\n)\n\n_NUM_RETRIES = 2\n_ENABLE_SERVICE_BATCH_SIZE = 19\n\n\nclass DomainVerifier(object):\n \"\"\"Domain verifier.\"\"\"\n\n def __init__(self, oauth_client_secrets_path):\n flow = InstalledAppFlow.from_client_secrets_file(\n oauth_client_secrets_path,\n scopes=['https://www.googleapis.com/auth/siteverification'])\n credentials = flow.run_local_server()\n\n http = google_auth_httplib2.AuthorizedHttp(\n credentials, http=httplib2.Http())\n\n self.api = discovery.build('siteVerification', 'v1', http=http)\n\n def get_domain_verification_tag(self, domain):\n \"\"\"Get the domain verification meta tag.\"\"\"\n response = self.api.webResource().getToken(\n body={\n 'verificationMethod': 'FILE',\n 'site': {\n 'identifier': domain,\n 'type': 'SITE',\n }\n }).execute(num_retries=_NUM_RETRIES)\n\n return response['token']\n\n def verify(self, domain):\n \"\"\"Verify the domain verification meta tag.\"\"\"\n self.api.webResource().insert(\n body={\n 'site': {\n 'identifier': domain,\n 'type': 'SITE',\n }\n },\n verificationMethod='FILE').execute(num_retries=_NUM_RETRIES)\n\n def add_owner(self, domain, email):\n \"\"\"Add a new domain owner.\"\"\"\n response = self.api.webResource().get(id=domain).execute(\n num_retries=_NUM_RETRIES)\n\n if email not in response['owners']:\n response['owners'].append(email)\n\n self.api.webResource().update(\n id=domain, body=response).execute(num_retries=_NUM_RETRIES)\n\n\ndef get_numeric_project_id(gcloud, project_id):\n \"\"\"Get the numeric project ID.\"\"\"\n project_info = json.loads(\n gcloud.run('projects', 'describe', project_id, '--format=json'))\n return project_info['projectNumber']\n\n\ndef app_engine_service_account(project_id):\n \"\"\"Get the default App Engine service account.\"\"\"\n return project_id + '@appspot.gserviceaccount.com'\n\n\ndef compute_engine_service_account(gcloud, project_id):\n \"\"\"Get the default compute engine service account.\"\"\"\n return (get_numeric_project_id(gcloud, project_id) +\n '[email protected]')\n\n\ndef enable_services(gcloud):\n \"\"\"Enable required services.\"\"\"\n for i in range(0, len(_REQUIRED_SERVICES), _ENABLE_SERVICE_BATCH_SIZE):\n end = i + _ENABLE_SERVICE_BATCH_SIZE\n gcloud.run('services', 'enable', *_REQUIRED_SERVICES[i:i + end])\n\n\ndef replace_file_contents(file_path, replacements):\n \"\"\"Replace contents of a file.\"\"\"\n with 
open(file_path, encoding='utf-8') as f:\n old_contents = f.read()\n contents = old_contents\n for find, replace in replacements:\n contents = contents.replace(find, replace)\n\n if contents == old_contents:\n return\n\n with open(file_path, 'w', encoding='utf-8') as f:\n f.write(contents)\n\n\ndef project_bucket(project_id, bucket_name):\n \"\"\"Return a project-specific bucket name.\"\"\"\n return f'{bucket_name}.{project_id}.appspot.com'\n\n\ndef create_new_config(gcloud, project_id, new_config_dir,\n domain_verification_tag, bucket_replacements,\n gae_location, gce_zone, firebase_api_key):\n \"\"\"Create a new config directory.\"\"\"\n if os.path.exists(new_config_dir):\n print('Overwriting existing directory.')\n shutil.rmtree(new_config_dir)\n\n gae_region = appengine.region_from_location(gae_location)\n replacements = [\n ('test-clusterfuzz-service-account-email',\n compute_engine_service_account(gcloud, project_id)),\n ('test-clusterfuzz', project_id),\n ('test-project', project_id),\n ('domain-verification-tag', domain_verification_tag),\n ('gae-region', gae_region),\n ('gce-zone', gce_zone),\n ('firebase-api-key', firebase_api_key),\n ]\n replacements.extend(bucket_replacements)\n\n shutil.copytree(os.path.join('configs', 'test'), new_config_dir)\n for root_dir, _, filenames in os.walk(new_config_dir):\n for filename in filenames:\n file_path = os.path.join(root_dir, filename)\n replace_file_contents(file_path, replacements)\n\n\ndef deploy_appengine(gcloud, config_dir, appengine_location):\n \"\"\"Deploy to App Engine.\"\"\"\n try:\n gcloud.run('app', 'describe')\n except common.GcloudError:\n # Create new App Engine app if it does not exist.\n gcloud.run('app', 'create', '--region=' + appengine_location)\n\n subprocess.check_call([\n 'python', 'butler.py', 'deploy', '--force', '--targets', 'appengine',\n '--prod', '--config-dir', config_dir\n ])\n\n\ndef deploy_zips(config_dir):\n \"\"\"Deploy source zips.\"\"\"\n subprocess.check_call([\n 'python', 'butler.py', 'deploy', '--force', '--targets', 'zips', '--prod',\n '--config-dir', config_dir\n ])\n\n\ndef create_buckets(project_id, buckets):\n \"\"\"Create buckets.\"\"\"\n gsutil = common.Gsutil()\n for bucket in buckets:\n try:\n gsutil.run('defstorageclass', 'get', 'gs://' + bucket)\n except common.GsutilError:\n # Create the bucket if it does not exist.\n gsutil.run('mb', '-p', project_id, 'gs://' + bucket)\n\n\ndef set_cors(config_dir, buckets):\n \"\"\"Sets cors settings.\"\"\"\n gsutil = common.Gsutil()\n cors_file_path = os.path.join(config_dir, 'gae', 'cors.json')\n for bucket in buckets:\n gsutil.run('cors', 'set', cors_file_path, 'gs://' + bucket)\n\n\ndef add_service_account_role(gcloud, project_id, service_account, role):\n \"\"\"Add an IAM role to a service account.\"\"\"\n gcloud.run('projects', 'add-iam-policy-binding', project_id, '--member',\n 'serviceAccount:' + service_account, '--role', role)\n\n\ndef execute(args):\n \"\"\"Create a new config directory and deployment.\"\"\"\n # Check this early on, as the deployment at the end would fail otherwise.\n if common.is_git_dirty():\n print('Your checkout contains uncommitted changes. 
Cannot proceed.')\n sys.exit(1)\n verifier = DomainVerifier(args.oauth_client_secrets_path)\n\n gcloud = common.Gcloud(args.project_id)\n enable_services(gcloud)\n\n # Get tag for domain verification.\n appspot_domain = 'https://' + args.project_id + '.appspot.com/'\n domain_verification_tag = verifier.get_domain_verification_tag(appspot_domain)\n\n blobs_bucket = project_bucket(args.project_id, 'blobs')\n deployment_bucket = project_bucket(args.project_id, 'deployment')\n\n bucket_replacements = (\n ('test-blobs-bucket', blobs_bucket),\n ('test-deployment-bucket', deployment_bucket),\n ('test-bigquery-bucket', project_bucket(args.project_id, 'bigquery')),\n ('test-backup-bucket', project_bucket(args.project_id, 'backup')),\n ('test-coverage-bucket', project_bucket(args.project_id, 'coverage')),\n ('test-fuzzer-logs-bucket', project_bucket(args.project_id,\n 'fuzzer-logs')),\n ('test-corpus-bucket', project_bucket(args.project_id, 'corpus')),\n ('test-quarantine-bucket', project_bucket(args.project_id, 'quarantine')),\n ('test-shared-corpus-bucket',\n project_bucket(args.project_id, 'shared-corpus')),\n ('test-fuzz-logs-bucket', project_bucket(args.project_id, 'fuzz-logs')),\n ('test-mutator-plugins-bucket',\n project_bucket(args.project_id, 'mutator-plugins')),\n )\n\n # Write new configs.\n create_new_config(gcloud, args.project_id, args.new_config_dir,\n domain_verification_tag, bucket_replacements,\n args.appengine_location, args.gce_zone,\n args.firebase_api_key)\n prev_dir = os.getcwd()\n os.chdir(args.new_config_dir)\n\n # Deploy App Engine and finish verification of domain.\n os.chdir(prev_dir)\n deploy_appengine(\n gcloud, args.new_config_dir, appengine_location=args.appengine_location)\n verifier.verify(appspot_domain)\n\n # App Engine service account requires:\n # - Domain ownership to create domain namespaced GCS buckets\n # - Datastore export permission for periodic backups.\n # - Service account signing permission for GCS uploads.\n service_account = app_engine_service_account(args.project_id)\n verifier.add_owner(appspot_domain, service_account)\n add_service_account_role(gcloud, args.project_id, service_account,\n 'roles/datastore.importExportAdmin')\n add_service_account_role(gcloud, args.project_id, service_account,\n 'roles/iam.serviceAccountTokenCreator')\n\n # Create buckets now that domain is verified.\n create_buckets(args.project_id, [bucket for _, bucket in bucket_replacements])\n\n # Set CORS settings on the buckets.\n set_cors(args.new_config_dir, [blobs_bucket])\n\n # Set deployment bucket for the cloud project.\n gcloud.run('compute', 'project-info', 'add-metadata',\n '--metadata=deployment-bucket=' + deployment_bucket)\n\n # Deploy source zips.\n deploy_zips(args.new_config_dir)\n", "path": "src/local/butler/create_config.py"}]} | 3,762 | 411 |
gh_patches_debug_40014 | rasdani/github-patches | git_diff | DDMAL__CantusDB-1352 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
we should clean up CantusDB/django/cantusdb_project
```shell
root@7aa2f88fb303:/code/django/cantusdb_project# ls
align_text_mel.py create_fixtures.sh error_log.txt manage.py requirements.txt
articles differentia_data.txt latin_syllabification.py next_chants.py static
break_json.py editors_chant.csv load_fixtures.sh old_users_list.py templates
cantusdb editors_source.csv main_app oldcantususer_uid_role.csv users
```
The following files have already served their purpose:
- `differentia_data.txt` (used in #1137)
- `editors_chant.csv` (used in the data sync from OldCantus to New)
- `editors_source.csv` (used in the data sync from OldCantus to New)
- `old_users_list.py` (used in the data sync from OldCantus to New)
- `oldcantususer_uid_role.csv` (used in the data sync from OldCantus to New, created by `old_users_list.py`)
- `create_fixtures.sh` (we no longer migrate data using fixtures)
- `error_log.txt` (This is a log generated during the syncing process from OldCantus to NewCantus. It's not clear why it was committed to the repo in the first place.)
- `break_json.py` (we no longer migrate data using fixtures)
- `load_fixtures.sh` (we no longer migrate data using fixtures)
I propose deleting these files, so that future developers don't need to spend time figuring out what they are.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/old_users_list.py`
Content:
```
1 import csv
2 import lxml.html as lh
3 import requests
4
5 with open("oldcantususer_uid_role.csv", "r") as csvinput:
6 with open("oldcantususer_uid_role_detailed.csv", "w") as csvoutput:
7 with open("id_username_email.csv", "r") as csvinput_username_email:
8 writer = csv.writer(csvoutput, lineterminator="\n")
9 reader = csv.reader(csvinput)
10 reader_username_email = csv.reader(csvinput_username_email)
11
12 # header
13 writer.writerow(
14 [
15 "uid",
16 "old role",
17 "new role",
18 "name",
19 "surname",
20 "institution",
21 "town",
22 "country",
23 "username",
24 "email",
25 ]
26 )
27
28 for row, row_username_email in zip(reader, reader_username_email):
29 old_role = row[1]
30 if old_role == "administrator":
31 row.append("project manager")
32 elif old_role == "anonymous user":
33 row.append("")
34 elif old_role == "authenticated user":
35 row.append("")
36 elif old_role == "contributor":
37 row.append("contributor")
38 elif old_role == "Debra":
39 row.append("project manager")
40 elif old_role == "editor":
41 row.append("editor")
42 elif old_role == "power":
43 row.append("editor")
44 elif old_role == "proofreader":
45 row.append("editor")
46 elif old_role == "SIMSSA contributor":
47 row.append("contributor")
48
49 id = row[0]
50 url = f"https://cantus.uwaterloo.ca/user/{id}"
51 response = requests.get(url)
52 doc = lh.fromstring(response.content)
53
54 try:
55 name = (
56 doc.find_class("field-name-field-name")[0]
57 .find_class("field-item")[0]
58 .text_content()
59 )
60 except:
61 name = ""
62 try:
63 surname = (
64 doc.find_class("field-name-field-surname")[0]
65 .find_class("field-item")[0]
66 .text_content()
67 )
68 except:
69 surname = ""
70 try:
71 institution = (
72 doc.find_class("field-name-field-institution")[0]
73 .find_class("field-item")[0]
74 .text_content()
75 )
76 except:
77 institution = ""
78 try:
79 town = (
80 doc.find_class("field-name-field-town")[0]
81 .find_class("field-item")[0]
82 .text_content()
83 )
84 except:
85 town = ""
86 try:
87 country = (
88 doc.find_class("field-name-field-country")[0]
89 .find_class("field-item")[0]
90 .text_content()
91 )
92 except:
93 country = ""
94
95 username = row_username_email[1]
96 email = row_username_email[2]
97
98 row.append(name)
99 row.append(surname)
100 row.append(institution)
101 row.append(town)
102 row.append(country)
103 row.append(username)
104 row.append(email)
105
106 writer.writerow(row)
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django/cantusdb_project/old_users_list.py b/django/cantusdb_project/old_users_list.py
deleted file mode 100755
--- a/django/cantusdb_project/old_users_list.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import csv
-import lxml.html as lh
-import requests
-
-with open("oldcantususer_uid_role.csv", "r") as csvinput:
- with open("oldcantususer_uid_role_detailed.csv", "w") as csvoutput:
- with open("id_username_email.csv", "r") as csvinput_username_email:
- writer = csv.writer(csvoutput, lineterminator="\n")
- reader = csv.reader(csvinput)
- reader_username_email = csv.reader(csvinput_username_email)
-
- # header
- writer.writerow(
- [
- "uid",
- "old role",
- "new role",
- "name",
- "surname",
- "institution",
- "town",
- "country",
- "username",
- "email",
- ]
- )
-
- for row, row_username_email in zip(reader, reader_username_email):
- old_role = row[1]
- if old_role == "administrator":
- row.append("project manager")
- elif old_role == "anonymous user":
- row.append("")
- elif old_role == "authenticated user":
- row.append("")
- elif old_role == "contributor":
- row.append("contributor")
- elif old_role == "Debra":
- row.append("project manager")
- elif old_role == "editor":
- row.append("editor")
- elif old_role == "power":
- row.append("editor")
- elif old_role == "proofreader":
- row.append("editor")
- elif old_role == "SIMSSA contributor":
- row.append("contributor")
-
- id = row[0]
- url = f"https://cantus.uwaterloo.ca/user/{id}"
- response = requests.get(url)
- doc = lh.fromstring(response.content)
-
- try:
- name = (
- doc.find_class("field-name-field-name")[0]
- .find_class("field-item")[0]
- .text_content()
- )
- except:
- name = ""
- try:
- surname = (
- doc.find_class("field-name-field-surname")[0]
- .find_class("field-item")[0]
- .text_content()
- )
- except:
- surname = ""
- try:
- institution = (
- doc.find_class("field-name-field-institution")[0]
- .find_class("field-item")[0]
- .text_content()
- )
- except:
- institution = ""
- try:
- town = (
- doc.find_class("field-name-field-town")[0]
- .find_class("field-item")[0]
- .text_content()
- )
- except:
- town = ""
- try:
- country = (
- doc.find_class("field-name-field-country")[0]
- .find_class("field-item")[0]
- .text_content()
- )
- except:
- country = ""
-
- username = row_username_email[1]
- email = row_username_email[2]
-
- row.append(name)
- row.append(surname)
- row.append(institution)
- row.append(town)
- row.append(country)
- row.append(username)
- row.append(email)
-
- writer.writerow(row)
| {"golden_diff": "diff --git a/django/cantusdb_project/old_users_list.py b/django/cantusdb_project/old_users_list.py\ndeleted file mode 100755\n--- a/django/cantusdb_project/old_users_list.py\n+++ /dev/null\n@@ -1,106 +0,0 @@\n-import csv\n-import lxml.html as lh\n-import requests\n-\n-with open(\"oldcantususer_uid_role.csv\", \"r\") as csvinput:\n- with open(\"oldcantususer_uid_role_detailed.csv\", \"w\") as csvoutput:\n- with open(\"id_username_email.csv\", \"r\") as csvinput_username_email:\n- writer = csv.writer(csvoutput, lineterminator=\"\\n\")\n- reader = csv.reader(csvinput)\n- reader_username_email = csv.reader(csvinput_username_email)\n-\n- # header\n- writer.writerow(\n- [\n- \"uid\",\n- \"old role\",\n- \"new role\",\n- \"name\",\n- \"surname\",\n- \"institution\",\n- \"town\",\n- \"country\",\n- \"username\",\n- \"email\",\n- ]\n- )\n-\n- for row, row_username_email in zip(reader, reader_username_email):\n- old_role = row[1]\n- if old_role == \"administrator\":\n- row.append(\"project manager\")\n- elif old_role == \"anonymous user\":\n- row.append(\"\")\n- elif old_role == \"authenticated user\":\n- row.append(\"\")\n- elif old_role == \"contributor\":\n- row.append(\"contributor\")\n- elif old_role == \"Debra\":\n- row.append(\"project manager\")\n- elif old_role == \"editor\":\n- row.append(\"editor\")\n- elif old_role == \"power\":\n- row.append(\"editor\")\n- elif old_role == \"proofreader\":\n- row.append(\"editor\")\n- elif old_role == \"SIMSSA contributor\":\n- row.append(\"contributor\")\n-\n- id = row[0]\n- url = f\"https://cantus.uwaterloo.ca/user/{id}\"\n- response = requests.get(url)\n- doc = lh.fromstring(response.content)\n-\n- try:\n- name = (\n- doc.find_class(\"field-name-field-name\")[0]\n- .find_class(\"field-item\")[0]\n- .text_content()\n- )\n- except:\n- name = \"\"\n- try:\n- surname = (\n- doc.find_class(\"field-name-field-surname\")[0]\n- .find_class(\"field-item\")[0]\n- .text_content()\n- )\n- except:\n- surname = \"\"\n- try:\n- institution = (\n- doc.find_class(\"field-name-field-institution\")[0]\n- .find_class(\"field-item\")[0]\n- .text_content()\n- )\n- except:\n- institution = \"\"\n- try:\n- town = (\n- doc.find_class(\"field-name-field-town\")[0]\n- .find_class(\"field-item\")[0]\n- .text_content()\n- )\n- except:\n- town = \"\"\n- try:\n- country = (\n- doc.find_class(\"field-name-field-country\")[0]\n- .find_class(\"field-item\")[0]\n- .text_content()\n- )\n- except:\n- country = \"\"\n-\n- username = row_username_email[1]\n- email = row_username_email[2]\n-\n- row.append(name)\n- row.append(surname)\n- row.append(institution)\n- row.append(town)\n- row.append(country)\n- row.append(username)\n- row.append(email)\n-\n- writer.writerow(row)\n", "issue": "we should clean up CantusDB/django/cantusdb_project\n```shell\r\nroot@7aa2f88fb303:/code/django/cantusdb_project# ls\r\nalign_text_mel.py create_fixtures.sh\t error_log.txt\t\t manage.py\t\t requirements.txt\r\narticles\t differentia_data.txt latin_syllabification.py next_chants.py\t static\r\nbreak_json.py\t editors_chant.csv\t load_fixtures.sh\t old_users_list.py\t templates\r\ncantusdb\t editors_source.csv\t main_app\t\t oldcantususer_uid_role.csv users\r\n```\r\nThe following files have already served their purpose:\r\n- `differentia_data.txt` (used in #1137)\r\n- `editors_chant.csv` (used in the data sync from OldCantus to New)\r\n- `editors_source.csv` (used in the data sync from OldCantus to New)\r\n- `old_users_list.py` (used in the data sync from OldCantus to New)\r\n- 
`oldcantususer_uid_role.csv` (used in the data sync from OldCantus to New, created by `old_users_list.py`)\r\n- `create_fixtures.sh`, (we no longer migrate data using fixtures)\r\n- `error_log.txt` (This is a log generated during the syncing process from OldCantus to NewCantus. It's not clear why it was committed to the repo in the first place.)\r\n- `break_json.py` (we no longer migrate data using fixtures)\r\n- `load_fixtures.sh` (we no longer migrate data using fixtures)\r\n\r\nI propose deleting these files, so that future developers don't need to spend time figuring out what they are.\n", "before_files": [{"content": "import csv\nimport lxml.html as lh\nimport requests\n\nwith open(\"oldcantususer_uid_role.csv\", \"r\") as csvinput:\n with open(\"oldcantususer_uid_role_detailed.csv\", \"w\") as csvoutput:\n with open(\"id_username_email.csv\", \"r\") as csvinput_username_email:\n writer = csv.writer(csvoutput, lineterminator=\"\\n\")\n reader = csv.reader(csvinput)\n reader_username_email = csv.reader(csvinput_username_email)\n\n # header\n writer.writerow(\n [\n \"uid\",\n \"old role\",\n \"new role\",\n \"name\",\n \"surname\",\n \"institution\",\n \"town\",\n \"country\",\n \"username\",\n \"email\",\n ]\n )\n\n for row, row_username_email in zip(reader, reader_username_email):\n old_role = row[1]\n if old_role == \"administrator\":\n row.append(\"project manager\")\n elif old_role == \"anonymous user\":\n row.append(\"\")\n elif old_role == \"authenticated user\":\n row.append(\"\")\n elif old_role == \"contributor\":\n row.append(\"contributor\")\n elif old_role == \"Debra\":\n row.append(\"project manager\")\n elif old_role == \"editor\":\n row.append(\"editor\")\n elif old_role == \"power\":\n row.append(\"editor\")\n elif old_role == \"proofreader\":\n row.append(\"editor\")\n elif old_role == \"SIMSSA contributor\":\n row.append(\"contributor\")\n\n id = row[0]\n url = f\"https://cantus.uwaterloo.ca/user/{id}\"\n response = requests.get(url)\n doc = lh.fromstring(response.content)\n\n try:\n name = (\n doc.find_class(\"field-name-field-name\")[0]\n .find_class(\"field-item\")[0]\n .text_content()\n )\n except:\n name = \"\"\n try:\n surname = (\n doc.find_class(\"field-name-field-surname\")[0]\n .find_class(\"field-item\")[0]\n .text_content()\n )\n except:\n surname = \"\"\n try:\n institution = (\n doc.find_class(\"field-name-field-institution\")[0]\n .find_class(\"field-item\")[0]\n .text_content()\n )\n except:\n institution = \"\"\n try:\n town = (\n doc.find_class(\"field-name-field-town\")[0]\n .find_class(\"field-item\")[0]\n .text_content()\n )\n except:\n town = \"\"\n try:\n country = (\n doc.find_class(\"field-name-field-country\")[0]\n .find_class(\"field-item\")[0]\n .text_content()\n )\n except:\n country = \"\"\n\n username = row_username_email[1]\n email = row_username_email[2]\n\n row.append(name)\n row.append(surname)\n row.append(institution)\n row.append(town)\n row.append(country)\n row.append(username)\n row.append(email)\n\n writer.writerow(row)\n", "path": "django/cantusdb_project/old_users_list.py"}], "after_files": [{"content": null, "path": "django/cantusdb_project/old_users_list.py"}]} | 1,482 | 808 |
gh_patches_debug_4382 | rasdani/github-patches | git_diff | pymedusa__Medusa-5865 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LimeTorrents address needs to be updated
**Describe the bug**
Not really a bug, but an update is required for the address.
I'm getting Status: 301 errors (meaning site has moved) as shown below:
SEARCHQUEUE-DAILY-SEARCH :: [LimeTorrents] :: [54da0c7] GET URL: https://www.limetorrents.cc/browse-torrents/TV-shows/ [Status: 301]
I believe the new address is https://www.limetorrents.info
Because it's a built-in provider I can't seem to change it.
Could this be updated in a future commit?
Thanks
**To Reproduce**
Steps to reproduce the behavior:
1. Go to 'http://localhost:8081/errorlogs/viewlog/?min_level=10&log_filter=None&log_period=one_day&log_search=status%3A%20301'
2. See error
**Screenshots**
If applicable, add screenshots to help explain your problem.
(screenshot of the 301 errors omitted)
**Medusa (please complete the following information):**
 - OS: Windows 10
- Branch: master
- Commit: 54da0c77d705a23f1ce67945ba4af3e1b978dcfc
**Logs:**
<details>
```
<-- Please replace this whole line with your logs -->
```
</details>
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `medusa/providers/torrent/html/limetorrents.py`
Content:
```
1 # coding=utf-8
2
3 """Provider code for Limetorrents."""
4
5 from __future__ import unicode_literals
6
7 import logging
8 import re
9
10 from medusa import tv
11 from medusa.bs4_parser import BS4Parser
12 from medusa.helper.common import (
13 convert_size,
14 try_int,
15 )
16 from medusa.logger.adapters.style import BraceAdapter
17 from medusa.providers.torrent.torrent_provider import TorrentProvider
18
19 from requests.compat import urljoin
20
21 log = BraceAdapter(logging.getLogger(__name__))
22 log.logger.addHandler(logging.NullHandler())
23
24 id_regex = re.compile(r'(?:\/)(.*)(?:-torrent-([0-9]*)\.html)', re.I)
25 hash_regex = re.compile(r'(.*)([0-9a-f]{40})(.*)', re.I)
26
27
28 class LimeTorrentsProvider(TorrentProvider):
29 """LimeTorrents Torrent provider."""
30
31 def __init__(self):
32 """Initialize the class."""
33 super(LimeTorrentsProvider, self).__init__('LimeTorrents')
34
35 # Credentials
36 self.public = True
37
38 # URLs
39 self.url = 'https://www.limetorrents.cc'
40 self.urls = {
41 'update': urljoin(self.url, '/post/updatestats.php'),
42 'search': urljoin(self.url, '/search/tv/{query}/'),
43 # Original rss feed url, temporary offline. Replaced by the main Tv-show page.
44 # 'rss': urljoin(self.url, '/browse-torrents/TV-shows/date/{page}/'),
45 'rss': urljoin(self.url, '/browse-torrents/TV-shows/'),
46 }
47
48 # Proper Strings
49 self.proper_strings = ['PROPER', 'REPACK', 'REAL']
50
51 # Miscellaneous Options
52 self.confirmed = False
53
54 # Torrent Stats
55 self.minseed = None
56 self.minleech = None
57
58 # Cache
59 self.cache = tv.Cache(self, min_time=15)
60
61 def search(self, search_strings, age=0, ep_obj=None, **kwargs):
62 """
63 Search a provider and parse the results.
64
65 :param search_strings: A dict with mode (key) and the search value (value)
66 :param age: Not used
67 :param ep_obj: Not used
68 :returns: A list of search results (structure)
69 """
70 results = []
71
72 for mode in search_strings:
73 log.debug('Search mode: {0}', mode)
74
75 for search_string in search_strings[mode]:
76
77 if mode != 'RSS':
78 log.debug('Search string: {search}',
79 {'search': search_string})
80 if self.confirmed:
81 log.debug('Searching only confirmed torrents')
82
83 search_url = self.urls['search'].format(query=search_string)
84 else:
85 # search_url = self.urls['rss'].format(page=1)
86 search_url = self.urls['rss']
87
88 response = self.session.get(search_url)
89 if not response or not response.text:
90 log.debug('No data returned from provider')
91 continue
92
93 results += self.parse(response.text, mode)
94
95 return results
96
97 def parse(self, data, mode):
98 """
99 Parse search results for items.
100
101 :param data: The raw response from a search
102 :param mode: The current mode used to search, e.g. RSS
103
104 :return: A list of items found
105 """
106 items = []
107
108 def process_column_header(th):
109 return th.span.get_text() if th.span else th.get_text()
110
111 with BS4Parser(data, 'html5lib') as html:
112 torrent_table = html.find('table', class_='table2')
113
114 if not torrent_table:
115 log.debug('Data returned from provider does not contain any {0}torrents',
116 'confirmed ' if self.confirmed else '')
117 return items
118
119 torrent_rows = torrent_table.find_all('tr')
120 labels = [process_column_header(label) for label in torrent_rows[0].find_all('th')]
121
122 # Skip the first row, since it isn't a valid result
123 for row in torrent_rows[1:]:
124 cells = row.find_all('td')
125
126 try:
127 title_cell = cells[labels.index('Torrent Name')]
128
129 verified = title_cell.find('img', title='Verified torrent')
130 if self.confirmed and not verified:
131 continue
132
133 title_anchors = title_cell.find_all('a')
134 if not title_anchors or len(title_anchors) < 2:
135 continue
136
137 title_url = title_anchors[0].get('href')
138 title = title_anchors[1].get_text(strip=True)
139 regex_result = id_regex.search(title_anchors[1].get('href'))
140
141 alt_title = regex_result.group(1)
142 if len(title) < len(alt_title):
143 title = alt_title.replace('-', ' ')
144
145 info_hash = hash_regex.search(title_url).group(2)
146 if not all([title, info_hash]):
147 continue
148
149 download_url = 'magnet:?xt=urn:btih:{hash}&dn={title}{trackers}'.format(
150 hash=info_hash, title=title, trackers=self._custom_trackers)
151
152 # Remove comma as thousands separator from larger number like 2,000 seeders = 2000
153 seeders = try_int(cells[labels.index('Seed')].get_text(strip=True).replace(',', ''))
154 leechers = try_int(cells[labels.index('Leech')].get_text(strip=True).replace(',', ''))
155
156 if seeders < min(self.minseed, 1):
157 if mode != 'RSS':
158 log.debug("Discarding torrent because it doesn't meet the"
159 ' minimum seeders: {0}. Seeders: {1}',
160 title, seeders)
161 continue
162
163 size = convert_size(cells[labels.index('Size')].get_text(strip=True)) or -1
164
165 pubdate_raw = cells[1].get_text().replace('Last', '1').replace('Yesterday', '24 hours')
166 pubdate = self.parse_pubdate(pubdate_raw, human_time=True)
167
168 item = {
169 'title': title,
170 'link': download_url,
171 'size': size,
172 'seeders': seeders,
173 'leechers': leechers,
174 'pubdate': pubdate,
175 }
176 if mode != 'RSS':
177 log.debug('Found result: {0} with {1} seeders and {2} leechers',
178 title, seeders, leechers)
179
180 items.append(item)
181 except (AttributeError, TypeError, KeyError, ValueError, IndexError):
182 log.exception('Failed parsing provider.')
183
184 return items
185
186
187 provider = LimeTorrentsProvider()
188
```
--- END FILES ---
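For orientation, note that every endpoint in the provider above is derived from the single `self.url` base via `urljoin`, so a site move only requires updating that one attribute. The snippet below is a minimal illustration of that derivation, not project code; it uses the standard-library `urllib.parse.urljoin` (equivalent here to the file's `requests.compat.urljoin`) and assumes the `.info` domain suggested in the issue:
```python
# Minimal illustration (not project code): the provider's URLs all derive from
# one base URL, so updating the base is sufficient when the site moves.
from urllib.parse import urljoin

base_url = 'https://www.limetorrents.info'  # domain suggested in the issue
urls = {
    'update': urljoin(base_url, '/post/updatestats.php'),
    'search': urljoin(base_url, '/search/tv/{query}/'),
    'rss': urljoin(base_url, '/browse-torrents/TV-shows/'),
}
print(urls['rss'])  # -> https://www.limetorrents.info/browse-torrents/TV-shows/
```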
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/medusa/providers/torrent/html/limetorrents.py b/medusa/providers/torrent/html/limetorrents.py
--- a/medusa/providers/torrent/html/limetorrents.py
+++ b/medusa/providers/torrent/html/limetorrents.py
@@ -36,7 +36,7 @@
self.public = True
# URLs
- self.url = 'https://www.limetorrents.cc'
+ self.url = 'https://www.limetorrents.info'
self.urls = {
'update': urljoin(self.url, '/post/updatestats.php'),
'search': urljoin(self.url, '/search/tv/{query}/'),
| {"golden_diff": "diff --git a/medusa/providers/torrent/html/limetorrents.py b/medusa/providers/torrent/html/limetorrents.py\n--- a/medusa/providers/torrent/html/limetorrents.py\n+++ b/medusa/providers/torrent/html/limetorrents.py\n@@ -36,7 +36,7 @@\n self.public = True\n \n # URLs\n- self.url = 'https://www.limetorrents.cc'\n+ self.url = 'https://www.limetorrents.info'\n self.urls = {\n 'update': urljoin(self.url, '/post/updatestats.php'),\n 'search': urljoin(self.url, '/search/tv/{query}/'),\n", "issue": "LimeTorrents address needs to be updated\n**Describe the bug**\r\nNot really a bug but an updated is required for the address.\r\nI'm getting Status: 301 errors (meaning site has moved) as shown below:\r\nSEARCHQUEUE-DAILY-SEARCH :: [LimeTorrents] :: [54da0c7] GET URL: https://www.limetorrents.cc/browse-torrents/TV-shows/ [Status: 301]\r\n\r\nI believe the new address is https://www.limetorrents.info\r\n\r\nBecause it's a built-in provider I can't seem to change it.\r\n\r\nCould this be updated in a future commit?\r\n\r\nThanks\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'http://localhost:8081/errorlogs/viewlog/?min_level=10&log_filter=None&log_period=one_day&log_search=status%3A%20301'\r\n4. See error\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n\r\n\r\n**Medusa (please complete the following information):**\r\n - OS: WIndows 10\r\n - Branch: master\r\n - Commit: 54da0c77d705a23f1ce67945ba4af3e1b978dcfc \r\n\r\n**Logs:**\r\n<details>\r\n\r\n```\r\n<-- Please replace this whole line with your logs -->\r\n```\r\n</details>\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for Limetorrents.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport re\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import (\n convert_size,\n try_int,\n)\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\nid_regex = re.compile(r'(?:\\/)(.*)(?:-torrent-([0-9]*)\\.html)', re.I)\nhash_regex = re.compile(r'(.*)([0-9a-f]{40})(.*)', re.I)\n\n\nclass LimeTorrentsProvider(TorrentProvider):\n \"\"\"LimeTorrents Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(LimeTorrentsProvider, self).__init__('LimeTorrents')\n\n # Credentials\n self.public = True\n\n # URLs\n self.url = 'https://www.limetorrents.cc'\n self.urls = {\n 'update': urljoin(self.url, '/post/updatestats.php'),\n 'search': urljoin(self.url, '/search/tv/{query}/'),\n # Original rss feed url, temporary offline. 
Replaced by the main Tv-show page.\n # 'rss': urljoin(self.url, '/browse-torrents/TV-shows/date/{page}/'),\n 'rss': urljoin(self.url, '/browse-torrents/TV-shows/'),\n }\n\n # Proper Strings\n self.proper_strings = ['PROPER', 'REPACK', 'REAL']\n\n # Miscellaneous Options\n self.confirmed = False\n\n # Torrent Stats\n self.minseed = None\n self.minleech = None\n\n # Cache\n self.cache = tv.Cache(self, min_time=15)\n\n def search(self, search_strings, age=0, ep_obj=None, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n :param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n\n if mode != 'RSS':\n log.debug('Search string: {search}',\n {'search': search_string})\n if self.confirmed:\n log.debug('Searching only confirmed torrents')\n\n search_url = self.urls['search'].format(query=search_string)\n else:\n # search_url = self.urls['rss'].format(page=1)\n search_url = self.urls['rss']\n\n response = self.session.get(search_url)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. RSS\n\n :return: A list of items found\n \"\"\"\n items = []\n\n def process_column_header(th):\n return th.span.get_text() if th.span else th.get_text()\n\n with BS4Parser(data, 'html5lib') as html:\n torrent_table = html.find('table', class_='table2')\n\n if not torrent_table:\n log.debug('Data returned from provider does not contain any {0}torrents',\n 'confirmed ' if self.confirmed else '')\n return items\n\n torrent_rows = torrent_table.find_all('tr')\n labels = [process_column_header(label) for label in torrent_rows[0].find_all('th')]\n\n # Skip the first row, since it isn't a valid result\n for row in torrent_rows[1:]:\n cells = row.find_all('td')\n\n try:\n title_cell = cells[labels.index('Torrent Name')]\n\n verified = title_cell.find('img', title='Verified torrent')\n if self.confirmed and not verified:\n continue\n\n title_anchors = title_cell.find_all('a')\n if not title_anchors or len(title_anchors) < 2:\n continue\n\n title_url = title_anchors[0].get('href')\n title = title_anchors[1].get_text(strip=True)\n regex_result = id_regex.search(title_anchors[1].get('href'))\n\n alt_title = regex_result.group(1)\n if len(title) < len(alt_title):\n title = alt_title.replace('-', ' ')\n\n info_hash = hash_regex.search(title_url).group(2)\n if not all([title, info_hash]):\n continue\n\n download_url = 'magnet:?xt=urn:btih:{hash}&dn={title}{trackers}'.format(\n hash=info_hash, title=title, trackers=self._custom_trackers)\n\n # Remove comma as thousands separator from larger number like 2,000 seeders = 2000\n seeders = try_int(cells[labels.index('Seed')].get_text(strip=True).replace(',', ''))\n leechers = try_int(cells[labels.index('Leech')].get_text(strip=True).replace(',', ''))\n\n if seeders < min(self.minseed, 1):\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n ' minimum seeders: {0}. 
Seeders: {1}',\n title, seeders)\n continue\n\n size = convert_size(cells[labels.index('Size')].get_text(strip=True)) or -1\n\n pubdate_raw = cells[1].get_text().replace('Last', '1').replace('Yesterday', '24 hours')\n pubdate = self.parse_pubdate(pubdate_raw, human_time=True)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.exception('Failed parsing provider.')\n\n return items\n\n\nprovider = LimeTorrentsProvider()\n", "path": "medusa/providers/torrent/html/limetorrents.py"}], "after_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for Limetorrents.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport re\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import (\n convert_size,\n try_int,\n)\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\nid_regex = re.compile(r'(?:\\/)(.*)(?:-torrent-([0-9]*)\\.html)', re.I)\nhash_regex = re.compile(r'(.*)([0-9a-f]{40})(.*)', re.I)\n\n\nclass LimeTorrentsProvider(TorrentProvider):\n \"\"\"LimeTorrents Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(LimeTorrentsProvider, self).__init__('LimeTorrents')\n\n # Credentials\n self.public = True\n\n # URLs\n self.url = 'https://www.limetorrents.info'\n self.urls = {\n 'update': urljoin(self.url, '/post/updatestats.php'),\n 'search': urljoin(self.url, '/search/tv/{query}/'),\n # Original rss feed url, temporary offline. Replaced by the main Tv-show page.\n # 'rss': urljoin(self.url, '/browse-torrents/TV-shows/date/{page}/'),\n 'rss': urljoin(self.url, '/browse-torrents/TV-shows/'),\n }\n\n # Proper Strings\n self.proper_strings = ['PROPER', 'REPACK', 'REAL']\n\n # Miscellaneous Options\n self.confirmed = False\n\n # Torrent Stats\n self.minseed = None\n self.minleech = None\n\n # Cache\n self.cache = tv.Cache(self, min_time=15)\n\n def search(self, search_strings, age=0, ep_obj=None, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n :param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n\n if mode != 'RSS':\n log.debug('Search string: {search}',\n {'search': search_string})\n if self.confirmed:\n log.debug('Searching only confirmed torrents')\n\n search_url = self.urls['search'].format(query=search_string)\n else:\n # search_url = self.urls['rss'].format(page=1)\n search_url = self.urls['rss']\n\n response = self.session.get(search_url)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. 
RSS\n\n :return: A list of items found\n \"\"\"\n items = []\n\n def process_column_header(th):\n return th.span.get_text() if th.span else th.get_text()\n\n with BS4Parser(data, 'html5lib') as html:\n torrent_table = html.find('table', class_='table2')\n\n if not torrent_table:\n log.debug('Data returned from provider does not contain any {0}torrents',\n 'confirmed ' if self.confirmed else '')\n return items\n\n torrent_rows = torrent_table.find_all('tr')\n labels = [process_column_header(label) for label in torrent_rows[0].find_all('th')]\n\n # Skip the first row, since it isn't a valid result\n for row in torrent_rows[1:]:\n cells = row.find_all('td')\n\n try:\n title_cell = cells[labels.index('Torrent Name')]\n\n verified = title_cell.find('img', title='Verified torrent')\n if self.confirmed and not verified:\n continue\n\n title_anchors = title_cell.find_all('a')\n if not title_anchors or len(title_anchors) < 2:\n continue\n\n title_url = title_anchors[0].get('href')\n title = title_anchors[1].get_text(strip=True)\n regex_result = id_regex.search(title_anchors[1].get('href'))\n\n alt_title = regex_result.group(1)\n if len(title) < len(alt_title):\n title = alt_title.replace('-', ' ')\n\n info_hash = hash_regex.search(title_url).group(2)\n if not all([title, info_hash]):\n continue\n\n download_url = 'magnet:?xt=urn:btih:{hash}&dn={title}{trackers}'.format(\n hash=info_hash, title=title, trackers=self._custom_trackers)\n\n # Remove comma as thousands separator from larger number like 2,000 seeders = 2000\n seeders = try_int(cells[labels.index('Seed')].get_text(strip=True).replace(',', ''))\n leechers = try_int(cells[labels.index('Leech')].get_text(strip=True).replace(',', ''))\n\n if seeders < min(self.minseed, 1):\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n ' minimum seeders: {0}. Seeders: {1}',\n title, seeders)\n continue\n\n size = convert_size(cells[labels.index('Size')].get_text(strip=True)) or -1\n\n pubdate_raw = cells[1].get_text().replace('Last', '1').replace('Yesterday', '24 hours')\n pubdate = self.parse_pubdate(pubdate_raw, human_time=True)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.exception('Failed parsing provider.')\n\n return items\n\n\nprovider = LimeTorrentsProvider()\n", "path": "medusa/providers/torrent/html/limetorrents.py"}]} | 2,563 | 153 |
gh_patches_debug_19105 | rasdani/github-patches | git_diff | dotkom__onlineweb4-321 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Registering with an existing e-mail creates the user
When registering:
Choose a username
Enter an email address already in use.
The user will be created, and your chosen username will be taken!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/authentication/forms.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import datetime
4 import re
5
6 from django import forms
7 from django.contrib import auth
8 from django.utils.translation import ugettext as _
9
10 from apps.authentication.models import OnlineUser as User
11
12 class LoginForm(forms.Form):
13 username = forms.CharField(widget=forms.TextInput(), label=_("Brukernavn"), max_length=50)
14 password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"Passord"))
15 user = None
16
17 def clean(self):
18 if self._errors:
19 return
20
21 user = auth.authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password'])
22
23 if user:
24 if user.is_active:
25 self.user = user
26 else:
27 self._errors['username'] = self.error_class([_(u"Din konto er ikke aktiv. Forsøk gjenoppretning av passord.")])
28 else:
29 self._errors['username'] = self.error_class([_(u"Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.")])
30 return self.cleaned_data
31
32 def login(self, request):
33 try:
34 User.objects.get(username=request.POST['username'])
35 except:
36 return False
37 if self.is_valid():
38 auth.login(request, self.user)
39 request.session.set_expiry(0)
40 return True
41 return False
42
43 class RegisterForm(forms.Form):
44 username = forms.CharField(label=_("brukernavn"), max_length=20)
45 first_name = forms.CharField(label=_("fornavn"), max_length=50)
46 last_name = forms.CharField(label=_("etternavn"), max_length=50)
47 email = forms.EmailField(label=_("epost"), max_length=50)
48 password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("passord"))
49 repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("gjenta passord"))
50 address = forms.CharField(label=_("adresse"), max_length=50)
51 zip_code = forms.CharField(label=_("postnummer"), max_length=4)
52 phone = forms.CharField(label=_("telefon"), max_length=20)
53
54 def clean(self):
55 super(RegisterForm, self).clean()
56 if self.is_valid():
57 cleaned_data = self.cleaned_data
58
59 # Check passwords
60 if cleaned_data['password'] != cleaned_data['repeat_password']:
61 self._errors['repeat_password'] = self.error_class([_(u"Passordene er ikke like.")])
62
63 # Check username
64 username = cleaned_data['username']
65 if User.objects.filter(username=username).count() > 0:
66 self._errors['username'] = self.error_class([_(u"Brukernavnet er allerede registrert.")])
67 if not re.match("^[a-zA-Z0-9_-]+$", username):
68 self._errors['username'] = self.error_class([_(u"Ditt brukernavn inneholdt ulovlige tegn. Lovlige tegn: a-Z 0-9 - _")])
69
70 # Check email
71 email = cleaned_data['email']
72 if User.objects.filter(email=email).count() > 0:
73 self._errors['email'] = self.error_class([_(u"Det fins allerede en bruker med denne epostadressen.")])
74
75 # ZIP code digits only
76 zip_code = cleaned_data['zip_code']
77 if len(zip_code) != 4 or not zip_code.isdigit():
78 self._errors['zip_code'] = self.error_class([_(u"Postnummer må bestå av fire siffer.")])
79
80 return cleaned_data
81
82 class RecoveryForm(forms.Form):
83 email = forms.EmailField(label="Email", max_length=50)
84
85 class ChangePasswordForm(forms.Form):
86 new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"nytt passord"))
87 repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"gjenta passord"))
88
89 def clean(self):
90 super(ChangePasswordForm, self).clean()
91 if self.is_valid():
92 cleaned_data = self.cleaned_data
93
94 # Check passwords
95 if cleaned_data['new_password'] != cleaned_data['repeat_password']:
96 self._errors['repeat_password'] = self.error_class([_(u"Passordene er ikke like.")])
97
98 return cleaned_data
99
100
101 class NewEmailForm(forms.Form):
102 new_email = forms.EmailField(_(u"ny epostadresse"))
103
```
--- END FILES ---
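A hint on where to look: `RegisterForm.clean` validates e-mail uniqueness with `User.objects.filter(email=email)`, but in this project e-mail addresses appear to live on a separate `Email` model rather than on the user row, so the check never matches and the account is created anyway. The sketch below shows one way the lookup could be corrected; it assumes an `Email` model with an `email` field exists in `apps.authentication.models`, which is not shown in the file above:
```python
# Sketch only (assumes an Email model with an `email` field, not shown above):
# uniqueness must be checked against Email, not User.
from apps.authentication.models import OnlineUser as User, Email

def email_already_registered(address):
    # Addresses live on Email rows, so querying User.email finds nothing.
    return Email.objects.filter(email=address).exists()
```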
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/authentication/forms.py b/apps/authentication/forms.py
--- a/apps/authentication/forms.py
+++ b/apps/authentication/forms.py
@@ -7,7 +7,7 @@
from django.contrib import auth
from django.utils.translation import ugettext as _
-from apps.authentication.models import OnlineUser as User
+from apps.authentication.models import OnlineUser as User, Email
class LoginForm(forms.Form):
username = forms.CharField(widget=forms.TextInput(), label=_("Brukernavn"), max_length=50)
@@ -69,7 +69,7 @@
# Check email
email = cleaned_data['email']
- if User.objects.filter(email=email).count() > 0:
+ if Email.objects.filter(email=email).count() > 0:
self._errors['email'] = self.error_class([_(u"Det fins allerede en bruker med denne epostadressen.")])
# ZIP code digits only
| {"golden_diff": "diff --git a/apps/authentication/forms.py b/apps/authentication/forms.py\n--- a/apps/authentication/forms.py\n+++ b/apps/authentication/forms.py\n@@ -7,7 +7,7 @@\n from django.contrib import auth\n from django.utils.translation import ugettext as _\n \n-from apps.authentication.models import OnlineUser as User\n+from apps.authentication.models import OnlineUser as User, Email\n \n class LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(), label=_(\"Brukernavn\"), max_length=50)\n@@ -69,7 +69,7 @@\n \n # Check email\n email = cleaned_data['email']\n- if User.objects.filter(email=email).count() > 0:\n+ if Email.objects.filter(email=email).count() > 0:\n self._errors['email'] = self.error_class([_(u\"Det fins allerede en bruker med denne epostadressen.\")])\n \n # ZIP code digits only\n", "issue": "Registering with an existing e-mail creates the user\nWhen registering:\n\nChoose a username\nEnter an email-adresse already in use.\nThe user will be created, and your chosen username will be taken!\n\nRegistering with an existing e-mail creates the user\nWhen registering:\n\nChoose a username\nEnter an email-adresse already in use.\nThe user will be created, and your chosen username will be taken!\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport datetime\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.utils.translation import ugettext as _\n\nfrom apps.authentication.models import OnlineUser as User\n\nclass LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(), label=_(\"Brukernavn\"), max_length=50)\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"Passord\"))\n user = None\n\n def clean(self):\n if self._errors:\n return\n \n user = auth.authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password'])\n\n if user:\n if user.is_active:\n self.user = user\n else:\n self._errors['username'] = self.error_class([_(u\"Din konto er ikke aktiv. 
Fors\u00f8k gjenoppretning av passord.\")])\n else:\n self._errors['username'] = self.error_class([_(u\"Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.\")])\n return self.cleaned_data\n\n def login(self, request):\n try:\n User.objects.get(username=request.POST['username'])\n except:\n return False\n if self.is_valid():\n auth.login(request, self.user)\n request.session.set_expiry(0)\n return True\n return False\n\nclass RegisterForm(forms.Form):\n username = forms.CharField(label=_(\"brukernavn\"), max_length=20)\n first_name = forms.CharField(label=_(\"fornavn\"), max_length=50)\n last_name = forms.CharField(label=_(\"etternavn\"), max_length=50)\n email = forms.EmailField(label=_(\"epost\"), max_length=50)\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"gjenta passord\"))\n address = forms.CharField(label=_(\"adresse\"), max_length=50)\n zip_code = forms.CharField(label=_(\"postnummer\"), max_length=4)\n phone = forms.CharField(label=_(\"telefon\"), max_length=20)\n \n def clean(self):\n super(RegisterForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # Check passwords\n if cleaned_data['password'] != cleaned_data['repeat_password']:\n self._errors['repeat_password'] = self.error_class([_(u\"Passordene er ikke like.\")])\n\n # Check username\n username = cleaned_data['username']\n if User.objects.filter(username=username).count() > 0:\n self._errors['username'] = self.error_class([_(u\"Brukernavnet er allerede registrert.\")])\n if not re.match(\"^[a-zA-Z0-9_-]+$\", username):\n self._errors['username'] = self.error_class([_(u\"Ditt brukernavn inneholdt ulovlige tegn. 
Lovlige tegn: a-Z 0-9 - _\")])\n\n # Check email\n email = cleaned_data['email']\n if User.objects.filter(email=email).count() > 0:\n self._errors['email'] = self.error_class([_(u\"Det fins allerede en bruker med denne epostadressen.\")])\n\n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 4 or not zip_code.isdigit():\n self._errors['zip_code'] = self.error_class([_(u\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n\n return cleaned_data \n\nclass RecoveryForm(forms.Form):\n email = forms.EmailField(label=\"Email\", max_length=50)\n\nclass ChangePasswordForm(forms.Form):\n new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"nytt passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"gjenta passord\"))\n\n def clean(self):\n super(ChangePasswordForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # Check passwords\n if cleaned_data['new_password'] != cleaned_data['repeat_password']:\n self._errors['repeat_password'] = self.error_class([_(u\"Passordene er ikke like.\")])\n\n return cleaned_data\n\n\nclass NewEmailForm(forms.Form):\n new_email = forms.EmailField(_(u\"ny epostadresse\"))\n", "path": "apps/authentication/forms.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport datetime\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.utils.translation import ugettext as _\n\nfrom apps.authentication.models import OnlineUser as User, Email\n\nclass LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(), label=_(\"Brukernavn\"), max_length=50)\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"Passord\"))\n user = None\n\n def clean(self):\n if self._errors:\n return\n \n user = auth.authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password'])\n\n if user:\n if user.is_active:\n self.user = user\n else:\n self._errors['username'] = self.error_class([_(u\"Din konto er ikke aktiv. 
Fors\u00f8k gjenoppretning av passord.\")])\n else:\n self._errors['username'] = self.error_class([_(u\"Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.\")])\n return self.cleaned_data\n\n def login(self, request):\n try:\n User.objects.get(username=request.POST['username'])\n except:\n return False\n if self.is_valid():\n auth.login(request, self.user)\n request.session.set_expiry(0)\n return True\n return False\n\nclass RegisterForm(forms.Form):\n username = forms.CharField(label=_(\"brukernavn\"), max_length=20)\n first_name = forms.CharField(label=_(\"fornavn\"), max_length=50)\n last_name = forms.CharField(label=_(\"etternavn\"), max_length=50)\n email = forms.EmailField(label=_(\"epost\"), max_length=50)\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"gjenta passord\"))\n address = forms.CharField(label=_(\"adresse\"), max_length=50)\n zip_code = forms.CharField(label=_(\"postnummer\"), max_length=4)\n phone = forms.CharField(label=_(\"telefon\"), max_length=20)\n \n def clean(self):\n super(RegisterForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # Check passwords\n if cleaned_data['password'] != cleaned_data['repeat_password']:\n self._errors['repeat_password'] = self.error_class([_(u\"Passordene er ikke like.\")])\n\n # Check username\n username = cleaned_data['username']\n if User.objects.filter(username=username).count() > 0:\n self._errors['username'] = self.error_class([_(u\"Brukernavnet er allerede registrert.\")])\n if not re.match(\"^[a-zA-Z0-9_-]+$\", username):\n self._errors['username'] = self.error_class([_(u\"Ditt brukernavn inneholdt ulovlige tegn. Lovlige tegn: a-Z 0-9 - _\")])\n\n # Check email\n email = cleaned_data['email']\n if Email.objects.filter(email=email).count() > 0:\n self._errors['email'] = self.error_class([_(u\"Det fins allerede en bruker med denne epostadressen.\")])\n\n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 4 or not zip_code.isdigit():\n self._errors['zip_code'] = self.error_class([_(u\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n\n return cleaned_data \n\nclass RecoveryForm(forms.Form):\n email = forms.EmailField(label=\"Email\", max_length=50)\n\nclass ChangePasswordForm(forms.Form):\n new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"nytt passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"gjenta passord\"))\n\n def clean(self):\n super(ChangePasswordForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # Check passwords\n if cleaned_data['new_password'] != cleaned_data['repeat_password']:\n self._errors['repeat_password'] = self.error_class([_(u\"Passordene er ikke like.\")])\n\n return cleaned_data\n\n\nclass NewEmailForm(forms.Form):\n new_email = forms.EmailField(_(u\"ny epostadresse\"))\n", "path": "apps/authentication/forms.py"}]} | 1,504 | 199 |
gh_patches_debug_8087 | rasdani/github-patches | git_diff | ansible__ansible-11609 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
set_fact skipping
The following code works under ansible 1.9 but fails on the latest devel checkout. It runs the same rules, but skips the set_fact command.
```
- name: Set zookeeper ID facts
gather_facts: True
hosts: zookeeper
user: username
sudo: True
tasks:
- set_fact: zkid={{ item.0 | int + 1 }}
when: hostvars[item.1]['ansible_hostname'] == ansible_hostname
with_indexed_items: groups['zookeeper']
```
This assigns a unique id to each zookeeper instance dynamically.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/ansible/executor/task_result.py`
Content:
```
1 # (c) 2012-2014, Michael DeHaan <[email protected]>
2 #
3 # This file is part of Ansible
4 #
5 # Ansible is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Ansible is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
17
18 # Make coding more python3-ish
19 from __future__ import (absolute_import, division, print_function)
20 __metaclass__ = type
21
22 from ansible.parsing import DataLoader
23
24 class TaskResult:
25 '''
26 This class is responsible for interpretting the resulting data
27 from an executed task, and provides helper methods for determining
28 the result of a given task.
29 '''
30
31 def __init__(self, host, task, return_data):
32 self._host = host
33 self._task = task
34 if isinstance(return_data, dict):
35 self._result = return_data.copy()
36 else:
37 self._result = DataLoader().load(return_data)
38
39 def is_changed(self):
40 return self._check_key('changed')
41
42 def is_skipped(self):
43 return self._check_key('skipped')
44
45 def is_failed(self):
46 if 'failed_when_result' in self._result or \
47 'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]:
48 return self._check_key('failed_when_result')
49 else:
50 return self._check_key('failed') or self._result.get('rc', 0) != 0
51
52 def is_unreachable(self):
53 return self._check_key('unreachable')
54
55 def _check_key(self, key):
56 if 'results' in self._result:
57 flag = False
58 for res in self._result.get('results', []):
59 if isinstance(res, dict):
60 flag |= res.get(key, False)
61 return flag
62 else:
63 return self._result.get(key, False)
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py
--- a/lib/ansible/executor/task_result.py
+++ b/lib/ansible/executor/task_result.py
@@ -40,7 +40,14 @@
return self._check_key('changed')
def is_skipped(self):
- return self._check_key('skipped')
+ if 'results' in self._result:
+ flag = True
+ for res in self._result.get('results', []):
+ if isinstance(res, dict):
+ flag &= res.get('skipped', False)
+ return flag
+ else:
+ return self._result.get('skipped', False)
def is_failed(self):
if 'failed_when_result' in self._result or \
| {"golden_diff": "diff --git a/lib/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py\n--- a/lib/ansible/executor/task_result.py\n+++ b/lib/ansible/executor/task_result.py\n@@ -40,7 +40,14 @@\n return self._check_key('changed')\n \n def is_skipped(self):\n- return self._check_key('skipped')\n+ if 'results' in self._result:\n+ flag = True\n+ for res in self._result.get('results', []):\n+ if isinstance(res, dict):\n+ flag &= res.get('skipped', False)\n+ return flag\n+ else:\n+ return self._result.get('skipped', False)\n \n def is_failed(self):\n if 'failed_when_result' in self._result or \\\n", "issue": "set_fact skipping\nThe following code works under ansible 1.9 but fails on the latest devel checkout. It runs the same rules, but skips the set_fact command.\n\n```\n- name: Set zookeeper ID facts\n gather_facts: True\n hosts: zookeeper\n user: username\n sudo: True\n tasks:\n - set_fact: zkid={{ item.0 | int + 1 }}\n when: hostvars[item.1]['ansible_hostname'] == ansible_hostname\n with_indexed_items: groups['zookeeper']\n```\n\nThis assigns a unique id to each zookeeper instance dynamically.\n\n", "before_files": [{"content": "# (c) 2012-2014, Michael DeHaan <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom ansible.parsing import DataLoader\n\nclass TaskResult:\n '''\n This class is responsible for interpretting the resulting data\n from an executed task, and provides helper methods for determining\n the result of a given task.\n '''\n\n def __init__(self, host, task, return_data):\n self._host = host\n self._task = task\n if isinstance(return_data, dict):\n self._result = return_data.copy()\n else:\n self._result = DataLoader().load(return_data)\n\n def is_changed(self):\n return self._check_key('changed')\n\n def is_skipped(self):\n return self._check_key('skipped')\n\n def is_failed(self):\n if 'failed_when_result' in self._result or \\\n 'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]:\n return self._check_key('failed_when_result')\n else:\n return self._check_key('failed') or self._result.get('rc', 0) != 0\n\n def is_unreachable(self):\n return self._check_key('unreachable')\n\n def _check_key(self, key):\n if 'results' in self._result:\n flag = False\n for res in self._result.get('results', []):\n if isinstance(res, dict):\n flag |= res.get(key, False)\n return flag\n else:\n return self._result.get(key, False)\n", "path": "lib/ansible/executor/task_result.py"}], "after_files": [{"content": "# (c) 2012-2014, Michael DeHaan <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom ansible.parsing import DataLoader\n\nclass TaskResult:\n '''\n This class is responsible for interpretting the resulting data\n from an executed task, and provides helper methods for determining\n the result of a given task.\n '''\n\n def __init__(self, host, task, return_data):\n self._host = host\n self._task = task\n if isinstance(return_data, dict):\n self._result = return_data.copy()\n else:\n self._result = DataLoader().load(return_data)\n\n def is_changed(self):\n return self._check_key('changed')\n\n def is_skipped(self):\n if 'results' in self._result:\n flag = True\n for res in self._result.get('results', []):\n if isinstance(res, dict):\n flag &= res.get('skipped', False)\n return flag\n else:\n return self._result.get('skipped', False)\n\n def is_failed(self):\n if 'failed_when_result' in self._result or \\\n 'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]:\n return self._check_key('failed_when_result')\n else:\n return self._check_key('failed') or self._result.get('rc', 0) != 0\n\n def is_unreachable(self):\n return self._check_key('unreachable')\n\n def _check_key(self, key):\n if 'results' in self._result:\n flag = False\n for res in self._result.get('results', []):\n if isinstance(res, dict):\n flag |= res.get(key, False)\n return flag\n else:\n return self._result.get(key, False)\n", "path": "lib/ansible/executor/task_result.py"}]} | 1,042 | 178 |
gh_patches_debug_28365 | rasdani/github-patches | git_diff | learningequality__kolibri-8691 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Learner - Cannot change username and full name
## Observed behavior
The fields for editing a learner's username and full name are disabled and I cannot change them.
## Expected behavior
It should be possible for a learner to edit their username and full name if the options are enabled in Facility settings
## Steps to reproduce the issue
1. Install the following [build.](https://github.com/learningequality/kolibri/releases/tag/v0.15.0-beta2)
2. Create a facility with enabled options for 'Allow learners to edit their username' and 'Allow learners to edit their full name' in Facility settings
3. Create a Learner user
4. Sign in with the Learner, go to Profile and select the Edit button.
5. Attempt to edit the Full name and Username fields.
## Additional information

Logs:
[logs.zip](https://github.com/learningequality/kolibri/files/7540298/logs.zip)
## Usage Details
- OS: Windows 10
- Browser: Chrome
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/core/auth/middleware.py`
Content:
```
1 from django.apps import apps
2 from django.conf import settings
3 from django.contrib.auth import _get_user_session_key
4 from django.contrib.auth import get_user
5 from django.contrib.auth.middleware import AuthenticationMiddleware
6 from django.contrib.sessions.middleware import SessionMiddleware
7 from django.core.cache import cache
8 from django.core.exceptions import ImproperlyConfigured
9 from django.utils.functional import SimpleLazyObject
10
11
12 def get_anonymous_user_model():
13 """
14 Return the Anonymous User model that is active in this project.
15 """
16 try:
17 app_name = settings.AUTH_ANONYMOUS_USER_MODEL.split(".")[0]
18 except AttributeError:
19 raise ImproperlyConfigured("AUTH_ANONYMOUS_USER_MODEL is not a string")
20 try:
21 model_name = settings.AUTH_ANONYMOUS_USER_MODEL.split(".")[1]
22 app = apps.get_app_config(app_name)
23 models_module = app.models_module
24 except IndexError:
25 raise ImproperlyConfigured(
26 "AUTH_ANONYMOUS_USER_MODEL must be of the form 'app_label.model_name'"
27 )
28 except LookupError:
29 raise ImproperlyConfigured(
30 "AUTH_ANONYMOUS_USER_MODEL refers to an app '{}' that has not been installed".format(
31 app_name
32 )
33 )
34 try:
35 return getattr(models_module, model_name)
36 except AttributeError:
37 raise ImproperlyConfigured(
38 "AUTH_ANONYMOUS_USER_MODEL refers to a model '{}' that does not exist in the app '{}'".format(
39 model_name, app_name
40 )
41 )
42
43
44 def _get_user(request):
45 if not hasattr(request, "_cached_user"):
46 try:
47 user_id = _get_user_session_key(request)
48 USER_CACHE_KEY = "USER_BY_SESSION_CACHE_{}".format(user_id)
49 user = cache.get(USER_CACHE_KEY)
50 if not user:
51 user = get_user(request)
52 cache.set(USER_CACHE_KEY, user)
53 except KeyError:
54 user = get_user(request)
55 if user.is_anonymous():
56 AnonymousUser = get_anonymous_user_model()
57 user = AnonymousUser()
58 request._cached_user = user
59
60 return request._cached_user
61
62
63 class CustomAuthenticationMiddleware(AuthenticationMiddleware):
64 """
65 Adaptation of Django's ``account.middleware.AuthenticationMiddleware``
66 to replace the default AnonymousUser with a custom implementation.
67 """
68
69 def process_request(self, request):
70 if not hasattr(request, "session"):
71 raise AssertionError(
72 "The authentication middleware requires session middleware "
73 "to be installed. Edit your MIDDLEWARE_CLASSES setting to insert "
74 "'django.contrib.sessions.middleware.SessionMiddleware' before "
75 "'kolibri.core.auth.middleware.CustomAuthenticationMiddleware'."
76 )
77 request.user = SimpleLazyObject(lambda: _get_user(request))
78
79
80 class XhrPreventLoginPromptMiddleware(object):
81 """
82 By default, HTTP 401 responses are sent with a ``WWW-Authenticate``
83 header. Web browsers react to this header by displaying a login prompt
84 dialog. By removing the header, the login prompt can be avoided. While
85 this isn't recommended in general, there's a convention of removing it
86 for XHR requests, so that unauthenticated XHR requests don't trigger a
87 popup.
88
89 See `here <https://stackoverflow.com/a/20221330>`_ for reference.
90 """
91
92 def __init__(self, get_response):
93 self.get_response = get_response
94
95 def __call__(self, request):
96 response = self.get_response(request)
97 if response and response.status_code == 401 and request.is_ajax():
98 del response["WWW-Authenticate"]
99 return response
100
101
102 SESSION_EXEMPT = "_session_exempt"
103
104
105 def session_exempt(view):
106 def wrapper_func(*args, **kwargs):
107 return view(*args, **kwargs)
108
109 setattr(wrapper_func, SESSION_EXEMPT, True)
110 return wrapper_func
111
112
113 class KolibriSessionMiddleware(SessionMiddleware):
114 def _is_exempt(self, obj):
115 return hasattr(obj, SESSION_EXEMPT)
116
117 def process_view(self, request, callback, callback_args, callback_kwargs):
118 if self._is_exempt(callback):
119 setattr(request, SESSION_EXEMPT, True)
120 return None
121
122 def process_response(self, request, response):
123 if self._is_exempt(request):
124 return response
125 return super(KolibriSessionMiddleware, self).process_response(request, response)
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/core/auth/middleware.py b/kolibri/core/auth/middleware.py
--- a/kolibri/core/auth/middleware.py
+++ b/kolibri/core/auth/middleware.py
@@ -6,6 +6,7 @@
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
+from django.db.models.signals import post_save
from django.utils.functional import SimpleLazyObject
@@ -41,11 +42,14 @@
)
+USER_SESSION_CACHE_KEY = "USER_BY_SESSION_CACHE_{}"
+
+
def _get_user(request):
if not hasattr(request, "_cached_user"):
try:
user_id = _get_user_session_key(request)
- USER_CACHE_KEY = "USER_BY_SESSION_CACHE_{}".format(user_id)
+ USER_CACHE_KEY = USER_SESSION_CACHE_KEY.format(user_id)
user = cache.get(USER_CACHE_KEY)
if not user:
user = get_user(request)
@@ -60,6 +64,14 @@
return request._cached_user
+def clear_user_cache(sender, instance, created, **kwargs):
+ if not created:
+ cache.delete(USER_SESSION_CACHE_KEY.format(instance.id))
+
+
+post_save.connect(clear_user_cache, sender=settings.AUTH_USER_MODEL)
+
+
class CustomAuthenticationMiddleware(AuthenticationMiddleware):
"""
Adaptation of Django's ``account.middleware.AuthenticationMiddleware``
| {"golden_diff": "diff --git a/kolibri/core/auth/middleware.py b/kolibri/core/auth/middleware.py\n--- a/kolibri/core/auth/middleware.py\n+++ b/kolibri/core/auth/middleware.py\n@@ -6,6 +6,7 @@\n from django.contrib.sessions.middleware import SessionMiddleware\n from django.core.cache import cache\n from django.core.exceptions import ImproperlyConfigured\n+from django.db.models.signals import post_save\n from django.utils.functional import SimpleLazyObject\n \n \n@@ -41,11 +42,14 @@\n )\n \n \n+USER_SESSION_CACHE_KEY = \"USER_BY_SESSION_CACHE_{}\"\n+\n+\n def _get_user(request):\n if not hasattr(request, \"_cached_user\"):\n try:\n user_id = _get_user_session_key(request)\n- USER_CACHE_KEY = \"USER_BY_SESSION_CACHE_{}\".format(user_id)\n+ USER_CACHE_KEY = USER_SESSION_CACHE_KEY.format(user_id)\n user = cache.get(USER_CACHE_KEY)\n if not user:\n user = get_user(request)\n@@ -60,6 +64,14 @@\n return request._cached_user\n \n \n+def clear_user_cache(sender, instance, created, **kwargs):\n+ if not created:\n+ cache.delete(USER_SESSION_CACHE_KEY.format(instance.id))\n+\n+\n+post_save.connect(clear_user_cache, sender=settings.AUTH_USER_MODEL)\n+\n+\n class CustomAuthenticationMiddleware(AuthenticationMiddleware):\n \"\"\"\n Adaptation of Django's ``account.middleware.AuthenticationMiddleware``\n", "issue": "Learner - Cannot change username and full name\n## Observed behavior\r\nThe fields for editing a learner's username and full name are disabled and I cannot change them.\r\n\r\n## Expected behavior\r\nIt should be possible for a learner to edit their username and full name if the options are enabled in Facility settings\r\n\r\n## Steps to reproduce the issue\r\n1. Install the following [build.](https://github.com/learningequality/kolibri/releases/tag/v0.15.0-beta2)\r\n2. Create a facility with enabled options for 'Allow learners to edit their username' and 'Allow learners to edit their full name' in Facility settings\r\n3. Create a Learner user\r\n4. Sign in with the Learner, go to Profile and select the Edit button.\r\n5. 
Attempt to edit the Full name and Username fields.\r\n\r\n## Additional information\r\n\r\n\r\nLogs: \r\n[logs.zip](https://github.com/learningequality/kolibri/files/7540298/logs.zip)\r\n\r\n## Usage Details\r\n - OS: Windows 10\r\n - Browser: Chrome\n", "before_files": [{"content": "from django.apps import apps\nfrom django.conf import settings\nfrom django.contrib.auth import _get_user_session_key\nfrom django.contrib.auth import get_user\nfrom django.contrib.auth.middleware import AuthenticationMiddleware\nfrom django.contrib.sessions.middleware import SessionMiddleware\nfrom django.core.cache import cache\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.functional import SimpleLazyObject\n\n\ndef get_anonymous_user_model():\n \"\"\"\n Return the Anonymous User model that is active in this project.\n \"\"\"\n try:\n app_name = settings.AUTH_ANONYMOUS_USER_MODEL.split(\".\")[0]\n except AttributeError:\n raise ImproperlyConfigured(\"AUTH_ANONYMOUS_USER_MODEL is not a string\")\n try:\n model_name = settings.AUTH_ANONYMOUS_USER_MODEL.split(\".\")[1]\n app = apps.get_app_config(app_name)\n models_module = app.models_module\n except IndexError:\n raise ImproperlyConfigured(\n \"AUTH_ANONYMOUS_USER_MODEL must be of the form 'app_label.model_name'\"\n )\n except LookupError:\n raise ImproperlyConfigured(\n \"AUTH_ANONYMOUS_USER_MODEL refers to an app '{}' that has not been installed\".format(\n app_name\n )\n )\n try:\n return getattr(models_module, model_name)\n except AttributeError:\n raise ImproperlyConfigured(\n \"AUTH_ANONYMOUS_USER_MODEL refers to a model '{}' that does not exist in the app '{}'\".format(\n model_name, app_name\n )\n )\n\n\ndef _get_user(request):\n if not hasattr(request, \"_cached_user\"):\n try:\n user_id = _get_user_session_key(request)\n USER_CACHE_KEY = \"USER_BY_SESSION_CACHE_{}\".format(user_id)\n user = cache.get(USER_CACHE_KEY)\n if not user:\n user = get_user(request)\n cache.set(USER_CACHE_KEY, user)\n except KeyError:\n user = get_user(request)\n if user.is_anonymous():\n AnonymousUser = get_anonymous_user_model()\n user = AnonymousUser()\n request._cached_user = user\n\n return request._cached_user\n\n\nclass CustomAuthenticationMiddleware(AuthenticationMiddleware):\n \"\"\"\n Adaptation of Django's ``account.middleware.AuthenticationMiddleware``\n to replace the default AnonymousUser with a custom implementation.\n \"\"\"\n\n def process_request(self, request):\n if not hasattr(request, \"session\"):\n raise AssertionError(\n \"The authentication middleware requires session middleware \"\n \"to be installed. Edit your MIDDLEWARE_CLASSES setting to insert \"\n \"'django.contrib.sessions.middleware.SessionMiddleware' before \"\n \"'kolibri.core.auth.middleware.CustomAuthenticationMiddleware'.\"\n )\n request.user = SimpleLazyObject(lambda: _get_user(request))\n\n\nclass XhrPreventLoginPromptMiddleware(object):\n \"\"\"\n By default, HTTP 401 responses are sent with a ``WWW-Authenticate``\n header. Web browsers react to this header by displaying a login prompt\n dialog. By removing the header, the login prompt can be avoided. 
While\n this isn't recommended in general, there's a convention of removing it\n for XHR requests, so that unauthenticated XHR requests don't trigger a\n popup.\n\n See `here <https://stackoverflow.com/a/20221330>`_ for reference.\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n response = self.get_response(request)\n if response and response.status_code == 401 and request.is_ajax():\n del response[\"WWW-Authenticate\"]\n return response\n\n\nSESSION_EXEMPT = \"_session_exempt\"\n\n\ndef session_exempt(view):\n def wrapper_func(*args, **kwargs):\n return view(*args, **kwargs)\n\n setattr(wrapper_func, SESSION_EXEMPT, True)\n return wrapper_func\n\n\nclass KolibriSessionMiddleware(SessionMiddleware):\n def _is_exempt(self, obj):\n return hasattr(obj, SESSION_EXEMPT)\n\n def process_view(self, request, callback, callback_args, callback_kwargs):\n if self._is_exempt(callback):\n setattr(request, SESSION_EXEMPT, True)\n return None\n\n def process_response(self, request, response):\n if self._is_exempt(request):\n return response\n return super(KolibriSessionMiddleware, self).process_response(request, response)\n", "path": "kolibri/core/auth/middleware.py"}], "after_files": [{"content": "from django.apps import apps\nfrom django.conf import settings\nfrom django.contrib.auth import _get_user_session_key\nfrom django.contrib.auth import get_user\nfrom django.contrib.auth.middleware import AuthenticationMiddleware\nfrom django.contrib.sessions.middleware import SessionMiddleware\nfrom django.core.cache import cache\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db.models.signals import post_save\nfrom django.utils.functional import SimpleLazyObject\n\n\ndef get_anonymous_user_model():\n \"\"\"\n Return the Anonymous User model that is active in this project.\n \"\"\"\n try:\n app_name = settings.AUTH_ANONYMOUS_USER_MODEL.split(\".\")[0]\n except AttributeError:\n raise ImproperlyConfigured(\"AUTH_ANONYMOUS_USER_MODEL is not a string\")\n try:\n model_name = settings.AUTH_ANONYMOUS_USER_MODEL.split(\".\")[1]\n app = apps.get_app_config(app_name)\n models_module = app.models_module\n except IndexError:\n raise ImproperlyConfigured(\n \"AUTH_ANONYMOUS_USER_MODEL must be of the form 'app_label.model_name'\"\n )\n except LookupError:\n raise ImproperlyConfigured(\n \"AUTH_ANONYMOUS_USER_MODEL refers to an app '{}' that has not been installed\".format(\n app_name\n )\n )\n try:\n return getattr(models_module, model_name)\n except AttributeError:\n raise ImproperlyConfigured(\n \"AUTH_ANONYMOUS_USER_MODEL refers to a model '{}' that does not exist in the app '{}'\".format(\n model_name, app_name\n )\n )\n\n\nUSER_SESSION_CACHE_KEY = \"USER_BY_SESSION_CACHE_{}\"\n\n\ndef _get_user(request):\n if not hasattr(request, \"_cached_user\"):\n try:\n user_id = _get_user_session_key(request)\n USER_CACHE_KEY = USER_SESSION_CACHE_KEY.format(user_id)\n user = cache.get(USER_CACHE_KEY)\n if not user:\n user = get_user(request)\n cache.set(USER_CACHE_KEY, user)\n except KeyError:\n user = get_user(request)\n if user.is_anonymous():\n AnonymousUser = get_anonymous_user_model()\n user = AnonymousUser()\n request._cached_user = user\n\n return request._cached_user\n\n\ndef clear_user_cache(sender, instance, created, **kwargs):\n if not created:\n cache.delete(USER_SESSION_CACHE_KEY.format(instance.id))\n\n\npost_save.connect(clear_user_cache, sender=settings.AUTH_USER_MODEL)\n\n\nclass 
CustomAuthenticationMiddleware(AuthenticationMiddleware):\n \"\"\"\n Adaptation of Django's ``account.middleware.AuthenticationMiddleware``\n to replace the default AnonymousUser with a custom implementation.\n \"\"\"\n\n def process_request(self, request):\n if not hasattr(request, \"session\"):\n raise AssertionError(\n \"The authentication middleware requires session middleware \"\n \"to be installed. Edit your MIDDLEWARE_CLASSES setting to insert \"\n \"'django.contrib.sessions.middleware.SessionMiddleware' before \"\n \"'kolibri.core.auth.middleware.CustomAuthenticationMiddleware'.\"\n )\n request.user = SimpleLazyObject(lambda: _get_user(request))\n\n\nclass XhrPreventLoginPromptMiddleware(object):\n \"\"\"\n By default, HTTP 401 responses are sent with a ``WWW-Authenticate``\n header. Web browsers react to this header by displaying a login prompt\n dialog. By removing the header, the login prompt can be avoided. While\n this isn't recommended in general, there's a convention of removing it\n for XHR requests, so that unauthenticated XHR requests don't trigger a\n popup.\n\n See `here <https://stackoverflow.com/a/20221330>`_ for reference.\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n response = self.get_response(request)\n if response and response.status_code == 401 and request.is_ajax():\n del response[\"WWW-Authenticate\"]\n return response\n\n\nSESSION_EXEMPT = \"_session_exempt\"\n\n\ndef session_exempt(view):\n def wrapper_func(*args, **kwargs):\n return view(*args, **kwargs)\n\n setattr(wrapper_func, SESSION_EXEMPT, True)\n return wrapper_func\n\n\nclass KolibriSessionMiddleware(SessionMiddleware):\n def _is_exempt(self, obj):\n return hasattr(obj, SESSION_EXEMPT)\n\n def process_view(self, request, callback, callback_args, callback_kwargs):\n if self._is_exempt(callback):\n setattr(request, SESSION_EXEMPT, True)\n return None\n\n def process_response(self, request, response):\n if self._is_exempt(request):\n return response\n return super(KolibriSessionMiddleware, self).process_response(request, response)\n", "path": "kolibri/core/auth/middleware.py"}]} | 1,762 | 310 |
gh_patches_debug_24177 | rasdani/github-patches | git_diff | pre-commit__pre-commit-756 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
venv tests break virtualenv's `pip` when run from a `-mvirtualenv` virtualenv
Here's a reproduction, not exactly sure what's happening here:
```
$ tox -e py36 -r --notest
GLOB sdist-make: /home/asottile/workspace/pre-commit/setup.py
py36 create: /home/asottile/workspace/pre-commit/.tox/py36
py36 installdeps: -rrequirements-dev.txt
py36 inst: /home/asottile/workspace/pre-commit/.tox/dist/pre_commit-1.10.0.zip
py36 installed: You are using pip version 9.0.1, however version 10.0.1 is available.,You should consider upgrading via the 'pip install --upgrade pip' command.,aspy.yaml==1.1.1,atomicwrites==1.1.5,attrs==18.1.0,cached-property==1.4.2,cfgv==1.0.0,coverage==4.5.1,flake8==3.5.0,identify==1.0.18,mccabe==0.6.1,mock==2.0.0,more-itertools==4.2.0,nodeenv==1.3.0,pbr==4.0.3,pluggy==0.6.0,-e [email protected]:pre-commit/pre-commit@97fb49a533de9a378d20f0a41e79df118362e534#egg=pre_commit,py==1.5.3,pycodestyle==2.3.1,pyflakes==1.6.0,pytest==3.6.0,pytest-env==0.6.2,PyYAML==3.12,six==1.11.0,toml==0.9.4,virtualenv==16.0.0
___________________________________ summary ____________________________________
py36: skipped tests
congratulations :)
$ head -1 .tox/py36/bin/pip
#!/home/asottile/workspace/pre-commit/.tox/py36/bin/python3.6
$ .tox/py36/bin/pytest tests -k venv
============================= test session starts ==============================
platform linux -- Python 3.6.5, pytest-3.6.0, py-1.5.3, pluggy-0.6.0
rootdir: /home/asottile/workspace/pre-commit, inifile: tox.ini
plugins: env-0.6.2
collected 500 items / 492 deselected
tests/repository_test.py .. [ 25%]
tests/commands/install_uninstall_test.py . [ 37%]
tests/languages/all_test.py ..... [100%]
=================== 8 passed, 492 deselected in 4.12 seconds ===================
$ head -1 .tox/py36/bin/pip
#!/home/asottile/workspace/pre-commit/.tox/py36/bin/python3.6
$ tox -e py36 -- tests -k venv
GLOB sdist-make: /home/asottile/workspace/pre-commit/setup.py
py36 inst-nodeps: /home/asottile/workspace/pre-commit/.tox/dist/pre_commit-1.10.0.zip
py36 installed: You are using pip version 9.0.1, however version 10.0.1 is available.,You should consider upgrading via the 'pip install --upgrade pip' command.,aspy.yaml==1.1.1,atomicwrites==1.1.5,attrs==18.1.0,cached-property==1.4.2,cfgv==1.0.0,coverage==4.5.1,flake8==3.5.0,identify==1.0.18,mccabe==0.6.1,mock==2.0.0,more-itertools==4.2.0,nodeenv==1.3.0,pbr==4.0.3,pluggy==0.6.0,pre-commit==1.10.0,py==1.5.3,pycodestyle==2.3.1,pyflakes==1.6.0,pytest==3.6.0,pytest-env==0.6.2,PyYAML==3.12,six==1.11.0,toml==0.9.4,virtualenv==16.0.0
py36 runtests: PYTHONHASHSEED='93802395'
py36 runtests: commands[0] | coverage erase
py36 runtests: commands[1] | coverage run -m pytest tests -k venv
============================= test session starts ==============================
platform linux -- Python 3.6.5, pytest-3.6.0, py-1.5.3, pluggy-0.6.0
rootdir: /home/asottile/workspace/pre-commit, inifile: tox.ini
plugins: env-0.6.2
collected 500 items / 492 deselected
tests/repository_test.py .. [ 25%]
tests/commands/install_uninstall_test.py . [ 37%]
tests/languages/all_test.py ..... [100%]
=================== 8 passed, 492 deselected in 4.32 seconds ===================
py36 runtests: commands[2] | coverage report --fail-under 99
Name Stmts Miss Branch BrPart Cover Missing
---------------------------------------------------------------------------------------------
...
17 files skipped due to complete coverage.
ERROR: InvocationError: '/home/asottile/workspace/pre-commit/.tox/py36/bin/coverage report --fail-under 99'
___________________________________ summary ____________________________________
ERROR: py36: commands failed
$ head -1 .tox/py36/bin/pip
#!/tmp/pytest-of-asottile/pytest-3/test_python_venv0/0/.pre-commit/repo5xcuq11q/py_venv-python3.6/bin/python3.6
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/python_venv.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from pre_commit.languages import python
4 from pre_commit.util import cmd_output
5
6
7 ENVIRONMENT_DIR = 'py_venv'
8
9
10 def make_venv(envdir, python):
11 cmd_output(python, '-mvenv', envdir, cwd='/')
12
13
14 get_default_version = python.get_default_version
15 _interface = python.py_interface(ENVIRONMENT_DIR, make_venv)
16 in_env, healthy, run_hook, install_environment = _interface
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/languages/python_venv.py b/pre_commit/languages/python_venv.py
--- a/pre_commit/languages/python_venv.py
+++ b/pre_commit/languages/python_venv.py
@@ -1,14 +1,46 @@
from __future__ import unicode_literals
+import os.path
+
from pre_commit.languages import python
+from pre_commit.util import CalledProcessError
from pre_commit.util import cmd_output
ENVIRONMENT_DIR = 'py_venv'
+def orig_py_exe(exe): # pragma: no cover (platform specific)
+ """A -mvenv virtualenv made from a -mvirtualenv virtualenv installs
+ packages to the incorrect location. Attempt to find the _original_ exe
+ and invoke `-mvenv` from there.
+
+ See:
+ - https://github.com/pre-commit/pre-commit/issues/755
+ - https://github.com/pypa/virtualenv/issues/1095
+ - https://bugs.python.org/issue30811
+ """
+ try:
+ prefix_script = 'import sys; print(sys.real_prefix)'
+ _, prefix, _ = cmd_output(exe, '-c', prefix_script)
+ prefix = prefix.strip()
+ except CalledProcessError:
+ # not created from -mvirtualenv
+ return exe
+
+ if os.name == 'nt':
+ expected = os.path.join(prefix, 'python.exe')
+ else:
+ expected = os.path.join(prefix, 'bin', os.path.basename(exe))
+
+ if os.path.exists(expected):
+ return expected
+ else:
+ return exe
+
+
def make_venv(envdir, python):
- cmd_output(python, '-mvenv', envdir, cwd='/')
+ cmd_output(orig_py_exe(python), '-mvenv', envdir, cwd='/')
get_default_version = python.get_default_version
| {"golden_diff": "diff --git a/pre_commit/languages/python_venv.py b/pre_commit/languages/python_venv.py\n--- a/pre_commit/languages/python_venv.py\n+++ b/pre_commit/languages/python_venv.py\n@@ -1,14 +1,46 @@\n from __future__ import unicode_literals\n \n+import os.path\n+\n from pre_commit.languages import python\n+from pre_commit.util import CalledProcessError\n from pre_commit.util import cmd_output\n \n \n ENVIRONMENT_DIR = 'py_venv'\n \n \n+def orig_py_exe(exe): # pragma: no cover (platform specific)\n+ \"\"\"A -mvenv virtualenv made from a -mvirtualenv virtualenv installs\n+ packages to the incorrect location. Attempt to find the _original_ exe\n+ and invoke `-mvenv` from there.\n+\n+ See:\n+ - https://github.com/pre-commit/pre-commit/issues/755\n+ - https://github.com/pypa/virtualenv/issues/1095\n+ - https://bugs.python.org/issue30811\n+ \"\"\"\n+ try:\n+ prefix_script = 'import sys; print(sys.real_prefix)'\n+ _, prefix, _ = cmd_output(exe, '-c', prefix_script)\n+ prefix = prefix.strip()\n+ except CalledProcessError:\n+ # not created from -mvirtualenv\n+ return exe\n+\n+ if os.name == 'nt':\n+ expected = os.path.join(prefix, 'python.exe')\n+ else:\n+ expected = os.path.join(prefix, 'bin', os.path.basename(exe))\n+\n+ if os.path.exists(expected):\n+ return expected\n+ else:\n+ return exe\n+\n+\n def make_venv(envdir, python):\n- cmd_output(python, '-mvenv', envdir, cwd='/')\n+ cmd_output(orig_py_exe(python), '-mvenv', envdir, cwd='/')\n \n \n get_default_version = python.get_default_version\n", "issue": "venv tests break virtualenv's `pip` when run from a `-mvirtualenv` virtualenv\nHere's a reproduction, not exactly sure what's happening here:\r\n\r\n```\r\n$ tox -e py36 -r --notest\r\nGLOB sdist-make: /home/asottile/workspace/pre-commit/setup.py\r\npy36 create: /home/asottile/workspace/pre-commit/.tox/py36\r\npy36 installdeps: -rrequirements-dev.txt\r\npy36 inst: /home/asottile/workspace/pre-commit/.tox/dist/pre_commit-1.10.0.zip\r\npy36 installed: You are using pip version 9.0.1, however version 10.0.1 is available.,You should consider upgrading via the 'pip install --upgrade pip' command.,aspy.yaml==1.1.1,atomicwrites==1.1.5,attrs==18.1.0,cached-property==1.4.2,cfgv==1.0.0,coverage==4.5.1,flake8==3.5.0,identify==1.0.18,mccabe==0.6.1,mock==2.0.0,more-itertools==4.2.0,nodeenv==1.3.0,pbr==4.0.3,pluggy==0.6.0,-e [email protected]:pre-commit/pre-commit@97fb49a533de9a378d20f0a41e79df118362e534#egg=pre_commit,py==1.5.3,pycodestyle==2.3.1,pyflakes==1.6.0,pytest==3.6.0,pytest-env==0.6.2,PyYAML==3.12,six==1.11.0,toml==0.9.4,virtualenv==16.0.0\r\n___________________________________ summary ____________________________________\r\n py36: skipped tests\r\n congratulations :)\r\n\r\n$ head -1 .tox/py36/bin/pip\r\n#!/home/asottile/workspace/pre-commit/.tox/py36/bin/python3.6\r\n$ .tox/py36/bin/pytest tests -k venv\r\n============================= test session starts ==============================\r\nplatform linux -- Python 3.6.5, pytest-3.6.0, py-1.5.3, pluggy-0.6.0\r\nrootdir: /home/asottile/workspace/pre-commit, inifile: tox.ini\r\nplugins: env-0.6.2\r\ncollected 500 items / 492 deselected \r\n\r\ntests/repository_test.py .. [ 25%]\r\ntests/commands/install_uninstall_test.py . [ 37%]\r\ntests/languages/all_test.py ..... 
[100%]\r\n\r\n=================== 8 passed, 492 deselected in 4.12 seconds ===================\r\n$ head -1 .tox/py36/bin/pip\r\n#!/home/asottile/workspace/pre-commit/.tox/py36/bin/python3.6\r\n$ tox -e py36 -- tests -k venv\r\nGLOB sdist-make: /home/asottile/workspace/pre-commit/setup.py\r\npy36 inst-nodeps: /home/asottile/workspace/pre-commit/.tox/dist/pre_commit-1.10.0.zip\r\npy36 installed: You are using pip version 9.0.1, however version 10.0.1 is available.,You should consider upgrading via the 'pip install --upgrade pip' command.,aspy.yaml==1.1.1,atomicwrites==1.1.5,attrs==18.1.0,cached-property==1.4.2,cfgv==1.0.0,coverage==4.5.1,flake8==3.5.0,identify==1.0.18,mccabe==0.6.1,mock==2.0.0,more-itertools==4.2.0,nodeenv==1.3.0,pbr==4.0.3,pluggy==0.6.0,pre-commit==1.10.0,py==1.5.3,pycodestyle==2.3.1,pyflakes==1.6.0,pytest==3.6.0,pytest-env==0.6.2,PyYAML==3.12,six==1.11.0,toml==0.9.4,virtualenv==16.0.0\r\npy36 runtests: PYTHONHASHSEED='93802395'\r\npy36 runtests: commands[0] | coverage erase\r\npy36 runtests: commands[1] | coverage run -m pytest tests -k venv\r\n============================= test session starts ==============================\r\nplatform linux -- Python 3.6.5, pytest-3.6.0, py-1.5.3, pluggy-0.6.0\r\nrootdir: /home/asottile/workspace/pre-commit, inifile: tox.ini\r\nplugins: env-0.6.2\r\ncollected 500 items / 492 deselected \r\n\r\ntests/repository_test.py .. [ 25%]\r\ntests/commands/install_uninstall_test.py . [ 37%]\r\ntests/languages/all_test.py ..... [100%]\r\n\r\n=================== 8 passed, 492 deselected in 4.32 seconds ===================\r\npy36 runtests: commands[2] | coverage report --fail-under 99\r\nName Stmts Miss Branch BrPart Cover Missing\r\n---------------------------------------------------------------------------------------------\r\n...\r\n17 files skipped due to complete coverage.\r\nERROR: InvocationError: '/home/asottile/workspace/pre-commit/.tox/py36/bin/coverage report --fail-under 99'\r\n___________________________________ summary ____________________________________\r\nERROR: py36: commands failed\r\n\r\n$ head -1 .tox/py36/bin/pip\r\n#!/tmp/pytest-of-asottile/pytest-3/test_python_venv0/0/.pre-commit/repo5xcuq11q/py_venv-python3.6/bin/python3.6\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom pre_commit.languages import python\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'py_venv'\n\n\ndef make_venv(envdir, python):\n cmd_output(python, '-mvenv', envdir, cwd='/')\n\n\nget_default_version = python.get_default_version\n_interface = python.py_interface(ENVIRONMENT_DIR, make_venv)\nin_env, healthy, run_hook, install_environment = _interface\n", "path": "pre_commit/languages/python_venv.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport os.path\n\nfrom pre_commit.languages import python\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'py_venv'\n\n\ndef orig_py_exe(exe): # pragma: no cover (platform specific)\n \"\"\"A -mvenv virtualenv made from a -mvirtualenv virtualenv installs\n packages to the incorrect location. 
Attempt to find the _original_ exe\n and invoke `-mvenv` from there.\n\n See:\n - https://github.com/pre-commit/pre-commit/issues/755\n - https://github.com/pypa/virtualenv/issues/1095\n - https://bugs.python.org/issue30811\n \"\"\"\n try:\n prefix_script = 'import sys; print(sys.real_prefix)'\n _, prefix, _ = cmd_output(exe, '-c', prefix_script)\n prefix = prefix.strip()\n except CalledProcessError:\n # not created from -mvirtualenv\n return exe\n\n if os.name == 'nt':\n expected = os.path.join(prefix, 'python.exe')\n else:\n expected = os.path.join(prefix, 'bin', os.path.basename(exe))\n\n if os.path.exists(expected):\n return expected\n else:\n return exe\n\n\ndef make_venv(envdir, python):\n cmd_output(orig_py_exe(python), '-mvenv', envdir, cwd='/')\n\n\nget_default_version = python.get_default_version\n_interface = python.py_interface(ENVIRONMENT_DIR, make_venv)\nin_env, healthy, run_hook, install_environment = _interface\n", "path": "pre_commit/languages/python_venv.py"}]} | 1,765 | 436 |
gh_patches_debug_35079 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-962 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[PORT] [Certificate Authentication] Expose sendX5c parameter
> Port this change from botbuilder-dotnet/master branch:
https://github.com/microsoft/botbuilder-dotnet/pull/3741
This parameter enables application developers to achieve easy certificates roll-over in Azure AD: setting this parameter to true will send the public certificate to Azure AD along with the token request, so that Azure AD can use it to validate the subject name based on a trusted issuer policy. This saves the application admin from the need to explicitly manage the certificate rollover (either via portal or powershell/CLI operation)
# Changed projects
* Microsoft.Bot.Connector
[R9,authentication]
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from abc import ABC
5
6 from msal import ConfidentialClientApplication
7
8 from .app_credentials import AppCredentials
9
10
11 class CertificateAppCredentials(AppCredentials, ABC):
12 """
13 AppCredentials implementation using a certificate.
14
15 See:
16 https://github.com/AzureAD/microsoft-authentication-library-for-python/wiki/Client-Credentials#client-credentials-with-certificate
17 """
18
19 def __init__(
20 self,
21 app_id: str,
22 certificate_thumbprint: str,
23 certificate_private_key: str,
24 channel_auth_tenant: str = None,
25 oauth_scope: str = None,
26 ):
27 # super will set proper scope and endpoint.
28 super().__init__(
29 app_id=app_id,
30 channel_auth_tenant=channel_auth_tenant,
31 oauth_scope=oauth_scope,
32 )
33
34 self.scopes = [self.oauth_scope]
35 self.app = None
36 self.certificate_thumbprint = certificate_thumbprint
37 self.certificate_private_key = certificate_private_key
38
39 def get_access_token(self, force_refresh: bool = False) -> str:
40 """
41 Implementation of AppCredentials.get_token.
42 :return: The access token for the given certificate.
43 """
44
45 # Firstly, looks up a token from cache
46 # Since we are looking for token for the current app, NOT for an end user,
47 # notice we give account parameter as None.
48 auth_token = self.__get_msal_app().acquire_token_silent(
49 self.scopes, account=None
50 )
51 if not auth_token:
52 # No suitable token exists in cache. Let's get a new one from AAD.
53 auth_token = self.__get_msal_app().acquire_token_for_client(
54 scopes=self.scopes
55 )
56 return auth_token["access_token"]
57
58 def __get_msal_app(self):
59 if not self.app:
60 self.app = ConfidentialClientApplication(
61 client_id=self.microsoft_app_id,
62 authority=self.oauth_endpoint,
63 client_credential={
64 "thumbprint": self.certificate_thumbprint,
65 "private_key": self.certificate_private_key,
66 },
67 )
68
69 return self.app
70
```
Path: `libraries/botframework-connector/setup.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 import os
4 from setuptools import setup
5
6 NAME = "botframework-connector"
7 VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.7.1"
8 REQUIRES = [
9 "msrest==0.6.10",
10 "requests==2.22.0",
11 "cryptography==2.8.0",
12 "PyJWT==1.5.3",
13 "botbuilder-schema>=4.7.1",
14 "adal==1.2.1",
15 "msal==1.1.0",
16 ]
17
18 root = os.path.abspath(os.path.dirname(__file__))
19
20 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
21 long_description = f.read()
22
23 setup(
24 name=NAME,
25 version=VERSION,
26 description="Microsoft Bot Framework Bot Builder SDK for Python.",
27 author="Microsoft",
28 url="https://www.github.com/Microsoft/botbuilder-python",
29 keywords=["BotFrameworkConnector", "bots", "ai", "botframework", "botbuilder"],
30 install_requires=REQUIRES,
31 packages=[
32 "botframework.connector",
33 "botframework.connector.auth",
34 "botframework.connector.async_mixin",
35 "botframework.connector.operations",
36 "botframework.connector.models",
37 "botframework.connector.aio",
38 "botframework.connector.aio.operations_async",
39 "botframework.connector.teams",
40 "botframework.connector.teams.operations",
41 "botframework.connector.token_api",
42 "botframework.connector.token_api.aio",
43 "botframework.connector.token_api.models",
44 "botframework.connector.token_api.operations",
45 ],
46 include_package_data=True,
47 long_description=long_description,
48 long_description_content_type="text/x-rst",
49 license="MIT",
50 classifiers=[
51 "Programming Language :: Python :: 3.7",
52 "Intended Audience :: Developers",
53 "License :: OSI Approved :: MIT License",
54 "Operating System :: OS Independent",
55 "Development Status :: 5 - Production/Stable",
56 "Topic :: Scientific/Engineering :: Artificial Intelligence",
57 ],
58 )
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py b/libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py
--- a/libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py
+++ b/libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py
@@ -23,7 +23,20 @@
certificate_private_key: str,
channel_auth_tenant: str = None,
oauth_scope: str = None,
+ certificate_public: str = None,
):
+ """
+ AppCredentials implementation using a certificate.
+
+ :param app_id:
+ :param certificate_thumbprint:
+ :param certificate_private_key:
+ :param channel_auth_tenant:
+ :param oauth_scope:
+ :param certificate_public: public_certificate (optional) is public key certificate which will be sent
+ through ‘x5c’ JWT header only for subject name and issuer authentication to support cert auto rolls.
+ """
+
# super will set proper scope and endpoint.
super().__init__(
app_id=app_id,
@@ -35,6 +48,7 @@
self.app = None
self.certificate_thumbprint = certificate_thumbprint
self.certificate_private_key = certificate_private_key
+ self.certificate_public = certificate_public
def get_access_token(self, force_refresh: bool = False) -> str:
"""
@@ -63,6 +77,9 @@
client_credential={
"thumbprint": self.certificate_thumbprint,
"private_key": self.certificate_private_key,
+ "public_certificate": self.certificate_public
+ if self.certificate_public
+ else None,
},
)
diff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py
--- a/libraries/botframework-connector/setup.py
+++ b/libraries/botframework-connector/setup.py
@@ -12,7 +12,7 @@
"PyJWT==1.5.3",
"botbuilder-schema>=4.7.1",
"adal==1.2.1",
- "msal==1.1.0",
+ "msal==1.2.0",
]
root = os.path.abspath(os.path.dirname(__file__))
| {"golden_diff": "diff --git a/libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py b/libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py\n--- a/libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py\n+++ b/libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py\n@@ -23,7 +23,20 @@\n certificate_private_key: str,\r\n channel_auth_tenant: str = None,\r\n oauth_scope: str = None,\r\n+ certificate_public: str = None,\r\n ):\r\n+ \"\"\"\r\n+ AppCredentials implementation using a certificate.\r\n+\r\n+ :param app_id:\r\n+ :param certificate_thumbprint:\r\n+ :param certificate_private_key:\r\n+ :param channel_auth_tenant:\r\n+ :param oauth_scope:\r\n+ :param certificate_public: public_certificate (optional) is public key certificate which will be sent\r\n+ through \u2018x5c\u2019 JWT header only for subject name and issuer authentication to support cert auto rolls.\r\n+ \"\"\"\r\n+\r\n # super will set proper scope and endpoint.\r\n super().__init__(\r\n app_id=app_id,\r\n@@ -35,6 +48,7 @@\n self.app = None\r\n self.certificate_thumbprint = certificate_thumbprint\r\n self.certificate_private_key = certificate_private_key\r\n+ self.certificate_public = certificate_public\r\n \r\n def get_access_token(self, force_refresh: bool = False) -> str:\r\n \"\"\"\r\n@@ -63,6 +77,9 @@\n client_credential={\r\n \"thumbprint\": self.certificate_thumbprint,\r\n \"private_key\": self.certificate_private_key,\r\n+ \"public_certificate\": self.certificate_public\r\n+ if self.certificate_public\r\n+ else None,\r\n },\r\n )\r\n \r\ndiff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py\n--- a/libraries/botframework-connector/setup.py\n+++ b/libraries/botframework-connector/setup.py\n@@ -12,7 +12,7 @@\n \"PyJWT==1.5.3\",\n \"botbuilder-schema>=4.7.1\",\n \"adal==1.2.1\",\n- \"msal==1.1.0\",\n+ \"msal==1.2.0\",\n ]\n \n root = os.path.abspath(os.path.dirname(__file__))\n", "issue": "[PORT] [Certificate Authentication] Expose sendX5c parameter\n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/3741\n\nThis parameter enables application developers to achieve easy certificates roll-over in Azure AD: setting this parameter to true will send the public certificate to Azure AD along with the token request, so that Azure AD can use it to validate the subject name based on a trusted issuer policy. This saves the application admin from the need to explicitly manage the certificate rollover (either via portal or powershell/CLI operation)\n\n\r\n# Changed projects\r\n* Microsoft.Bot.Connector\r\n\r\n[R9,authentication]\r\n\r\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\nfrom abc import ABC\r\n\r\nfrom msal import ConfidentialClientApplication\r\n\r\nfrom .app_credentials import AppCredentials\r\n\r\n\r\nclass CertificateAppCredentials(AppCredentials, ABC):\r\n \"\"\"\r\n AppCredentials implementation using a certificate.\r\n\r\n See:\r\n https://github.com/AzureAD/microsoft-authentication-library-for-python/wiki/Client-Credentials#client-credentials-with-certificate\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n app_id: str,\r\n certificate_thumbprint: str,\r\n certificate_private_key: str,\r\n channel_auth_tenant: str = None,\r\n oauth_scope: str = None,\r\n ):\r\n # super will set proper scope and endpoint.\r\n super().__init__(\r\n app_id=app_id,\r\n channel_auth_tenant=channel_auth_tenant,\r\n oauth_scope=oauth_scope,\r\n )\r\n\r\n self.scopes = [self.oauth_scope]\r\n self.app = None\r\n self.certificate_thumbprint = certificate_thumbprint\r\n self.certificate_private_key = certificate_private_key\r\n\r\n def get_access_token(self, force_refresh: bool = False) -> str:\r\n \"\"\"\r\n Implementation of AppCredentials.get_token.\r\n :return: The access token for the given certificate.\r\n \"\"\"\r\n\r\n # Firstly, looks up a token from cache\r\n # Since we are looking for token for the current app, NOT for an end user,\r\n # notice we give account parameter as None.\r\n auth_token = self.__get_msal_app().acquire_token_silent(\r\n self.scopes, account=None\r\n )\r\n if not auth_token:\r\n # No suitable token exists in cache. Let's get a new one from AAD.\r\n auth_token = self.__get_msal_app().acquire_token_for_client(\r\n scopes=self.scopes\r\n )\r\n return auth_token[\"access_token\"]\r\n\r\n def __get_msal_app(self):\r\n if not self.app:\r\n self.app = ConfidentialClientApplication(\r\n client_id=self.microsoft_app_id,\r\n authority=self.oauth_endpoint,\r\n client_credential={\r\n \"thumbprint\": self.certificate_thumbprint,\r\n \"private_key\": self.certificate_private_key,\r\n },\r\n )\r\n\r\n return self.app\r\n", "path": "libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py"}, {"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\nimport os\nfrom setuptools import setup\n\nNAME = \"botframework-connector\"\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.7.1\"\nREQUIRES = [\n \"msrest==0.6.10\",\n \"requests==2.22.0\",\n \"cryptography==2.8.0\",\n \"PyJWT==1.5.3\",\n \"botbuilder-schema>=4.7.1\",\n \"adal==1.2.1\",\n \"msal==1.1.0\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=NAME,\n version=VERSION,\n description=\"Microsoft Bot Framework Bot Builder SDK for Python.\",\n author=\"Microsoft\",\n url=\"https://www.github.com/Microsoft/botbuilder-python\",\n keywords=[\"BotFrameworkConnector\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n install_requires=REQUIRES,\n packages=[\n \"botframework.connector\",\n \"botframework.connector.auth\",\n \"botframework.connector.async_mixin\",\n \"botframework.connector.operations\",\n \"botframework.connector.models\",\n \"botframework.connector.aio\",\n \"botframework.connector.aio.operations_async\",\n \"botframework.connector.teams\",\n \"botframework.connector.teams.operations\",\n \"botframework.connector.token_api\",\n \"botframework.connector.token_api.aio\",\n \"botframework.connector.token_api.models\",\n \"botframework.connector.token_api.operations\",\n ],\n include_package_data=True,\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=\"MIT\",\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botframework-connector/setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\nfrom abc import ABC\r\n\r\nfrom msal import ConfidentialClientApplication\r\n\r\nfrom .app_credentials import AppCredentials\r\n\r\n\r\nclass CertificateAppCredentials(AppCredentials, ABC):\r\n \"\"\"\r\n AppCredentials implementation using a certificate.\r\n\r\n See:\r\n https://github.com/AzureAD/microsoft-authentication-library-for-python/wiki/Client-Credentials#client-credentials-with-certificate\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n app_id: str,\r\n certificate_thumbprint: str,\r\n certificate_private_key: str,\r\n channel_auth_tenant: str = None,\r\n oauth_scope: str = None,\r\n certificate_public: str = None,\r\n ):\r\n \"\"\"\r\n AppCredentials implementation using a certificate.\r\n\r\n :param app_id:\r\n :param certificate_thumbprint:\r\n :param certificate_private_key:\r\n :param channel_auth_tenant:\r\n :param oauth_scope:\r\n :param certificate_public: public_certificate (optional) is public key certificate which will be sent\r\n through \u2018x5c\u2019 JWT header only for subject name and issuer authentication to support cert auto rolls.\r\n \"\"\"\r\n\r\n # super will set proper scope and endpoint.\r\n super().__init__(\r\n app_id=app_id,\r\n channel_auth_tenant=channel_auth_tenant,\r\n oauth_scope=oauth_scope,\r\n )\r\n\r\n self.scopes = [self.oauth_scope]\r\n self.app = None\r\n self.certificate_thumbprint = certificate_thumbprint\r\n self.certificate_private_key = certificate_private_key\r\n self.certificate_public = certificate_public\r\n\r\n def get_access_token(self, force_refresh: bool = False) -> str:\r\n \"\"\"\r\n Implementation of AppCredentials.get_token.\r\n :return: The access token for the given certificate.\r\n \"\"\"\r\n\r\n # Firstly, looks up a token from cache\r\n # Since we are looking for token for the current app, NOT for an end user,\r\n # notice we give account parameter as None.\r\n auth_token = self.__get_msal_app().acquire_token_silent(\r\n self.scopes, account=None\r\n )\r\n if not auth_token:\r\n # No suitable token exists in cache. Let's get a new one from AAD.\r\n auth_token = self.__get_msal_app().acquire_token_for_client(\r\n scopes=self.scopes\r\n )\r\n return auth_token[\"access_token\"]\r\n\r\n def __get_msal_app(self):\r\n if not self.app:\r\n self.app = ConfidentialClientApplication(\r\n client_id=self.microsoft_app_id,\r\n authority=self.oauth_endpoint,\r\n client_credential={\r\n \"thumbprint\": self.certificate_thumbprint,\r\n \"private_key\": self.certificate_private_key,\r\n \"public_certificate\": self.certificate_public\r\n if self.certificate_public\r\n else None,\r\n },\r\n )\r\n\r\n return self.app\r\n", "path": "libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py"}, {"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\nimport os\nfrom setuptools import setup\n\nNAME = \"botframework-connector\"\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.7.1\"\nREQUIRES = [\n \"msrest==0.6.10\",\n \"requests==2.22.0\",\n \"cryptography==2.8.0\",\n \"PyJWT==1.5.3\",\n \"botbuilder-schema>=4.7.1\",\n \"adal==1.2.1\",\n \"msal==1.2.0\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=NAME,\n version=VERSION,\n description=\"Microsoft Bot Framework Bot Builder SDK for Python.\",\n author=\"Microsoft\",\n url=\"https://www.github.com/Microsoft/botbuilder-python\",\n keywords=[\"BotFrameworkConnector\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n install_requires=REQUIRES,\n packages=[\n \"botframework.connector\",\n \"botframework.connector.auth\",\n \"botframework.connector.async_mixin\",\n \"botframework.connector.operations\",\n \"botframework.connector.models\",\n \"botframework.connector.aio\",\n \"botframework.connector.aio.operations_async\",\n \"botframework.connector.teams\",\n \"botframework.connector.teams.operations\",\n \"botframework.connector.token_api\",\n \"botframework.connector.token_api.aio\",\n \"botframework.connector.token_api.models\",\n \"botframework.connector.token_api.operations\",\n ],\n include_package_data=True,\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=\"MIT\",\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botframework-connector/setup.py"}]} | 1,613 | 528 |
gh_patches_debug_65025 | rasdani/github-patches | git_diff | learningequality__kolibri-2777 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CSV export fails if logs refer to a channel that no longer exists locally
### Observed behavior
Tried to export a CSV log via http://kolibribeta.learningequality.org/management/facility/#/data and got a 500 (traceback below). I think the database summary logs referred to a channel that we had since deleted. We try to extract the human-readable channel names to include as a column in the CSV file, but this blows up if we no longer have the channel metadata.
### Expected behavior
Should have exported the CSV successfully, skipping the title for any channels it couldn't find and leaving those cells blank.
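One way to get that behaviour, mirroring the try/except already used in `get_channel_name` (shown only as an illustration of the expected behaviour, not necessarily the fix that was applied):
```python
# Illustration: tolerate a deleted channel the same way get_channel_name does.
def get_content_title(self, obj):
    try:
        channel = ChannelMetadata.objects.get(id=obj.channel_id)
    except ChannelMetadata.DoesNotExist:
        return ""
    node = ContentNode.objects.filter(tree_id=channel.root.tree_id).first()
    return node.title if node else ""
```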
### User-facing consequences
Can completely block the CSV export feature from working.
### Errors and logs
```
ERROR 2017-11-30 23:09:11,954 base Internal Server Error: /api/contentsessionlogcsv/
Traceback (most recent call last):
File "/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/django/core/handlers/base.py", line 149, in get_response
response = self.process_exception_by_middleware(e, request)
File "/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/django/core/handlers/base.py", line 147, in get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/viewsets.py", line 87, in view
return self.dispatch(request, *args, **kwargs)
File "/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/views.py", line 466, in dispatch
response = self.handle_exception(exc)
File "/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/views.py", line 463, in dispatch
response = handler(request, *args, **kwargs)
File "/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/mixins.py", line 48, in list
return Response(serializer.data)
File "/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/serializers.py", line 674, in data
ret = super(ListSerializer, self).data
File "/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/serializers.py", line 239, in data
self._data = self.to_representation(self.instance)
File "/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/serializers.py", line 614, in to_representation
self.child.to_representation(item) for item in iterable
File "/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/serializers.py", line 472, in to_representation
ret[field.field_name] = field.to_representation(attribute)
File "/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/fields.py", line 1653, in to_representation
return method(value)
File "/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/logger/csv.py", line 38, in get_content_title
channel = ChannelMetadata.objects.get(id=obj.channel_id)
File "/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/django/db/models/manager.py", line 122, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/django/db/models/query.py", line 387, in get
self.model._meta.object_name
DoesNotExist: ChannelMetadata matching query does not exist.
```
### Steps to reproduce
1. Download a channel
2. Interact with content to generate logs
3. Delete the channel
4. Try to export the CSV file
### Context
* Kolibri version: 0.7.0.dev020171130214228-git
* Operating system: Ubuntu
* Browser: Chrome
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/logger/csv.py`
Content:
```
1 from kolibri.auth.api import KolibriAuthPermissions, KolibriAuthPermissionsFilter
2 from kolibri.content.models import ChannelMetadata, ContentNode
3 from kolibri.core.api import CSVModelViewSet
4 from rest_framework import serializers
5
6 from .models import ContentSessionLog, ContentSummaryLog
7
8
9 class LogCSVSerializerBase(serializers.ModelSerializer):
10
11 username = serializers.SerializerMethodField()
12 facility_name = serializers.SerializerMethodField()
13 channel_name = serializers.SerializerMethodField()
14 content_title = serializers.SerializerMethodField()
15 time_spent = serializers.SerializerMethodField()
16 progress = serializers.SerializerMethodField()
17
18 def get_username(self, obj):
19 if obj.user:
20 return obj.user.username
21 else:
22 return ""
23
24 def get_facility_name(self, obj):
25 if obj.user:
26 return obj.user.facility.name
27 else:
28 return ""
29
30 def get_channel_name(self, obj):
31 try:
32 channel = ChannelMetadata.objects.get(id=obj.channel_id)
33 except ChannelMetadata.DoesNotExist:
34 return ""
35 return channel.name
36
37 def get_content_title(self, obj):
38 channel = ChannelMetadata.objects.get(id=obj.channel_id)
39 node = ContentNode.objects.filter(tree_id=channel.root.tree_id).first()
40 if node:
41 return node.title
42 else:
43 return ""
44
45 def get_time_spent(self, obj):
46 return str("{:.1f}".format(round(obj.time_spent, 1)))
47
48 def get_progress(self, obj):
49 return str("{:.4f}".format(round(obj.progress, 4)))
50
51
52 class ContentSummaryLogCSVSerializer(LogCSVSerializerBase):
53
54 class Meta:
55 model = ContentSummaryLog
56 fields = ('username', 'facility_name', 'content_id', 'content_title', 'channel_id', 'channel_name', 'start_timestamp',
57 'end_timestamp', 'completion_timestamp', 'time_spent', 'progress', 'kind')
58 labels = {
59 "start_timestamp": "Time of first interaction",
60 "end_timestamp": "Time of last interaction",
61 "completion_timestamp": "Time of completion",
62 "time_spent": "Time Spent (sec)",
63 "progress": "Progress (0-1)",
64 }
65
66
67 class ContentSessionLogCSVSerializer(LogCSVSerializerBase):
68
69 class Meta:
70 model = ContentSessionLog
71 fields = ('username', 'facility_name', 'content_id', 'content_title', 'channel_id', 'channel_name', 'start_timestamp',
72 'end_timestamp', 'time_spent', 'progress', 'kind')
73 labels = {
74 "start_timestamp": "Time of first interaction",
75 "end_timestamp": "Time of last interaction",
76 "time_spent": "Time Spent (sec)",
77 "progress": "Progress (0-1)",
78 }
79
80
81 class ContentSummaryLogCSVExportViewSet(CSVModelViewSet):
82 permission_classes = (KolibriAuthPermissions,)
83 filter_backends = (KolibriAuthPermissionsFilter,)
84 queryset = ContentSummaryLog.objects.all()
85 serializer_class = ContentSummaryLogCSVSerializer
86 csv_export_filename = 'content_summary_logs'
87
88
89 class ContentSessionLogCSVExportViewSet(CSVModelViewSet):
90 permission_classes = (KolibriAuthPermissions,)
91 filter_backends = (KolibriAuthPermissionsFilter,)
92 queryset = ContentSessionLog.objects.all()
93 serializer_class = ContentSessionLogCSVSerializer
94 csv_export_filename = 'content_session_logs'
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/logger/csv.py b/kolibri/logger/csv.py
--- a/kolibri/logger/csv.py
+++ b/kolibri/logger/csv.py
@@ -35,8 +35,7 @@
return channel.name
def get_content_title(self, obj):
- channel = ChannelMetadata.objects.get(id=obj.channel_id)
- node = ContentNode.objects.filter(tree_id=channel.root.tree_id).first()
+ node = ContentNode.objects.filter(content_id=obj.content_id).first()
if node:
return node.title
else:
| {"golden_diff": "diff --git a/kolibri/logger/csv.py b/kolibri/logger/csv.py\n--- a/kolibri/logger/csv.py\n+++ b/kolibri/logger/csv.py\n@@ -35,8 +35,7 @@\n return channel.name\n \n def get_content_title(self, obj):\n- channel = ChannelMetadata.objects.get(id=obj.channel_id)\n- node = ContentNode.objects.filter(tree_id=channel.root.tree_id).first()\n+ node = ContentNode.objects.filter(content_id=obj.content_id).first()\n if node:\n return node.title\n else:\n", "issue": "CSV export fails if logs refer to channel that no longer exists locally\n### Observed behavior\r\n\r\nTried to export CSV log via http://kolibribeta.learningequality.org/management/facility/#/data and got a 500 (traceback below). I think the database summary logs referred to a channel that we since deleted. We try to extract the human-readable channel names to include as a column in the CSV file, but this blows up if we don't have the channel metadata anymore.\r\n\r\n### Expected behavior\r\n\r\nShould have exported the CSV successfully, and just skipped the title for any channels it couldn't find, and leave those cells blank.\r\n\r\n### User-facing consequences\r\n\r\nCan completely block the CSV export feature from working.\r\n\r\n### Errors and logs\r\n\r\n```\r\nERROR 2017-11-30 23:09:11,954 base Internal Server Error: /api/contentsessionlogcsv/\r\nTraceback (most recent call last):\r\n File \"/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/django/core/handlers/base.py\", line 149, in get_response\r\n response = self.process_exception_by_middleware(e, request)\r\n File \"/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/django/core/handlers/base.py\", line 147, in get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/django/views/decorators/csrf.py\", line 58, in wrapped_view\r\n return view_func(*args, **kwargs)\r\n File \"/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/viewsets.py\", line 87, in view\r\n return self.dispatch(request, *args, **kwargs)\r\n File \"/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/views.py\", line 466, in dispatch\r\n response = self.handle_exception(exc)\r\n File \"/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/views.py\", line 463, in dispatch\r\n response = handler(request, *args, **kwargs)\r\n File \"/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/mixins.py\", line 48, in list\r\n return Response(serializer.data)\r\n File 
\"/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/serializers.py\", line 674, in data\r\n ret = super(ListSerializer, self).data\r\n File \"/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/serializers.py\", line 239, in data\r\n self._data = self.to_representation(self.instance)\r\n File \"/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/serializers.py\", line 614, in to_representation\r\n self.child.to_representation(item) for item in iterable\r\n File \"/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/serializers.py\", line 472, in to_representation\r\n ret[field.field_name] = field.to_representation(attribute)\r\n File \"/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/rest_framework/fields.py\", line 1653, in to_representation\r\n return method(value)\r\n File \"/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/logger/csv.py\", line 38, in get_content_title\r\n channel = ChannelMetadata.objects.get(id=obj.channel_id)\r\n File \"/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/django/db/models/manager.py\", line 122, in manager_method\r\n return getattr(self.get_queryset(), name)(*args, **kwargs)\r\n File \"/var/www/.pex/install/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl.3f4dacee3f158ff5e739b8a2a0b0b1cdb1b7a8e1/kolibri-0.7.0.dev020171130214228_git-py2.py3-none-any.whl/kolibri/dist/django/db/models/query.py\", line 387, in get\r\n self.model._meta.object_name\r\nDoesNotExist: ChannelMetadata matching query does not exist.\r\n```\r\n\r\n### Steps to reproduce\r\n\r\n1. Download a channel\r\n2. Interact with content to generate logs\r\n3. Delete the channel\r\n4. 
Try to export the CSV file\r\n\r\n### Context\r\n\r\n* Kolibri version: 0.7.0.dev020171130214228-git\r\n* Operating system: Ubuntu\r\n* Browser: Chrome\r\n\n", "before_files": [{"content": "from kolibri.auth.api import KolibriAuthPermissions, KolibriAuthPermissionsFilter\nfrom kolibri.content.models import ChannelMetadata, ContentNode\nfrom kolibri.core.api import CSVModelViewSet\nfrom rest_framework import serializers\n\nfrom .models import ContentSessionLog, ContentSummaryLog\n\n\nclass LogCSVSerializerBase(serializers.ModelSerializer):\n\n username = serializers.SerializerMethodField()\n facility_name = serializers.SerializerMethodField()\n channel_name = serializers.SerializerMethodField()\n content_title = serializers.SerializerMethodField()\n time_spent = serializers.SerializerMethodField()\n progress = serializers.SerializerMethodField()\n\n def get_username(self, obj):\n if obj.user:\n return obj.user.username\n else:\n return \"\"\n\n def get_facility_name(self, obj):\n if obj.user:\n return obj.user.facility.name\n else:\n return \"\"\n\n def get_channel_name(self, obj):\n try:\n channel = ChannelMetadata.objects.get(id=obj.channel_id)\n except ChannelMetadata.DoesNotExist:\n return \"\"\n return channel.name\n\n def get_content_title(self, obj):\n channel = ChannelMetadata.objects.get(id=obj.channel_id)\n node = ContentNode.objects.filter(tree_id=channel.root.tree_id).first()\n if node:\n return node.title\n else:\n return \"\"\n\n def get_time_spent(self, obj):\n return str(\"{:.1f}\".format(round(obj.time_spent, 1)))\n\n def get_progress(self, obj):\n return str(\"{:.4f}\".format(round(obj.progress, 4)))\n\n\nclass ContentSummaryLogCSVSerializer(LogCSVSerializerBase):\n\n class Meta:\n model = ContentSummaryLog\n fields = ('username', 'facility_name', 'content_id', 'content_title', 'channel_id', 'channel_name', 'start_timestamp',\n 'end_timestamp', 'completion_timestamp', 'time_spent', 'progress', 'kind')\n labels = {\n \"start_timestamp\": \"Time of first interaction\",\n \"end_timestamp\": \"Time of last interaction\",\n \"completion_timestamp\": \"Time of completion\",\n \"time_spent\": \"Time Spent (sec)\",\n \"progress\": \"Progress (0-1)\",\n }\n\n\nclass ContentSessionLogCSVSerializer(LogCSVSerializerBase):\n\n class Meta:\n model = ContentSessionLog\n fields = ('username', 'facility_name', 'content_id', 'content_title', 'channel_id', 'channel_name', 'start_timestamp',\n 'end_timestamp', 'time_spent', 'progress', 'kind')\n labels = {\n \"start_timestamp\": \"Time of first interaction\",\n \"end_timestamp\": \"Time of last interaction\",\n \"time_spent\": \"Time Spent (sec)\",\n \"progress\": \"Progress (0-1)\",\n }\n\n\nclass ContentSummaryLogCSVExportViewSet(CSVModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter,)\n queryset = ContentSummaryLog.objects.all()\n serializer_class = ContentSummaryLogCSVSerializer\n csv_export_filename = 'content_summary_logs'\n\n\nclass ContentSessionLogCSVExportViewSet(CSVModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter,)\n queryset = ContentSessionLog.objects.all()\n serializer_class = ContentSessionLogCSVSerializer\n csv_export_filename = 'content_session_logs'\n", "path": "kolibri/logger/csv.py"}], "after_files": [{"content": "from kolibri.auth.api import KolibriAuthPermissions, KolibriAuthPermissionsFilter\nfrom kolibri.content.models import ChannelMetadata, ContentNode\nfrom kolibri.core.api import 
CSVModelViewSet\nfrom rest_framework import serializers\n\nfrom .models import ContentSessionLog, ContentSummaryLog\n\n\nclass LogCSVSerializerBase(serializers.ModelSerializer):\n\n username = serializers.SerializerMethodField()\n facility_name = serializers.SerializerMethodField()\n channel_name = serializers.SerializerMethodField()\n content_title = serializers.SerializerMethodField()\n time_spent = serializers.SerializerMethodField()\n progress = serializers.SerializerMethodField()\n\n def get_username(self, obj):\n if obj.user:\n return obj.user.username\n else:\n return \"\"\n\n def get_facility_name(self, obj):\n if obj.user:\n return obj.user.facility.name\n else:\n return \"\"\n\n def get_channel_name(self, obj):\n try:\n channel = ChannelMetadata.objects.get(id=obj.channel_id)\n except ChannelMetadata.DoesNotExist:\n return \"\"\n return channel.name\n\n def get_content_title(self, obj):\n node = ContentNode.objects.filter(content_id=obj.content_id).first()\n if node:\n return node.title\n else:\n return \"\"\n\n def get_time_spent(self, obj):\n return str(\"{:.1f}\".format(round(obj.time_spent, 1)))\n\n def get_progress(self, obj):\n return str(\"{:.4f}\".format(round(obj.progress, 4)))\n\n\nclass ContentSummaryLogCSVSerializer(LogCSVSerializerBase):\n\n class Meta:\n model = ContentSummaryLog\n fields = ('username', 'facility_name', 'content_id', 'content_title', 'channel_id', 'channel_name', 'start_timestamp',\n 'end_timestamp', 'completion_timestamp', 'time_spent', 'progress', 'kind')\n labels = {\n \"start_timestamp\": \"Time of first interaction\",\n \"end_timestamp\": \"Time of last interaction\",\n \"completion_timestamp\": \"Time of completion\",\n \"time_spent\": \"Time Spent (sec)\",\n \"progress\": \"Progress (0-1)\",\n }\n\n\nclass ContentSessionLogCSVSerializer(LogCSVSerializerBase):\n\n class Meta:\n model = ContentSessionLog\n fields = ('username', 'facility_name', 'content_id', 'content_title', 'channel_id', 'channel_name', 'start_timestamp',\n 'end_timestamp', 'time_spent', 'progress', 'kind')\n labels = {\n \"start_timestamp\": \"Time of first interaction\",\n \"end_timestamp\": \"Time of last interaction\",\n \"time_spent\": \"Time Spent (sec)\",\n \"progress\": \"Progress (0-1)\",\n }\n\n\nclass ContentSummaryLogCSVExportViewSet(CSVModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter,)\n queryset = ContentSummaryLog.objects.all()\n serializer_class = ContentSummaryLogCSVSerializer\n csv_export_filename = 'content_summary_logs'\n\n\nclass ContentSessionLogCSVExportViewSet(CSVModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter,)\n queryset = ContentSessionLog.objects.all()\n serializer_class = ContentSessionLogCSVSerializer\n csv_export_filename = 'content_session_logs'\n", "path": "kolibri/logger/csv.py"}]} | 3,643 | 121 |
gh_patches_debug_5330 | rasdani/github-patches | git_diff | networkx__networkx-7471 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`algorithms.bridges.bridges()` does a redundant copy of graph
This is the code of the `bridges` function:
```python
def bridges(G, root=None):
multigraph = G.is_multigraph()
H = nx.Graph(G) if multigraph else G
chains = nx.chain_decomposition(H, root=root)
chain_edges = set(chain.from_iterable(chains))
H_copy = H.copy()
if root is not None:
H = H.subgraph(nx.node_connected_component(H, root)).copy()
for u, v in H.edges():
if (u, v) not in chain_edges and (v, u) not in chain_edges:
if multigraph and len(G[u][v]) > 1:
continue
yield u, v
```
The statement in the middle:
```python
H_copy = H.copy()
```
Seems to have no effect, because `H_copy` is not used anywhere else in this module, and therefore **can be removed**.
### Current Behavior
In addition to being redundant (since `H_copy` is not used anywhere), the presence of this statement makes extending the `networkx.Graph` class harder.
If you inherit from `networkx.Graph` and your class has a constructor that requires parameters, you can't use the `bridges()` function at all, because the `.copy()` method contains the following line:
```python
# Body of .copy() method
G = self.__class__()
```
For subclasses of `networkx.Graph` with custom constructors, `self.__class__()` raises a `TypeError` because of the missing arguments.
### Expected Behavior
There should be no redundant lines, and `bridges()` should work for subclasses as long as they don't override the methods it depends on.
### Steps to Reproduce
Set up, for example, a subclass like this:
```python
class Map(nx.Graph):
def __init__(self: tp.Self, routes: tp.Iterable[str]) -> None:
super().__init__()
```
Initialize this object like `Map(routes=("a", "b"))` and try using `bridges()` with it.
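A hypothetical completion of that reproduction (the constructor body is assumed, since the snippet above is truncated; the failure comes from `H_copy = H.copy()` calling `self.__class__()` with no arguments):
```python
import typing as tp

import networkx as nx


class Map(nx.Graph):
    def __init__(self, routes: tp.Iterable[str]) -> None:
        super().__init__()
        self.add_nodes_from(routes)  # assumed body; the original snippet omits it


m = Map(routes=("a", "b"))
m.add_edge("a", "b")
list(nx.bridges(m))  # raises TypeError: Map() is instantiated with no arguments inside H.copy()
```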
### Environment
Python version: 3.11
NetworkX version: 3.3
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `networkx/algorithms/bridges.py`
Content:
```
1 """Bridge-finding algorithms."""
2 from itertools import chain
3
4 import networkx as nx
5 from networkx.utils import not_implemented_for
6
7 __all__ = ["bridges", "has_bridges", "local_bridges"]
8
9
10 @not_implemented_for("directed")
11 @nx._dispatchable
12 def bridges(G, root=None):
13 """Generate all bridges in a graph.
14
15 A *bridge* in a graph is an edge whose removal causes the number of
16 connected components of the graph to increase. Equivalently, a bridge is an
17 edge that does not belong to any cycle. Bridges are also known as cut-edges,
18 isthmuses, or cut arcs.
19
20 Parameters
21 ----------
22 G : undirected graph
23
24 root : node (optional)
25 A node in the graph `G`. If specified, only the bridges in the
26 connected component containing this node will be returned.
27
28 Yields
29 ------
30 e : edge
31 An edge in the graph whose removal disconnects the graph (or
32 causes the number of connected components to increase).
33
34 Raises
35 ------
36 NodeNotFound
37 If `root` is not in the graph `G`.
38
39 NetworkXNotImplemented
40 If `G` is a directed graph.
41
42 Examples
43 --------
44 The barbell graph with parameter zero has a single bridge:
45
46 >>> G = nx.barbell_graph(10, 0)
47 >>> list(nx.bridges(G))
48 [(9, 10)]
49
50 Notes
51 -----
52 This is an implementation of the algorithm described in [1]_. An edge is a
53 bridge if and only if it is not contained in any chain. Chains are found
54 using the :func:`networkx.chain_decomposition` function.
55
56 The algorithm described in [1]_ requires a simple graph. If the provided
57 graph is a multigraph, we convert it to a simple graph and verify that any
58 bridges discovered by the chain decomposition algorithm are not multi-edges.
59
60 Ignoring polylogarithmic factors, the worst-case time complexity is the
61 same as the :func:`networkx.chain_decomposition` function,
62 $O(m + n)$, where $n$ is the number of nodes in the graph and $m$ is
63 the number of edges.
64
65 References
66 ----------
67 .. [1] https://en.wikipedia.org/wiki/Bridge_%28graph_theory%29#Bridge-Finding_with_Chain_Decompositions
68 """
69 multigraph = G.is_multigraph()
70 H = nx.Graph(G) if multigraph else G
71 chains = nx.chain_decomposition(H, root=root)
72 chain_edges = set(chain.from_iterable(chains))
73 H_copy = H.copy()
74 if root is not None:
75 H = H.subgraph(nx.node_connected_component(H, root)).copy()
76 for u, v in H.edges():
77 if (u, v) not in chain_edges and (v, u) not in chain_edges:
78 if multigraph and len(G[u][v]) > 1:
79 continue
80 yield u, v
81
82
83 @not_implemented_for("directed")
84 @nx._dispatchable
85 def has_bridges(G, root=None):
86 """Decide whether a graph has any bridges.
87
88 A *bridge* in a graph is an edge whose removal causes the number of
89 connected components of the graph to increase.
90
91 Parameters
92 ----------
93 G : undirected graph
94
95 root : node (optional)
96 A node in the graph `G`. If specified, only the bridges in the
97 connected component containing this node will be considered.
98
99 Returns
100 -------
101 bool
102 Whether the graph (or the connected component containing `root`)
103 has any bridges.
104
105 Raises
106 ------
107 NodeNotFound
108 If `root` is not in the graph `G`.
109
110 NetworkXNotImplemented
111 If `G` is a directed graph.
112
113 Examples
114 --------
115 The barbell graph with parameter zero has a single bridge::
116
117 >>> G = nx.barbell_graph(10, 0)
118 >>> nx.has_bridges(G)
119 True
120
121 On the other hand, the cycle graph has no bridges::
122
123 >>> G = nx.cycle_graph(5)
124 >>> nx.has_bridges(G)
125 False
126
127 Notes
128 -----
129 This implementation uses the :func:`networkx.bridges` function, so
130 it shares its worst-case time complexity, $O(m + n)$, ignoring
131 polylogarithmic factors, where $n$ is the number of nodes in the
132 graph and $m$ is the number of edges.
133
134 """
135 try:
136 next(bridges(G, root=root))
137 except StopIteration:
138 return False
139 else:
140 return True
141
142
143 @not_implemented_for("multigraph")
144 @not_implemented_for("directed")
145 @nx._dispatchable(edge_attrs="weight")
146 def local_bridges(G, with_span=True, weight=None):
147 """Iterate over local bridges of `G` optionally computing the span
148
149 A *local bridge* is an edge whose endpoints have no common neighbors.
150 That is, the edge is not part of a triangle in the graph.
151
152 The *span* of a *local bridge* is the shortest path length between
153 the endpoints if the local bridge is removed.
154
155 Parameters
156 ----------
157 G : undirected graph
158
159 with_span : bool
160 If True, yield a 3-tuple `(u, v, span)`
161
162 weight : function, string or None (default: None)
163 If function, used to compute edge weights for the span.
164 If string, the edge data attribute used in calculating span.
165 If None, all edges have weight 1.
166
167 Yields
168 ------
169 e : edge
170 The local bridges as an edge 2-tuple of nodes `(u, v)` or
171 as a 3-tuple `(u, v, span)` when `with_span is True`.
172
173 Raises
174 ------
175 NetworkXNotImplemented
176 If `G` is a directed graph or multigraph.
177
178 Examples
179 --------
180 A cycle graph has every edge a local bridge with span N-1.
181
182 >>> G = nx.cycle_graph(9)
183 >>> (0, 8, 8) in set(nx.local_bridges(G))
184 True
185 """
186 if with_span is not True:
187 for u, v in G.edges:
188 if not (set(G[u]) & set(G[v])):
189 yield u, v
190 else:
191 wt = nx.weighted._weight_function(G, weight)
192 for u, v in G.edges:
193 if not (set(G[u]) & set(G[v])):
194 enodes = {u, v}
195
196 def hide_edge(n, nbr, d):
197 if n not in enodes or nbr not in enodes:
198 return wt(n, nbr, d)
199 return None
200
201 try:
202 span = nx.shortest_path_length(G, u, v, weight=hide_edge)
203 yield u, v, span
204 except nx.NetworkXNoPath:
205 yield u, v, float("inf")
206
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/networkx/algorithms/bridges.py b/networkx/algorithms/bridges.py
--- a/networkx/algorithms/bridges.py
+++ b/networkx/algorithms/bridges.py
@@ -70,7 +70,6 @@
H = nx.Graph(G) if multigraph else G
chains = nx.chain_decomposition(H, root=root)
chain_edges = set(chain.from_iterable(chains))
- H_copy = H.copy()
if root is not None:
H = H.subgraph(nx.node_connected_component(H, root)).copy()
for u, v in H.edges():
| {"golden_diff": "diff --git a/networkx/algorithms/bridges.py b/networkx/algorithms/bridges.py\n--- a/networkx/algorithms/bridges.py\n+++ b/networkx/algorithms/bridges.py\n@@ -70,7 +70,6 @@\n H = nx.Graph(G) if multigraph else G\n chains = nx.chain_decomposition(H, root=root)\n chain_edges = set(chain.from_iterable(chains))\n- H_copy = H.copy()\n if root is not None:\n H = H.subgraph(nx.node_connected_component(H, root)).copy()\n for u, v in H.edges():\n", "issue": "`algorithms.bridges.bridges()` does a redundant copy of graph\nThis is the code of the `bridges` function:\r\n\r\n```python\r\ndef bridges(G, root=None):\r\n multigraph = G.is_multigraph()\r\n H = nx.Graph(G) if multigraph else G\r\n chains = nx.chain_decomposition(H, root=root)\r\n chain_edges = set(chain.from_iterable(chains))\r\n H_copy = H.copy()\r\n if root is not None:\r\n H = H.subgraph(nx.node_connected_component(H, root)).copy()\r\n for u, v in H.edges():\r\n if (u, v) not in chain_edges and (v, u) not in chain_edges:\r\n if multigraph and len(G[u][v]) > 1:\r\n continue\r\n yield u, v\r\n```\r\n\r\nThe statement in the middle:\r\n\r\n```python\r\n H_copy = H.copy()\r\n```\r\n\r\nSeem to have no effect, because `H_copy` is not used anywhere else in this module, and therefore **can be removed**.\r\n\r\n### Current Behavior\r\n\r\nActually in addition to being redundant due to `H_copy` not used anywhere, presence of this statement makes extending the `networkx.Graph` class harder.\r\n\r\nIf you inherit from `networkx.Graph` and your class would have a constructor that requires some parameters to be provided, you wouldn't be able to use the `bridges()` function at all. Because this `.copy()` method has the following line:\r\n\r\n```python\r\n# Body of .copy() method\r\nG = self.__class__()\r\n```\r\n\r\nAnd for inheritors of `networkx.Graph` that have custom constructors `self.__class__()` raises `TypeError` on missing arguments.\r\n\r\n### Expected Behavior\r\n\r\nThere should be no redundant lines, and `bridges()` should work for inheritors as long as they don't override some dependent methods.\r\n\r\n### Steps to Reproduce\r\n\r\nSet up for example this:\r\n\r\n```python\r\nclass Map(nx.Graph):\r\n\r\n def __init__(self: tp.Self, routes: tp.Iterable[str]) -> None:\r\n super().__init__()\r\n```\r\n\r\nInitialize this object like `Map(routes=(\"a\", \"b\"))` and try using `bridges()` with it.\r\n\r\n### Environment\r\n\r\nPython version: 3.11\r\nNetworkX version: 3.3\r\n\n", "before_files": [{"content": "\"\"\"Bridge-finding algorithms.\"\"\"\nfrom itertools import chain\n\nimport networkx as nx\nfrom networkx.utils import not_implemented_for\n\n__all__ = [\"bridges\", \"has_bridges\", \"local_bridges\"]\n\n\n@not_implemented_for(\"directed\")\n@nx._dispatchable\ndef bridges(G, root=None):\n \"\"\"Generate all bridges in a graph.\n\n A *bridge* in a graph is an edge whose removal causes the number of\n connected components of the graph to increase. Equivalently, a bridge is an\n edge that does not belong to any cycle. Bridges are also known as cut-edges,\n isthmuses, or cut arcs.\n\n Parameters\n ----------\n G : undirected graph\n\n root : node (optional)\n A node in the graph `G`. 
If specified, only the bridges in the\n connected component containing this node will be returned.\n\n Yields\n ------\n e : edge\n An edge in the graph whose removal disconnects the graph (or\n causes the number of connected components to increase).\n\n Raises\n ------\n NodeNotFound\n If `root` is not in the graph `G`.\n\n NetworkXNotImplemented\n If `G` is a directed graph.\n\n Examples\n --------\n The barbell graph with parameter zero has a single bridge:\n\n >>> G = nx.barbell_graph(10, 0)\n >>> list(nx.bridges(G))\n [(9, 10)]\n\n Notes\n -----\n This is an implementation of the algorithm described in [1]_. An edge is a\n bridge if and only if it is not contained in any chain. Chains are found\n using the :func:`networkx.chain_decomposition` function.\n\n The algorithm described in [1]_ requires a simple graph. If the provided\n graph is a multigraph, we convert it to a simple graph and verify that any\n bridges discovered by the chain decomposition algorithm are not multi-edges.\n\n Ignoring polylogarithmic factors, the worst-case time complexity is the\n same as the :func:`networkx.chain_decomposition` function,\n $O(m + n)$, where $n$ is the number of nodes in the graph and $m$ is\n the number of edges.\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Bridge_%28graph_theory%29#Bridge-Finding_with_Chain_Decompositions\n \"\"\"\n multigraph = G.is_multigraph()\n H = nx.Graph(G) if multigraph else G\n chains = nx.chain_decomposition(H, root=root)\n chain_edges = set(chain.from_iterable(chains))\n H_copy = H.copy()\n if root is not None:\n H = H.subgraph(nx.node_connected_component(H, root)).copy()\n for u, v in H.edges():\n if (u, v) not in chain_edges and (v, u) not in chain_edges:\n if multigraph and len(G[u][v]) > 1:\n continue\n yield u, v\n\n\n@not_implemented_for(\"directed\")\n@nx._dispatchable\ndef has_bridges(G, root=None):\n \"\"\"Decide whether a graph has any bridges.\n\n A *bridge* in a graph is an edge whose removal causes the number of\n connected components of the graph to increase.\n\n Parameters\n ----------\n G : undirected graph\n\n root : node (optional)\n A node in the graph `G`. 
If specified, only the bridges in the\n connected component containing this node will be considered.\n\n Returns\n -------\n bool\n Whether the graph (or the connected component containing `root`)\n has any bridges.\n\n Raises\n ------\n NodeNotFound\n If `root` is not in the graph `G`.\n\n NetworkXNotImplemented\n If `G` is a directed graph.\n\n Examples\n --------\n The barbell graph with parameter zero has a single bridge::\n\n >>> G = nx.barbell_graph(10, 0)\n >>> nx.has_bridges(G)\n True\n\n On the other hand, the cycle graph has no bridges::\n\n >>> G = nx.cycle_graph(5)\n >>> nx.has_bridges(G)\n False\n\n Notes\n -----\n This implementation uses the :func:`networkx.bridges` function, so\n it shares its worst-case time complexity, $O(m + n)$, ignoring\n polylogarithmic factors, where $n$ is the number of nodes in the\n graph and $m$ is the number of edges.\n\n \"\"\"\n try:\n next(bridges(G, root=root))\n except StopIteration:\n return False\n else:\n return True\n\n\n@not_implemented_for(\"multigraph\")\n@not_implemented_for(\"directed\")\n@nx._dispatchable(edge_attrs=\"weight\")\ndef local_bridges(G, with_span=True, weight=None):\n \"\"\"Iterate over local bridges of `G` optionally computing the span\n\n A *local bridge* is an edge whose endpoints have no common neighbors.\n That is, the edge is not part of a triangle in the graph.\n\n The *span* of a *local bridge* is the shortest path length between\n the endpoints if the local bridge is removed.\n\n Parameters\n ----------\n G : undirected graph\n\n with_span : bool\n If True, yield a 3-tuple `(u, v, span)`\n\n weight : function, string or None (default: None)\n If function, used to compute edge weights for the span.\n If string, the edge data attribute used in calculating span.\n If None, all edges have weight 1.\n\n Yields\n ------\n e : edge\n The local bridges as an edge 2-tuple of nodes `(u, v)` or\n as a 3-tuple `(u, v, span)` when `with_span is True`.\n\n Raises\n ------\n NetworkXNotImplemented\n If `G` is a directed graph or multigraph.\n\n Examples\n --------\n A cycle graph has every edge a local bridge with span N-1.\n\n >>> G = nx.cycle_graph(9)\n >>> (0, 8, 8) in set(nx.local_bridges(G))\n True\n \"\"\"\n if with_span is not True:\n for u, v in G.edges:\n if not (set(G[u]) & set(G[v])):\n yield u, v\n else:\n wt = nx.weighted._weight_function(G, weight)\n for u, v in G.edges:\n if not (set(G[u]) & set(G[v])):\n enodes = {u, v}\n\n def hide_edge(n, nbr, d):\n if n not in enodes or nbr not in enodes:\n return wt(n, nbr, d)\n return None\n\n try:\n span = nx.shortest_path_length(G, u, v, weight=hide_edge)\n yield u, v, span\n except nx.NetworkXNoPath:\n yield u, v, float(\"inf\")\n", "path": "networkx/algorithms/bridges.py"}], "after_files": [{"content": "\"\"\"Bridge-finding algorithms.\"\"\"\nfrom itertools import chain\n\nimport networkx as nx\nfrom networkx.utils import not_implemented_for\n\n__all__ = [\"bridges\", \"has_bridges\", \"local_bridges\"]\n\n\n@not_implemented_for(\"directed\")\n@nx._dispatchable\ndef bridges(G, root=None):\n \"\"\"Generate all bridges in a graph.\n\n A *bridge* in a graph is an edge whose removal causes the number of\n connected components of the graph to increase. Equivalently, a bridge is an\n edge that does not belong to any cycle. Bridges are also known as cut-edges,\n isthmuses, or cut arcs.\n\n Parameters\n ----------\n G : undirected graph\n\n root : node (optional)\n A node in the graph `G`. 
If specified, only the bridges in the\n connected component containing this node will be returned.\n\n Yields\n ------\n e : edge\n An edge in the graph whose removal disconnects the graph (or\n causes the number of connected components to increase).\n\n Raises\n ------\n NodeNotFound\n If `root` is not in the graph `G`.\n\n NetworkXNotImplemented\n If `G` is a directed graph.\n\n Examples\n --------\n The barbell graph with parameter zero has a single bridge:\n\n >>> G = nx.barbell_graph(10, 0)\n >>> list(nx.bridges(G))\n [(9, 10)]\n\n Notes\n -----\n This is an implementation of the algorithm described in [1]_. An edge is a\n bridge if and only if it is not contained in any chain. Chains are found\n using the :func:`networkx.chain_decomposition` function.\n\n The algorithm described in [1]_ requires a simple graph. If the provided\n graph is a multigraph, we convert it to a simple graph and verify that any\n bridges discovered by the chain decomposition algorithm are not multi-edges.\n\n Ignoring polylogarithmic factors, the worst-case time complexity is the\n same as the :func:`networkx.chain_decomposition` function,\n $O(m + n)$, where $n$ is the number of nodes in the graph and $m$ is\n the number of edges.\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Bridge_%28graph_theory%29#Bridge-Finding_with_Chain_Decompositions\n \"\"\"\n multigraph = G.is_multigraph()\n H = nx.Graph(G) if multigraph else G\n chains = nx.chain_decomposition(H, root=root)\n chain_edges = set(chain.from_iterable(chains))\n if root is not None:\n H = H.subgraph(nx.node_connected_component(H, root)).copy()\n for u, v in H.edges():\n if (u, v) not in chain_edges and (v, u) not in chain_edges:\n if multigraph and len(G[u][v]) > 1:\n continue\n yield u, v\n\n\n@not_implemented_for(\"directed\")\n@nx._dispatchable\ndef has_bridges(G, root=None):\n \"\"\"Decide whether a graph has any bridges.\n\n A *bridge* in a graph is an edge whose removal causes the number of\n connected components of the graph to increase.\n\n Parameters\n ----------\n G : undirected graph\n\n root : node (optional)\n A node in the graph `G`. 
If specified, only the bridges in the\n connected component containing this node will be considered.\n\n Returns\n -------\n bool\n Whether the graph (or the connected component containing `root`)\n has any bridges.\n\n Raises\n ------\n NodeNotFound\n If `root` is not in the graph `G`.\n\n NetworkXNotImplemented\n If `G` is a directed graph.\n\n Examples\n --------\n The barbell graph with parameter zero has a single bridge::\n\n >>> G = nx.barbell_graph(10, 0)\n >>> nx.has_bridges(G)\n True\n\n On the other hand, the cycle graph has no bridges::\n\n >>> G = nx.cycle_graph(5)\n >>> nx.has_bridges(G)\n False\n\n Notes\n -----\n This implementation uses the :func:`networkx.bridges` function, so\n it shares its worst-case time complexity, $O(m + n)$, ignoring\n polylogarithmic factors, where $n$ is the number of nodes in the\n graph and $m$ is the number of edges.\n\n \"\"\"\n try:\n next(bridges(G, root=root))\n except StopIteration:\n return False\n else:\n return True\n\n\n@not_implemented_for(\"multigraph\")\n@not_implemented_for(\"directed\")\n@nx._dispatchable(edge_attrs=\"weight\")\ndef local_bridges(G, with_span=True, weight=None):\n \"\"\"Iterate over local bridges of `G` optionally computing the span\n\n A *local bridge* is an edge whose endpoints have no common neighbors.\n That is, the edge is not part of a triangle in the graph.\n\n The *span* of a *local bridge* is the shortest path length between\n the endpoints if the local bridge is removed.\n\n Parameters\n ----------\n G : undirected graph\n\n with_span : bool\n If True, yield a 3-tuple `(u, v, span)`\n\n weight : function, string or None (default: None)\n If function, used to compute edge weights for the span.\n If string, the edge data attribute used in calculating span.\n If None, all edges have weight 1.\n\n Yields\n ------\n e : edge\n The local bridges as an edge 2-tuple of nodes `(u, v)` or\n as a 3-tuple `(u, v, span)` when `with_span is True`.\n\n Raises\n ------\n NetworkXNotImplemented\n If `G` is a directed graph or multigraph.\n\n Examples\n --------\n A cycle graph has every edge a local bridge with span N-1.\n\n >>> G = nx.cycle_graph(9)\n >>> (0, 8, 8) in set(nx.local_bridges(G))\n True\n \"\"\"\n if with_span is not True:\n for u, v in G.edges:\n if not (set(G[u]) & set(G[v])):\n yield u, v\n else:\n wt = nx.weighted._weight_function(G, weight)\n for u, v in G.edges:\n if not (set(G[u]) & set(G[v])):\n enodes = {u, v}\n\n def hide_edge(n, nbr, d):\n if n not in enodes or nbr not in enodes:\n return wt(n, nbr, d)\n return None\n\n try:\n span = nx.shortest_path_length(G, u, v, weight=hide_edge)\n yield u, v, span\n except nx.NetworkXNoPath:\n yield u, v, float(\"inf\")\n", "path": "networkx/algorithms/bridges.py"}]} | 2,817 | 133 |
gh_patches_debug_19257 | rasdani/github-patches | git_diff | ESMCI__cime-1240 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PR #1230 appears to break batch systems
After merging #1230 I get an error from Z_FullSystemTest:
```
Traceback (most recent call last):
  File "./scripts_regression_tests.py", line 1175, in test_full_system
    self.assertTrue(test_time > 0, msg="test time was zero for %s" % test_status)
AssertionError: test time was zero for /scratch/cluster/jedwards/scripts_regression_test.20170313_145646/ERR.f45_g37_rx1.A.hobart_intel.fake_testing_only_20170313_151740/TestStatus
```
All of the tests actually passed.
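The zero test time is consistent with `case.submit` unconditionally re-marking the SUBMIT phase and thereby discarding phase information recorded for the run (this diagnosis is an assumption). A sketch of a guarded update, using the `TestStatus` API as it appears in `case_submit.py` below:
```python
with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:
    # Only pre-mark SUBMIT when it is not already PASS, so existing phase info survives.
    if ts.get_status(SUBMIT_PHASE) != TEST_PASS_STATUS:
        ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)
```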
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/lib/CIME/case_submit.py`
Content:
```
1 #!/usr/bin/env python
2
3 """
4 case.submit - Submit a cesm workflow to the queueing system or run it
5 if there is no queueing system. A cesm workflow may include multiple
6 jobs.
7 """
8 import socket
9 from CIME.XML.standard_module_setup import *
10 from CIME.utils import expect, run_and_log_case_status
11 from CIME.preview_namelists import create_namelists
12 from CIME.check_lockedfiles import check_lockedfiles
13 from CIME.check_input_data import check_all_input_data
14 from CIME.test_status import *
15
16 logger = logging.getLogger(__name__)
17
18 def _submit(case, job=None, resubmit=False, no_batch=False, batch_args=None):
19 caseroot = case.get_value("CASEROOT")
20
21 if job is None:
22 if case.get_value("TEST"):
23 job = "case.test"
24 else:
25 job = "case.run"
26
27 if resubmit:
28 resub = case.get_value("RESUBMIT")
29 logger.info("Submitting job '%s', resubmit=%d" % (job, resub))
30 case.set_value("RESUBMIT",resub-1)
31 if case.get_value("RESUBMIT_SETS_CONTINUE_RUN"):
32 case.set_value("CONTINUE_RUN", True)
33 else:
34 if job in ("case.test","case.run"):
35 check_case(case, caseroot)
36 check_DA_settings(case)
37 if case.get_value("MACH") == "mira":
38 with open(".original_host","w") as fd:
39 fd.write( socket.gethostname())
40
41 # if case.submit is called with the no_batch flag then we assume that this
42 # flag will stay in effect for the duration of the RESUBMITs
43 env_batch = case.get_env("batch")
44 if not resubmit:
45 case.set_value("IS_FIRST_RUN", True)
46 if no_batch:
47 batch_system = "none"
48 else:
49 batch_system = env_batch.get_batch_system_type()
50 case.set_value("BATCH_SYSTEM", batch_system)
51 else:
52 if env_batch.get_batch_system_type() == "none":
53 no_batch = True
54
55 # This is a resubmission, do not reinitialize test values
56 case.set_value("IS_FIRST_RUN", False)
57
58 #Load Modules
59 case.load_env()
60
61 case.set_value("RUN_WITH_SUBMIT",True)
62 case.flush()
63
64 logger.warn("submit_jobs %s" % job)
65 job_ids = case.submit_jobs(no_batch=no_batch, job=job, batch_args=batch_args)
66 logger.info("Submitted job ids %s" % job_ids)
67
68 def submit(case, job=None, resubmit=False, no_batch=False, batch_args=None):
69 if case.get_value("TEST"):
70 caseroot = case.get_value("CASEROOT")
71 casebaseid = case.get_value("CASEBASEID")
72 # This should take care of the race condition where the submitted job
73 # begins immediately and tries to set RUN phase. We proactively assume
74 # a passed SUBMIT phase.
75 with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:
76 ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)
77
78 try:
79 functor = lambda: _submit(case, job, resubmit, no_batch, batch_args)
80 run_and_log_case_status(functor, "case.submit", caseroot=case.get_value("CASEROOT"))
81 except:
82 # If something failed in the batch system, make sure to mark
83 # the test as failed if we are running a test.
84 if case.get_value("TEST"):
85 with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:
86 ts.set_status(SUBMIT_PHASE, TEST_FAIL_STATUS)
87
88 raise
89
90 def check_case(case, caseroot):
91 check_lockedfiles(caseroot)
92 create_namelists(case) # Must be called before check_all_input_data
93 logger.info("Checking that inputdata is available as part of case submission")
94 check_all_input_data(case)
95
96 expect(case.get_value("BUILD_COMPLETE"), "Build complete is "
97 "not True please rebuild the model by calling case.build")
98 logger.info("Check case OK")
99
100 def check_DA_settings(case):
101 if case.get_value("DATA_ASSIMILATION"):
102 script = case.get_value("DATA_ASSIMILATION_SCRIPT")
103 cycles = case.get_value("DATA_ASSIMILATION_CYCLES")
104 logger.info("Data Assimilation enabled using script %s with %d cycles"%(script,cycles))
105
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/lib/CIME/case_submit.py b/scripts/lib/CIME/case_submit.py
--- a/scripts/lib/CIME/case_submit.py
+++ b/scripts/lib/CIME/case_submit.py
@@ -71,9 +71,13 @@
casebaseid = case.get_value("CASEBASEID")
# This should take care of the race condition where the submitted job
# begins immediately and tries to set RUN phase. We proactively assume
- # a passed SUBMIT phase.
+ # a passed SUBMIT phase. If this state is already PASS, don't set it again
+ # because then we'll lose RUN phase info if it's there. This info is important
+ # for system_tests_common to know if it needs to reinitialize the test or not.
with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:
- ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)
+ phase_status = ts.get_status(SUBMIT_PHASE)
+ if phase_status != TEST_PASS_STATUS:
+ ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)
try:
functor = lambda: _submit(case, job, resubmit, no_batch, batch_args)
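The patch above only marks the SUBMIT phase as passed when it is not already `PASS`, so a resubmission no longer overwrites a status file that already carries RUN-phase information. A minimal sketch of that guarded update, reusing the names from the diff:

```python
# Only set SUBMIT to PASS if it is not already recorded as such; an existing
# PASS (and any RUN phase info stored after it) is left untouched.
with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:
    if ts.get_status(SUBMIT_PHASE) != TEST_PASS_STATUS:
        ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)
```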
| {"golden_diff": "diff --git a/scripts/lib/CIME/case_submit.py b/scripts/lib/CIME/case_submit.py\n--- a/scripts/lib/CIME/case_submit.py\n+++ b/scripts/lib/CIME/case_submit.py\n@@ -71,9 +71,13 @@\n casebaseid = case.get_value(\"CASEBASEID\")\n # This should take care of the race condition where the submitted job\n # begins immediately and tries to set RUN phase. We proactively assume\n- # a passed SUBMIT phase.\n+ # a passed SUBMIT phase. If this state is already PASS, don't set it again\n+ # because then we'll lose RUN phase info if it's there. This info is important\n+ # for system_tests_common to know if it needs to reinitialize the test or not.\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n- ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)\n+ phase_status = ts.get_status(SUBMIT_PHASE)\n+ if phase_status != TEST_PASS_STATUS:\n+ ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)\n \n try:\n functor = lambda: _submit(case, job, resubmit, no_batch, batch_args)\n", "issue": "PR #1230 appears to break batch systems \nAfter merging #1230 I get an error from Z_FullSystemTest:\r\n\r\nTraceback (most recent call last):\r\n File \"./scripts_regression_tests.py\", line 1175, in test_full_system\r\n self.assertTrue(test_time > 0, msg=\"test time was zero for %s\" % test_status)\r\nAssertionError: test time was zero for /scratch/cluster/jedwards/scripts_regression_test.20170313_145646/ERR.f45_g37_rx1.A.hobart_intel.fake_testing_only_20170313_151740/TestStatus\r\n\r\n\r\nAll of the tests actually passed. \n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\ncase.submit - Submit a cesm workflow to the queueing system or run it\nif there is no queueing system. A cesm workflow may include multiple\njobs.\n\"\"\"\nimport socket\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect, run_and_log_case_status\nfrom CIME.preview_namelists import create_namelists\nfrom CIME.check_lockedfiles import check_lockedfiles\nfrom CIME.check_input_data import check_all_input_data\nfrom CIME.test_status import *\n\nlogger = logging.getLogger(__name__)\n\ndef _submit(case, job=None, resubmit=False, no_batch=False, batch_args=None):\n caseroot = case.get_value(\"CASEROOT\")\n\n if job is None:\n if case.get_value(\"TEST\"):\n job = \"case.test\"\n else:\n job = \"case.run\"\n\n if resubmit:\n resub = case.get_value(\"RESUBMIT\")\n logger.info(\"Submitting job '%s', resubmit=%d\" % (job, resub))\n case.set_value(\"RESUBMIT\",resub-1)\n if case.get_value(\"RESUBMIT_SETS_CONTINUE_RUN\"):\n case.set_value(\"CONTINUE_RUN\", True)\n else:\n if job in (\"case.test\",\"case.run\"):\n check_case(case, caseroot)\n check_DA_settings(case)\n if case.get_value(\"MACH\") == \"mira\":\n with open(\".original_host\",\"w\") as fd:\n fd.write( socket.gethostname())\n\n # if case.submit is called with the no_batch flag then we assume that this\n # flag will stay in effect for the duration of the RESUBMITs\n env_batch = case.get_env(\"batch\")\n if not resubmit:\n case.set_value(\"IS_FIRST_RUN\", True)\n if no_batch:\n batch_system = \"none\"\n else:\n batch_system = env_batch.get_batch_system_type()\n case.set_value(\"BATCH_SYSTEM\", batch_system)\n else:\n if env_batch.get_batch_system_type() == \"none\":\n no_batch = True\n\n # This is a resubmission, do not reinitialize test values\n case.set_value(\"IS_FIRST_RUN\", False)\n\n #Load Modules\n case.load_env()\n\n case.set_value(\"RUN_WITH_SUBMIT\",True)\n case.flush()\n\n logger.warn(\"submit_jobs %s\" % job)\n job_ids = 
case.submit_jobs(no_batch=no_batch, job=job, batch_args=batch_args)\n logger.info(\"Submitted job ids %s\" % job_ids)\n\ndef submit(case, job=None, resubmit=False, no_batch=False, batch_args=None):\n if case.get_value(\"TEST\"):\n caseroot = case.get_value(\"CASEROOT\")\n casebaseid = case.get_value(\"CASEBASEID\")\n # This should take care of the race condition where the submitted job\n # begins immediately and tries to set RUN phase. We proactively assume\n # a passed SUBMIT phase.\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)\n\n try:\n functor = lambda: _submit(case, job, resubmit, no_batch, batch_args)\n run_and_log_case_status(functor, \"case.submit\", caseroot=case.get_value(\"CASEROOT\"))\n except:\n # If something failed in the batch system, make sure to mark\n # the test as failed if we are running a test.\n if case.get_value(\"TEST\"):\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n ts.set_status(SUBMIT_PHASE, TEST_FAIL_STATUS)\n\n raise\n\ndef check_case(case, caseroot):\n check_lockedfiles(caseroot)\n create_namelists(case) # Must be called before check_all_input_data\n logger.info(\"Checking that inputdata is available as part of case submission\")\n check_all_input_data(case)\n\n expect(case.get_value(\"BUILD_COMPLETE\"), \"Build complete is \"\n \"not True please rebuild the model by calling case.build\")\n logger.info(\"Check case OK\")\n\ndef check_DA_settings(case):\n if case.get_value(\"DATA_ASSIMILATION\"):\n script = case.get_value(\"DATA_ASSIMILATION_SCRIPT\")\n cycles = case.get_value(\"DATA_ASSIMILATION_CYCLES\")\n logger.info(\"Data Assimilation enabled using script %s with %d cycles\"%(script,cycles))\n\n", "path": "scripts/lib/CIME/case_submit.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\ncase.submit - Submit a cesm workflow to the queueing system or run it\nif there is no queueing system. 
A cesm workflow may include multiple\njobs.\n\"\"\"\nimport socket\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect, run_and_log_case_status\nfrom CIME.preview_namelists import create_namelists\nfrom CIME.check_lockedfiles import check_lockedfiles\nfrom CIME.check_input_data import check_all_input_data\nfrom CIME.test_status import *\n\nlogger = logging.getLogger(__name__)\n\ndef _submit(case, job=None, resubmit=False, no_batch=False, batch_args=None):\n caseroot = case.get_value(\"CASEROOT\")\n\n if job is None:\n if case.get_value(\"TEST\"):\n job = \"case.test\"\n else:\n job = \"case.run\"\n\n if resubmit:\n resub = case.get_value(\"RESUBMIT\")\n logger.info(\"Submitting job '%s', resubmit=%d\" % (job, resub))\n case.set_value(\"RESUBMIT\",resub-1)\n if case.get_value(\"RESUBMIT_SETS_CONTINUE_RUN\"):\n case.set_value(\"CONTINUE_RUN\", True)\n else:\n if job in (\"case.test\",\"case.run\"):\n check_case(case, caseroot)\n check_DA_settings(case)\n if case.get_value(\"MACH\") == \"mira\":\n with open(\".original_host\",\"w\") as fd:\n fd.write( socket.gethostname())\n\n # if case.submit is called with the no_batch flag then we assume that this\n # flag will stay in effect for the duration of the RESUBMITs\n env_batch = case.get_env(\"batch\")\n if not resubmit:\n case.set_value(\"IS_FIRST_RUN\", True)\n if no_batch:\n batch_system = \"none\"\n else:\n batch_system = env_batch.get_batch_system_type()\n case.set_value(\"BATCH_SYSTEM\", batch_system)\n else:\n if env_batch.get_batch_system_type() == \"none\":\n no_batch = True\n\n # This is a resubmission, do not reinitialize test values\n case.set_value(\"IS_FIRST_RUN\", False)\n\n #Load Modules\n case.load_env()\n\n case.set_value(\"RUN_WITH_SUBMIT\",True)\n case.flush()\n\n logger.warn(\"submit_jobs %s\" % job)\n job_ids = case.submit_jobs(no_batch=no_batch, job=job, batch_args=batch_args)\n logger.info(\"Submitted job ids %s\" % job_ids)\n\ndef submit(case, job=None, resubmit=False, no_batch=False, batch_args=None):\n if case.get_value(\"TEST\"):\n caseroot = case.get_value(\"CASEROOT\")\n casebaseid = case.get_value(\"CASEBASEID\")\n # This should take care of the race condition where the submitted job\n # begins immediately and tries to set RUN phase. We proactively assume\n # a passed SUBMIT phase. If this state is already PASS, don't set it again\n # because then we'll lose RUN phase info if it's there. 
This info is important\n # for system_tests_common to know if it needs to reinitialize the test or not.\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n phase_status = ts.get_status(SUBMIT_PHASE)\n if phase_status != TEST_PASS_STATUS:\n ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)\n\n try:\n functor = lambda: _submit(case, job, resubmit, no_batch, batch_args)\n run_and_log_case_status(functor, \"case.submit\", caseroot=case.get_value(\"CASEROOT\"))\n except:\n # If something failed in the batch system, make sure to mark\n # the test as failed if we are running a test.\n if case.get_value(\"TEST\"):\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n ts.set_status(SUBMIT_PHASE, TEST_FAIL_STATUS)\n\n raise\n\ndef check_case(case, caseroot):\n check_lockedfiles(caseroot)\n create_namelists(case) # Must be called before check_all_input_data\n logger.info(\"Checking that inputdata is available as part of case submission\")\n check_all_input_data(case)\n\n expect(case.get_value(\"BUILD_COMPLETE\"), \"Build complete is \"\n \"not True please rebuild the model by calling case.build\")\n logger.info(\"Check case OK\")\n\ndef check_DA_settings(case):\n if case.get_value(\"DATA_ASSIMILATION\"):\n script = case.get_value(\"DATA_ASSIMILATION_SCRIPT\")\n cycles = case.get_value(\"DATA_ASSIMILATION_CYCLES\")\n logger.info(\"Data Assimilation enabled using script %s with %d cycles\"%(script,cycles))\n\n", "path": "scripts/lib/CIME/case_submit.py"}]} | 1,617 | 269 |
gh_patches_debug_5307 | rasdani/github-patches | git_diff | biolab__orange3-text-435 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Saving a tab file after using 'import documents widget'
<!--
This is an issue template. Please fill in the relevant details in the
sections below.
-->
##### Text version
<!-- From menu _Options→Add-ons→Orange3-Text_ or code `orangecontrib.text.version.full_version` -->
0.6.0
##### Orange version
<!-- From menu _Help→About→Version_ or code `Orange.version.full_version` -->
3.20.1 on Mac.
##### Expected behavior
"Import documents to create corpus". Send corpus to 'Save file", using *.tab format, to save the results in a folder. Using Corpus widget, can open this new tab file, with text features listed below.
##### Actual behavior
Import is successful and I can use the corpus in workflows.
But if I try to open the new tab file as a Corpus, then I get:
-Two error messages: "Corpus doesn't have any text features"
-Error message: "Can't read file ... line contains NULL byte"
##### Steps to reproduce the behavior
I have tried:
1. opening corpus in new orange session
2. saving corpus as *.csv or .xlsx
3. trying different folders for import documents
4. opening tab file first as folder and then linking it to a Corpus widget
5. inspecting *.tab file in text editor or excel for errors.
6. trying the same versions in Windows as well as Mac
##### Additional info (worksheets, data, screenshots, ...)
The idea is to avoid having to start each session with importing documents, by starting from a tab file
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `orangecontrib/text/import_documents.py`
Content:
```
1 import fnmatch
2 import logging
3 import os
4 import pathlib
5 import re
6
7 from collections import namedtuple
8 from types import SimpleNamespace as namespace
9
10 import numpy as np
11
12 import docx2txt
13 from odf.opendocument import load
14 from odf import text, teletype
15
16 from pdfminer.pdfparser import PDFParser, PDFDocument
17 from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
18 from pdfminer.converter import PDFPageAggregator
19 from pdfminer.layout import LAParams, LTTextBox, LTTextLine
20 from bs4 import BeautifulSoup
21
22 from Orange.data import DiscreteVariable, Domain, StringVariable
23 from Orange.data.io import detect_encoding
24 from Orange.util import Registry
25
26 from orangecontrib.text.corpus import Corpus
27
28
29 DefaultFormats = ("docx", "odt", "txt", "pdf", "xml")
30
31 TextData = namedtuple(
32 "Text",
33 ["name", "path", "ext", "category", "content"]
34 )
35 TextData.isvalid = property(lambda self: True)
36
37 TextDataError = namedtuple(
38 "TextDataError",
39 ["path", "error", "error_str"]
40 )
41 TextDataError.isvalid = property(lambda self: False)
42
43 log = logging.getLogger(__name__)
44
45
46 class Reader(metaclass=Registry):
47 def __init__(self, path, replace_white_space=False):
48 self.path = path
49 self.replace_white_space = replace_white_space
50 self.content = None
51
52 @classmethod
53 def get_reader(cls, path):
54 ext = pathlib.Path(path).suffix
55 for _reader in cls.registry:
56 reader = eval(_reader)
57 if ext in reader.ext:
58 return reader(path)
59 return Reader(path)
60
61 def read(self, ):
62 error = ""
63 try:
64 self.read_file()
65 except Exception as ex:
66 textdata = None
67 error = "{}".format(pathlib.Path(self.path).name)
68 log.exception('Error reading failed', exc_info=ex)
69 else:
70 textdata = self.make_text_data()
71 return textdata, error
72
73 def read_file(self):
74 raise NotImplementedError("No reader for {}".format(pathlib.Path(self.path).suffix))
75
76 def make_text_data(self):
77 name = pathlib.Path(self.path).stem
78 directory = pathlib.PurePath(self.path).parent
79 category = directory.parts[-1] or "None"
80 if self.replace_white_space:
81 self.content = re.sub('\s+', ' ', self.content)
82 return TextData(name, self.path, self.ext, category, self.content)
83
84
85 class TxtReader(Reader):
86 ext = [".txt"]
87
88 def read_file(self):
89 encoding = detect_encoding(self.path)
90 with open(self.path, 'r', encoding=encoding) as f:
91 self.content = f.read()
92
93
94 class DocxReader(Reader):
95 ext = [".docx"]
96
97 def read_file(self):
98 self.content = docx2txt.process(self.path)
99
100
101 class OdtReader(Reader):
102 ext = [".odt"]
103
104 def read_file(self):
105 odtfile = load(self.path)
106 texts = odtfile.getElementsByType(text.P)
107 self.content = " ".join(teletype.extractText(t) for t in texts)
108
109
110 class PdfReader(Reader):
111 """
112 char_margin — two text chunks whose distance is closer than this value are considered
113 contiguous and get grouped into one.
114 word_margin — it may be required to insert blank characters (spaces) as necessary if
115 the distance between two words is greater than this value, as a blank between words might
116 not be represented as a space, but indicated by the positioning of each word.
117 """
118 ext = [".pdf"]
119
120 def read_file(self):
121 with open(self.path, 'rb') as f:
122 parser = PDFParser(f)
123 doc = PDFDocument()
124 parser.set_document(doc)
125 doc.set_parser(parser)
126 doc.initialize('')
127 rsrcmgr = PDFResourceManager()
128 laparams = LAParams()
129 laparams.char_margin = 0.1
130 laparams.word_margin = 1.0
131 device = PDFPageAggregator(rsrcmgr, laparams=laparams)
132 interpreter = PDFPageInterpreter(rsrcmgr, device)
133 extracted_text = []
134
135 for page in doc.get_pages():
136 interpreter.process_page(page)
137 layout = device.get_result()
138 for lt_obj in layout:
139 if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):
140 extracted_text.append(lt_obj.get_text())
141 self.content = ' '.join(extracted_text)
142
143
144 class XmlReader(Reader):
145 ext = [".xml"]
146
147 def read_file(self):
148 encoding = detect_encoding(self.path)
149 with open(self.path, encoding=encoding, errors='ignore') as markup:
150 soup = BeautifulSoup(markup.read(), "lxml")
151 self.content = soup.get_text()
152
153
154 class ImportDocuments:
155 def __init__(self, startdir, formats=DefaultFormats, report_progress=None):
156 self.startdir = startdir
157 self.formats = formats
158 self._report_progress = report_progress
159 self.cancelled = False
160 self._text_data = []
161
162 def run(self):
163 text_data = []
164 errors = []
165 patterns = ["*.{}".format(fmt.lower()) for fmt in self.formats]
166 paths = self.scan(self.startdir, include_patterns=patterns)
167 n_paths = len(paths)
168 batch = []
169
170 for path in paths:
171 if len(batch) == 1 and self._report_progress is not None:
172 self._report_progress(
173 namespace(progress=len(text_data) / n_paths,
174 lastpath=path,
175 batch=batch))
176 batch = []
177
178 reader = Reader.get_reader(path)
179 text, error = reader.read()
180 if text is not None:
181 text_data.append(text)
182 batch.append(text_data)
183 else:
184 errors.append(error)
185
186 if self.cancelled:
187 return
188
189 self._text_data = text_data
190 return self._create_corpus(), errors
191
192 def _create_corpus(self):
193 corpus = None
194 names = ["name", "path", "content"]
195 data = []
196 category_data = []
197 text_categories = list(set(t.category for t in self._text_data))
198 values = list(set(text_categories))
199 category_var = DiscreteVariable.make("category", values=values)
200 for textdata in self._text_data:
201 data.append(
202 [textdata.name,
203 textdata.path,
204 textdata.content]
205 )
206 category_data.append(category_var.to_val(textdata.category))
207 if len(text_categories) > 1:
208 category_data = np.array(category_data)
209 else:
210 category_var = []
211 category_data = np.empty((len(data), 0))
212 domain = Domain(
213 [], category_var, [StringVariable.make(name) for name in names]
214 )
215 domain["name"].attributes["title"] = True
216 data = np.array(data, dtype=object)
217 if len(data):
218 corpus = Corpus(domain,
219 Y=category_data,
220 metas=data,
221 text_features=[domain.metas[2]])
222
223 return corpus
224
225 @staticmethod
226 def scan(topdir, include_patterns=("*",), exclude_patterns=(".*",)):
227 """
228 Yield file system paths under `topdir` that match include/exclude patterns
229
230 Parameters
231 ----------
232 topdir: str
233 Top level directory path for the search.
234 include_patterns: List[str]
235 `fnmatch.fnmatch` include patterns.
236 exclude_patterns: List[str]
237 `fnmatch.fnmatch` exclude patterns.
238
239 Returns
240 -------
241 list of paths
242 """
243 if include_patterns is None:
244 include_patterns = ["*"]
245
246 def matches_any(fname, patterns):
247 return any(fnmatch.fnmatch(fname.lower(), pattern)
248 for pattern in patterns)
249
250 paths = []
251
252 for dirpath, dirnames, filenames in os.walk(topdir):
253 for dirname in list(dirnames):
254 # do not recurse into hidden dirs
255 if fnmatch.fnmatch(dirname, ".*"):
256 dirnames.remove(dirname)
257
258 filenames = [fname for fname in filenames
259 if matches_any(fname, include_patterns)
260 and not matches_any(fname, exclude_patterns)]
261 paths = paths + [os.path.join(dirpath, fname) for fname in filenames]
262 return paths
263
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/orangecontrib/text/import_documents.py b/orangecontrib/text/import_documents.py
--- a/orangecontrib/text/import_documents.py
+++ b/orangecontrib/text/import_documents.py
@@ -138,7 +138,7 @@
for lt_obj in layout:
if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):
extracted_text.append(lt_obj.get_text())
- self.content = ' '.join(extracted_text)
+ self.content = ' '.join(extracted_text).replace('\x00', '')
class XmlReader(Reader):
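The failure reported in the issue comes from NUL (`\x00`) characters that pdfminer can leave in the extracted text: once such text is written into a *.tab file, reopening the corpus fails with the "line contains NULL byte" error quoted in the issue. The patch strips those characters at extraction time. A small illustration with made-up strings:

```python
# Hypothetical extracted text containing a NUL byte, as pdfminer may produce.
raw = "first page\x00second page"
clean = raw.replace("\x00", "")  # same sanitisation as the patch above
assert "\x00" not in clean
```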
| {"golden_diff": "diff --git a/orangecontrib/text/import_documents.py b/orangecontrib/text/import_documents.py\n--- a/orangecontrib/text/import_documents.py\n+++ b/orangecontrib/text/import_documents.py\n@@ -138,7 +138,7 @@\n for lt_obj in layout:\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\n extracted_text.append(lt_obj.get_text())\n- self.content = ' '.join(extracted_text)\n+ self.content = ' '.join(extracted_text).replace('\\x00', '')\n \n \n class XmlReader(Reader):\n", "issue": "Saving a tab file after using 'import documents widget'\n<!--\r\nThis is an issue template. Please fill in the relevant details in the\r\nsections below.\r\n-->\r\n\r\n##### Text version\r\n<!-- From menu _Options\u2192Add-ons\u2192Orange3-Text_ or code `orangecontrib.text.version.full_version` -->\r\n0.6.0\r\n\r\n\r\n##### Orange version\r\n<!-- From menu _Help\u2192About\u2192Version_ or code `Orange.version.full_version` -->\r\n3.20.1 on Mac. \r\n\r\n##### Expected behavior\r\n\"Import documents to create corpus\". Send corpus to 'Save file\", using *.tab format, to save the results in a folder. Using Corpus widget, can open this new tab file, with text features listed below. \r\n\r\n\r\n##### Actual behavior\r\nImport is successful and I can use the corpus in workflows.\r\nBut if I try to open the new tab file as a Corpus, then I get:\r\n-Two error messages: \"Corpus doesn't have any text features\"\r\n-Error message: \"Can't read file ... line contains NULL byte\"\r\n\r\n\r\n\r\n##### Steps to reproduce the behavior\r\nI have tried:\r\n1. opening corpus in new orange session\r\n2. saving corpus as *.csv or .xlsx\r\n3. trying different folders for import documents\r\n4. opening tab file first as folder and then linking it to a Corpus widget\r\n5. inspecting *.tab file in text editor or excel for errors.\r\n6. 
trying the same versions in Windows as well as Mac\r\n\r\n\r\n##### Additional info (worksheets, data, screenshots, ...)\r\nThe idea is to avoid having to start each session with importing documents, by starting from a tab file\r\n\n", "before_files": [{"content": "import fnmatch\nimport logging\nimport os\nimport pathlib\nimport re\n\nfrom collections import namedtuple\nfrom types import SimpleNamespace as namespace\n\nimport numpy as np\n\nimport docx2txt\nfrom odf.opendocument import load\nfrom odf import text, teletype\n\nfrom pdfminer.pdfparser import PDFParser, PDFDocument\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.converter import PDFPageAggregator\nfrom pdfminer.layout import LAParams, LTTextBox, LTTextLine\nfrom bs4 import BeautifulSoup\n\nfrom Orange.data import DiscreteVariable, Domain, StringVariable\nfrom Orange.data.io import detect_encoding\nfrom Orange.util import Registry\n\nfrom orangecontrib.text.corpus import Corpus\n\n\nDefaultFormats = (\"docx\", \"odt\", \"txt\", \"pdf\", \"xml\")\n\nTextData = namedtuple(\n \"Text\",\n [\"name\", \"path\", \"ext\", \"category\", \"content\"]\n)\nTextData.isvalid = property(lambda self: True)\n\nTextDataError = namedtuple(\n \"TextDataError\",\n [\"path\", \"error\", \"error_str\"]\n)\nTextDataError.isvalid = property(lambda self: False)\n\nlog = logging.getLogger(__name__)\n\n\nclass Reader(metaclass=Registry):\n def __init__(self, path, replace_white_space=False):\n self.path = path\n self.replace_white_space = replace_white_space\n self.content = None\n\n @classmethod\n def get_reader(cls, path):\n ext = pathlib.Path(path).suffix\n for _reader in cls.registry:\n reader = eval(_reader)\n if ext in reader.ext:\n return reader(path)\n return Reader(path)\n\n def read(self, ):\n error = \"\"\n try:\n self.read_file()\n except Exception as ex:\n textdata = None\n error = \"{}\".format(pathlib.Path(self.path).name)\n log.exception('Error reading failed', exc_info=ex)\n else:\n textdata = self.make_text_data()\n return textdata, error\n\n def read_file(self):\n raise NotImplementedError(\"No reader for {}\".format(pathlib.Path(self.path).suffix))\n\n def make_text_data(self):\n name = pathlib.Path(self.path).stem\n directory = pathlib.PurePath(self.path).parent\n category = directory.parts[-1] or \"None\"\n if self.replace_white_space:\n self.content = re.sub('\\s+', ' ', self.content)\n return TextData(name, self.path, self.ext, category, self.content)\n\n\nclass TxtReader(Reader):\n ext = [\".txt\"]\n\n def read_file(self):\n encoding = detect_encoding(self.path)\n with open(self.path, 'r', encoding=encoding) as f:\n self.content = f.read()\n\n\nclass DocxReader(Reader):\n ext = [\".docx\"]\n\n def read_file(self):\n self.content = docx2txt.process(self.path)\n\n\nclass OdtReader(Reader):\n ext = [\".odt\"]\n\n def read_file(self):\n odtfile = load(self.path)\n texts = odtfile.getElementsByType(text.P)\n self.content = \" \".join(teletype.extractText(t) for t in texts)\n\n\nclass PdfReader(Reader):\n \"\"\"\n char_margin \u2014 two text chunks whose distance is closer than this value are considered\n contiguous and get grouped into one.\n word_margin \u2014 it may be required to insert blank characters (spaces) as necessary if\n the distance between two words is greater than this value, as a blank between words might\n not be represented as a space, but indicated by the positioning of each word.\n \"\"\"\n ext = [\".pdf\"]\n\n def read_file(self):\n with open(self.path, 'rb') as f:\n parser = 
PDFParser(f)\n doc = PDFDocument()\n parser.set_document(doc)\n doc.set_parser(parser)\n doc.initialize('')\n rsrcmgr = PDFResourceManager()\n laparams = LAParams()\n laparams.char_margin = 0.1\n laparams.word_margin = 1.0\n device = PDFPageAggregator(rsrcmgr, laparams=laparams)\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n extracted_text = []\n\n for page in doc.get_pages():\n interpreter.process_page(page)\n layout = device.get_result()\n for lt_obj in layout:\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\n extracted_text.append(lt_obj.get_text())\n self.content = ' '.join(extracted_text)\n\n\nclass XmlReader(Reader):\n ext = [\".xml\"]\n\n def read_file(self):\n encoding = detect_encoding(self.path)\n with open(self.path, encoding=encoding, errors='ignore') as markup:\n soup = BeautifulSoup(markup.read(), \"lxml\")\n self.content = soup.get_text()\n\n\nclass ImportDocuments:\n def __init__(self, startdir, formats=DefaultFormats, report_progress=None):\n self.startdir = startdir\n self.formats = formats\n self._report_progress = report_progress\n self.cancelled = False\n self._text_data = []\n\n def run(self):\n text_data = []\n errors = []\n patterns = [\"*.{}\".format(fmt.lower()) for fmt in self.formats]\n paths = self.scan(self.startdir, include_patterns=patterns)\n n_paths = len(paths)\n batch = []\n\n for path in paths:\n if len(batch) == 1 and self._report_progress is not None:\n self._report_progress(\n namespace(progress=len(text_data) / n_paths,\n lastpath=path,\n batch=batch))\n batch = []\n\n reader = Reader.get_reader(path)\n text, error = reader.read()\n if text is not None:\n text_data.append(text)\n batch.append(text_data)\n else:\n errors.append(error)\n\n if self.cancelled:\n return\n\n self._text_data = text_data\n return self._create_corpus(), errors\n\n def _create_corpus(self):\n corpus = None\n names = [\"name\", \"path\", \"content\"]\n data = []\n category_data = []\n text_categories = list(set(t.category for t in self._text_data))\n values = list(set(text_categories))\n category_var = DiscreteVariable.make(\"category\", values=values)\n for textdata in self._text_data:\n data.append(\n [textdata.name,\n textdata.path,\n textdata.content]\n )\n category_data.append(category_var.to_val(textdata.category))\n if len(text_categories) > 1:\n category_data = np.array(category_data)\n else:\n category_var = []\n category_data = np.empty((len(data), 0))\n domain = Domain(\n [], category_var, [StringVariable.make(name) for name in names]\n )\n domain[\"name\"].attributes[\"title\"] = True\n data = np.array(data, dtype=object)\n if len(data):\n corpus = Corpus(domain,\n Y=category_data,\n metas=data,\n text_features=[domain.metas[2]])\n\n return corpus\n\n @staticmethod\n def scan(topdir, include_patterns=(\"*\",), exclude_patterns=(\".*\",)):\n \"\"\"\n Yield file system paths under `topdir` that match include/exclude patterns\n\n Parameters\n ----------\n topdir: str\n Top level directory path for the search.\n include_patterns: List[str]\n `fnmatch.fnmatch` include patterns.\n exclude_patterns: List[str]\n `fnmatch.fnmatch` exclude patterns.\n\n Returns\n -------\n list of paths\n \"\"\"\n if include_patterns is None:\n include_patterns = [\"*\"]\n\n def matches_any(fname, patterns):\n return any(fnmatch.fnmatch(fname.lower(), pattern)\n for pattern in patterns)\n\n paths = []\n\n for dirpath, dirnames, filenames in os.walk(topdir):\n for dirname in list(dirnames):\n # do not recurse into hidden dirs\n if fnmatch.fnmatch(dirname, 
\".*\"):\n dirnames.remove(dirname)\n\n filenames = [fname for fname in filenames\n if matches_any(fname, include_patterns)\n and not matches_any(fname, exclude_patterns)]\n paths = paths + [os.path.join(dirpath, fname) for fname in filenames]\n return paths\n", "path": "orangecontrib/text/import_documents.py"}], "after_files": [{"content": "import fnmatch\nimport logging\nimport os\nimport pathlib\nimport re\n\nfrom collections import namedtuple\nfrom types import SimpleNamespace as namespace\n\nimport numpy as np\n\nimport docx2txt\nfrom odf.opendocument import load\nfrom odf import text, teletype\n\nfrom pdfminer.pdfparser import PDFParser, PDFDocument\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.converter import PDFPageAggregator\nfrom pdfminer.layout import LAParams, LTTextBox, LTTextLine\nfrom bs4 import BeautifulSoup\n\nfrom Orange.data import DiscreteVariable, Domain, StringVariable\nfrom Orange.data.io import detect_encoding\nfrom Orange.util import Registry\n\nfrom orangecontrib.text.corpus import Corpus\n\n\nDefaultFormats = (\"docx\", \"odt\", \"txt\", \"pdf\", \"xml\")\n\nTextData = namedtuple(\n \"Text\",\n [\"name\", \"path\", \"ext\", \"category\", \"content\"]\n)\nTextData.isvalid = property(lambda self: True)\n\nTextDataError = namedtuple(\n \"TextDataError\",\n [\"path\", \"error\", \"error_str\"]\n)\nTextDataError.isvalid = property(lambda self: False)\n\nlog = logging.getLogger(__name__)\n\n\nclass Reader(metaclass=Registry):\n def __init__(self, path, replace_white_space=False):\n self.path = path\n self.replace_white_space = replace_white_space\n self.content = None\n\n @classmethod\n def get_reader(cls, path):\n ext = pathlib.Path(path).suffix\n for _reader in cls.registry:\n reader = eval(_reader)\n if ext in reader.ext:\n return reader(path)\n return Reader(path)\n\n def read(self, ):\n error = \"\"\n try:\n self.read_file()\n except Exception as ex:\n textdata = None\n error = \"{}\".format(pathlib.Path(self.path).name)\n log.exception('Error reading failed', exc_info=ex)\n else:\n textdata = self.make_text_data()\n return textdata, error\n\n def read_file(self):\n raise NotImplementedError(\"No reader for {}\".format(pathlib.Path(self.path).suffix))\n\n def make_text_data(self):\n name = pathlib.Path(self.path).stem\n directory = pathlib.PurePath(self.path).parent\n category = directory.parts[-1] or \"None\"\n if self.replace_white_space:\n self.content = re.sub('\\s+', ' ', self.content)\n return TextData(name, self.path, self.ext, category, self.content)\n\n\nclass TxtReader(Reader):\n ext = [\".txt\"]\n\n def read_file(self):\n encoding = detect_encoding(self.path)\n with open(self.path, 'r', encoding=encoding) as f:\n self.content = f.read()\n\n\nclass DocxReader(Reader):\n ext = [\".docx\"]\n\n def read_file(self):\n self.content = docx2txt.process(self.path)\n\n\nclass OdtReader(Reader):\n ext = [\".odt\"]\n\n def read_file(self):\n odtfile = load(self.path)\n texts = odtfile.getElementsByType(text.P)\n self.content = \" \".join(teletype.extractText(t) for t in texts)\n\n\nclass PdfReader(Reader):\n \"\"\"\n char_margin \u2014 two text chunks whose distance is closer than this value are considered\n contiguous and get grouped into one.\n word_margin \u2014 it may be required to insert blank characters (spaces) as necessary if\n the distance between two words is greater than this value, as a blank between words might\n not be represented as a space, but indicated by the positioning of each word.\n \"\"\"\n ext = 
[\".pdf\"]\n\n def read_file(self):\n with open(self.path, 'rb') as f:\n parser = PDFParser(f)\n doc = PDFDocument()\n parser.set_document(doc)\n doc.set_parser(parser)\n doc.initialize('')\n rsrcmgr = PDFResourceManager()\n laparams = LAParams()\n laparams.char_margin = 0.1\n laparams.word_margin = 1.0\n device = PDFPageAggregator(rsrcmgr, laparams=laparams)\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n extracted_text = []\n\n for page in doc.get_pages():\n interpreter.process_page(page)\n layout = device.get_result()\n for lt_obj in layout:\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\n extracted_text.append(lt_obj.get_text())\n self.content = ' '.join(extracted_text).replace('\\x00', '')\n\n\nclass XmlReader(Reader):\n ext = [\".xml\"]\n\n def read_file(self):\n encoding = detect_encoding(self.path)\n with open(self.path, encoding=encoding, errors='ignore') as markup:\n soup = BeautifulSoup(markup.read(), \"lxml\")\n self.content = soup.get_text()\n\n\nclass ImportDocuments:\n def __init__(self, startdir, formats=DefaultFormats, report_progress=None):\n self.startdir = startdir\n self.formats = formats\n self._report_progress = report_progress\n self.cancelled = False\n self._text_data = []\n\n def run(self):\n text_data = []\n errors = []\n patterns = [\"*.{}\".format(fmt.lower()) for fmt in self.formats]\n paths = self.scan(self.startdir, include_patterns=patterns)\n n_paths = len(paths)\n batch = []\n\n for path in paths:\n if len(batch) == 1 and self._report_progress is not None:\n self._report_progress(\n namespace(progress=len(text_data) / n_paths,\n lastpath=path,\n batch=batch))\n batch = []\n\n reader = Reader.get_reader(path)\n text, error = reader.read()\n if text is not None:\n text_data.append(text)\n batch.append(text_data)\n else:\n errors.append(error)\n\n if self.cancelled:\n return\n\n self._text_data = text_data\n return self._create_corpus(), errors\n\n def _create_corpus(self):\n corpus = None\n names = [\"name\", \"path\", \"content\"]\n data = []\n category_data = []\n text_categories = list(set(t.category for t in self._text_data))\n values = list(set(text_categories))\n category_var = DiscreteVariable.make(\"category\", values=values)\n for textdata in self._text_data:\n data.append(\n [textdata.name,\n textdata.path,\n textdata.content]\n )\n category_data.append(category_var.to_val(textdata.category))\n if len(text_categories) > 1:\n category_data = np.array(category_data)\n else:\n category_var = []\n category_data = np.empty((len(data), 0))\n domain = Domain(\n [], category_var, [StringVariable.make(name) for name in names]\n )\n domain[\"name\"].attributes[\"title\"] = True\n data = np.array(data, dtype=object)\n if len(data):\n corpus = Corpus(domain,\n Y=category_data,\n metas=data,\n text_features=[domain.metas[2]])\n\n return corpus\n\n @staticmethod\n def scan(topdir, include_patterns=(\"*\",), exclude_patterns=(\".*\",)):\n \"\"\"\n Yield file system paths under `topdir` that match include/exclude patterns\n\n Parameters\n ----------\n topdir: str\n Top level directory path for the search.\n include_patterns: List[str]\n `fnmatch.fnmatch` include patterns.\n exclude_patterns: List[str]\n `fnmatch.fnmatch` exclude patterns.\n\n Returns\n -------\n list of paths\n \"\"\"\n if include_patterns is None:\n include_patterns = [\"*\"]\n\n def matches_any(fname, patterns):\n return any(fnmatch.fnmatch(fname.lower(), pattern)\n for pattern in patterns)\n\n paths = []\n\n for dirpath, dirnames, filenames in os.walk(topdir):\n 
for dirname in list(dirnames):\n # do not recurse into hidden dirs\n if fnmatch.fnmatch(dirname, \".*\"):\n dirnames.remove(dirname)\n\n filenames = [fname for fname in filenames\n if matches_any(fname, include_patterns)\n and not matches_any(fname, exclude_patterns)]\n paths = paths + [os.path.join(dirpath, fname) for fname in filenames]\n return paths\n", "path": "orangecontrib/text/import_documents.py"}]} | 3,077 | 129 |
gh_patches_debug_8923 | rasdani/github-patches | git_diff | feast-dev__feast-1968 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Redis Cluster materialization error
## Expected Behavior
Materialization for Redis Cluster works
## Current Behavior
During materialization Feast uses the Redis client instead of the RedisCluster client
## Steps to reproduce
configure redis cluster
```
project: my_feature_repo
registry: data/registry.db
provider: local
online_store:
type: redis
redis_type: redis_cluster
connection_string: "redis1:6379,redis2:6379,ssl=true,password=my_password"
```
try to materialize
### Specifications
- Version: 0.14.0
- Platform: python 3.7
- Subsystem: linux (anaconda3 docker image)
## Possible Solution
correct _get_client method in redis.py l. 124
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/infra/online_stores/redis.py`
Content:
```
1 # Copyright 2021 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import json
15 import logging
16 from datetime import datetime
17 from enum import Enum
18 from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
19
20 from google.protobuf.timestamp_pb2 import Timestamp
21 from pydantic import StrictStr
22 from pydantic.typing import Literal
23
24 from feast import Entity, FeatureTable, FeatureView, RepoConfig, utils
25 from feast.infra.online_stores.helpers import _mmh3, _redis_key, _redis_key_prefix
26 from feast.infra.online_stores.online_store import OnlineStore
27 from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
28 from feast.protos.feast.types.Value_pb2 import Value as ValueProto
29 from feast.repo_config import FeastConfigBaseModel
30
31 try:
32 from redis import Redis
33 from rediscluster import RedisCluster
34 except ImportError as e:
35 from feast.errors import FeastExtrasDependencyImportError
36
37 raise FeastExtrasDependencyImportError("redis", str(e))
38
39 EX_SECONDS = 253402300799
40 logger = logging.getLogger(__name__)
41
42
43 class RedisType(str, Enum):
44 redis = "redis"
45 redis_cluster = "redis_cluster"
46
47
48 class RedisOnlineStoreConfig(FeastConfigBaseModel):
49 """Online store config for Redis store"""
50
51 type: Literal["redis"] = "redis"
52 """Online store type selector"""
53
54 redis_type: RedisType = RedisType.redis
55 """Redis type: redis or redis_cluster"""
56
57 connection_string: StrictStr = "localhost:6379"
58 """Connection string containing the host, port, and configuration parameters for Redis
59 format: host:port,parameter1,parameter2 eg. redis:6379,db=0 """
60
61
62 class RedisOnlineStore(OnlineStore):
63 _client: Optional[Union[Redis, RedisCluster]] = None
64
65 def delete_table_values(
66 self, config: RepoConfig, table: Union[FeatureTable, FeatureView]
67 ):
68 client = self._get_client(config.online_store)
69 deleted_count = 0
70 pipeline = client.pipeline()
71 prefix = _redis_key_prefix(table.entities)
72
73 for _k in client.scan_iter(
74 b"".join([prefix, b"*", config.project.encode("utf8")])
75 ):
76 pipeline.delete(_k)
77 deleted_count += 1
78 pipeline.execute()
79
80 logger.debug(f"Deleted {deleted_count} keys for {table.name}")
81
82 def update(
83 self,
84 config: RepoConfig,
85 tables_to_delete: Sequence[Union[FeatureTable, FeatureView]],
86 tables_to_keep: Sequence[Union[FeatureTable, FeatureView]],
87 entities_to_delete: Sequence[Entity],
88 entities_to_keep: Sequence[Entity],
89 partial: bool,
90 ):
91 """
92 We delete the keys in redis for tables/views being removed.
93 """
94 for table in tables_to_delete:
95 self.delete_table_values(config, table)
96
97 def teardown(
98 self,
99 config: RepoConfig,
100 tables: Sequence[Union[FeatureTable, FeatureView]],
101 entities: Sequence[Entity],
102 ):
103 """
104 We delete the keys in redis for tables/views being removed.
105 """
106 for table in tables:
107 self.delete_table_values(config, table)
108
109 @staticmethod
110 def _parse_connection_string(connection_string: str):
111 """
112 Reads Redis connections string using format
113 for RedisCluster:
114 redis1:6379,redis2:6379,decode_responses=true,skip_full_coverage_check=true,ssl=true,password=...
115 for Redis:
116 redis_master:6379,db=0,ssl=true,password=...
117 """
118 startup_nodes = [
119 dict(zip(["host", "port"], c.split(":")))
120 for c in connection_string.split(",")
121 if "=" not in c
122 ]
123 params = {}
124 for c in connection_string.split(","):
125 if "=" in c:
126 kv = c.split("=", 1)
127 try:
128 kv[1] = json.loads(kv[1])
129 except json.JSONDecodeError:
130 ...
131
132 it = iter(kv)
133 params.update(dict(zip(it, it)))
134
135 return startup_nodes, params
136
137 def _get_client(self, online_store_config: RedisOnlineStoreConfig):
138 """
139 Creates the Redis client RedisCluster or Redis depending on configuration
140 """
141 if not self._client:
142 startup_nodes, kwargs = self._parse_connection_string(
143 online_store_config.connection_string
144 )
145 if online_store_config.type == RedisType.redis_cluster:
146 kwargs["startup_nodes"] = startup_nodes
147 self._client = RedisCluster(**kwargs)
148 else:
149 kwargs["host"] = startup_nodes[0]["host"]
150 kwargs["port"] = startup_nodes[0]["port"]
151 self._client = Redis(**kwargs)
152 return self._client
153
154 def online_write_batch(
155 self,
156 config: RepoConfig,
157 table: Union[FeatureTable, FeatureView],
158 data: List[
159 Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]
160 ],
161 progress: Optional[Callable[[int], Any]],
162 ) -> None:
163 online_store_config = config.online_store
164 assert isinstance(online_store_config, RedisOnlineStoreConfig)
165
166 client = self._get_client(online_store_config)
167 project = config.project
168
169 entity_hset = {}
170 feature_view = table.name
171
172 ex = Timestamp()
173 ex.seconds = EX_SECONDS
174 ex_str = ex.SerializeToString()
175 for entity_key, values, timestamp, created_ts in data:
176 redis_key_bin = _redis_key(project, entity_key)
177 ts = Timestamp()
178 ts.seconds = int(utils.make_tzaware(timestamp).timestamp())
179 entity_hset[f"_ts:{feature_view}"] = ts.SerializeToString()
180 entity_hset[f"_ex:{feature_view}"] = ex_str
181
182 for feature_name, val in values.items():
183 f_key = _mmh3(f"{feature_view}:{feature_name}")
184 entity_hset[f_key] = val.SerializeToString()
185
186 client.hset(redis_key_bin, mapping=entity_hset)
187 if progress:
188 progress(1)
189
190 def online_read(
191 self,
192 config: RepoConfig,
193 table: Union[FeatureTable, FeatureView],
194 entity_keys: List[EntityKeyProto],
195 requested_features: Optional[List[str]] = None,
196 ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:
197 online_store_config = config.online_store
198 assert isinstance(online_store_config, RedisOnlineStoreConfig)
199
200 client = self._get_client(online_store_config)
201 feature_view = table.name
202 project = config.project
203
204 result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []
205
206 if not requested_features:
207 requested_features = [f.name for f in table.features]
208
209 for entity_key in entity_keys:
210 redis_key_bin = _redis_key(project, entity_key)
211 hset_keys = [_mmh3(f"{feature_view}:{k}") for k in requested_features]
212 ts_key = f"_ts:{feature_view}"
213 hset_keys.append(ts_key)
214 values = client.hmget(redis_key_bin, hset_keys)
215 requested_features.append(ts_key)
216 res_val = dict(zip(requested_features, values))
217
218 res_ts = Timestamp()
219 ts_val = res_val.pop(ts_key)
220 if ts_val:
221 res_ts.ParseFromString(ts_val)
222
223 res = {}
224 for feature_name, val_bin in res_val.items():
225 val = ValueProto()
226 if val_bin:
227 val.ParseFromString(val_bin)
228 res[feature_name] = val
229
230 if not res:
231 result.append((None, None))
232 else:
233 timestamp = datetime.fromtimestamp(res_ts.seconds)
234 result.append((timestamp, res))
235 return result
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/feast/infra/online_stores/redis.py b/sdk/python/feast/infra/online_stores/redis.py
--- a/sdk/python/feast/infra/online_stores/redis.py
+++ b/sdk/python/feast/infra/online_stores/redis.py
@@ -142,7 +142,7 @@
startup_nodes, kwargs = self._parse_connection_string(
online_store_config.connection_string
)
- if online_store_config.type == RedisType.redis_cluster:
+ if online_store_config.redis_type == RedisType.redis_cluster:
kwargs["startup_nodes"] = startup_nodes
self._client = RedisCluster(**kwargs)
else:
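The root cause is visible in `RedisOnlineStoreConfig`: the cluster switch is stored in `redis_type`, while `type` is fixed to the literal "redis", so the original comparison against `RedisType.redis_cluster` could never be true and a plain `Redis` client was always built. A minimal sketch of the corrected selection logic:

```python
# The cluster flag lives in `redis_type`; `type` is always the literal "redis".
if online_store_config.redis_type == RedisType.redis_cluster:
    kwargs["startup_nodes"] = startup_nodes
    client = RedisCluster(**kwargs)
else:
    kwargs["host"] = startup_nodes[0]["host"]
    kwargs["port"] = startup_nodes[0]["port"]
    client = Redis(**kwargs)
```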
| {"golden_diff": "diff --git a/sdk/python/feast/infra/online_stores/redis.py b/sdk/python/feast/infra/online_stores/redis.py\n--- a/sdk/python/feast/infra/online_stores/redis.py\n+++ b/sdk/python/feast/infra/online_stores/redis.py\n@@ -142,7 +142,7 @@\n startup_nodes, kwargs = self._parse_connection_string(\n online_store_config.connection_string\n )\n- if online_store_config.type == RedisType.redis_cluster:\n+ if online_store_config.redis_type == RedisType.redis_cluster:\n kwargs[\"startup_nodes\"] = startup_nodes\n self._client = RedisCluster(**kwargs)\n else:\n", "issue": "Redis Cluster materialization error\n## Expected Behavior \r\nMaterialization for Redis Cluster works \r\n\r\n## Current Behavior\r\nDuring materialization Feast use Redis client instead of RedisCluster client\r\n\r\n## Steps to reproduce\r\nconfigure redis cluster \r\n```\r\nproject: my_feature_repo\r\nregistry: data/registry.db\r\nprovider: local\r\nonline_store:\r\n type: redis\r\n redis_type: redis_cluster\r\n connection_string: \"redis1:6379,redis2:6379,ssl=true,password=my_password\"\r\n```\r\ntry to materialize\r\n\r\n### Specifications\r\n\r\n- Version: 0.14.0\r\n- Platform: python 3.7\r\n- Subsystem: linux (anaconda3 docker image)\r\n\r\n## Possible Solution\r\ncorrect _get_client method in redis.py l. 124\r\n\n", "before_files": [{"content": "# Copyright 2021 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\nimport logging\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union\n\nfrom google.protobuf.timestamp_pb2 import Timestamp\nfrom pydantic import StrictStr\nfrom pydantic.typing import Literal\n\nfrom feast import Entity, FeatureTable, FeatureView, RepoConfig, utils\nfrom feast.infra.online_stores.helpers import _mmh3, _redis_key, _redis_key_prefix\nfrom feast.infra.online_stores.online_store import OnlineStore\nfrom feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto\nfrom feast.protos.feast.types.Value_pb2 import Value as ValueProto\nfrom feast.repo_config import FeastConfigBaseModel\n\ntry:\n from redis import Redis\n from rediscluster import RedisCluster\nexcept ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"redis\", str(e))\n\nEX_SECONDS = 253402300799\nlogger = logging.getLogger(__name__)\n\n\nclass RedisType(str, Enum):\n redis = \"redis\"\n redis_cluster = \"redis_cluster\"\n\n\nclass RedisOnlineStoreConfig(FeastConfigBaseModel):\n \"\"\"Online store config for Redis store\"\"\"\n\n type: Literal[\"redis\"] = \"redis\"\n \"\"\"Online store type selector\"\"\"\n\n redis_type: RedisType = RedisType.redis\n \"\"\"Redis type: redis or redis_cluster\"\"\"\n\n connection_string: StrictStr = \"localhost:6379\"\n \"\"\"Connection string containing the host, port, and configuration parameters for Redis\n format: host:port,parameter1,parameter2 eg. 
redis:6379,db=0 \"\"\"\n\n\nclass RedisOnlineStore(OnlineStore):\n _client: Optional[Union[Redis, RedisCluster]] = None\n\n def delete_table_values(\n self, config: RepoConfig, table: Union[FeatureTable, FeatureView]\n ):\n client = self._get_client(config.online_store)\n deleted_count = 0\n pipeline = client.pipeline()\n prefix = _redis_key_prefix(table.entities)\n\n for _k in client.scan_iter(\n b\"\".join([prefix, b\"*\", config.project.encode(\"utf8\")])\n ):\n pipeline.delete(_k)\n deleted_count += 1\n pipeline.execute()\n\n logger.debug(f\"Deleted {deleted_count} keys for {table.name}\")\n\n def update(\n self,\n config: RepoConfig,\n tables_to_delete: Sequence[Union[FeatureTable, FeatureView]],\n tables_to_keep: Sequence[Union[FeatureTable, FeatureView]],\n entities_to_delete: Sequence[Entity],\n entities_to_keep: Sequence[Entity],\n partial: bool,\n ):\n \"\"\"\n We delete the keys in redis for tables/views being removed.\n \"\"\"\n for table in tables_to_delete:\n self.delete_table_values(config, table)\n\n def teardown(\n self,\n config: RepoConfig,\n tables: Sequence[Union[FeatureTable, FeatureView]],\n entities: Sequence[Entity],\n ):\n \"\"\"\n We delete the keys in redis for tables/views being removed.\n \"\"\"\n for table in tables:\n self.delete_table_values(config, table)\n\n @staticmethod\n def _parse_connection_string(connection_string: str):\n \"\"\"\n Reads Redis connections string using format\n for RedisCluster:\n redis1:6379,redis2:6379,decode_responses=true,skip_full_coverage_check=true,ssl=true,password=...\n for Redis:\n redis_master:6379,db=0,ssl=true,password=...\n \"\"\"\n startup_nodes = [\n dict(zip([\"host\", \"port\"], c.split(\":\")))\n for c in connection_string.split(\",\")\n if \"=\" not in c\n ]\n params = {}\n for c in connection_string.split(\",\"):\n if \"=\" in c:\n kv = c.split(\"=\", 1)\n try:\n kv[1] = json.loads(kv[1])\n except json.JSONDecodeError:\n ...\n\n it = iter(kv)\n params.update(dict(zip(it, it)))\n\n return startup_nodes, params\n\n def _get_client(self, online_store_config: RedisOnlineStoreConfig):\n \"\"\"\n Creates the Redis client RedisCluster or Redis depending on configuration\n \"\"\"\n if not self._client:\n startup_nodes, kwargs = self._parse_connection_string(\n online_store_config.connection_string\n )\n if online_store_config.type == RedisType.redis_cluster:\n kwargs[\"startup_nodes\"] = startup_nodes\n self._client = RedisCluster(**kwargs)\n else:\n kwargs[\"host\"] = startup_nodes[0][\"host\"]\n kwargs[\"port\"] = startup_nodes[0][\"port\"]\n self._client = Redis(**kwargs)\n return self._client\n\n def online_write_batch(\n self,\n config: RepoConfig,\n table: Union[FeatureTable, FeatureView],\n data: List[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n online_store_config = config.online_store\n assert isinstance(online_store_config, RedisOnlineStoreConfig)\n\n client = self._get_client(online_store_config)\n project = config.project\n\n entity_hset = {}\n feature_view = table.name\n\n ex = Timestamp()\n ex.seconds = EX_SECONDS\n ex_str = ex.SerializeToString()\n for entity_key, values, timestamp, created_ts in data:\n redis_key_bin = _redis_key(project, entity_key)\n ts = Timestamp()\n ts.seconds = int(utils.make_tzaware(timestamp).timestamp())\n entity_hset[f\"_ts:{feature_view}\"] = ts.SerializeToString()\n entity_hset[f\"_ex:{feature_view}\"] = ex_str\n\n for feature_name, val in values.items():\n f_key = 
_mmh3(f\"{feature_view}:{feature_name}\")\n entity_hset[f_key] = val.SerializeToString()\n\n client.hset(redis_key_bin, mapping=entity_hset)\n if progress:\n progress(1)\n\n def online_read(\n self,\n config: RepoConfig,\n table: Union[FeatureTable, FeatureView],\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n online_store_config = config.online_store\n assert isinstance(online_store_config, RedisOnlineStoreConfig)\n\n client = self._get_client(online_store_config)\n feature_view = table.name\n project = config.project\n\n result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []\n\n if not requested_features:\n requested_features = [f.name for f in table.features]\n\n for entity_key in entity_keys:\n redis_key_bin = _redis_key(project, entity_key)\n hset_keys = [_mmh3(f\"{feature_view}:{k}\") for k in requested_features]\n ts_key = f\"_ts:{feature_view}\"\n hset_keys.append(ts_key)\n values = client.hmget(redis_key_bin, hset_keys)\n requested_features.append(ts_key)\n res_val = dict(zip(requested_features, values))\n\n res_ts = Timestamp()\n ts_val = res_val.pop(ts_key)\n if ts_val:\n res_ts.ParseFromString(ts_val)\n\n res = {}\n for feature_name, val_bin in res_val.items():\n val = ValueProto()\n if val_bin:\n val.ParseFromString(val_bin)\n res[feature_name] = val\n\n if not res:\n result.append((None, None))\n else:\n timestamp = datetime.fromtimestamp(res_ts.seconds)\n result.append((timestamp, res))\n return result\n", "path": "sdk/python/feast/infra/online_stores/redis.py"}], "after_files": [{"content": "# Copyright 2021 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\nimport logging\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union\n\nfrom google.protobuf.timestamp_pb2 import Timestamp\nfrom pydantic import StrictStr\nfrom pydantic.typing import Literal\n\nfrom feast import Entity, FeatureTable, FeatureView, RepoConfig, utils\nfrom feast.infra.online_stores.helpers import _mmh3, _redis_key, _redis_key_prefix\nfrom feast.infra.online_stores.online_store import OnlineStore\nfrom feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto\nfrom feast.protos.feast.types.Value_pb2 import Value as ValueProto\nfrom feast.repo_config import FeastConfigBaseModel\n\ntry:\n from redis import Redis\n from rediscluster import RedisCluster\nexcept ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"redis\", str(e))\n\nEX_SECONDS = 253402300799\nlogger = logging.getLogger(__name__)\n\n\nclass RedisType(str, Enum):\n redis = \"redis\"\n redis_cluster = \"redis_cluster\"\n\n\nclass RedisOnlineStoreConfig(FeastConfigBaseModel):\n \"\"\"Online store config for Redis store\"\"\"\n\n type: Literal[\"redis\"] = \"redis\"\n \"\"\"Online store type selector\"\"\"\n\n 
redis_type: RedisType = RedisType.redis\n \"\"\"Redis type: redis or redis_cluster\"\"\"\n\n connection_string: StrictStr = \"localhost:6379\"\n \"\"\"Connection string containing the host, port, and configuration parameters for Redis\n format: host:port,parameter1,parameter2 eg. redis:6379,db=0 \"\"\"\n\n\nclass RedisOnlineStore(OnlineStore):\n _client: Optional[Union[Redis, RedisCluster]] = None\n\n def delete_table_values(\n self, config: RepoConfig, table: Union[FeatureTable, FeatureView]\n ):\n client = self._get_client(config.online_store)\n deleted_count = 0\n pipeline = client.pipeline()\n prefix = _redis_key_prefix(table.entities)\n\n for _k in client.scan_iter(\n b\"\".join([prefix, b\"*\", config.project.encode(\"utf8\")])\n ):\n pipeline.delete(_k)\n deleted_count += 1\n pipeline.execute()\n\n logger.debug(f\"Deleted {deleted_count} keys for {table.name}\")\n\n def update(\n self,\n config: RepoConfig,\n tables_to_delete: Sequence[Union[FeatureTable, FeatureView]],\n tables_to_keep: Sequence[Union[FeatureTable, FeatureView]],\n entities_to_delete: Sequence[Entity],\n entities_to_keep: Sequence[Entity],\n partial: bool,\n ):\n \"\"\"\n We delete the keys in redis for tables/views being removed.\n \"\"\"\n for table in tables_to_delete:\n self.delete_table_values(config, table)\n\n def teardown(\n self,\n config: RepoConfig,\n tables: Sequence[Union[FeatureTable, FeatureView]],\n entities: Sequence[Entity],\n ):\n \"\"\"\n We delete the keys in redis for tables/views being removed.\n \"\"\"\n for table in tables:\n self.delete_table_values(config, table)\n\n @staticmethod\n def _parse_connection_string(connection_string: str):\n \"\"\"\n Reads Redis connections string using format\n for RedisCluster:\n redis1:6379,redis2:6379,decode_responses=true,skip_full_coverage_check=true,ssl=true,password=...\n for Redis:\n redis_master:6379,db=0,ssl=true,password=...\n \"\"\"\n startup_nodes = [\n dict(zip([\"host\", \"port\"], c.split(\":\")))\n for c in connection_string.split(\",\")\n if \"=\" not in c\n ]\n params = {}\n for c in connection_string.split(\",\"):\n if \"=\" in c:\n kv = c.split(\"=\", 1)\n try:\n kv[1] = json.loads(kv[1])\n except json.JSONDecodeError:\n ...\n\n it = iter(kv)\n params.update(dict(zip(it, it)))\n\n return startup_nodes, params\n\n def _get_client(self, online_store_config: RedisOnlineStoreConfig):\n \"\"\"\n Creates the Redis client RedisCluster or Redis depending on configuration\n \"\"\"\n if not self._client:\n startup_nodes, kwargs = self._parse_connection_string(\n online_store_config.connection_string\n )\n if online_store_config.redis_type == RedisType.redis_cluster:\n kwargs[\"startup_nodes\"] = startup_nodes\n self._client = RedisCluster(**kwargs)\n else:\n kwargs[\"host\"] = startup_nodes[0][\"host\"]\n kwargs[\"port\"] = startup_nodes[0][\"port\"]\n self._client = Redis(**kwargs)\n return self._client\n\n def online_write_batch(\n self,\n config: RepoConfig,\n table: Union[FeatureTable, FeatureView],\n data: List[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n online_store_config = config.online_store\n assert isinstance(online_store_config, RedisOnlineStoreConfig)\n\n client = self._get_client(online_store_config)\n project = config.project\n\n entity_hset = {}\n feature_view = table.name\n\n ex = Timestamp()\n ex.seconds = EX_SECONDS\n ex_str = ex.SerializeToString()\n for entity_key, values, timestamp, created_ts in data:\n redis_key_bin = 
_redis_key(project, entity_key)\n ts = Timestamp()\n ts.seconds = int(utils.make_tzaware(timestamp).timestamp())\n entity_hset[f\"_ts:{feature_view}\"] = ts.SerializeToString()\n entity_hset[f\"_ex:{feature_view}\"] = ex_str\n\n for feature_name, val in values.items():\n f_key = _mmh3(f\"{feature_view}:{feature_name}\")\n entity_hset[f_key] = val.SerializeToString()\n\n client.hset(redis_key_bin, mapping=entity_hset)\n if progress:\n progress(1)\n\n def online_read(\n self,\n config: RepoConfig,\n table: Union[FeatureTable, FeatureView],\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n online_store_config = config.online_store\n assert isinstance(online_store_config, RedisOnlineStoreConfig)\n\n client = self._get_client(online_store_config)\n feature_view = table.name\n project = config.project\n\n result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []\n\n if not requested_features:\n requested_features = [f.name for f in table.features]\n\n for entity_key in entity_keys:\n redis_key_bin = _redis_key(project, entity_key)\n hset_keys = [_mmh3(f\"{feature_view}:{k}\") for k in requested_features]\n ts_key = f\"_ts:{feature_view}\"\n hset_keys.append(ts_key)\n values = client.hmget(redis_key_bin, hset_keys)\n requested_features.append(ts_key)\n res_val = dict(zip(requested_features, values))\n\n res_ts = Timestamp()\n ts_val = res_val.pop(ts_key)\n if ts_val:\n res_ts.ParseFromString(ts_val)\n\n res = {}\n for feature_name, val_bin in res_val.items():\n val = ValueProto()\n if val_bin:\n val.ParseFromString(val_bin)\n res[feature_name] = val\n\n if not res:\n result.append((None, None))\n else:\n timestamp = datetime.fromtimestamp(res_ts.seconds)\n result.append((timestamp, res))\n return result\n", "path": "sdk/python/feast/infra/online_stores/redis.py"}]} | 2,854 | 153 |
gh_patches_debug_33063 | rasdani/github-patches | git_diff | ARM-DOE__ACT-776 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Function to create movies
The DQ Office creates many movies from static plots. Another researcher is working on a project that would benefit from a simple way to create movies from plots. We should create a function to make movies from a list of images. Finding a mostly Python way to do this would be best.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `act/utils/__init__.py`
Content:
```
1 """
2 This module contains the common procedures used by all modules of the ARM
3 Community Toolkit.
4
5 """
6 import lazy_loader as lazy
7
8 __getattr__, __dir__, __all__ = lazy.attach(
9 __name__,
10 submodules=['data_utils', 'datetime_utils', 'geo_utils', 'inst_utils', 'io_utils', 'qc_utils', 'radiance_utils', 'ship_utils'],
11 submod_attrs={
12 'data_utils': [
13 'ChangeUnits',
14 'accumulate_precip',
15 'add_in_nan',
16 'assign_coordinates',
17 'convert_units',
18 'create_pyart_obj',
19 'get_missing_value',
20 'ts_weighted_average',
21 'height_adjusted_pressure',
22 'height_adjusted_temperature',
23 'convert_to_potential_temp',
24 'arm_site_location_search',
25 'DatastreamParserARM',
26 ],
27 'datetime_utils': [
28 'dates_between',
29 'datetime64_to_datetime',
30 'determine_time_delta',
31 'numpy_to_arm_date',
32 'reduce_time_ranges',
33 'date_parser',
34 'adjust_timestamp'
35 ],
36 'geo_utils': [
37 'add_solar_variable',
38 'destination_azimuth_distance',
39 'get_solar_azimuth_elevation',
40 'get_sunrise_sunset_noon',
41 'is_sun_visible',
42 ],
43 'inst_utils': ['decode_present_weather'],
44 'qc_utils': ['calculate_dqr_times'],
45 'radiance_utils': ['planck_converter'],
46 'ship_utils': ['calc_cog_sog', 'proc_scog'],
47 'io_utils': ['pack_tar',
48 'unpack_tar',
49 'cleanup_files',
50 'is_gunzip_file',
51 'pack_gzip',
52 'unpack_gzip'
53 ],
54 },
55 )
56
```
Path: `act/utils/io_utils.py`
Content:
```
1 from pathlib import Path
2 import tarfile
3 from os import PathLike
4 from shutil import rmtree
5 import random
6 import string
7 import gzip
8 import shutil
9 import tempfile
10
11
12 def pack_tar(filenames, write_filename=None, write_directory=None, remove=False):
13 """
14 Creates TAR file from list of filenames provided. Currently only works with
15 all files existing in the same directory.
16
17 ...
18
19 Parameters
20 ----------
21 filenames : str or list
22 Filenames to be placed in TAR file
23 write_filename : str, pathlib.Path, None
24 TAR output filename. If not provided will use file name 'created_tarfile.tar'
25 write_directory : str, pathlib.Path, None
26 Path to directory to write TAR file. If the directory does not exist will
27 be created.
28 remove : boolean
29 Delete provided filenames after making TAR file
30
31 Returns
32 -------
33 list
34 List of files extracted from the TAR file or full path to created direcotry
35 containing extracted files.
36
37 """
38
39 if write_filename is None:
40 write_filename = 'created_tarfile.tar'
41
42 if isinstance(filenames, (str, PathLike)):
43 filenames = [filenames]
44
45 if write_directory is not None:
46 write_directory = Path(write_directory)
47 write_directory.mkdir(parents=True, exist_ok=True)
48 write_filename = Path(write_filename).name
49 elif Path(write_filename).parent != Path('.'):
50 write_directory = Path(write_filename).parent
51 else:
52 write_directory = Path('.')
53
54 if not str(write_filename).endswith('.tar'):
55 write_filename = str(write_filename) + '.tar'
56
57 write_filename = Path(write_directory, write_filename)
58 tar_file_handle = tarfile.open(write_filename, "w")
59 for filename in filenames:
60 tar_file_handle.add(filename, arcname=Path(filename).name)
61
62 tar_file_handle.close()
63
64 if remove:
65 for filename in filenames:
66 Path(filename).unlink()
67
68 return str(write_filename)
69
70
71 def unpack_tar(tar_files, write_directory=None, temp_dir=False, randomize=True,
72 return_files=True, remove=False):
73 """
74 Unpacks TAR file contents into provided base directory
75
76 ...
77
78 Parameters
79 ----------
80 tar_files : str or list
81 path to TAR file to be unpacked
82 write_directory : str or pathlib.Path
83 base path to extract contents of TAR files or create a new randomized directory
84 to extract contents of TAR file.
85 temp_dir : boolean
86 Should a temporary directory be created and TAR files extracted to the new directory.
87 write_directory and randomize are ignored if this option is used.
88 randomize : boolean
89 Create a new randomized directory to extract TAR files into.
90 return_files : boolean
91 When set will return a list of full path filenames to the extracted files.
92 When set to False will return full path to directory containing extracted files.
93 remove : boolean
94 Delete provided TAR files after extracting files.
95
96 Returns
97 -------
98 files : list or str
99 List of full path files extracted from the TAR file or full path to direcotry
100 containing extracted files.
101
102 """
103
104 files = []
105
106 if isinstance(tar_files, (str, PathLike)):
107 tar_files = [tar_files]
108
109 out_dir = Path.cwd()
110 if temp_dir is True:
111 out_dir = Path(tempfile.TemporaryDirectory().name)
112 else:
113 if write_directory is not None:
114 out_dir = Path(write_directory)
115 else:
116 out_dir = Path(Path(tar_files[0]).parent)
117
118 if out_dir.is_dir() is False:
119 out_dir.mkdir(parents=True, exist_ok=True)
120
121 if randomize:
122 out_dir = Path(tempfile.mkdtemp(dir=out_dir))
123
124 for tar_file in tar_files:
125 try:
126 tar = tarfile.open(tar_file)
127 tar.extractall(path=out_dir)
128 result = [str(Path(out_dir, ii.name)) for ii in tar.getmembers()]
129 files.extend(result)
130 tar.close()
131 except tarfile.ReadError:
132 print(f"\nCould not extract files from {tar_file}")
133
134 if return_files is False:
135 files = str(out_dir)
136 else:
137 files.sort()
138
139 if remove:
140 for tar_file in tar_files:
141 Path(tar_file).unlink()
142
143 return files
144
145
146 def cleanup_files(dirname=None, files=None):
147 """
148 Cleans up files and directory possibly created from unpacking TAR files with unpack_tar()
149
150 ...
151
152 Parameters
153 ----------
154 dirname : str, pathlib.Path, None
155 Path to directory of extracted files which will be removed.
156 files : str, pahtlib.Path, list, None
157 Full path file name(s) from extracted TAR file.
158 Assumes the directory this file exists in should be removed.
159
160 """
161
162 if isinstance(files, (str, PathLike)):
163 files = [str(files)]
164
165 try:
166 if dirname is not None:
167 rmtree(dirname)
168
169 if files is not None and len(files) > 0 and Path(files[0]).is_file():
170 out_dir = Path(files[0]).parent
171 rmtree(out_dir)
172
173 except Exception as error:
174 print("\nError removing files:", error)
175
176
177 def is_gunzip_file(filepath):
178 """
179 Function to test if file is a gunzip file.
180
181 Parameters
182 ----------
183
184 filepath : str or pathlib.Path to file to test
185
186 Returns
187 -------
188 test : boolean
189 Result from testing if file is a gunzip file
190
191 """
192
193 try:
194 with open(str(filepath), 'rb') as test_f:
195 return test_f.read(2) == b'\x1f\x8b'
196 except Exception:
197 return False
198
199
200 def pack_gzip(filename, write_directory=None, remove=False):
201 """
202 Creates a gunzip file from a filename path
203
204 ...
205
206 Parameters
207 ----------
208 filename : str, pathlib.Path
209 Filename to use in creation of gunzip version.
210 write_directory : str, pahtlib.Path, list, None
211 Path to directory to place newly created gunzip file.
212 remove : boolean
213 Remove provided filename after creating gunzip file
214
215 Returns
216 -------
217 write_filename : str
218 Full path name of created gunzip file
219
220 """
221
222 write_filename = Path(filename).name + '.gz'
223
224 if write_directory is not None:
225 write_filename = Path(write_directory, write_filename)
226 Path(write_directory).mkdir(parents=True, exist_ok=True)
227 else:
228 write_filename = Path(Path(filename).parent, write_filename)
229
230 with open(filename, 'rb') as f_in:
231 with gzip.open(write_filename, 'wb') as f_out:
232 shutil.copyfileobj(f_in, f_out)
233
234 if remove:
235 Path(filename).unlink()
236
237 return str(write_filename)
238
239
240 def unpack_gzip(filename, write_directory=None, remove=False):
241 """
242 Extracts file from a gunzip file.
243
244 ...
245
246 Parameters
247 ----------
248 filename : str, pathlib.Path
249 Filename to use in extraction of gunzip file.
250 write_directory : str, pahtlib.Path, list, None
251 Path to directory to place newly created gunzip file.
252 remove : boolean
253 Remove provided filename after creating gunzip file
254
255 Returns
256 -------
257 write_filename : str
258 Full path name of created gunzip file
259
260 """
261
262 if write_directory is None:
263 write_directory = Path(filename).parent
264
265 write_filename = Path(filename).name
266 if write_filename.endswith('.gz'):
267 write_filename = write_filename.replace(".gz", "")
268
269 write_filename = Path(write_directory, write_filename)
270
271 with gzip.open(filename, "rb") as f_in:
272 with open(write_filename, "wb") as f_out:
273 shutil.copyfileobj(f_in, f_out)
274
275 if remove:
276 Path(filename).unlink()
277
278 return str(write_filename)
279
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/act/utils/__init__.py b/act/utils/__init__.py
--- a/act/utils/__init__.py
+++ b/act/utils/__init__.py
@@ -49,7 +49,8 @@
'cleanup_files',
'is_gunzip_file',
'pack_gzip',
- 'unpack_gzip'
+ 'unpack_gzip',
+ 'generate_movie'
],
},
)
diff --git a/act/utils/io_utils.py b/act/utils/io_utils.py
--- a/act/utils/io_utils.py
+++ b/act/utils/io_utils.py
@@ -8,6 +8,12 @@
import shutil
import tempfile
+try:
+ import moviepy.video.io.ImageSequenceClip
+ MOVIEPY_AVAILABLE = True
+except ImportError:
+ MOVIEPY_AVAILABLE = False
+
def pack_tar(filenames, write_filename=None, write_directory=None, remove=False):
"""
@@ -276,3 +282,55 @@
Path(filename).unlink()
return str(write_filename)
+
+
+def generate_movie(images, write_directory=None, write_filename=None, fps=10, codec=None, threads=None):
+ """
+ Creates a movie from a list of images
+
+ ...
+
+ Parameters
+ ----------
+ images : list
+ List of images in the correct order to make into a movie
+ write_directory : str, pahtlib.Path, list, None
+ Path to directory to place newly created gunzip file.
+ write_filename : str, pathlib.Path, None
+ Movie output filename
+ fps: int
+ Frames per second
+ codec: int
+ Codec to use for image encoding
+ threads: int
+ Number of threads to use for ffmpeg
+
+
+ Returns
+ -------
+ write_filename : str
+ Full path name of created gunzip file
+
+ """
+ if not MOVIEPY_AVAILABLE:
+ raise ImportError(
+ 'MoviePy needs to be installed on your system to make movies.'
+ )
+
+ if write_filename is None:
+ write_filename = 'movie.mp4'
+
+ if write_directory is not None:
+ write_directory = Path(write_directory)
+ write_directory.mkdir(parents=True, exist_ok=True)
+ write_filename = Path(write_filename).name
+ elif Path(write_filename).parent != Path('.'):
+ write_directory = Path(write_filename).parent
+ else:
+ write_directory = Path('.')
+
+ full_path = write_directory / write_filename
+ clip = moviepy.video.io.ImageSequenceClip.ImageSequenceClip(images, fps=fps)
+ clip.write_videofile(str(full_path), codec=codec, threads=threads)
+
+ return full_path
| {"golden_diff": "diff --git a/act/utils/__init__.py b/act/utils/__init__.py\n--- a/act/utils/__init__.py\n+++ b/act/utils/__init__.py\n@@ -49,7 +49,8 @@\n 'cleanup_files',\n 'is_gunzip_file',\n 'pack_gzip',\n- 'unpack_gzip'\n+ 'unpack_gzip',\n+ 'generate_movie'\n ],\n },\n )\ndiff --git a/act/utils/io_utils.py b/act/utils/io_utils.py\n--- a/act/utils/io_utils.py\n+++ b/act/utils/io_utils.py\n@@ -8,6 +8,12 @@\n import shutil\n import tempfile\n \n+try:\n+ import moviepy.video.io.ImageSequenceClip\n+ MOVIEPY_AVAILABLE = True\n+except ImportError:\n+ MOVIEPY_AVAILABLE = False\n+\n \n def pack_tar(filenames, write_filename=None, write_directory=None, remove=False):\n \"\"\"\n@@ -276,3 +282,55 @@\n Path(filename).unlink()\n \n return str(write_filename)\n+\n+\n+def generate_movie(images, write_directory=None, write_filename=None, fps=10, codec=None, threads=None):\n+ \"\"\"\n+ Creates a movie from a list of images\n+\n+ ...\n+\n+ Parameters\n+ ----------\n+ images : list\n+ List of images in the correct order to make into a movie\n+ write_directory : str, pahtlib.Path, list, None\n+ Path to directory to place newly created gunzip file.\n+ write_filename : str, pathlib.Path, None\n+ Movie output filename\n+ fps: int\n+ Frames per second\n+ codec: int\n+ Codec to use for image encoding\n+ threads: int\n+ Number of threads to use for ffmpeg\n+\n+\n+ Returns\n+ -------\n+ write_filename : str\n+ Full path name of created gunzip file\n+\n+ \"\"\"\n+ if not MOVIEPY_AVAILABLE:\n+ raise ImportError(\n+ 'MoviePy needs to be installed on your system to make movies.'\n+ )\n+\n+ if write_filename is None:\n+ write_filename = 'movie.mp4'\n+\n+ if write_directory is not None:\n+ write_directory = Path(write_directory)\n+ write_directory.mkdir(parents=True, exist_ok=True)\n+ write_filename = Path(write_filename).name\n+ elif Path(write_filename).parent != Path('.'):\n+ write_directory = Path(write_filename).parent\n+ else:\n+ write_directory = Path('.')\n+\n+ full_path = write_directory / write_filename\n+ clip = moviepy.video.io.ImageSequenceClip.ImageSequenceClip(images, fps=fps)\n+ clip.write_videofile(str(full_path), codec=codec, threads=threads)\n+\n+ return full_path\n", "issue": "Function to create movies\nThe DQ Office creates many movies from static plots. Another researcher is working on a project that would benefit from a simple way to create moves from plots. We should create a function to make movies from a list of images. 
Finding a mostly Python way to do this would be best.\n", "before_files": [{"content": "\"\"\"\nThis module contains the common procedures used by all modules of the ARM\nCommunity Toolkit.\n\n\"\"\"\nimport lazy_loader as lazy\n\n__getattr__, __dir__, __all__ = lazy.attach(\n __name__,\n submodules=['data_utils', 'datetime_utils', 'geo_utils', 'inst_utils', 'io_utils', 'qc_utils', 'radiance_utils', 'ship_utils'],\n submod_attrs={\n 'data_utils': [\n 'ChangeUnits',\n 'accumulate_precip',\n 'add_in_nan',\n 'assign_coordinates',\n 'convert_units',\n 'create_pyart_obj',\n 'get_missing_value',\n 'ts_weighted_average',\n 'height_adjusted_pressure',\n 'height_adjusted_temperature',\n 'convert_to_potential_temp',\n 'arm_site_location_search',\n 'DatastreamParserARM',\n ],\n 'datetime_utils': [\n 'dates_between',\n 'datetime64_to_datetime',\n 'determine_time_delta',\n 'numpy_to_arm_date',\n 'reduce_time_ranges',\n 'date_parser',\n 'adjust_timestamp'\n ],\n 'geo_utils': [\n 'add_solar_variable',\n 'destination_azimuth_distance',\n 'get_solar_azimuth_elevation',\n 'get_sunrise_sunset_noon',\n 'is_sun_visible',\n ],\n 'inst_utils': ['decode_present_weather'],\n 'qc_utils': ['calculate_dqr_times'],\n 'radiance_utils': ['planck_converter'],\n 'ship_utils': ['calc_cog_sog', 'proc_scog'],\n 'io_utils': ['pack_tar',\n 'unpack_tar',\n 'cleanup_files',\n 'is_gunzip_file',\n 'pack_gzip',\n 'unpack_gzip'\n ],\n },\n)\n", "path": "act/utils/__init__.py"}, {"content": "from pathlib import Path\nimport tarfile\nfrom os import PathLike\nfrom shutil import rmtree\nimport random\nimport string\nimport gzip\nimport shutil\nimport tempfile\n\n\ndef pack_tar(filenames, write_filename=None, write_directory=None, remove=False):\n \"\"\"\n Creates TAR file from list of filenames provided. Currently only works with\n all files existing in the same directory.\n\n ...\n\n Parameters\n ----------\n filenames : str or list\n Filenames to be placed in TAR file\n write_filename : str, pathlib.Path, None\n TAR output filename. If not provided will use file name 'created_tarfile.tar'\n write_directory : str, pathlib.Path, None\n Path to directory to write TAR file. 
If the directory does not exist will\n be created.\n remove : boolean\n Delete provided filenames after making TAR file\n\n Returns\n -------\n list\n List of files extracted from the TAR file or full path to created direcotry\n containing extracted files.\n\n \"\"\"\n\n if write_filename is None:\n write_filename = 'created_tarfile.tar'\n\n if isinstance(filenames, (str, PathLike)):\n filenames = [filenames]\n\n if write_directory is not None:\n write_directory = Path(write_directory)\n write_directory.mkdir(parents=True, exist_ok=True)\n write_filename = Path(write_filename).name\n elif Path(write_filename).parent != Path('.'):\n write_directory = Path(write_filename).parent\n else:\n write_directory = Path('.')\n\n if not str(write_filename).endswith('.tar'):\n write_filename = str(write_filename) + '.tar'\n\n write_filename = Path(write_directory, write_filename)\n tar_file_handle = tarfile.open(write_filename, \"w\")\n for filename in filenames:\n tar_file_handle.add(filename, arcname=Path(filename).name)\n\n tar_file_handle.close()\n\n if remove:\n for filename in filenames:\n Path(filename).unlink()\n\n return str(write_filename)\n\n\ndef unpack_tar(tar_files, write_directory=None, temp_dir=False, randomize=True,\n return_files=True, remove=False):\n \"\"\"\n Unpacks TAR file contents into provided base directory\n\n ...\n\n Parameters\n ----------\n tar_files : str or list\n path to TAR file to be unpacked\n write_directory : str or pathlib.Path\n base path to extract contents of TAR files or create a new randomized directory\n to extract contents of TAR file.\n temp_dir : boolean\n Should a temporary directory be created and TAR files extracted to the new directory.\n write_directory and randomize are ignored if this option is used.\n randomize : boolean\n Create a new randomized directory to extract TAR files into.\n return_files : boolean\n When set will return a list of full path filenames to the extracted files.\n When set to False will return full path to directory containing extracted files.\n remove : boolean\n Delete provided TAR files after extracting files.\n\n Returns\n -------\n files : list or str\n List of full path files extracted from the TAR file or full path to direcotry\n containing extracted files.\n\n \"\"\"\n\n files = []\n\n if isinstance(tar_files, (str, PathLike)):\n tar_files = [tar_files]\n\n out_dir = Path.cwd()\n if temp_dir is True:\n out_dir = Path(tempfile.TemporaryDirectory().name)\n else:\n if write_directory is not None:\n out_dir = Path(write_directory)\n else:\n out_dir = Path(Path(tar_files[0]).parent)\n\n if out_dir.is_dir() is False:\n out_dir.mkdir(parents=True, exist_ok=True)\n\n if randomize:\n out_dir = Path(tempfile.mkdtemp(dir=out_dir))\n\n for tar_file in tar_files:\n try:\n tar = tarfile.open(tar_file)\n tar.extractall(path=out_dir)\n result = [str(Path(out_dir, ii.name)) for ii in tar.getmembers()]\n files.extend(result)\n tar.close()\n except tarfile.ReadError:\n print(f\"\\nCould not extract files from {tar_file}\")\n\n if return_files is False:\n files = str(out_dir)\n else:\n files.sort()\n\n if remove:\n for tar_file in tar_files:\n Path(tar_file).unlink()\n\n return files\n\n\ndef cleanup_files(dirname=None, files=None):\n \"\"\"\n Cleans up files and directory possibly created from unpacking TAR files with unpack_tar()\n\n ...\n\n Parameters\n ----------\n dirname : str, pathlib.Path, None\n Path to directory of extracted files which will be removed.\n files : str, pahtlib.Path, list, None\n Full path file name(s) from 
extracted TAR file.\n Assumes the directory this file exists in should be removed.\n\n \"\"\"\n\n if isinstance(files, (str, PathLike)):\n files = [str(files)]\n\n try:\n if dirname is not None:\n rmtree(dirname)\n\n if files is not None and len(files) > 0 and Path(files[0]).is_file():\n out_dir = Path(files[0]).parent\n rmtree(out_dir)\n\n except Exception as error:\n print(\"\\nError removing files:\", error)\n\n\ndef is_gunzip_file(filepath):\n \"\"\"\n Function to test if file is a gunzip file.\n\n Parameters\n ----------\n\n filepath : str or pathlib.Path to file to test\n\n Returns\n -------\n test : boolean\n Result from testing if file is a gunzip file\n\n \"\"\"\n\n try:\n with open(str(filepath), 'rb') as test_f:\n return test_f.read(2) == b'\\x1f\\x8b'\n except Exception:\n return False\n\n\ndef pack_gzip(filename, write_directory=None, remove=False):\n \"\"\"\n Creates a gunzip file from a filename path\n\n ...\n\n Parameters\n ----------\n filename : str, pathlib.Path\n Filename to use in creation of gunzip version.\n write_directory : str, pahtlib.Path, list, None\n Path to directory to place newly created gunzip file.\n remove : boolean\n Remove provided filename after creating gunzip file\n\n Returns\n -------\n write_filename : str\n Full path name of created gunzip file\n\n \"\"\"\n\n write_filename = Path(filename).name + '.gz'\n\n if write_directory is not None:\n write_filename = Path(write_directory, write_filename)\n Path(write_directory).mkdir(parents=True, exist_ok=True)\n else:\n write_filename = Path(Path(filename).parent, write_filename)\n\n with open(filename, 'rb') as f_in:\n with gzip.open(write_filename, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n\n if remove:\n Path(filename).unlink()\n\n return str(write_filename)\n\n\ndef unpack_gzip(filename, write_directory=None, remove=False):\n \"\"\"\n Extracts file from a gunzip file.\n\n ...\n\n Parameters\n ----------\n filename : str, pathlib.Path\n Filename to use in extraction of gunzip file.\n write_directory : str, pahtlib.Path, list, None\n Path to directory to place newly created gunzip file.\n remove : boolean\n Remove provided filename after creating gunzip file\n\n Returns\n -------\n write_filename : str\n Full path name of created gunzip file\n\n \"\"\"\n\n if write_directory is None:\n write_directory = Path(filename).parent\n\n write_filename = Path(filename).name\n if write_filename.endswith('.gz'):\n write_filename = write_filename.replace(\".gz\", \"\")\n\n write_filename = Path(write_directory, write_filename)\n\n with gzip.open(filename, \"rb\") as f_in:\n with open(write_filename, \"wb\") as f_out:\n shutil.copyfileobj(f_in, f_out)\n\n if remove:\n Path(filename).unlink()\n\n return str(write_filename)\n", "path": "act/utils/io_utils.py"}], "after_files": [{"content": "\"\"\"\nThis module contains the common procedures used by all modules of the ARM\nCommunity Toolkit.\n\n\"\"\"\nimport lazy_loader as lazy\n\n__getattr__, __dir__, __all__ = lazy.attach(\n __name__,\n submodules=['data_utils', 'datetime_utils', 'geo_utils', 'inst_utils', 'io_utils', 'qc_utils', 'radiance_utils', 'ship_utils'],\n submod_attrs={\n 'data_utils': [\n 'ChangeUnits',\n 'accumulate_precip',\n 'add_in_nan',\n 'assign_coordinates',\n 'convert_units',\n 'create_pyart_obj',\n 'get_missing_value',\n 'ts_weighted_average',\n 'height_adjusted_pressure',\n 'height_adjusted_temperature',\n 'convert_to_potential_temp',\n 'arm_site_location_search',\n 'DatastreamParserARM',\n ],\n 'datetime_utils': [\n 'dates_between',\n 
'datetime64_to_datetime',\n 'determine_time_delta',\n 'numpy_to_arm_date',\n 'reduce_time_ranges',\n 'date_parser',\n 'adjust_timestamp'\n ],\n 'geo_utils': [\n 'add_solar_variable',\n 'destination_azimuth_distance',\n 'get_solar_azimuth_elevation',\n 'get_sunrise_sunset_noon',\n 'is_sun_visible',\n ],\n 'inst_utils': ['decode_present_weather'],\n 'qc_utils': ['calculate_dqr_times'],\n 'radiance_utils': ['planck_converter'],\n 'ship_utils': ['calc_cog_sog', 'proc_scog'],\n 'io_utils': ['pack_tar',\n 'unpack_tar',\n 'cleanup_files',\n 'is_gunzip_file',\n 'pack_gzip',\n 'unpack_gzip',\n 'generate_movie'\n ],\n },\n)\n", "path": "act/utils/__init__.py"}, {"content": "from pathlib import Path\nimport tarfile\nfrom os import PathLike\nfrom shutil import rmtree\nimport random\nimport string\nimport gzip\nimport shutil\nimport tempfile\n\ntry:\n import moviepy.video.io.ImageSequenceClip\n MOVIEPY_AVAILABLE = True\nexcept ImportError:\n MOVIEPY_AVAILABLE = False\n\n\ndef pack_tar(filenames, write_filename=None, write_directory=None, remove=False):\n \"\"\"\n Creates TAR file from list of filenames provided. Currently only works with\n all files existing in the same directory.\n\n ...\n\n Parameters\n ----------\n filenames : str or list\n Filenames to be placed in TAR file\n write_filename : str, pathlib.Path, None\n TAR output filename. If not provided will use file name 'created_tarfile.tar'\n write_directory : str, pathlib.Path, None\n Path to directory to write TAR file. If the directory does not exist will\n be created.\n remove : boolean\n Delete provided filenames after making TAR file\n\n Returns\n -------\n list\n List of files extracted from the TAR file or full path to created direcotry\n containing extracted files.\n\n \"\"\"\n\n if write_filename is None:\n write_filename = 'created_tarfile.tar'\n\n if isinstance(filenames, (str, PathLike)):\n filenames = [filenames]\n\n if write_directory is not None:\n write_directory = Path(write_directory)\n write_directory.mkdir(parents=True, exist_ok=True)\n write_filename = Path(write_filename).name\n elif Path(write_filename).parent != Path('.'):\n write_directory = Path(write_filename).parent\n else:\n write_directory = Path('.')\n\n if not str(write_filename).endswith('.tar'):\n write_filename = str(write_filename) + '.tar'\n\n write_filename = Path(write_directory, write_filename)\n tar_file_handle = tarfile.open(write_filename, \"w\")\n for filename in filenames:\n tar_file_handle.add(filename, arcname=Path(filename).name)\n\n tar_file_handle.close()\n\n if remove:\n for filename in filenames:\n Path(filename).unlink()\n\n return str(write_filename)\n\n\ndef unpack_tar(tar_files, write_directory=None, temp_dir=False, randomize=True,\n return_files=True, remove=False):\n \"\"\"\n Unpacks TAR file contents into provided base directory\n\n ...\n\n Parameters\n ----------\n tar_files : str or list\n path to TAR file to be unpacked\n write_directory : str or pathlib.Path\n base path to extract contents of TAR files or create a new randomized directory\n to extract contents of TAR file.\n temp_dir : boolean\n Should a temporary directory be created and TAR files extracted to the new directory.\n write_directory and randomize are ignored if this option is used.\n randomize : boolean\n Create a new randomized directory to extract TAR files into.\n return_files : boolean\n When set will return a list of full path filenames to the extracted files.\n When set to False will return full path to directory containing extracted files.\n remove : boolean\n 
Delete provided TAR files after extracting files.\n\n Returns\n -------\n files : list or str\n List of full path files extracted from the TAR file or full path to direcotry\n containing extracted files.\n\n \"\"\"\n\n files = []\n\n if isinstance(tar_files, (str, PathLike)):\n tar_files = [tar_files]\n\n out_dir = Path.cwd()\n if temp_dir is True:\n out_dir = Path(tempfile.TemporaryDirectory().name)\n else:\n if write_directory is not None:\n out_dir = Path(write_directory)\n else:\n out_dir = Path(Path(tar_files[0]).parent)\n\n if out_dir.is_dir() is False:\n out_dir.mkdir(parents=True, exist_ok=True)\n\n if randomize:\n out_dir = Path(tempfile.mkdtemp(dir=out_dir))\n\n for tar_file in tar_files:\n try:\n tar = tarfile.open(tar_file)\n tar.extractall(path=out_dir)\n result = [str(Path(out_dir, ii.name)) for ii in tar.getmembers()]\n files.extend(result)\n tar.close()\n except tarfile.ReadError:\n print(f\"\\nCould not extract files from {tar_file}\")\n\n if return_files is False:\n files = str(out_dir)\n else:\n files.sort()\n\n if remove:\n for tar_file in tar_files:\n Path(tar_file).unlink()\n\n return files\n\n\ndef cleanup_files(dirname=None, files=None):\n \"\"\"\n Cleans up files and directory possibly created from unpacking TAR files with unpack_tar()\n\n ...\n\n Parameters\n ----------\n dirname : str, pathlib.Path, None\n Path to directory of extracted files which will be removed.\n files : str, pahtlib.Path, list, None\n Full path file name(s) from extracted TAR file.\n Assumes the directory this file exists in should be removed.\n\n \"\"\"\n\n if isinstance(files, (str, PathLike)):\n files = [str(files)]\n\n try:\n if dirname is not None:\n rmtree(dirname)\n\n if files is not None and len(files) > 0 and Path(files[0]).is_file():\n out_dir = Path(files[0]).parent\n rmtree(out_dir)\n\n except Exception as error:\n print(\"\\nError removing files:\", error)\n\n\ndef is_gunzip_file(filepath):\n \"\"\"\n Function to test if file is a gunzip file.\n\n Parameters\n ----------\n\n filepath : str or pathlib.Path to file to test\n\n Returns\n -------\n test : boolean\n Result from testing if file is a gunzip file\n\n \"\"\"\n\n try:\n with open(str(filepath), 'rb') as test_f:\n return test_f.read(2) == b'\\x1f\\x8b'\n except Exception:\n return False\n\n\ndef pack_gzip(filename, write_directory=None, remove=False):\n \"\"\"\n Creates a gunzip file from a filename path\n\n ...\n\n Parameters\n ----------\n filename : str, pathlib.Path\n Filename to use in creation of gunzip version.\n write_directory : str, pahtlib.Path, list, None\n Path to directory to place newly created gunzip file.\n remove : boolean\n Remove provided filename after creating gunzip file\n\n Returns\n -------\n write_filename : str\n Full path name of created gunzip file\n\n \"\"\"\n\n write_filename = Path(filename).name + '.gz'\n\n if write_directory is not None:\n write_filename = Path(write_directory, write_filename)\n Path(write_directory).mkdir(parents=True, exist_ok=True)\n else:\n write_filename = Path(Path(filename).parent, write_filename)\n\n with open(filename, 'rb') as f_in:\n with gzip.open(write_filename, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n\n if remove:\n Path(filename).unlink()\n\n return str(write_filename)\n\n\ndef unpack_gzip(filename, write_directory=None, remove=False):\n \"\"\"\n Extracts file from a gunzip file.\n\n ...\n\n Parameters\n ----------\n filename : str, pathlib.Path\n Filename to use in extraction of gunzip file.\n write_directory : str, pahtlib.Path, list, None\n 
Path to directory to place newly created gunzip file.\n remove : boolean\n Remove provided filename after creating gunzip file\n\n Returns\n -------\n write_filename : str\n Full path name of created gunzip file\n\n \"\"\"\n\n if write_directory is None:\n write_directory = Path(filename).parent\n\n write_filename = Path(filename).name\n if write_filename.endswith('.gz'):\n write_filename = write_filename.replace(\".gz\", \"\")\n\n write_filename = Path(write_directory, write_filename)\n\n with gzip.open(filename, \"rb\") as f_in:\n with open(write_filename, \"wb\") as f_out:\n shutil.copyfileobj(f_in, f_out)\n\n if remove:\n Path(filename).unlink()\n\n return str(write_filename)\n\n\ndef generate_movie(images, write_directory=None, write_filename=None, fps=10, codec=None, threads=None):\n \"\"\"\n Creates a movie from a list of images\n\n ...\n\n Parameters\n ----------\n images : list\n List of images in the correct order to make into a movie\n write_directory : str, pahtlib.Path, list, None\n Path to directory to place newly created gunzip file.\n write_filename : str, pathlib.Path, None\n Movie output filename\n fps: int\n Frames per second\n codec: int\n Codec to use for image encoding\n threads: int\n Number of threads to use for ffmpeg\n\n\n Returns\n -------\n write_filename : str\n Full path name of created gunzip file\n\n \"\"\"\n if not MOVIEPY_AVAILABLE:\n raise ImportError(\n 'MoviePy needs to be installed on your system to make movies.'\n )\n\n if write_filename is None:\n write_filename = 'movie.mp4'\n\n if write_directory is not None:\n write_directory = Path(write_directory)\n write_directory.mkdir(parents=True, exist_ok=True)\n write_filename = Path(write_filename).name\n elif Path(write_filename).parent != Path('.'):\n write_directory = Path(write_filename).parent\n else:\n write_directory = Path('.')\n\n full_path = write_directory / write_filename\n clip = moviepy.video.io.ImageSequenceClip.ImageSequenceClip(images, fps=fps)\n clip.write_videofile(str(full_path), codec=codec, threads=threads)\n\n return full_path\n", "path": "act/utils/io_utils.py"}]} | 3,258 | 616 |
gh_patches_debug_28998 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-3050 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Different traceback messages appear when trying to run an inappropriate file as a script
##### Steps to reproduce the problem:
1. Run mitmproxy.
2. Press `|` (`shift+\`). Input the path to a non-script file:
`: script.run @focus /home/kajoj/myphoto.JPG`
Traceback:

2a. `: script.run @focus /home/kajoj/passlist.txt`
Traceback:

##### Any other comments? What have you tried so far?
I think these aren't the only traceback messages that may appear.
We should take a look at this place: https://github.com/mitmproxy/mitmproxy/blob/master/mitmproxy/addons/script.py#L30
##### System information
Mitmproxy: 3.0.0.dev79 (commit 957a630)
Python: 3.5.2
OpenSSL: OpenSSL 1.1.0g 2 Nov 2017
Platform: Linux-4.4.0-112-generic-x86_64-with-Ubuntu-16.04-xenial
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/addons/script.py`
Content:
```
1 import os
2 import importlib.util
3 import importlib.machinery
4 import time
5 import sys
6 import types
7 import typing
8 import traceback
9
10 from mitmproxy import addonmanager
11 from mitmproxy import exceptions
12 from mitmproxy import flow
13 from mitmproxy import command
14 from mitmproxy import eventsequence
15 from mitmproxy import ctx
16 import mitmproxy.types as mtypes
17
18
19 def load_script(path: str) -> types.ModuleType:
20 fullname = "__mitmproxy_script__.{}".format(
21 os.path.splitext(os.path.basename(path))[0]
22 )
23 # the fullname is not unique among scripts, so if there already is an existing script with said
24 # fullname, remove it.
25 sys.modules.pop(fullname, None)
26 oldpath = sys.path
27 sys.path.insert(0, os.path.dirname(path))
28 try:
29 loader = importlib.machinery.SourceFileLoader(fullname, path)
30 spec = importlib.util.spec_from_loader(fullname, loader=loader)
31 m = importlib.util.module_from_spec(spec)
32 loader.exec_module(m)
33 if not getattr(m, "name", None):
34 m.name = path # type: ignore
35 return m
36 finally:
37 sys.path[:] = oldpath
38
39
40 def script_error_handler(path, exc, msg="", tb=False):
41 """
42 Handles all the user's script errors with
43 an optional traceback
44 """
45 exception = type(exc).__name__
46 if msg:
47 exception = msg
48 lineno = ""
49 if hasattr(exc, "lineno"):
50 lineno = str(exc.lineno)
51 log_msg = "in Script {}:{} {}".format(path, lineno, exception)
52 if tb:
53 etype, value, tback = sys.exc_info()
54 tback = addonmanager.cut_traceback(tback, "invoke_addon")
55 log_msg = log_msg.join(["\n"] + traceback.format_exception(etype, value, tback))
56 ctx.log.error(log_msg)
57
58
59 class Script:
60 """
61 An addon that manages a single script.
62 """
63 ReloadInterval = 2
64
65 def __init__(self, path):
66 self.name = "scriptmanager:" + path
67 self.path = path
68 self.fullpath = os.path.expanduser(
69 path.strip("'\" ")
70 )
71 self.ns = None
72
73 self.last_load = 0
74 self.last_mtime = 0
75 if not os.path.isfile(self.fullpath):
76 raise exceptions.OptionsError('No such script')
77
78 @property
79 def addons(self):
80 return [self.ns] if self.ns else []
81
82 def tick(self):
83 if time.time() - self.last_load > self.ReloadInterval:
84 try:
85 mtime = os.stat(self.fullpath).st_mtime
86 except FileNotFoundError:
87 scripts = list(ctx.options.scripts)
88 scripts.remove(self.path)
89 ctx.options.update(scripts=scripts)
90 return
91
92 if mtime > self.last_mtime:
93 ctx.log.info("Loading script: %s" % self.path)
94 if self.ns:
95 ctx.master.addons.remove(self.ns)
96 self.ns = None
97 with addonmanager.safecall():
98 ns = load_script(self.fullpath)
99 ctx.master.addons.register(ns)
100 self.ns = ns
101 if self.ns:
102 # We're already running, so we have to explicitly register and
103 # configure the addon
104 ctx.master.addons.invoke_addon(self.ns, "running")
105 ctx.master.addons.invoke_addon(
106 self.ns,
107 "configure",
108 ctx.options.keys()
109 )
110 self.last_load = time.time()
111 self.last_mtime = mtime
112
113
114 class ScriptLoader:
115 """
116 An addon that manages loading scripts from options.
117 """
118 def __init__(self):
119 self.is_running = False
120 self.addons = []
121
122 def load(self, loader):
123 loader.add_option(
124 "scripts", typing.Sequence[str], [],
125 """
126 Execute a script.
127 """
128 )
129
130 def running(self):
131 self.is_running = True
132
133 @command.command("script.run")
134 def script_run(self, flows: typing.Sequence[flow.Flow], path: mtypes.Path) -> None:
135 """
136 Run a script on the specified flows. The script is loaded with
137 default options, and all lifecycle events for each flow are
138 simulated.
139 """
140 try:
141 s = Script(path)
142 l = addonmanager.Loader(ctx.master)
143 ctx.master.addons.invoke_addon(s, "load", l)
144 ctx.master.addons.invoke_addon(s, "configure", ctx.options.keys())
145 # Script is loaded on the first tick
146 ctx.master.addons.invoke_addon(s, "tick")
147 for f in flows:
148 for evt, arg in eventsequence.iterate(f):
149 ctx.master.addons.invoke_addon(s, evt, arg)
150 except exceptions.OptionsError as e:
151 script_error_handler(path, e, msg=str(e))
152
153 def configure(self, updated):
154 if "scripts" in updated:
155 for s in ctx.options.scripts:
156 if ctx.options.scripts.count(s) > 1:
157 raise exceptions.OptionsError("Duplicate script")
158
159 for a in self.addons[:]:
160 if a.path not in ctx.options.scripts:
161 ctx.log.info("Un-loading script: %s" % a.name)
162 ctx.master.addons.remove(a)
163 self.addons.remove(a)
164
165 # The machinations below are to ensure that:
166 # - Scripts remain in the same order
167 # - Scripts are not initialized un-necessarily. If only a
168 # script's order in the script list has changed, it is just
169 # moved.
170
171 current = {}
172 for a in self.addons:
173 current[a.path] = a
174
175 ordered = []
176 newscripts = []
177 for s in ctx.options.scripts:
178 if s in current:
179 ordered.append(current[s])
180 else:
181 sc = Script(s)
182 ordered.append(sc)
183 newscripts.append(sc)
184
185 self.addons = ordered
186
187 for s in newscripts:
188 ctx.master.addons.register(s)
189 if self.is_running:
190 # If we're already running, we configure and tell the addon
191 # we're up and running.
192 ctx.master.addons.invoke_addon(s, "running")
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/addons/script.py b/mitmproxy/addons/script.py
--- a/mitmproxy/addons/script.py
+++ b/mitmproxy/addons/script.py
@@ -25,6 +25,7 @@
sys.modules.pop(fullname, None)
oldpath = sys.path
sys.path.insert(0, os.path.dirname(path))
+ m = None
try:
loader = importlib.machinery.SourceFileLoader(fullname, path)
spec = importlib.util.spec_from_loader(fullname, loader=loader)
@@ -32,9 +33,11 @@
loader.exec_module(m)
if not getattr(m, "name", None):
m.name = path # type: ignore
- return m
+ except Exception as e:
+ script_error_handler(path, e, msg=str(e))
finally:
sys.path[:] = oldpath
+ return m
def script_error_handler(path, exc, msg="", tb=False):
@@ -48,11 +51,11 @@
lineno = ""
if hasattr(exc, "lineno"):
lineno = str(exc.lineno)
- log_msg = "in Script {}:{} {}".format(path, lineno, exception)
+ log_msg = "in script {}:{} {}".format(path, lineno, exception)
if tb:
etype, value, tback = sys.exc_info()
tback = addonmanager.cut_traceback(tback, "invoke_addon")
- log_msg = log_msg.join(["\n"] + traceback.format_exception(etype, value, tback))
+ log_msg = log_msg + "\n" + "".join(traceback.format_exception(etype, value, tback))
ctx.log.error(log_msg)
| {"golden_diff": "diff --git a/mitmproxy/addons/script.py b/mitmproxy/addons/script.py\n--- a/mitmproxy/addons/script.py\n+++ b/mitmproxy/addons/script.py\n@@ -25,6 +25,7 @@\n sys.modules.pop(fullname, None)\n oldpath = sys.path\n sys.path.insert(0, os.path.dirname(path))\n+ m = None\n try:\n loader = importlib.machinery.SourceFileLoader(fullname, path)\n spec = importlib.util.spec_from_loader(fullname, loader=loader)\n@@ -32,9 +33,11 @@\n loader.exec_module(m)\n if not getattr(m, \"name\", None):\n m.name = path # type: ignore\n- return m\n+ except Exception as e:\n+ script_error_handler(path, e, msg=str(e))\n finally:\n sys.path[:] = oldpath\n+ return m\n \n \n def script_error_handler(path, exc, msg=\"\", tb=False):\n@@ -48,11 +51,11 @@\n lineno = \"\"\n if hasattr(exc, \"lineno\"):\n lineno = str(exc.lineno)\n- log_msg = \"in Script {}:{} {}\".format(path, lineno, exception)\n+ log_msg = \"in script {}:{} {}\".format(path, lineno, exception)\n if tb:\n etype, value, tback = sys.exc_info()\n tback = addonmanager.cut_traceback(tback, \"invoke_addon\")\n- log_msg = log_msg.join([\"\\n\"] + traceback.format_exception(etype, value, tback))\n+ log_msg = log_msg + \"\\n\" + \"\".join(traceback.format_exception(etype, value, tback))\n ctx.log.error(log_msg)\n", "issue": "Different traceback messages appear, when try to run inappropriate file as a script\n##### Steps to reproduce the problem:\r\n\r\n1. Run mitmproxy.\r\n2. Press `|`(`shift+\\`). Input the path to not script file: \r\n`: script.run @focus /home/kajoj/myphoto.JPG`\r\n\r\nTraceback: \r\n\r\n\r\n2a. `: script.run @focus /home/kajoj/passlist.txt`\r\n\r\nTraceback: \r\n\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nI think these aren't only traceback messages, which may appear. 
\r\nWe should take a look at this place: https://github.com/mitmproxy/mitmproxy/blob/master/mitmproxy/addons/script.py#L30\r\n\r\n\r\n##### System information\r\n\r\nMitmproxy: 3.0.0.dev79 (commit 957a630)\r\nPython: 3.5.2\r\nOpenSSL: OpenSSL 1.1.0g 2 Nov 2017\r\nPlatform: Linux-4.4.0-112-generic-x86_64-with-Ubuntu-16.04-xenial\r\n\n", "before_files": [{"content": "import os\nimport importlib.util\nimport importlib.machinery\nimport time\nimport sys\nimport types\nimport typing\nimport traceback\n\nfrom mitmproxy import addonmanager\nfrom mitmproxy import exceptions\nfrom mitmproxy import flow\nfrom mitmproxy import command\nfrom mitmproxy import eventsequence\nfrom mitmproxy import ctx\nimport mitmproxy.types as mtypes\n\n\ndef load_script(path: str) -> types.ModuleType:\n fullname = \"__mitmproxy_script__.{}\".format(\n os.path.splitext(os.path.basename(path))[0]\n )\n # the fullname is not unique among scripts, so if there already is an existing script with said\n # fullname, remove it.\n sys.modules.pop(fullname, None)\n oldpath = sys.path\n sys.path.insert(0, os.path.dirname(path))\n try:\n loader = importlib.machinery.SourceFileLoader(fullname, path)\n spec = importlib.util.spec_from_loader(fullname, loader=loader)\n m = importlib.util.module_from_spec(spec)\n loader.exec_module(m)\n if not getattr(m, \"name\", None):\n m.name = path # type: ignore\n return m\n finally:\n sys.path[:] = oldpath\n\n\ndef script_error_handler(path, exc, msg=\"\", tb=False):\n \"\"\"\n Handles all the user's script errors with\n an optional traceback\n \"\"\"\n exception = type(exc).__name__\n if msg:\n exception = msg\n lineno = \"\"\n if hasattr(exc, \"lineno\"):\n lineno = str(exc.lineno)\n log_msg = \"in Script {}:{} {}\".format(path, lineno, exception)\n if tb:\n etype, value, tback = sys.exc_info()\n tback = addonmanager.cut_traceback(tback, \"invoke_addon\")\n log_msg = log_msg.join([\"\\n\"] + traceback.format_exception(etype, value, tback))\n ctx.log.error(log_msg)\n\n\nclass Script:\n \"\"\"\n An addon that manages a single script.\n \"\"\"\n ReloadInterval = 2\n\n def __init__(self, path):\n self.name = \"scriptmanager:\" + path\n self.path = path\n self.fullpath = os.path.expanduser(\n path.strip(\"'\\\" \")\n )\n self.ns = None\n\n self.last_load = 0\n self.last_mtime = 0\n if not os.path.isfile(self.fullpath):\n raise exceptions.OptionsError('No such script')\n\n @property\n def addons(self):\n return [self.ns] if self.ns else []\n\n def tick(self):\n if time.time() - self.last_load > self.ReloadInterval:\n try:\n mtime = os.stat(self.fullpath).st_mtime\n except FileNotFoundError:\n scripts = list(ctx.options.scripts)\n scripts.remove(self.path)\n ctx.options.update(scripts=scripts)\n return\n\n if mtime > self.last_mtime:\n ctx.log.info(\"Loading script: %s\" % self.path)\n if self.ns:\n ctx.master.addons.remove(self.ns)\n self.ns = None\n with addonmanager.safecall():\n ns = load_script(self.fullpath)\n ctx.master.addons.register(ns)\n self.ns = ns\n if self.ns:\n # We're already running, so we have to explicitly register and\n # configure the addon\n ctx.master.addons.invoke_addon(self.ns, \"running\")\n ctx.master.addons.invoke_addon(\n self.ns,\n \"configure\",\n ctx.options.keys()\n )\n self.last_load = time.time()\n self.last_mtime = mtime\n\n\nclass ScriptLoader:\n \"\"\"\n An addon that manages loading scripts from options.\n \"\"\"\n def __init__(self):\n self.is_running = False\n self.addons = []\n\n def load(self, loader):\n loader.add_option(\n \"scripts\", 
typing.Sequence[str], [],\n \"\"\"\n Execute a script.\n \"\"\"\n )\n\n def running(self):\n self.is_running = True\n\n @command.command(\"script.run\")\n def script_run(self, flows: typing.Sequence[flow.Flow], path: mtypes.Path) -> None:\n \"\"\"\n Run a script on the specified flows. The script is loaded with\n default options, and all lifecycle events for each flow are\n simulated.\n \"\"\"\n try:\n s = Script(path)\n l = addonmanager.Loader(ctx.master)\n ctx.master.addons.invoke_addon(s, \"load\", l)\n ctx.master.addons.invoke_addon(s, \"configure\", ctx.options.keys())\n # Script is loaded on the first tick\n ctx.master.addons.invoke_addon(s, \"tick\")\n for f in flows:\n for evt, arg in eventsequence.iterate(f):\n ctx.master.addons.invoke_addon(s, evt, arg)\n except exceptions.OptionsError as e:\n script_error_handler(path, e, msg=str(e))\n\n def configure(self, updated):\n if \"scripts\" in updated:\n for s in ctx.options.scripts:\n if ctx.options.scripts.count(s) > 1:\n raise exceptions.OptionsError(\"Duplicate script\")\n\n for a in self.addons[:]:\n if a.path not in ctx.options.scripts:\n ctx.log.info(\"Un-loading script: %s\" % a.name)\n ctx.master.addons.remove(a)\n self.addons.remove(a)\n\n # The machinations below are to ensure that:\n # - Scripts remain in the same order\n # - Scripts are not initialized un-necessarily. If only a\n # script's order in the script list has changed, it is just\n # moved.\n\n current = {}\n for a in self.addons:\n current[a.path] = a\n\n ordered = []\n newscripts = []\n for s in ctx.options.scripts:\n if s in current:\n ordered.append(current[s])\n else:\n sc = Script(s)\n ordered.append(sc)\n newscripts.append(sc)\n\n self.addons = ordered\n\n for s in newscripts:\n ctx.master.addons.register(s)\n if self.is_running:\n # If we're already running, we configure and tell the addon\n # we're up and running.\n ctx.master.addons.invoke_addon(s, \"running\")\n", "path": "mitmproxy/addons/script.py"}], "after_files": [{"content": "import os\nimport importlib.util\nimport importlib.machinery\nimport time\nimport sys\nimport types\nimport typing\nimport traceback\n\nfrom mitmproxy import addonmanager\nfrom mitmproxy import exceptions\nfrom mitmproxy import flow\nfrom mitmproxy import command\nfrom mitmproxy import eventsequence\nfrom mitmproxy import ctx\nimport mitmproxy.types as mtypes\n\n\ndef load_script(path: str) -> types.ModuleType:\n fullname = \"__mitmproxy_script__.{}\".format(\n os.path.splitext(os.path.basename(path))[0]\n )\n # the fullname is not unique among scripts, so if there already is an existing script with said\n # fullname, remove it.\n sys.modules.pop(fullname, None)\n oldpath = sys.path\n sys.path.insert(0, os.path.dirname(path))\n m = None\n try:\n loader = importlib.machinery.SourceFileLoader(fullname, path)\n spec = importlib.util.spec_from_loader(fullname, loader=loader)\n m = importlib.util.module_from_spec(spec)\n loader.exec_module(m)\n if not getattr(m, \"name\", None):\n m.name = path # type: ignore\n except Exception as e:\n script_error_handler(path, e, msg=str(e))\n finally:\n sys.path[:] = oldpath\n return m\n\n\ndef script_error_handler(path, exc, msg=\"\", tb=False):\n \"\"\"\n Handles all the user's script errors with\n an optional traceback\n \"\"\"\n exception = type(exc).__name__\n if msg:\n exception = msg\n lineno = \"\"\n if hasattr(exc, \"lineno\"):\n lineno = str(exc.lineno)\n log_msg = \"in script {}:{} {}\".format(path, lineno, exception)\n if tb:\n etype, value, tback = sys.exc_info()\n tback = 
addonmanager.cut_traceback(tback, \"invoke_addon\")\n log_msg = log_msg + \"\\n\" + \"\".join(traceback.format_exception(etype, value, tback))\n ctx.log.error(log_msg)\n\n\nclass Script:\n \"\"\"\n An addon that manages a single script.\n \"\"\"\n ReloadInterval = 2\n\n def __init__(self, path):\n self.name = \"scriptmanager:\" + path\n self.path = path\n self.fullpath = os.path.expanduser(\n path.strip(\"'\\\" \")\n )\n self.ns = None\n\n self.last_load = 0\n self.last_mtime = 0\n if not os.path.isfile(self.fullpath):\n raise exceptions.OptionsError('No such script')\n\n @property\n def addons(self):\n return [self.ns] if self.ns else []\n\n def tick(self):\n if time.time() - self.last_load > self.ReloadInterval:\n try:\n mtime = os.stat(self.fullpath).st_mtime\n except FileNotFoundError:\n scripts = list(ctx.options.scripts)\n scripts.remove(self.path)\n ctx.options.update(scripts=scripts)\n return\n\n if mtime > self.last_mtime:\n ctx.log.info(\"Loading script: %s\" % self.path)\n if self.ns:\n ctx.master.addons.remove(self.ns)\n self.ns = None\n with addonmanager.safecall():\n ns = load_script(self.fullpath)\n ctx.master.addons.register(ns)\n self.ns = ns\n if self.ns:\n # We're already running, so we have to explicitly register and\n # configure the addon\n ctx.master.addons.invoke_addon(self.ns, \"running\")\n ctx.master.addons.invoke_addon(\n self.ns,\n \"configure\",\n ctx.options.keys()\n )\n self.last_load = time.time()\n self.last_mtime = mtime\n\n\nclass ScriptLoader:\n \"\"\"\n An addon that manages loading scripts from options.\n \"\"\"\n def __init__(self):\n self.is_running = False\n self.addons = []\n\n def load(self, loader):\n loader.add_option(\n \"scripts\", typing.Sequence[str], [],\n \"\"\"\n Execute a script.\n \"\"\"\n )\n\n def running(self):\n self.is_running = True\n\n @command.command(\"script.run\")\n def script_run(self, flows: typing.Sequence[flow.Flow], path: mtypes.Path) -> None:\n \"\"\"\n Run a script on the specified flows. The script is loaded with\n default options, and all lifecycle events for each flow are\n simulated.\n \"\"\"\n try:\n s = Script(path)\n l = addonmanager.Loader(ctx.master)\n ctx.master.addons.invoke_addon(s, \"load\", l)\n ctx.master.addons.invoke_addon(s, \"configure\", ctx.options.keys())\n # Script is loaded on the first tick\n ctx.master.addons.invoke_addon(s, \"tick\")\n for f in flows:\n for evt, arg in eventsequence.iterate(f):\n ctx.master.addons.invoke_addon(s, evt, arg)\n except exceptions.OptionsError as e:\n script_error_handler(path, e, msg=str(e))\n\n def configure(self, updated):\n if \"scripts\" in updated:\n for s in ctx.options.scripts:\n if ctx.options.scripts.count(s) > 1:\n raise exceptions.OptionsError(\"Duplicate script\")\n\n for a in self.addons[:]:\n if a.path not in ctx.options.scripts:\n ctx.log.info(\"Un-loading script: %s\" % a.name)\n ctx.master.addons.remove(a)\n self.addons.remove(a)\n\n # The machinations below are to ensure that:\n # - Scripts remain in the same order\n # - Scripts are not initialized un-necessarily. 
If only a\n # script's order in the script list has changed, it is just\n # moved.\n\n current = {}\n for a in self.addons:\n current[a.path] = a\n\n ordered = []\n newscripts = []\n for s in ctx.options.scripts:\n if s in current:\n ordered.append(current[s])\n else:\n sc = Script(s)\n ordered.append(sc)\n newscripts.append(sc)\n\n self.addons = ordered\n\n for s in newscripts:\n ctx.master.addons.register(s)\n if self.is_running:\n # If we're already running, we configure and tell the addon\n # we're up and running.\n ctx.master.addons.invoke_addon(s, \"running\")\n", "path": "mitmproxy/addons/script.py"}]} | 2,450 | 377 |
gh_patches_debug_6407 | rasdani/github-patches | git_diff | pytorch__text-149 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Multi30k Test Set URL Changed
Looks like the organizers moved the test set to:
http://www.quest.dcs.shef.ac.uk/wmt17_files_mmt/mmt_task1_test2016.tar.gz
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchtext/datasets/translation.py`
Content:
```
1 import os
2 import xml.etree.ElementTree as ET
3 import glob
4 import io
5
6 from .. import data
7
8
9 class TranslationDataset(data.Dataset):
10 """Defines a dataset for machine translation."""
11
12 @staticmethod
13 def sort_key(ex):
14 return data.interleave_keys(len(ex.src), len(ex.trg))
15
16 def __init__(self, path, exts, fields, **kwargs):
17 """Create a TranslationDataset given paths and fields.
18
19 Arguments:
20 path: Common prefix of paths to the data files for both languages.
21 exts: A tuple containing the extension to path for each language.
22 fields: A tuple containing the fields that will be used for data
23 in each language.
24 Remaining keyword arguments: Passed to the constructor of
25 data.Dataset.
26 """
27 if not isinstance(fields[0], (tuple, list)):
28 fields = [('src', fields[0]), ('trg', fields[1])]
29
30 src_path, trg_path = tuple(os.path.expanduser(path + x) for x in exts)
31
32 examples = []
33 with open(src_path) as src_file, open(trg_path) as trg_file:
34 for src_line, trg_line in zip(src_file, trg_file):
35 src_line, trg_line = src_line.strip(), trg_line.strip()
36 if src_line != '' and trg_line != '':
37 examples.append(data.Example.fromlist(
38 [src_line, trg_line], fields))
39
40 super(TranslationDataset, self).__init__(examples, fields, **kwargs)
41
42 @classmethod
43 def splits(cls, exts, fields, root='.data',
44 train='train', validation='val', test='test', **kwargs):
45 """Create dataset objects for splits of a TranslationDataset.
46
47 Arguments:
48
49 root: Root dataset storage directory. Default is '.data'.
50 exts: A tuple containing the extension to path for each language.
51 fields: A tuple containing the fields that will be used for data
52 in each language.
53 train: The prefix of the train data. Default: 'train'.
54 validation: The prefix of the validation data. Default: 'val'.
55 test: The prefix of the test data. Default: 'test'.
56 Remaining keyword arguments: Passed to the splits method of
57 Dataset.
58 """
59 path = cls.download(root)
60
61 train_data = None if train is None else cls(
62 os.path.join(path, train), exts, fields, **kwargs)
63 val_data = None if validation is None else cls(
64 os.path.join(path, validation), exts, fields, **kwargs)
65 test_data = None if test is None else cls(
66 os.path.join(path, test), exts, fields, **kwargs)
67 return tuple(d for d in (train_data, val_data, test_data)
68 if d is not None)
69
70
71 class Multi30k(TranslationDataset):
72 """The small-dataset WMT 2016 multimodal task, also known as Flickr30k"""
73
74 urls = ['http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz',
75 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz',
76 'https://staff.fnwi.uva.nl/d.elliott/wmt16/mmt16_task1_test.tgz']
77 name = 'multi30k'
78 dirname = ''
79
80 @classmethod
81 def splits(cls, exts, fields, root='.data',
82 train='train', validation='val', test='test', **kwargs):
83 """Create dataset objects for splits of the Multi30k dataset.
84
85 Arguments:
86
87 root: Root dataset storage directory. Default is '.data'.
88 exts: A tuple containing the extension to path for each language.
89 fields: A tuple containing the fields that will be used for data
90 in each language.
91 train: The prefix of the train data. Default: 'train'.
92 validation: The prefix of the validation data. Default: 'val'.
93 test: The prefix of the test data. Default: 'test'.
94 Remaining keyword arguments: Passed to the splits method of
95 Dataset.
96 """
97 return super(Multi30k, cls).splits(
98 exts, fields, root, train, validation, test, **kwargs)
99
100
101 class IWSLT(TranslationDataset):
102 """The IWSLT 2016 TED talk translation task"""
103
104 base_url = 'https://wit3.fbk.eu/archive/2016-01//texts/{}/{}/{}.tgz'
105 name = 'iwslt'
106 base_dirname = '{}-{}'
107
108 @classmethod
109 def splits(cls, exts, fields, root='.data',
110 train='train', validation='IWSLT16.TED.tst2013',
111 test='IWSLT16.TED.tst2014', **kwargs):
112 """Create dataset objects for splits of the IWSLT dataset.
113
114 Arguments:
115
116 root: Root dataset storage directory. Default is '.data'.
117 exts: A tuple containing the extension to path for each language.
118 fields: A tuple containing the fields that will be used for data
119 in each language.
120 train: The prefix of the train data. Default: 'train'.
121 validation: The prefix of the validation data. Default: 'val'.
122 test: The prefix of the test data. Default: 'test'.
123 Remaining keyword arguments: Passed to the splits method of
124 Dataset.
125 """
126 cls.dirname = cls.base_dirname.format(exts[0][1:], exts[1][1:])
127 cls.urls = [cls.base_url.format(exts[0][1:], exts[1][1:], cls.dirname)]
128 check = os.path.join(root, cls.name, cls.dirname)
129 path = cls.download(root, check=check)
130
131 train = '.'.join([train, cls.dirname])
132 validation = '.'.join([validation, cls.dirname])
133 if test is not None:
134 test = '.'.join([test, cls.dirname])
135
136 if not os.path.exists(os.path.join(path, train) + exts[0]):
137 cls.clean(path)
138
139 train_data = None if train is None else cls(
140 os.path.join(path, train), exts, fields, **kwargs)
141 val_data = None if validation is None else cls(
142 os.path.join(path, validation), exts, fields, **kwargs)
143 test_data = None if test is None else cls(
144 os.path.join(path, test), exts, fields, **kwargs)
145 return tuple(d for d in (train_data, val_data, test_data)
146 if d is not None)
147
148 @staticmethod
149 def clean(path):
150 for f_xml in glob.iglob(os.path.join(path, '*.xml')):
151 print(f_xml)
152 f_txt = os.path.splitext(f_xml)[0]
153 with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt:
154 root = ET.parse(f_xml).getroot()[0]
155 for doc in root.findall('doc'):
156 for e in doc.findall('seg'):
157 fd_txt.write(e.text.strip() + '\n')
158
159 xml_tags = ['<url', '<keywords', '<talkid', '<description',
160 '<reviewer', '<translator', '<title', '<speaker']
161 for f_orig in glob.iglob(os.path.join(path, 'train.tags*')):
162 print(f_orig)
163 f_txt = f_orig.replace('.tags', '')
164 with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt, \
165 io.open(f_orig, mode='r', encoding='utf-8') as fd_orig:
166 for l in fd_orig:
167 if not any(tag in l for tag in xml_tags):
168 fd_txt.write(l.strip() + '\n')
169
170
171 class WMT14(TranslationDataset):
172 """The WMT 2014 English-German dataset, as preprocessed by Google Brain.
173
174 Though this download contains test sets from 2015 and 2016, the train set
175 differs slightly from WMT 2015 and 2016 and significantly from WMT 2017."""
176
177 urls = [('https://drive.google.com/uc?export=download&'
178 'id=0B_bZck-ksdkpM25jRUN2X2UxMm8', 'wmt16_en_de.tar.gz')]
179 name = 'wmt14'
180 dirname = ''
181
182 @classmethod
183 def splits(cls, exts, fields, root='.data',
184 train='train.tok.clean.bpe.32000',
185 validation='newstest2013.tok.bpe.32000',
186 test='newstest2014.tok.bpe.32000', **kwargs):
187 """Create dataset objects for splits of the WMT 2014 dataset.
188
189 Arguments:
190
191 root: Root dataset storage directory. Default is '.data'.
192 exts: A tuple containing the extensions for each language. Must be
193 either ('.en', '.de') or the reverse.
194 fields: A tuple containing the fields that will be used for data
195 in each language.
196 train: The prefix of the train data. Default:
197 'train.tok.clean.bpe.32000'.
198 validation: The prefix of the validation data. Default:
199 'newstest2013.tok.bpe.32000'.
200 test: The prefix of the test data. Default:
201 'newstest2014.tok.bpe.32000'.
202 Remaining keyword arguments: Passed to the splits method of
203 Dataset.
204 """
205 return super(WMT14, cls).splits(
206 exts, fields, root, train, validation, test, **kwargs)
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchtext/datasets/translation.py b/torchtext/datasets/translation.py
--- a/torchtext/datasets/translation.py
+++ b/torchtext/datasets/translation.py
@@ -73,7 +73,8 @@
urls = ['http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz',
'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz',
- 'https://staff.fnwi.uva.nl/d.elliott/wmt16/mmt16_task1_test.tgz']
+ 'http://www.quest.dcs.shef.ac.uk/'
+ 'wmt17_files_mmt/mmt_task1_test2016.tar.gz']
name = 'multi30k'
dirname = ''
| {"golden_diff": "diff --git a/torchtext/datasets/translation.py b/torchtext/datasets/translation.py\n--- a/torchtext/datasets/translation.py\n+++ b/torchtext/datasets/translation.py\n@@ -73,7 +73,8 @@\n \n urls = ['http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz',\n 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz',\n- 'https://staff.fnwi.uva.nl/d.elliott/wmt16/mmt16_task1_test.tgz']\n+ 'http://www.quest.dcs.shef.ac.uk/'\n+ 'wmt17_files_mmt/mmt_task1_test2016.tar.gz']\n name = 'multi30k'\n dirname = ''\n", "issue": "Multi30k Test Set URL Changed\nLooks like the organizers moved the test set to : \r\n\r\nhttp://www.quest.dcs.shef.ac.uk/wmt17_files_mmt/mmt_task1_test2016.tar.gz\n", "before_files": [{"content": "import os\nimport xml.etree.ElementTree as ET\nimport glob\nimport io\n\nfrom .. import data\n\n\nclass TranslationDataset(data.Dataset):\n \"\"\"Defines a dataset for machine translation.\"\"\"\n\n @staticmethod\n def sort_key(ex):\n return data.interleave_keys(len(ex.src), len(ex.trg))\n\n def __init__(self, path, exts, fields, **kwargs):\n \"\"\"Create a TranslationDataset given paths and fields.\n\n Arguments:\n path: Common prefix of paths to the data files for both languages.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n Remaining keyword arguments: Passed to the constructor of\n data.Dataset.\n \"\"\"\n if not isinstance(fields[0], (tuple, list)):\n fields = [('src', fields[0]), ('trg', fields[1])]\n\n src_path, trg_path = tuple(os.path.expanduser(path + x) for x in exts)\n\n examples = []\n with open(src_path) as src_file, open(trg_path) as trg_file:\n for src_line, trg_line in zip(src_file, trg_file):\n src_line, trg_line = src_line.strip(), trg_line.strip()\n if src_line != '' and trg_line != '':\n examples.append(data.Example.fromlist(\n [src_line, trg_line], fields))\n\n super(TranslationDataset, self).__init__(examples, fields, **kwargs)\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train', validation='val', test='test', **kwargs):\n \"\"\"Create dataset objects for splits of a TranslationDataset.\n\n Arguments:\n\n root: Root dataset storage directory. Default is '.data'.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default: 'train'.\n validation: The prefix of the validation data. Default: 'val'.\n test: The prefix of the test data. 
Default: 'test'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n path = cls.download(root)\n\n train_data = None if train is None else cls(\n os.path.join(path, train), exts, fields, **kwargs)\n val_data = None if validation is None else cls(\n os.path.join(path, validation), exts, fields, **kwargs)\n test_data = None if test is None else cls(\n os.path.join(path, test), exts, fields, **kwargs)\n return tuple(d for d in (train_data, val_data, test_data)\n if d is not None)\n\n\nclass Multi30k(TranslationDataset):\n \"\"\"The small-dataset WMT 2016 multimodal task, also known as Flickr30k\"\"\"\n\n urls = ['http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz',\n 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz',\n 'https://staff.fnwi.uva.nl/d.elliott/wmt16/mmt16_task1_test.tgz']\n name = 'multi30k'\n dirname = ''\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train', validation='val', test='test', **kwargs):\n \"\"\"Create dataset objects for splits of the Multi30k dataset.\n\n Arguments:\n\n root: Root dataset storage directory. Default is '.data'.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default: 'train'.\n validation: The prefix of the validation data. Default: 'val'.\n test: The prefix of the test data. Default: 'test'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n return super(Multi30k, cls).splits(\n exts, fields, root, train, validation, test, **kwargs)\n\n\nclass IWSLT(TranslationDataset):\n \"\"\"The IWSLT 2016 TED talk translation task\"\"\"\n\n base_url = 'https://wit3.fbk.eu/archive/2016-01//texts/{}/{}/{}.tgz'\n name = 'iwslt'\n base_dirname = '{}-{}'\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train', validation='IWSLT16.TED.tst2013',\n test='IWSLT16.TED.tst2014', **kwargs):\n \"\"\"Create dataset objects for splits of the IWSLT dataset.\n\n Arguments:\n\n root: Root dataset storage directory. Default is '.data'.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default: 'train'.\n validation: The prefix of the validation data. Default: 'val'.\n test: The prefix of the test data. 
Default: 'test'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n cls.dirname = cls.base_dirname.format(exts[0][1:], exts[1][1:])\n cls.urls = [cls.base_url.format(exts[0][1:], exts[1][1:], cls.dirname)]\n check = os.path.join(root, cls.name, cls.dirname)\n path = cls.download(root, check=check)\n\n train = '.'.join([train, cls.dirname])\n validation = '.'.join([validation, cls.dirname])\n if test is not None:\n test = '.'.join([test, cls.dirname])\n\n if not os.path.exists(os.path.join(path, train) + exts[0]):\n cls.clean(path)\n\n train_data = None if train is None else cls(\n os.path.join(path, train), exts, fields, **kwargs)\n val_data = None if validation is None else cls(\n os.path.join(path, validation), exts, fields, **kwargs)\n test_data = None if test is None else cls(\n os.path.join(path, test), exts, fields, **kwargs)\n return tuple(d for d in (train_data, val_data, test_data)\n if d is not None)\n\n @staticmethod\n def clean(path):\n for f_xml in glob.iglob(os.path.join(path, '*.xml')):\n print(f_xml)\n f_txt = os.path.splitext(f_xml)[0]\n with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt:\n root = ET.parse(f_xml).getroot()[0]\n for doc in root.findall('doc'):\n for e in doc.findall('seg'):\n fd_txt.write(e.text.strip() + '\\n')\n\n xml_tags = ['<url', '<keywords', '<talkid', '<description',\n '<reviewer', '<translator', '<title', '<speaker']\n for f_orig in glob.iglob(os.path.join(path, 'train.tags*')):\n print(f_orig)\n f_txt = f_orig.replace('.tags', '')\n with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt, \\\n io.open(f_orig, mode='r', encoding='utf-8') as fd_orig:\n for l in fd_orig:\n if not any(tag in l for tag in xml_tags):\n fd_txt.write(l.strip() + '\\n')\n\n\nclass WMT14(TranslationDataset):\n \"\"\"The WMT 2014 English-German dataset, as preprocessed by Google Brain.\n\n Though this download contains test sets from 2015 and 2016, the train set\n differs slightly from WMT 2015 and 2016 and significantly from WMT 2017.\"\"\"\n\n urls = [('https://drive.google.com/uc?export=download&'\n 'id=0B_bZck-ksdkpM25jRUN2X2UxMm8', 'wmt16_en_de.tar.gz')]\n name = 'wmt14'\n dirname = ''\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train.tok.clean.bpe.32000',\n validation='newstest2013.tok.bpe.32000',\n test='newstest2014.tok.bpe.32000', **kwargs):\n \"\"\"Create dataset objects for splits of the WMT 2014 dataset.\n\n Arguments:\n\n root: Root dataset storage directory. Default is '.data'.\n exts: A tuple containing the extensions for each language. Must be\n either ('.en', '.de') or the reverse.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default:\n 'train.tok.clean.bpe.32000'.\n validation: The prefix of the validation data. Default:\n 'newstest2013.tok.bpe.32000'.\n test: The prefix of the test data. Default:\n 'newstest2014.tok.bpe.32000'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n return super(WMT14, cls).splits(\n exts, fields, root, train, validation, test, **kwargs)\n", "path": "torchtext/datasets/translation.py"}], "after_files": [{"content": "import os\nimport xml.etree.ElementTree as ET\nimport glob\nimport io\n\nfrom .. 
import data\n\n\nclass TranslationDataset(data.Dataset):\n \"\"\"Defines a dataset for machine translation.\"\"\"\n\n @staticmethod\n def sort_key(ex):\n return data.interleave_keys(len(ex.src), len(ex.trg))\n\n def __init__(self, path, exts, fields, **kwargs):\n \"\"\"Create a TranslationDataset given paths and fields.\n\n Arguments:\n path: Common prefix of paths to the data files for both languages.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n Remaining keyword arguments: Passed to the constructor of\n data.Dataset.\n \"\"\"\n if not isinstance(fields[0], (tuple, list)):\n fields = [('src', fields[0]), ('trg', fields[1])]\n\n src_path, trg_path = tuple(os.path.expanduser(path + x) for x in exts)\n\n examples = []\n with open(src_path) as src_file, open(trg_path) as trg_file:\n for src_line, trg_line in zip(src_file, trg_file):\n src_line, trg_line = src_line.strip(), trg_line.strip()\n if src_line != '' and trg_line != '':\n examples.append(data.Example.fromlist(\n [src_line, trg_line], fields))\n\n super(TranslationDataset, self).__init__(examples, fields, **kwargs)\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train', validation='val', test='test', **kwargs):\n \"\"\"Create dataset objects for splits of a TranslationDataset.\n\n Arguments:\n\n root: Root dataset storage directory. Default is '.data'.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default: 'train'.\n validation: The prefix of the validation data. Default: 'val'.\n test: The prefix of the test data. Default: 'test'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n path = cls.download(root)\n\n train_data = None if train is None else cls(\n os.path.join(path, train), exts, fields, **kwargs)\n val_data = None if validation is None else cls(\n os.path.join(path, validation), exts, fields, **kwargs)\n test_data = None if test is None else cls(\n os.path.join(path, test), exts, fields, **kwargs)\n return tuple(d for d in (train_data, val_data, test_data)\n if d is not None)\n\n\nclass Multi30k(TranslationDataset):\n \"\"\"The small-dataset WMT 2016 multimodal task, also known as Flickr30k\"\"\"\n\n urls = ['http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz',\n 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz',\n 'http://www.quest.dcs.shef.ac.uk/'\n 'wmt17_files_mmt/mmt_task1_test2016.tar.gz']\n name = 'multi30k'\n dirname = ''\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train', validation='val', test='test', **kwargs):\n \"\"\"Create dataset objects for splits of the Multi30k dataset.\n\n Arguments:\n\n root: Root dataset storage directory. Default is '.data'.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default: 'train'.\n validation: The prefix of the validation data. Default: 'val'.\n test: The prefix of the test data. 
Default: 'test'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n return super(Multi30k, cls).splits(\n exts, fields, root, train, validation, test, **kwargs)\n\n\nclass IWSLT(TranslationDataset):\n \"\"\"The IWSLT 2016 TED talk translation task\"\"\"\n\n base_url = 'https://wit3.fbk.eu/archive/2016-01//texts/{}/{}/{}.tgz'\n name = 'iwslt'\n base_dirname = '{}-{}'\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train', validation='IWSLT16.TED.tst2013',\n test='IWSLT16.TED.tst2014', **kwargs):\n \"\"\"Create dataset objects for splits of the IWSLT dataset.\n\n Arguments:\n\n root: Root dataset storage directory. Default is '.data'.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default: 'train'.\n validation: The prefix of the validation data. Default: 'val'.\n test: The prefix of the test data. Default: 'test'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n cls.dirname = cls.base_dirname.format(exts[0][1:], exts[1][1:])\n cls.urls = [cls.base_url.format(exts[0][1:], exts[1][1:], cls.dirname)]\n check = os.path.join(root, cls.name, cls.dirname)\n path = cls.download(root, check=check)\n\n train = '.'.join([train, cls.dirname])\n validation = '.'.join([validation, cls.dirname])\n if test is not None:\n test = '.'.join([test, cls.dirname])\n\n if not os.path.exists(os.path.join(path, train) + exts[0]):\n cls.clean(path)\n\n train_data = None if train is None else cls(\n os.path.join(path, train), exts, fields, **kwargs)\n val_data = None if validation is None else cls(\n os.path.join(path, validation), exts, fields, **kwargs)\n test_data = None if test is None else cls(\n os.path.join(path, test), exts, fields, **kwargs)\n return tuple(d for d in (train_data, val_data, test_data)\n if d is not None)\n\n @staticmethod\n def clean(path):\n for f_xml in glob.iglob(os.path.join(path, '*.xml')):\n print(f_xml)\n f_txt = os.path.splitext(f_xml)[0]\n with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt:\n root = ET.parse(f_xml).getroot()[0]\n for doc in root.findall('doc'):\n for e in doc.findall('seg'):\n fd_txt.write(e.text.strip() + '\\n')\n\n xml_tags = ['<url', '<keywords', '<talkid', '<description',\n '<reviewer', '<translator', '<title', '<speaker']\n for f_orig in glob.iglob(os.path.join(path, 'train.tags*')):\n print(f_orig)\n f_txt = f_orig.replace('.tags', '')\n with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt, \\\n io.open(f_orig, mode='r', encoding='utf-8') as fd_orig:\n for l in fd_orig:\n if not any(tag in l for tag in xml_tags):\n fd_txt.write(l.strip() + '\\n')\n\n\nclass WMT14(TranslationDataset):\n \"\"\"The WMT 2014 English-German dataset, as preprocessed by Google Brain.\n\n Though this download contains test sets from 2015 and 2016, the train set\n differs slightly from WMT 2015 and 2016 and significantly from WMT 2017.\"\"\"\n\n urls = [('https://drive.google.com/uc?export=download&'\n 'id=0B_bZck-ksdkpM25jRUN2X2UxMm8', 'wmt16_en_de.tar.gz')]\n name = 'wmt14'\n dirname = ''\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train.tok.clean.bpe.32000',\n validation='newstest2013.tok.bpe.32000',\n test='newstest2014.tok.bpe.32000', **kwargs):\n \"\"\"Create dataset objects for splits of the WMT 2014 dataset.\n\n Arguments:\n\n root: Root dataset storage directory. 
Default is '.data'.\n exts: A tuple containing the extensions for each language. Must be\n either ('.en', '.de') or the reverse.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default:\n 'train.tok.clean.bpe.32000'.\n validation: The prefix of the validation data. Default:\n 'newstest2013.tok.bpe.32000'.\n test: The prefix of the test data. Default:\n 'newstest2014.tok.bpe.32000'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n return super(WMT14, cls).splits(\n exts, fields, root, train, validation, test, **kwargs)\n", "path": "torchtext/datasets/translation.py"}]} | 2,970 | 187 |
gh_patches_debug_13996 | rasdani/github-patches | git_diff | rasterio__rasterio-196 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rasterize - incorrect use of output argument
Around line 299 in features.py
if out is not None:
if np.dtype(output.dtype).name not in valid_dtypes:
^^^^^^^^^^^^
I think that output.dtype needs to be changed to out.dtype....
Thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/features.py`
Content:
```
1 """Functions for working with features in a raster dataset."""
2
3 import json
4 import logging
5 import time
6 import warnings
7
8 import numpy as np
9
10 import rasterio
11 from rasterio._features import _shapes, _sieve, _rasterize
12 from rasterio.transform import IDENTITY, guard_transform
13 from rasterio.dtypes import get_minimum_int_dtype
14
15
16 log = logging.getLogger('rasterio')
17
18
19 class NullHandler(logging.Handler):
20 def emit(self, record):
21 pass
22 log.addHandler(NullHandler())
23
24
25 def shapes(image, mask=None, connectivity=4, transform=IDENTITY):
26 """
27 Return a generator of (polygon, value) for each each set of adjacent pixels
28 of the same value.
29
30 Parameters
31 ----------
32 image : numpy ndarray or rasterio Band object
33 (RasterReader, bidx namedtuple).
34 Data type must be one of rasterio.int16, rasterio.int32,
35 rasterio.uint8, rasterio.uint16, or rasterio.float32.
36 mask : numpy ndarray or rasterio Band object, optional
37 Values of False will be excluded from feature generation
38 Must be of type rasterio.bool_
39 connectivity : int, optional
40 Use 4 or 8 pixel connectivity for grouping pixels into features
41 transform : Affine transformation, optional
42 If not provided, feature coordinates will be generated based on pixel
43 coordinates
44
45 Returns
46 -------
47 Generator of (polygon, value)
48 Yields a pair of (polygon, value) for each feature found in the image.
49 Polygons are GeoJSON-like dicts and the values are the associated value
50 from the image, in the data type of the image.
51 Note: due to floating point precision issues, values returned from a
52 floating point image may not exactly match the original values.
53
54 Notes
55 -----
56 The amount of memory used by this algorithm is proportional to the number
57 and complexity of polygons produced. This algorithm is most appropriate
58 for simple thematic data. Data with high pixel-to-pixel variability, such
59 as imagery, may produce one polygon per pixel and consume large amounts of
60 memory.
61
62 """
63
64 valid_dtypes = ('int16', 'int32', 'uint8', 'uint16', 'float32')
65
66 if np.dtype(image.dtype).name not in valid_dtypes:
67 raise ValueError('image dtype must be one of: %s'
68 % (', '.join(valid_dtypes)))
69
70 if mask is not None and np.dtype(mask.dtype) != np.dtype(rasterio.bool_):
71 raise ValueError("Mask must be dtype rasterio.bool_")
72
73 if connectivity not in (4, 8):
74 raise ValueError("Connectivity Option must be 4 or 8")
75
76 transform = guard_transform(transform)
77
78 with rasterio.drivers():
79 for s, v in _shapes(image, mask, connectivity, transform.to_gdal()):
80 yield s, v
81
82
83 def sieve(image, size, out=None, output=None, mask=None, connectivity=4):
84 """
85 Replaces small polygons in `image` with the value of their largest
86 neighbor. Polygons are found for each set of neighboring pixels of the
87 same value.
88
89 Parameters
90 ----------
91 image : numpy ndarray or rasterio Band object
92 (RasterReader, bidx namedtuple)
93 Must be of type rasterio.int16, rasterio.int32, rasterio.uint8,
94 rasterio.uint16, or rasterio.float32
95 size : int
96 minimum polygon size (number of pixels) to retain.
97 out : numpy ndarray, optional
98 Array of same shape and data type as `image` in which to store results.
99 output : older alias for `out`, will be removed before 1.0.
100 output : numpy ndarray, optional
101 mask : numpy ndarray or rasterio Band object, optional
102 Values of False will be excluded from feature generation
103 Must be of type rasterio.bool_
104 connectivity : int, optional
105 Use 4 or 8 pixel connectivity for grouping pixels into features
106
107 Returns
108 -------
109 out : numpy ndarray
110 Result
111
112 Notes
113 -----
114 GDAL only supports values that can be cast to 32-bit integers for this
115 operation.
116
117 The amount of memory used by this algorithm is proportional to the number
118 and complexity of polygons found in the image. This algorithm is most
119 appropriate for simple thematic data. Data with high pixel-to-pixel
120 variability, such as imagery, may produce one polygon per pixel and consume
121 large amounts of memory.
122
123 """
124
125 valid_dtypes = ('int16', 'int32', 'uint8', 'uint16')
126
127 if np.dtype(image.dtype).name not in valid_dtypes:
128 valid_types_str = ', '.join(('rasterio.{0}'.format(t) for t
129 in valid_dtypes))
130 raise ValueError('image dtype must be one of: %' % valid_types_str)
131
132 if size <= 0:
133 raise ValueError('size must be greater than 0')
134 elif type(size) == float:
135 raise ValueError('size must be an integer number of pixels')
136 elif size > (image.shape[0] * image.shape[1]):
137 raise ValueError('size must be smaller than size of image')
138
139 if connectivity not in (4, 8):
140 raise ValueError('connectivity must be 4 or 8')
141
142 if mask is not None:
143 if np.dtype(mask.dtype) != np.dtype(rasterio.bool_):
144 raise ValueError('Mask must be dtype rasterio.bool_')
145 elif mask.shape != image.shape:
146 raise ValueError('mask shape must be same as image shape')
147
148 # Start moving users over to 'out'.
149 if output is not None:
150 warnings.warn(
151 "The 'output' keyword arg has been superceded by 'out' "
152 "and will be removed before Rasterio 1.0.",
153 FutureWarning,
154 stacklevel=2)
155
156 out = out if out is not None else output
157 if out is None:
158 out = np.zeros_like(image)
159 else:
160 if np.dtype(image.dtype).name != np.dtype(out.dtype).name:
161 raise ValueError('output must match dtype of image')
162 elif out.shape != image.shape:
163 raise ValueError('mask shape must be same as image shape')
164
165 with rasterio.drivers():
166 _sieve(image, size, out, mask, connectivity)
167 return out
168
169
170 def rasterize(
171 shapes,
172 out_shape=None,
173 fill=0,
174 out=None,
175 output=None,
176 transform=IDENTITY,
177 all_touched=False,
178 default_value=1,
179 dtype=None):
180 """
181 Returns an image array with input geometries burned in.
182
183 Parameters
184 ----------
185 shapes : iterable of (geometry, value) pairs or iterable over geometries
186 `geometry` can either be an object that implements the geo interface or
187 GeoJSON-like object.
188 out_shape : tuple or list
189 Shape of output numpy ndarray
190 fill : int or float, optional
191 Used as fill value for all areas not covered by input geometries
192 out : numpy ndarray, optional
193 Array of same shape and data type as `image` in which to store results.
194 output : older alias for `out`, will be removed before 1.0.
195 transform : Affine transformation object, optional
196 transformation applied to shape geometries into pixel coordinates
197 all_touched : boolean, optional
198 If True, all pixels touched by geometries will be burned in.
199 If false, only pixels whose center is within the polygon or that are
200 selected by brezenhams line algorithm will be burned in.
201 default_value : int or float, optional
202 Used as value for all geometries, if not provided in `shapes`
203 dtype : rasterio or numpy data type, optional
204 Used as data type for results, if `output` is not provided
205
206 Returns
207 -------
208 out : numpy ndarray
209 Results
210
211 Notes
212 -----
213 Valid data types for `fill`, `default_value`, `out`, `dtype` and
214 shape values are rasterio.int16, rasterio.int32, rasterio.uint8,
215 rasterio.uint16, rasterio.uint32, rasterio.float32, rasterio.float64
216
217 """
218
219 valid_dtypes = ('int16', 'int32', 'uint8', 'uint16', 'uint32', 'float32',
220 'float64')
221
222 def get_valid_dtype(values):
223 values_dtype = values.dtype
224 if values_dtype.kind == 'i':
225 values_dtype = np.dtype(get_minimum_int_dtype(values))
226 if values_dtype.name in valid_dtypes:
227 return values_dtype
228 return None
229
230 def can_cast_dtype(values, dtype):
231 if values.dtype.name == np.dtype(dtype).name:
232 return True
233 elif values.dtype.kind == 'f':
234 return np.allclose(values, values.astype(dtype))
235 else:
236 return np.array_equal(values, values.astype(dtype))
237
238 if fill != 0:
239 fill_array = np.array([fill])
240 if get_valid_dtype(fill_array) is None:
241 raise ValueError('fill must be one of these types: %s'
242 % (', '.join(valid_dtypes)))
243 elif dtype is not None and not can_cast_dtype(fill_array, dtype):
244 raise ValueError('fill value cannot be cast to specified dtype')
245
246 if default_value != 1:
247 default_value_array = np.array([default_value])
248 if get_valid_dtype(default_value_array) is None:
249 raise ValueError('default_value must be one of these types: %s'
250 % (', '.join(valid_dtypes)))
251 elif dtype is not None and not can_cast_dtype(default_value_array,
252 dtype):
253 raise ValueError('default_value cannot be cast to specified dtype')
254
255 valid_shapes = []
256 shape_values = []
257 for index, item in enumerate(shapes):
258 try:
259 if isinstance(item, (tuple, list)):
260 geom, value = item
261 else:
262 geom = item
263 value = default_value
264 geom = getattr(geom, '__geo_interface__', None) or geom
265 if (not isinstance(geom, dict) or
266 'type' not in geom or 'coordinates' not in geom):
267 raise ValueError(
268 'Object %r at index %d is not a geometry object' %
269 (geom, index))
270 valid_shapes.append((geom, value))
271 shape_values.append(value)
272 except Exception:
273 log.exception('Exception caught, skipping shape %d', index)
274
275 if not valid_shapes:
276 raise ValueError('No valid shapes found for rasterize. Shapes must be '
277 'valid geometry objects')
278
279 shape_values = np.array(shape_values)
280 values_dtype = get_valid_dtype(shape_values)
281 if values_dtype is None:
282 raise ValueError('shape values must be one of these dtypes: %s' %
283 (', '.join(valid_dtypes)))
284
285 if dtype is None:
286 dtype = values_dtype
287 elif np.dtype(dtype).name not in valid_dtypes:
288 raise ValueError('dtype must be one of: %s' % (', '.join(valid_dtypes)))
289 elif not can_cast_dtype(shape_values, dtype):
290 raise ValueError('shape values could not be cast to specified dtype')
291
292 if output is not None:
293 warnings.warn(
294 "The 'output' keyword arg has been superceded by 'out' "
295 "and will be removed before Rasterio 1.0.",
296 FutureWarning,
297 stacklevel=2)
298 out = out if out is not None else output
299 if out is not None:
300 if np.dtype(output.dtype).name not in valid_dtypes:
301 raise ValueError('Output image dtype must be one of: %s'
302 % (', '.join(valid_dtypes)))
303 if not can_cast_dtype(shape_values, output.dtype):
304 raise ValueError('shape values cannot be cast to dtype of output '
305 'image')
306
307 elif out_shape is not None:
308 out = np.empty(out_shape, dtype=dtype)
309 out.fill(fill)
310 else:
311 raise ValueError('Either an output shape or image must be provided')
312
313 transform = guard_transform(transform)
314
315 with rasterio.drivers():
316 _rasterize(valid_shapes, out, transform.to_gdal(), all_touched)
317
318 return out
319
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rasterio/features.py b/rasterio/features.py
--- a/rasterio/features.py
+++ b/rasterio/features.py
@@ -297,10 +297,10 @@
stacklevel=2)
out = out if out is not None else output
if out is not None:
- if np.dtype(output.dtype).name not in valid_dtypes:
+ if np.dtype(out.dtype).name not in valid_dtypes:
raise ValueError('Output image dtype must be one of: %s'
% (', '.join(valid_dtypes)))
- if not can_cast_dtype(shape_values, output.dtype):
+ if not can_cast_dtype(shape_values, out.dtype):
raise ValueError('shape values cannot be cast to dtype of output '
'image')
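
An editorial aside, not part of the dataset record: a minimal sketch of why the pre-patch branch fails when a caller supplies only the newer `out` keyword. In that case `output` is `None`, so `output.dtype` raises an AttributeError before the intended dtype validation can run; the patch simply switches both references to `out`.

```python
import numpy as np

def validate(out=None, output=None):
    # mirrors the pre-patch logic in rasterio.features.rasterize
    out = out if out is not None else output
    if out is not None:
        return np.dtype(output.dtype).name  # `output` is still None here

validate(out=np.zeros((2, 2), dtype="uint8"))
# AttributeError: 'NoneType' object has no attribute 'dtype'
```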
| {"golden_diff": "diff --git a/rasterio/features.py b/rasterio/features.py\n--- a/rasterio/features.py\n+++ b/rasterio/features.py\n@@ -297,10 +297,10 @@\n stacklevel=2)\n out = out if out is not None else output\n if out is not None:\n- if np.dtype(output.dtype).name not in valid_dtypes:\n+ if np.dtype(out.dtype).name not in valid_dtypes:\n raise ValueError('Output image dtype must be one of: %s'\n % (', '.join(valid_dtypes)))\n- if not can_cast_dtype(shape_values, output.dtype):\n+ if not can_cast_dtype(shape_values, out.dtype):\n raise ValueError('shape values cannot be cast to dtype of output '\n 'image')\n", "issue": "rasterize - incorrect use of output argument\nAround line 299 in features.py\n\nif out is not None:\n if np.dtype(output.dtype).name not in valid_dtypes:\n ^^^^^^^^^^^^\n\nI think that output.dtype needs to be changed to out.dtype....\n\nThanks\n\n", "before_files": [{"content": "\"\"\"Functions for working with features in a raster dataset.\"\"\"\n\nimport json\nimport logging\nimport time\nimport warnings\n\nimport numpy as np\n\nimport rasterio\nfrom rasterio._features import _shapes, _sieve, _rasterize\nfrom rasterio.transform import IDENTITY, guard_transform\nfrom rasterio.dtypes import get_minimum_int_dtype\n\n\nlog = logging.getLogger('rasterio')\n\n\nclass NullHandler(logging.Handler):\n def emit(self, record):\n pass\nlog.addHandler(NullHandler())\n\n\ndef shapes(image, mask=None, connectivity=4, transform=IDENTITY):\n \"\"\"\n Return a generator of (polygon, value) for each each set of adjacent pixels\n of the same value.\n\n Parameters\n ----------\n image : numpy ndarray or rasterio Band object\n (RasterReader, bidx namedtuple).\n Data type must be one of rasterio.int16, rasterio.int32,\n rasterio.uint8, rasterio.uint16, or rasterio.float32.\n mask : numpy ndarray or rasterio Band object, optional\n Values of False will be excluded from feature generation\n Must be of type rasterio.bool_\n connectivity : int, optional\n Use 4 or 8 pixel connectivity for grouping pixels into features\n transform : Affine transformation, optional\n If not provided, feature coordinates will be generated based on pixel\n coordinates\n\n Returns\n -------\n Generator of (polygon, value)\n Yields a pair of (polygon, value) for each feature found in the image.\n Polygons are GeoJSON-like dicts and the values are the associated value\n from the image, in the data type of the image.\n Note: due to floating point precision issues, values returned from a\n floating point image may not exactly match the original values.\n\n Notes\n -----\n The amount of memory used by this algorithm is proportional to the number\n and complexity of polygons produced. This algorithm is most appropriate\n for simple thematic data. 
Data with high pixel-to-pixel variability, such\n as imagery, may produce one polygon per pixel and consume large amounts of\n memory.\n\n \"\"\"\n\n valid_dtypes = ('int16', 'int32', 'uint8', 'uint16', 'float32')\n\n if np.dtype(image.dtype).name not in valid_dtypes:\n raise ValueError('image dtype must be one of: %s'\n % (', '.join(valid_dtypes)))\n\n if mask is not None and np.dtype(mask.dtype) != np.dtype(rasterio.bool_):\n raise ValueError(\"Mask must be dtype rasterio.bool_\")\n\n if connectivity not in (4, 8):\n raise ValueError(\"Connectivity Option must be 4 or 8\")\n\n transform = guard_transform(transform)\n\n with rasterio.drivers():\n for s, v in _shapes(image, mask, connectivity, transform.to_gdal()):\n yield s, v\n\n\ndef sieve(image, size, out=None, output=None, mask=None, connectivity=4):\n \"\"\"\n Replaces small polygons in `image` with the value of their largest\n neighbor. Polygons are found for each set of neighboring pixels of the\n same value.\n\n Parameters\n ----------\n image : numpy ndarray or rasterio Band object\n (RasterReader, bidx namedtuple)\n Must be of type rasterio.int16, rasterio.int32, rasterio.uint8,\n rasterio.uint16, or rasterio.float32\n size : int\n minimum polygon size (number of pixels) to retain.\n out : numpy ndarray, optional\n Array of same shape and data type as `image` in which to store results.\n output : older alias for `out`, will be removed before 1.0.\n output : numpy ndarray, optional\n mask : numpy ndarray or rasterio Band object, optional\n Values of False will be excluded from feature generation\n Must be of type rasterio.bool_\n connectivity : int, optional\n Use 4 or 8 pixel connectivity for grouping pixels into features\n\n Returns\n -------\n out : numpy ndarray\n Result\n\n Notes\n -----\n GDAL only supports values that can be cast to 32-bit integers for this\n operation.\n\n The amount of memory used by this algorithm is proportional to the number\n and complexity of polygons found in the image. This algorithm is most\n appropriate for simple thematic data. 
Data with high pixel-to-pixel\n variability, such as imagery, may produce one polygon per pixel and consume\n large amounts of memory.\n\n \"\"\"\n\n valid_dtypes = ('int16', 'int32', 'uint8', 'uint16')\n\n if np.dtype(image.dtype).name not in valid_dtypes:\n valid_types_str = ', '.join(('rasterio.{0}'.format(t) for t\n in valid_dtypes))\n raise ValueError('image dtype must be one of: %' % valid_types_str)\n\n if size <= 0:\n raise ValueError('size must be greater than 0')\n elif type(size) == float:\n raise ValueError('size must be an integer number of pixels')\n elif size > (image.shape[0] * image.shape[1]):\n raise ValueError('size must be smaller than size of image')\n\n if connectivity not in (4, 8):\n raise ValueError('connectivity must be 4 or 8')\n\n if mask is not None:\n if np.dtype(mask.dtype) != np.dtype(rasterio.bool_):\n raise ValueError('Mask must be dtype rasterio.bool_')\n elif mask.shape != image.shape:\n raise ValueError('mask shape must be same as image shape')\n\n # Start moving users over to 'out'.\n if output is not None:\n warnings.warn(\n \"The 'output' keyword arg has been superceded by 'out' \"\n \"and will be removed before Rasterio 1.0.\",\n FutureWarning,\n stacklevel=2)\n \n out = out if out is not None else output\n if out is None:\n out = np.zeros_like(image)\n else:\n if np.dtype(image.dtype).name != np.dtype(out.dtype).name:\n raise ValueError('output must match dtype of image')\n elif out.shape != image.shape:\n raise ValueError('mask shape must be same as image shape')\n\n with rasterio.drivers():\n _sieve(image, size, out, mask, connectivity)\n return out\n\n\ndef rasterize(\n shapes,\n out_shape=None,\n fill=0,\n out=None,\n output=None,\n transform=IDENTITY,\n all_touched=False,\n default_value=1,\n dtype=None):\n \"\"\"\n Returns an image array with input geometries burned in.\n\n Parameters\n ----------\n shapes : iterable of (geometry, value) pairs or iterable over geometries\n `geometry` can either be an object that implements the geo interface or\n GeoJSON-like object.\n out_shape : tuple or list\n Shape of output numpy ndarray\n fill : int or float, optional\n Used as fill value for all areas not covered by input geometries\n out : numpy ndarray, optional\n Array of same shape and data type as `image` in which to store results.\n output : older alias for `out`, will be removed before 1.0.\n transform : Affine transformation object, optional\n transformation applied to shape geometries into pixel coordinates\n all_touched : boolean, optional\n If True, all pixels touched by geometries will be burned in.\n If false, only pixels whose center is within the polygon or that are\n selected by brezenhams line algorithm will be burned in.\n default_value : int or float, optional\n Used as value for all geometries, if not provided in `shapes`\n dtype : rasterio or numpy data type, optional\n Used as data type for results, if `output` is not provided\n\n Returns\n -------\n out : numpy ndarray\n Results\n\n Notes\n -----\n Valid data types for `fill`, `default_value`, `out`, `dtype` and\n shape values are rasterio.int16, rasterio.int32, rasterio.uint8,\n rasterio.uint16, rasterio.uint32, rasterio.float32, rasterio.float64\n\n \"\"\"\n\n valid_dtypes = ('int16', 'int32', 'uint8', 'uint16', 'uint32', 'float32',\n 'float64')\n\n def get_valid_dtype(values):\n values_dtype = values.dtype\n if values_dtype.kind == 'i':\n values_dtype = np.dtype(get_minimum_int_dtype(values))\n if values_dtype.name in valid_dtypes:\n return values_dtype\n return None\n\n def 
can_cast_dtype(values, dtype):\n if values.dtype.name == np.dtype(dtype).name:\n return True\n elif values.dtype.kind == 'f':\n return np.allclose(values, values.astype(dtype))\n else:\n return np.array_equal(values, values.astype(dtype))\n\n if fill != 0:\n fill_array = np.array([fill])\n if get_valid_dtype(fill_array) is None:\n raise ValueError('fill must be one of these types: %s'\n % (', '.join(valid_dtypes)))\n elif dtype is not None and not can_cast_dtype(fill_array, dtype):\n raise ValueError('fill value cannot be cast to specified dtype')\n\n if default_value != 1:\n default_value_array = np.array([default_value])\n if get_valid_dtype(default_value_array) is None:\n raise ValueError('default_value must be one of these types: %s'\n % (', '.join(valid_dtypes)))\n elif dtype is not None and not can_cast_dtype(default_value_array,\n dtype):\n raise ValueError('default_value cannot be cast to specified dtype')\n\n valid_shapes = []\n shape_values = []\n for index, item in enumerate(shapes):\n try:\n if isinstance(item, (tuple, list)):\n geom, value = item\n else:\n geom = item\n value = default_value\n geom = getattr(geom, '__geo_interface__', None) or geom\n if (not isinstance(geom, dict) or\n 'type' not in geom or 'coordinates' not in geom):\n raise ValueError(\n 'Object %r at index %d is not a geometry object' %\n (geom, index))\n valid_shapes.append((geom, value))\n shape_values.append(value)\n except Exception:\n log.exception('Exception caught, skipping shape %d', index)\n\n if not valid_shapes:\n raise ValueError('No valid shapes found for rasterize. Shapes must be '\n 'valid geometry objects')\n\n shape_values = np.array(shape_values)\n values_dtype = get_valid_dtype(shape_values)\n if values_dtype is None:\n raise ValueError('shape values must be one of these dtypes: %s' %\n (', '.join(valid_dtypes)))\n\n if dtype is None:\n dtype = values_dtype\n elif np.dtype(dtype).name not in valid_dtypes:\n raise ValueError('dtype must be one of: %s' % (', '.join(valid_dtypes)))\n elif not can_cast_dtype(shape_values, dtype):\n raise ValueError('shape values could not be cast to specified dtype')\n\n if output is not None:\n warnings.warn(\n \"The 'output' keyword arg has been superceded by 'out' \"\n \"and will be removed before Rasterio 1.0.\",\n FutureWarning,\n stacklevel=2)\n out = out if out is not None else output\n if out is not None:\n if np.dtype(output.dtype).name not in valid_dtypes:\n raise ValueError('Output image dtype must be one of: %s'\n % (', '.join(valid_dtypes)))\n if not can_cast_dtype(shape_values, output.dtype):\n raise ValueError('shape values cannot be cast to dtype of output '\n 'image')\n\n elif out_shape is not None:\n out = np.empty(out_shape, dtype=dtype)\n out.fill(fill)\n else:\n raise ValueError('Either an output shape or image must be provided')\n\n transform = guard_transform(transform)\n\n with rasterio.drivers():\n _rasterize(valid_shapes, out, transform.to_gdal(), all_touched)\n\n return out\n", "path": "rasterio/features.py"}], "after_files": [{"content": "\"\"\"Functions for working with features in a raster dataset.\"\"\"\n\nimport json\nimport logging\nimport time\nimport warnings\n\nimport numpy as np\n\nimport rasterio\nfrom rasterio._features import _shapes, _sieve, _rasterize\nfrom rasterio.transform import IDENTITY, guard_transform\nfrom rasterio.dtypes import get_minimum_int_dtype\n\n\nlog = logging.getLogger('rasterio')\n\n\nclass NullHandler(logging.Handler):\n def emit(self, record):\n pass\nlog.addHandler(NullHandler())\n\n\ndef 
shapes(image, mask=None, connectivity=4, transform=IDENTITY):\n \"\"\"\n Return a generator of (polygon, value) for each each set of adjacent pixels\n of the same value.\n\n Parameters\n ----------\n image : numpy ndarray or rasterio Band object\n (RasterReader, bidx namedtuple).\n Data type must be one of rasterio.int16, rasterio.int32,\n rasterio.uint8, rasterio.uint16, or rasterio.float32.\n mask : numpy ndarray or rasterio Band object, optional\n Values of False will be excluded from feature generation\n Must be of type rasterio.bool_\n connectivity : int, optional\n Use 4 or 8 pixel connectivity for grouping pixels into features\n transform : Affine transformation, optional\n If not provided, feature coordinates will be generated based on pixel\n coordinates\n\n Returns\n -------\n Generator of (polygon, value)\n Yields a pair of (polygon, value) for each feature found in the image.\n Polygons are GeoJSON-like dicts and the values are the associated value\n from the image, in the data type of the image.\n Note: due to floating point precision issues, values returned from a\n floating point image may not exactly match the original values.\n\n Notes\n -----\n The amount of memory used by this algorithm is proportional to the number\n and complexity of polygons produced. This algorithm is most appropriate\n for simple thematic data. Data with high pixel-to-pixel variability, such\n as imagery, may produce one polygon per pixel and consume large amounts of\n memory.\n\n \"\"\"\n\n valid_dtypes = ('int16', 'int32', 'uint8', 'uint16', 'float32')\n\n if np.dtype(image.dtype).name not in valid_dtypes:\n raise ValueError('image dtype must be one of: %s'\n % (', '.join(valid_dtypes)))\n\n if mask is not None and np.dtype(mask.dtype) != np.dtype(rasterio.bool_):\n raise ValueError(\"Mask must be dtype rasterio.bool_\")\n\n if connectivity not in (4, 8):\n raise ValueError(\"Connectivity Option must be 4 or 8\")\n\n transform = guard_transform(transform)\n\n with rasterio.drivers():\n for s, v in _shapes(image, mask, connectivity, transform.to_gdal()):\n yield s, v\n\n\ndef sieve(image, size, out=None, output=None, mask=None, connectivity=4):\n \"\"\"\n Replaces small polygons in `image` with the value of their largest\n neighbor. Polygons are found for each set of neighboring pixels of the\n same value.\n\n Parameters\n ----------\n image : numpy ndarray or rasterio Band object\n (RasterReader, bidx namedtuple)\n Must be of type rasterio.int16, rasterio.int32, rasterio.uint8,\n rasterio.uint16, or rasterio.float32\n size : int\n minimum polygon size (number of pixels) to retain.\n out : numpy ndarray, optional\n Array of same shape and data type as `image` in which to store results.\n output : older alias for `out`, will be removed before 1.0.\n output : numpy ndarray, optional\n mask : numpy ndarray or rasterio Band object, optional\n Values of False will be excluded from feature generation\n Must be of type rasterio.bool_\n connectivity : int, optional\n Use 4 or 8 pixel connectivity for grouping pixels into features\n\n Returns\n -------\n out : numpy ndarray\n Result\n\n Notes\n -----\n GDAL only supports values that can be cast to 32-bit integers for this\n operation.\n\n The amount of memory used by this algorithm is proportional to the number\n and complexity of polygons found in the image. This algorithm is most\n appropriate for simple thematic data. 
Data with high pixel-to-pixel\n variability, such as imagery, may produce one polygon per pixel and consume\n large amounts of memory.\n\n \"\"\"\n\n valid_dtypes = ('int16', 'int32', 'uint8', 'uint16')\n\n if np.dtype(image.dtype).name not in valid_dtypes:\n valid_types_str = ', '.join(('rasterio.{0}'.format(t) for t\n in valid_dtypes))\n raise ValueError('image dtype must be one of: %' % valid_types_str)\n\n if size <= 0:\n raise ValueError('size must be greater than 0')\n elif type(size) == float:\n raise ValueError('size must be an integer number of pixels')\n elif size > (image.shape[0] * image.shape[1]):\n raise ValueError('size must be smaller than size of image')\n\n if connectivity not in (4, 8):\n raise ValueError('connectivity must be 4 or 8')\n\n if mask is not None:\n if np.dtype(mask.dtype) != np.dtype(rasterio.bool_):\n raise ValueError('Mask must be dtype rasterio.bool_')\n elif mask.shape != image.shape:\n raise ValueError('mask shape must be same as image shape')\n\n # Start moving users over to 'out'.\n if output is not None:\n warnings.warn(\n \"The 'output' keyword arg has been superceded by 'out' \"\n \"and will be removed before Rasterio 1.0.\",\n FutureWarning,\n stacklevel=2)\n \n out = out if out is not None else output\n if out is None:\n out = np.zeros_like(image)\n else:\n if np.dtype(image.dtype).name != np.dtype(out.dtype).name:\n raise ValueError('output must match dtype of image')\n elif out.shape != image.shape:\n raise ValueError('mask shape must be same as image shape')\n\n with rasterio.drivers():\n _sieve(image, size, out, mask, connectivity)\n return out\n\n\ndef rasterize(\n shapes,\n out_shape=None,\n fill=0,\n out=None,\n output=None,\n transform=IDENTITY,\n all_touched=False,\n default_value=1,\n dtype=None):\n \"\"\"\n Returns an image array with input geometries burned in.\n\n Parameters\n ----------\n shapes : iterable of (geometry, value) pairs or iterable over geometries\n `geometry` can either be an object that implements the geo interface or\n GeoJSON-like object.\n out_shape : tuple or list\n Shape of output numpy ndarray\n fill : int or float, optional\n Used as fill value for all areas not covered by input geometries\n out : numpy ndarray, optional\n Array of same shape and data type as `image` in which to store results.\n output : older alias for `out`, will be removed before 1.0.\n transform : Affine transformation object, optional\n transformation applied to shape geometries into pixel coordinates\n all_touched : boolean, optional\n If True, all pixels touched by geometries will be burned in.\n If false, only pixels whose center is within the polygon or that are\n selected by brezenhams line algorithm will be burned in.\n default_value : int or float, optional\n Used as value for all geometries, if not provided in `shapes`\n dtype : rasterio or numpy data type, optional\n Used as data type for results, if `output` is not provided\n\n Returns\n -------\n out : numpy ndarray\n Results\n\n Notes\n -----\n Valid data types for `fill`, `default_value`, `out`, `dtype` and\n shape values are rasterio.int16, rasterio.int32, rasterio.uint8,\n rasterio.uint16, rasterio.uint32, rasterio.float32, rasterio.float64\n\n \"\"\"\n\n valid_dtypes = ('int16', 'int32', 'uint8', 'uint16', 'uint32', 'float32',\n 'float64')\n\n def get_valid_dtype(values):\n values_dtype = values.dtype\n if values_dtype.kind == 'i':\n values_dtype = np.dtype(get_minimum_int_dtype(values))\n if values_dtype.name in valid_dtypes:\n return values_dtype\n return None\n\n def 
can_cast_dtype(values, dtype):\n if values.dtype.name == np.dtype(dtype).name:\n return True\n elif values.dtype.kind == 'f':\n return np.allclose(values, values.astype(dtype))\n else:\n return np.array_equal(values, values.astype(dtype))\n\n if fill != 0:\n fill_array = np.array([fill])\n if get_valid_dtype(fill_array) is None:\n raise ValueError('fill must be one of these types: %s'\n % (', '.join(valid_dtypes)))\n elif dtype is not None and not can_cast_dtype(fill_array, dtype):\n raise ValueError('fill value cannot be cast to specified dtype')\n\n if default_value != 1:\n default_value_array = np.array([default_value])\n if get_valid_dtype(default_value_array) is None:\n raise ValueError('default_value must be one of these types: %s'\n % (', '.join(valid_dtypes)))\n elif dtype is not None and not can_cast_dtype(default_value_array,\n dtype):\n raise ValueError('default_value cannot be cast to specified dtype')\n\n valid_shapes = []\n shape_values = []\n for index, item in enumerate(shapes):\n try:\n if isinstance(item, (tuple, list)):\n geom, value = item\n else:\n geom = item\n value = default_value\n geom = getattr(geom, '__geo_interface__', None) or geom\n if (not isinstance(geom, dict) or\n 'type' not in geom or 'coordinates' not in geom):\n raise ValueError(\n 'Object %r at index %d is not a geometry object' %\n (geom, index))\n valid_shapes.append((geom, value))\n shape_values.append(value)\n except Exception:\n log.exception('Exception caught, skipping shape %d', index)\n\n if not valid_shapes:\n raise ValueError('No valid shapes found for rasterize. Shapes must be '\n 'valid geometry objects')\n\n shape_values = np.array(shape_values)\n values_dtype = get_valid_dtype(shape_values)\n if values_dtype is None:\n raise ValueError('shape values must be one of these dtypes: %s' %\n (', '.join(valid_dtypes)))\n\n if dtype is None:\n dtype = values_dtype\n elif np.dtype(dtype).name not in valid_dtypes:\n raise ValueError('dtype must be one of: %s' % (', '.join(valid_dtypes)))\n elif not can_cast_dtype(shape_values, dtype):\n raise ValueError('shape values could not be cast to specified dtype')\n\n if output is not None:\n warnings.warn(\n \"The 'output' keyword arg has been superceded by 'out' \"\n \"and will be removed before Rasterio 1.0.\",\n FutureWarning,\n stacklevel=2)\n out = out if out is not None else output\n if out is not None:\n if np.dtype(out.dtype).name not in valid_dtypes:\n raise ValueError('Output image dtype must be one of: %s'\n % (', '.join(valid_dtypes)))\n if not can_cast_dtype(shape_values, out.dtype):\n raise ValueError('shape values cannot be cast to dtype of output '\n 'image')\n\n elif out_shape is not None:\n out = np.empty(out_shape, dtype=dtype)\n out.fill(fill)\n else:\n raise ValueError('Either an output shape or image must be provided')\n\n transform = guard_transform(transform)\n\n with rasterio.drivers():\n _rasterize(valid_shapes, out, transform.to_gdal(), all_touched)\n\n return out\n", "path": "rasterio/features.py"}]} | 3,852 | 172 |
gh_patches_debug_7691 | rasdani/github-patches | git_diff | pulp__pulpcore-5246 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RBAC denies upload unless chunk size is specified
**Version**
Deployed on K8s via Operator
```
{
"versions": [
{
"component": "core",
"version": "3.49.1",
"package": "pulpcore",
"module": "pulpcore.app",
"domain_compatible": true
},
{
"component": "ansible",
"version": "0.21.3",
"package": "pulp-ansible",
"module": "pulp_ansible.app",
"domain_compatible": false
},
{
"component": "container",
"version": "2.19.2",
"package": "pulp-container",
"module": "pulp_container.app",
"domain_compatible": false
},
{
"component": "deb",
"version": "3.2.0",
"package": "pulp_deb",
"module": "pulp_deb.app",
"domain_compatible": false
},
{
"component": "maven",
"version": "0.8.0",
"package": "pulp-maven",
"module": "pulp_maven.app",
"domain_compatible": false
},
{
"component": "ostree",
"version": "2.3.0",
"package": "pulp-ostree",
"module": "pulp_ostree.app",
"domain_compatible": true
},
{
"component": "python",
"version": "3.11.0",
"package": "pulp-python",
"module": "pulp_python.app",
"domain_compatible": false
},
{
"component": "rpm",
"version": "3.25.1",
"package": "pulp-rpm",
"module": "pulp_rpm.app",
"domain_compatible": true
},
{
"component": "certguard",
"version": "3.49.1",
"package": "pulpcore",
"module": "pulp_certguard.app",
"domain_compatible": true
},
{
"component": "file",
"version": "3.49.1",
"package": "pulpcore",
"module": "pulp_file.app",
"domain_compatible": true
}
],
```
**Describe the bug**
While uploading some files (I haven't been able to exactly pin down what they have in common yet) as a non-admin user, we get `Error: {"detail":"You do not have permission to perform this action."}` while doing a `pulp file content upload` despite permissions looking fine across the board. Specifying a _sufficiently high_ chunk size is the only thing that seems to resolve it. For example:
```
~$ wget https://github.com/mstorsjo/llvm-mingw/releases/download/20231128/llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz
~$ pulp --config /tmp/config.toml file content upload --file llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz --relative-path llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz --repository file-local --chunk-size 10000000
Error: {"detail":"You do not have permission to perform this action."}
~$ stat --printf="%n,%s" llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz
llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz,72800008
~$ pulp --config /tmp/config.toml file content upload --file llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz --relative-path llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz --repository file-local --chunk-size 100000000
Started background task /pulp/api/v3/tasks/018e860b-bde4-7a59-bacd-710eec7c94bd/
Done.
<snip>
```
Interestingly, I _don't_ see this behavior when using the admin user, only a user that we created, which makes me think this is some permission I missed when creating the user, but I have no idea what it would be.
**To Reproduce**
1. Create a file repository and distribution (with autopublish).
2. Create a user with the following roles for the repository and distribution: `file.filedistribution_creator`, `file.filedistribution_owner`, `file.filerepository_creator`, `file.filerepository_owner`.
3. Download a large file of an affected type (whatever that is... .tar.xz seems to trigger it) and try to upload without `--chunk-size` set.
4. Upload should fail with permissions error.
**Expected behavior**
Upload should happen without an error.
**Additional context**
N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/models/upload.py`
Content:
```
1 import hashlib
2 import os
3
4 from gettext import gettext as _
5
6 from django.core.files.base import ContentFile
7 from django.db import models
8 from django.db.models.signals import post_delete
9 from django.dispatch import receiver
10 from rest_framework import serializers
11
12 from pulpcore.app.models import BaseModel, fields, storage
13 from pulpcore.app.util import get_domain_pk
14
15
16 class Upload(BaseModel):
17 """
18 A chunked upload. Stores chunks until used to create an artifact, etc.
19
20 Fields:
21
22 size (models.BigIntegerField): The size of the file in bytes.
23
24 Relations:
25
26 pulp_domain (models.ForeignKey): The domain the Upload is a part of.
27 """
28
29 size = models.BigIntegerField()
30 pulp_domain = models.ForeignKey("Domain", default=get_domain_pk, on_delete=models.PROTECT)
31
32 def append(self, chunk, offset, sha256=None):
33 """
34 Append a chunk to an upload.
35
36 Args:
37 chunk (File): Binary data to append to the upload file.
38 offset (int): First byte position to write chunk to.
39 """
40 chunk = chunk.read()
41 if sha256:
42 current_sha256 = hashlib.sha256(chunk).hexdigest()
43 if sha256 != current_sha256:
44 raise serializers.ValidationError(_("Checksum does not match chunk upload."))
45
46 upload_chunk = UploadChunk(upload=self, offset=offset, size=len(chunk))
47 filename = os.path.basename(upload_chunk.storage_path(""))
48 upload_chunk.file.save(filename, ContentFile(chunk))
49
50 class Meta:
51 permissions = [
52 ("manage_roles_upload", "Can manage role assignments on upload"),
53 ]
54
55
56 class UploadChunk(BaseModel):
57 """
58 A chunk for an uploaded file.
59
60 Fields:
61
62 file (fields.FileField): A file where the uploaded chunk is stored.
63 upload (models.ForeignKey): Upload this chunk belongs to.
64 offset (models.BigIntegerField): Start of the chunk in bytes.
65 size (models.BigIntegerField): Size of the chunk in bytes.
66 """
67
68 def storage_path(self, name):
69 """
70 Callable used by FileField to determine where the uploaded file should be stored.
71
72 Args:
73 name (str): Original name of uploaded file. It is ignored by this method because the
74 pulp_id is used to determine a file path instead.
75 """
76 return storage.get_upload_chunk_file_path(self.pulp_id)
77
78 file = fields.FileField(
79 null=False, upload_to=storage_path, storage=storage.DomainStorage, max_length=255
80 )
81 upload = models.ForeignKey(Upload, on_delete=models.CASCADE, related_name="chunks")
82 offset = models.BigIntegerField()
83 size = models.BigIntegerField()
84
85 @property
86 def pulp_domain(self):
87 """Get the Domain for this chunk from the Upload."""
88 return self.upload.pulp_domain
89
90
91 @receiver(post_delete, sender=UploadChunk)
92 def upload_chunk_delete(instance, **kwargs):
93 instance.file.delete(save=False)
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/app/models/upload.py b/pulpcore/app/models/upload.py
--- a/pulpcore/app/models/upload.py
+++ b/pulpcore/app/models/upload.py
@@ -9,11 +9,11 @@
from django.dispatch import receiver
from rest_framework import serializers
-from pulpcore.app.models import BaseModel, fields, storage
+from pulpcore.app.models import BaseModel, fields, storage, AutoAddObjPermsMixin
from pulpcore.app.util import get_domain_pk
-class Upload(BaseModel):
+class Upload(BaseModel, AutoAddObjPermsMixin):
"""
A chunked upload. Stores chunks until used to create an artifact, etc.
| {"golden_diff": "diff --git a/pulpcore/app/models/upload.py b/pulpcore/app/models/upload.py\n--- a/pulpcore/app/models/upload.py\n+++ b/pulpcore/app/models/upload.py\n@@ -9,11 +9,11 @@\n from django.dispatch import receiver\n from rest_framework import serializers\n \n-from pulpcore.app.models import BaseModel, fields, storage\n+from pulpcore.app.models import BaseModel, fields, storage, AutoAddObjPermsMixin\n from pulpcore.app.util import get_domain_pk\n \n \n-class Upload(BaseModel):\n+class Upload(BaseModel, AutoAddObjPermsMixin):\n \"\"\"\n A chunked upload. Stores chunks until used to create an artifact, etc.\n", "issue": "RBAC denies upload unless chunk size is specified\n**Version**\r\nDeployed on K8s via Operator\r\n```\r\n{ \r\n \"versions\": [ \r\n { \r\n \"component\": \"core\", \r\n \"version\": \"3.49.1\", \r\n \"package\": \"pulpcore\", \r\n \"module\": \"pulpcore.app\", \r\n \"domain_compatible\": true\r\n }, \r\n { \r\n \"component\": \"ansible\", \r\n \"version\": \"0.21.3\", \r\n \"package\": \"pulp-ansible\", \r\n \"module\": \"pulp_ansible.app\", \r\n \"domain_compatible\": false \r\n }, \r\n { \r\n \"component\": \"container\",\r\n \"version\": \"2.19.2\",\r\n \"package\": \"pulp-container\",\r\n \"module\": \"pulp_container.app\",\r\n \"domain_compatible\": false\r\n },\r\n {\r\n \"component\": \"deb\",\r\n \"version\": \"3.2.0\",\r\n \"package\": \"pulp_deb\",\r\n \"module\": \"pulp_deb.app\",\r\n \"domain_compatible\": false\r\n },\r\n {\r\n \"component\": \"maven\",\r\n \"version\": \"0.8.0\",\r\n \"package\": \"pulp-maven\",\r\n \"module\": \"pulp_maven.app\",\r\n \"domain_compatible\": false\r\n },\r\n {\r\n \"component\": \"ostree\",\r\n \"version\": \"2.3.0\",\r\n \"package\": \"pulp-ostree\",\r\n \"module\": \"pulp_ostree.app\",\r\n \"domain_compatible\": true\r\n },\r\n {\r\n \"component\": \"python\",\r\n \"version\": \"3.11.0\",\r\n \"package\": \"pulp-python\",\r\n \"module\": \"pulp_python.app\",\r\n \"domain_compatible\": false\r\n },\r\n {\r\n \"component\": \"rpm\",\r\n \"version\": \"3.25.1\",\r\n \"package\": \"pulp-rpm\",\r\n \"module\": \"pulp_rpm.app\",\r\n \"domain_compatible\": true\r\n },\r\n {\r\n \"component\": \"certguard\",\r\n \"version\": \"3.49.1\",\r\n \"package\": \"pulpcore\",\r\n \"module\": \"pulp_certguard.app\",\r\n \"domain_compatible\": true\r\n },\r\n {\r\n \"component\": \"file\",\r\n \"version\": \"3.49.1\",\r\n \"package\": \"pulpcore\",\r\n \"module\": \"pulp_file.app\",\r\n \"domain_compatible\": true\r\n }\r\n ],\r\n```\r\n\r\n**Describe the bug**\r\nWhile uploading some files (I haven't been able to exactly pin down what they have in common yet) as a non-admin user, we get `Error: {\"detail\":\"You do not have permission to perform this action.\"}` while doing a `pulp file content upload` despite permissions looking fine across the board. Specifying a _sufficiently high_ chunk size is the only thing that seems to resolve it. 
For example:\r\n```\r\n~$ wget https://github.com/mstorsjo/llvm-mingw/releases/download/20231128/llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz\r\n~$ pulp --config /tmp/config.toml file content upload --file llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz --relative-path llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz --repository file-local --chunk-size 10000000\r\nError: {\"detail\":\"You do not have permission to perform this action.\"}\r\n~$ stat --printf=\"%n,%s\" llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz \r\nllvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz,72800008\r\n~$ pulp --config /tmp/config.toml file content upload --file llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz --relative-path llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz --repository file-local --chunk-size 100000000\r\nStarted background task /pulp/api/v3/tasks/018e860b-bde4-7a59-bacd-710eec7c94bd/\r\nDone.\r\n<snip>\r\n```\r\nInterestingly, I _don't_ see this behavior when using the admin user, only a user that we created, which makes me think this is some permission I missed when creating the user, but I have no idea what it would be. \r\n\r\n**To Reproduce**\r\n1. Create a file repository and distribution (with autopublish).\r\n2. Create a user with the following roles for the repository and distribution: `file.filedistribution_creator`, `file.filedistribution_owner`, `file.filerepository_creator`, `file.filerepository_owner`.\r\n3. Download a large file of an affected type (whatever that is... .tar.xz seems to trigger it) and try to upload without `--chunk-size` set.\r\n4. Upload should fail with permissions error.\r\n\r\n**Expected behavior**\r\nUpload should happen without an error. \r\n\r\n**Additional context**\r\nN/A\r\n\n", "before_files": [{"content": "import hashlib\nimport os\n\nfrom gettext import gettext as _\n\nfrom django.core.files.base import ContentFile\nfrom django.db import models\nfrom django.db.models.signals import post_delete\nfrom django.dispatch import receiver\nfrom rest_framework import serializers\n\nfrom pulpcore.app.models import BaseModel, fields, storage\nfrom pulpcore.app.util import get_domain_pk\n\n\nclass Upload(BaseModel):\n \"\"\"\n A chunked upload. 
Stores chunks until used to create an artifact, etc.\n\n Fields:\n\n size (models.BigIntegerField): The size of the file in bytes.\n\n Relations:\n\n pulp_domain (models.ForeignKey): The domain the Upload is a part of.\n \"\"\"\n\n size = models.BigIntegerField()\n pulp_domain = models.ForeignKey(\"Domain\", default=get_domain_pk, on_delete=models.PROTECT)\n\n def append(self, chunk, offset, sha256=None):\n \"\"\"\n Append a chunk to an upload.\n\n Args:\n chunk (File): Binary data to append to the upload file.\n offset (int): First byte position to write chunk to.\n \"\"\"\n chunk = chunk.read()\n if sha256:\n current_sha256 = hashlib.sha256(chunk).hexdigest()\n if sha256 != current_sha256:\n raise serializers.ValidationError(_(\"Checksum does not match chunk upload.\"))\n\n upload_chunk = UploadChunk(upload=self, offset=offset, size=len(chunk))\n filename = os.path.basename(upload_chunk.storage_path(\"\"))\n upload_chunk.file.save(filename, ContentFile(chunk))\n\n class Meta:\n permissions = [\n (\"manage_roles_upload\", \"Can manage role assignments on upload\"),\n ]\n\n\nclass UploadChunk(BaseModel):\n \"\"\"\n A chunk for an uploaded file.\n\n Fields:\n\n file (fields.FileField): A file where the uploaded chunk is stored.\n upload (models.ForeignKey): Upload this chunk belongs to.\n offset (models.BigIntegerField): Start of the chunk in bytes.\n size (models.BigIntegerField): Size of the chunk in bytes.\n \"\"\"\n\n def storage_path(self, name):\n \"\"\"\n Callable used by FileField to determine where the uploaded file should be stored.\n\n Args:\n name (str): Original name of uploaded file. It is ignored by this method because the\n pulp_id is used to determine a file path instead.\n \"\"\"\n return storage.get_upload_chunk_file_path(self.pulp_id)\n\n file = fields.FileField(\n null=False, upload_to=storage_path, storage=storage.DomainStorage, max_length=255\n )\n upload = models.ForeignKey(Upload, on_delete=models.CASCADE, related_name=\"chunks\")\n offset = models.BigIntegerField()\n size = models.BigIntegerField()\n\n @property\n def pulp_domain(self):\n \"\"\"Get the Domain for this chunk from the Upload.\"\"\"\n return self.upload.pulp_domain\n\n\n@receiver(post_delete, sender=UploadChunk)\ndef upload_chunk_delete(instance, **kwargs):\n instance.file.delete(save=False)\n", "path": "pulpcore/app/models/upload.py"}], "after_files": [{"content": "import hashlib\nimport os\n\nfrom gettext import gettext as _\n\nfrom django.core.files.base import ContentFile\nfrom django.db import models\nfrom django.db.models.signals import post_delete\nfrom django.dispatch import receiver\nfrom rest_framework import serializers\n\nfrom pulpcore.app.models import BaseModel, fields, storage, AutoAddObjPermsMixin\nfrom pulpcore.app.util import get_domain_pk\n\n\nclass Upload(BaseModel, AutoAddObjPermsMixin):\n \"\"\"\n A chunked upload. 
Stores chunks until used to create an artifact, etc.\n\n Fields:\n\n size (models.BigIntegerField): The size of the file in bytes.\n\n Relations:\n\n pulp_domain (models.ForeignKey): The domain the Upload is a part of.\n \"\"\"\n\n size = models.BigIntegerField()\n pulp_domain = models.ForeignKey(\"Domain\", default=get_domain_pk, on_delete=models.PROTECT)\n\n def append(self, chunk, offset, sha256=None):\n \"\"\"\n Append a chunk to an upload.\n\n Args:\n chunk (File): Binary data to append to the upload file.\n offset (int): First byte position to write chunk to.\n \"\"\"\n chunk = chunk.read()\n if sha256:\n current_sha256 = hashlib.sha256(chunk).hexdigest()\n if sha256 != current_sha256:\n raise serializers.ValidationError(_(\"Checksum does not match chunk upload.\"))\n\n upload_chunk = UploadChunk(upload=self, offset=offset, size=len(chunk))\n filename = os.path.basename(upload_chunk.storage_path(\"\"))\n upload_chunk.file.save(filename, ContentFile(chunk))\n\n class Meta:\n permissions = [\n (\"manage_roles_upload\", \"Can manage role assignments on upload\"),\n ]\n\n\nclass UploadChunk(BaseModel):\n \"\"\"\n A chunk for an uploaded file.\n\n Fields:\n\n file (fields.FileField): A file where the uploaded chunk is stored.\n upload (models.ForeignKey): Upload this chunk belongs to.\n offset (models.BigIntegerField): Start of the chunk in bytes.\n size (models.BigIntegerField): Size of the chunk in bytes.\n \"\"\"\n\n def storage_path(self, name):\n \"\"\"\n Callable used by FileField to determine where the uploaded file should be stored.\n\n Args:\n name (str): Original name of uploaded file. It is ignored by this method because the\n pulp_id is used to determine a file path instead.\n \"\"\"\n return storage.get_upload_chunk_file_path(self.pulp_id)\n\n file = fields.FileField(\n null=False, upload_to=storage_path, storage=storage.DomainStorage, max_length=255\n )\n upload = models.ForeignKey(Upload, on_delete=models.CASCADE, related_name=\"chunks\")\n offset = models.BigIntegerField()\n size = models.BigIntegerField()\n\n @property\n def pulp_domain(self):\n \"\"\"Get the Domain for this chunk from the Upload.\"\"\"\n return self.upload.pulp_domain\n\n\n@receiver(post_delete, sender=UploadChunk)\ndef upload_chunk_delete(instance, **kwargs):\n instance.file.delete(save=False)\n", "path": "pulpcore/app/models/upload.py"}]} | 2,335 | 144 |
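The golden diff for the row above resolves the RBAC denial by mixing `AutoAddObjPermsMixin` into the `Upload` model, so the user who creates an upload is automatically granted object-level roles on it; without that grant, the chunked-upload path (apparently taken when the file is larger than the client's chunk size) fails permission checks for non-admin users. The snippet below only illustrates the general "grant creator roles on create" pattern — the role name, the in-memory role store, and the simplified `Upload` class are stand-ins for illustration, not pulpcore's actual API.

```python
# Illustrative sketch of the "auto-add object permissions on create" pattern.
# All names here (FakeRoleStore, core.upload_owner, Upload fields) are
# simplified stand-ins, not pulpcore's real implementation.

class FakeRoleStore:
    """Stand-in for a role/permission backend keyed by (role, user, obj)."""

    def __init__(self):
        self.grants = set()

    def assign_role(self, role, user, obj):
        self.grants.add((role, user, id(obj)))

    def has_role(self, role, user, obj):
        return (role, user, id(obj)) in self.grants


ROLES = FakeRoleStore()


class Upload:
    # Roles handed to the creating user at creation time, so later chunk
    # appends/commits by that same user pass object-level RBAC checks.
    CREATOR_ROLES = ("core.upload_owner",)

    def __init__(self, size, created_by):
        self.size = size
        for role in self.CREATOR_ROLES:
            ROLES.assign_role(role, created_by, self)


upload = Upload(size=72_800_008, created_by="alice")
assert ROLES.has_role("core.upload_owner", "alice", upload)
```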
gh_patches_debug_56184 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-608 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
file_handle.close() is never called -- and it causes a bug!!
In https://github.com/audreyr/cookiecutter/blob/master/cookiecutter/generate.py#L90, `file_handle.close()` is never called, and there's no need for it to remain open.
This is the first time in over 10 years of python programming that I've run into an actual issue with file handles left open, so I felt that the use of two exclamation points in the subject was warranted. I'm removing a temporary template after an unrelated error, and...
```
File "D:\anaconda32\lib\shutil.py", line 250, in rmtree
os.remove(fullname)
WindowsError: [Error 32] The process cannot access the file because it is being
used by another process: '.\\tmpp2duu1\\cookiecutter.json'
```
This change in generate.py:90 helps python not stumble on this particular issue:
```
try:
with open(context_file) as file_handle:
obj = json.load(file_handle, object_pairs_hook=OrderedDict)
except ValueError as e:
# ...
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/generate.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.generate
6 ---------------------
7
8 Functions for generating a project from a project template.
9 """
10 from __future__ import unicode_literals
11 from collections import OrderedDict
12 import fnmatch
13 import io
14 import json
15 import logging
16 import os
17 import shutil
18
19 from jinja2 import FileSystemLoader, Template
20 from jinja2.environment import Environment
21 from jinja2.exceptions import TemplateSyntaxError
22 from binaryornot.check import is_binary
23
24 from .exceptions import (
25 NonTemplatedInputDirException,
26 ContextDecodingException,
27 FailedHookException,
28 OutputDirExistsException
29 )
30 from .find import find_template
31 from .utils import make_sure_path_exists, work_in, rmtree
32 from .hooks import run_hook
33
34
35 def copy_without_render(path, context):
36 """
37 Returns True if `path` matches some pattern in the
38 `_copy_without_render` context setting.
39
40 :param path: A file-system path referring to a file or dir that
41 should be rendered or just copied.
42 :param context: cookiecutter context.
43 """
44 try:
45 for dont_render in context['cookiecutter']['_copy_without_render']:
46 if fnmatch.fnmatch(path, dont_render):
47 return True
48 except KeyError:
49 return False
50
51 return False
52
53
54 def apply_overwrites_to_context(context, overwrite_context):
55 """Modify the given context in place based on the overwrite_context."""
56 for variable, overwrite in overwrite_context.items():
57 if variable not in context:
58 # Do not include variables which are not used in the template
59 continue
60
61 context_value = context[variable]
62
63 if isinstance(context_value, list):
64 # We are dealing with a choice variable
65 if overwrite in context_value:
66 # This overwrite is actually valid for the given context
67 # Let's set it as default (by definition first item in list)
68 # see ``cookiecutter.prompt.prompt_choice_for_config``
69 context_value.remove(overwrite)
70 context_value.insert(0, overwrite)
71 else:
72 # Simply overwrite the value for this variable
73 context[variable] = overwrite
74
75
76 def generate_context(context_file='cookiecutter.json', default_context=None,
77 extra_context=None):
78 """
79 Generates the context for a Cookiecutter project template.
80 Loads the JSON file as a Python object, with key being the JSON filename.
81
82 :param context_file: JSON file containing key/value pairs for populating
83 the cookiecutter's variables.
84 :param default_context: Dictionary containing config to take into account.
85 :param extra_context: Dictionary containing configuration overrides
86 """
87
88 context = {}
89
90 file_handle = open(context_file)
91 try:
92 obj = json.load(file_handle, object_pairs_hook=OrderedDict)
93 except ValueError as e:
94 # JSON decoding error. Let's throw a new exception that is more
95 # friendly for the developer or user.
96 full_fpath = os.path.abspath(context_file)
97 json_exc_message = str(e)
98 our_exc_message = (
99 'JSON decoding error while loading "{0}". Decoding'
100 ' error details: "{1}"'.format(full_fpath, json_exc_message))
101 raise ContextDecodingException(our_exc_message)
102
103 # Add the Python object to the context dictionary
104 file_name = os.path.split(context_file)[1]
105 file_stem = file_name.split('.')[0]
106 context[file_stem] = obj
107
108 # Overwrite context variable defaults with the default context from the
109 # user's global config, if available
110 if default_context:
111 apply_overwrites_to_context(obj, default_context)
112 if extra_context:
113 apply_overwrites_to_context(obj, extra_context)
114
115 logging.debug('Context generated is {0}'.format(context))
116 return context
117
118
119 def generate_file(project_dir, infile, context, env):
120 """
121 1. Render the filename of infile as the name of outfile.
122 2. Deal with infile appropriately:
123
124 a. If infile is a binary file, copy it over without rendering.
125 b. If infile is a text file, render its contents and write the
126 rendered infile to outfile.
127
128 Precondition:
129
130 When calling `generate_file()`, the root template dir must be the
131 current working directory. Using `utils.work_in()` is the recommended
132 way to perform this directory change.
133
134 :param project_dir: Absolute path to the resulting generated project.
135 :param infile: Input file to generate the file from. Relative to the root
136 template dir.
137 :param context: Dict for populating the cookiecutter's variables.
138 :param env: Jinja2 template execution environment.
139 """
140
141 logging.debug('Generating file {0}'.format(infile))
142
143 # Render the path to the output file (not including the root project dir)
144 outfile_tmpl = Template(infile)
145
146 outfile = os.path.join(project_dir, outfile_tmpl.render(**context))
147 file_name_is_empty = os.path.isdir(outfile)
148 if file_name_is_empty:
149 logging.debug('The resulting file name is empty: {0}'.format(outfile))
150 return
151
152 logging.debug('outfile is {0}'.format(outfile))
153
154 # Just copy over binary files. Don't render.
155 logging.debug("Check {0} to see if it's a binary".format(infile))
156 if is_binary(infile):
157 logging.debug('Copying binary {0} to {1} without rendering'
158 .format(infile, outfile))
159 shutil.copyfile(infile, outfile)
160 else:
161 # Force fwd slashes on Windows for get_template
162 # This is a by-design Jinja issue
163 infile_fwd_slashes = infile.replace(os.path.sep, '/')
164
165 # Render the file
166 try:
167 tmpl = env.get_template(infile_fwd_slashes)
168 except TemplateSyntaxError as exception:
169 # Disable translated so that printed exception contains verbose
170 # information about syntax error location
171 exception.translated = False
172 raise
173 rendered_file = tmpl.render(**context)
174
175 logging.debug('Writing {0}'.format(outfile))
176
177 with io.open(outfile, 'w', encoding='utf-8') as fh:
178 fh.write(rendered_file)
179
180 # Apply file permissions to output file
181 shutil.copymode(infile, outfile)
182
183
184 def render_and_create_dir(dirname, context, output_dir,
185 overwrite_if_exists=False):
186 """
187 Renders the name of a directory, creates the directory, and
188 returns its path.
189 """
190
191 name_tmpl = Template(dirname)
192 rendered_dirname = name_tmpl.render(**context)
193 logging.debug('Rendered dir {0} must exist in output_dir {1}'.format(
194 rendered_dirname,
195 output_dir
196 ))
197 dir_to_create = os.path.normpath(
198 os.path.join(output_dir, rendered_dirname)
199 )
200
201 output_dir_exists = os.path.exists(dir_to_create)
202
203 if overwrite_if_exists:
204 if output_dir_exists:
205 logging.debug('Output directory {} already exists,'
206 'overwriting it'.format(dir_to_create))
207 else:
208 if output_dir_exists:
209 msg = 'Error: "{}" directory already exists'.format(dir_to_create)
210 raise OutputDirExistsException(msg)
211
212 make_sure_path_exists(dir_to_create)
213 return dir_to_create
214
215
216 def ensure_dir_is_templated(dirname):
217 """
218 Ensures that dirname is a templated directory name.
219 """
220 if '{{' in dirname and '}}' in dirname:
221 return True
222 else:
223 raise NonTemplatedInputDirException
224
225
226 def _run_hook_from_repo_dir(repo_dir, hook_name, project_dir, context):
227 """
228 Run hook from repo directory, cleaning up project directory if hook fails
229 """
230 with work_in(repo_dir):
231 try:
232 run_hook(hook_name, project_dir, context)
233 except FailedHookException:
234 rmtree(project_dir)
235 logging.error("Stopping generation because %s"
236 " hook script didn't exit sucessfully" % hook_name)
237 raise
238
239
240 def generate_files(repo_dir, context=None, output_dir='.',
241 overwrite_if_exists=False):
242 """
243 Renders the templates and saves them to files.
244
245 :param repo_dir: Project template input directory.
246 :param context: Dict for populating the template's variables.
247 :param output_dir: Where to output the generated project dir into.
248 :param overwrite_if_exists: Overwrite the contents of the output directory
249 if it exists
250 """
251
252 template_dir = find_template(repo_dir)
253 logging.debug('Generating project from {0}...'.format(template_dir))
254 context = context or {}
255
256 unrendered_dir = os.path.split(template_dir)[1]
257 ensure_dir_is_templated(unrendered_dir)
258 project_dir = render_and_create_dir(unrendered_dir,
259 context,
260 output_dir,
261 overwrite_if_exists)
262
263 # We want the Jinja path and the OS paths to match. Consequently, we'll:
264 # + CD to the template folder
265 # + Set Jinja's path to '.'
266 #
267 # In order to build our files to the correct folder(s), we'll use an
268 # absolute path for the target folder (project_dir)
269
270 project_dir = os.path.abspath(project_dir)
271 logging.debug('project_dir is {0}'.format(project_dir))
272
273 _run_hook_from_repo_dir(repo_dir, 'pre_gen_project', project_dir, context)
274
275 with work_in(template_dir):
276 env = Environment(keep_trailing_newline=True)
277 env.loader = FileSystemLoader('.')
278
279 for root, dirs, files in os.walk('.'):
280 # We must separate the two types of dirs into different lists.
281 # The reason is that we don't want ``os.walk`` to go through the
282 # unrendered directories, since they will just be copied.
283 copy_dirs = []
284 render_dirs = []
285
286 for d in dirs:
287 d_ = os.path.normpath(os.path.join(root, d))
288 # We check the full path, because that's how it can be
289 # specified in the ``_copy_without_render`` setting, but
290 # we store just the dir name
291 if copy_without_render(d_, context):
292 copy_dirs.append(d)
293 else:
294 render_dirs.append(d)
295
296 for copy_dir in copy_dirs:
297 indir = os.path.normpath(os.path.join(root, copy_dir))
298 outdir = os.path.normpath(os.path.join(project_dir, indir))
299 logging.debug(
300 'Copying dir {0} to {1} without rendering'
301 ''.format(indir, outdir)
302 )
303 shutil.copytree(indir, outdir)
304
305 # We mutate ``dirs``, because we only want to go through these dirs
306 # recursively
307 dirs[:] = render_dirs
308 for d in dirs:
309 unrendered_dir = os.path.join(project_dir, root, d)
310 render_and_create_dir(unrendered_dir, context, output_dir,
311 overwrite_if_exists)
312
313 for f in files:
314 infile = os.path.normpath(os.path.join(root, f))
315 if copy_without_render(infile, context):
316 outfile_tmpl = Template(infile)
317 outfile_rendered = outfile_tmpl.render(**context)
318 outfile = os.path.join(project_dir, outfile_rendered)
319 logging.debug(
320 'Copying file {0} to {1} without rendering'
321 ''.format(infile, outfile)
322 )
323 shutil.copyfile(infile, outfile)
324 shutil.copymode(infile, outfile)
325 continue
326 logging.debug('f is {0}'.format(f))
327 generate_file(project_dir, infile, context, env)
328
329 _run_hook_from_repo_dir(repo_dir, 'post_gen_project', project_dir, context)
330
331 return project_dir
332
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cookiecutter/generate.py b/cookiecutter/generate.py
--- a/cookiecutter/generate.py
+++ b/cookiecutter/generate.py
@@ -87,9 +87,9 @@
context = {}
- file_handle = open(context_file)
try:
- obj = json.load(file_handle, object_pairs_hook=OrderedDict)
+ with open(context_file) as file_handle:
+ obj = json.load(file_handle, object_pairs_hook=OrderedDict)
except ValueError as e:
# JSON decoding error. Let's throw a new exception that is more
# friendly for the developer or user.
| {"golden_diff": "diff --git a/cookiecutter/generate.py b/cookiecutter/generate.py\n--- a/cookiecutter/generate.py\n+++ b/cookiecutter/generate.py\n@@ -87,9 +87,9 @@\n \n context = {}\n \n- file_handle = open(context_file)\n try:\n- obj = json.load(file_handle, object_pairs_hook=OrderedDict)\n+ with open(context_file) as file_handle:\n+ obj = json.load(file_handle, object_pairs_hook=OrderedDict)\n except ValueError as e:\n # JSON decoding error. Let's throw a new exception that is more\n # friendly for the developer or user.\n", "issue": "file_handle.close() is never called -- and it causes a bug!!\nIn https://github.com/audreyr/cookiecutter/blob/master/cookiecutter/generate.py#L90, `file_handle.close()` is never called, and there's no need for it to remain open.\n\nThis is the first time in over 10 years of python programming that I've run into an actual issue with file handles left open, so I felt that the use of two exclamation points in the subject was warranted. I'm removing a temporary template after an unrelated error, and...\n\n```\n File \"D:\\anaconda32\\lib\\shutil.py\", line 250, in rmtree\n os.remove(fullname)\nWindowsError: [Error 32] The process cannot access the file because it is being\nused by another process: '.\\\\tmpp2duu1\\\\cookiecutter.json'\n```\n\nThis change in generate.py:90 helps python not stumble on this particular issue:\n\n```\ntry:\n with open(context_file) as file_handle:\n obj = json.load(file_handle, object_pairs_hook=OrderedDict)\nexcept ValueError as e:\n # ...\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.generate\n---------------------\n\nFunctions for generating a project from a project template.\n\"\"\"\nfrom __future__ import unicode_literals\nfrom collections import OrderedDict\nimport fnmatch\nimport io\nimport json\nimport logging\nimport os\nimport shutil\n\nfrom jinja2 import FileSystemLoader, Template\nfrom jinja2.environment import Environment\nfrom jinja2.exceptions import TemplateSyntaxError\nfrom binaryornot.check import is_binary\n\nfrom .exceptions import (\n NonTemplatedInputDirException,\n ContextDecodingException,\n FailedHookException,\n OutputDirExistsException\n)\nfrom .find import find_template\nfrom .utils import make_sure_path_exists, work_in, rmtree\nfrom .hooks import run_hook\n\n\ndef copy_without_render(path, context):\n \"\"\"\n Returns True if `path` matches some pattern in the\n `_copy_without_render` context setting.\n\n :param path: A file-system path referring to a file or dir that\n should be rendered or just copied.\n :param context: cookiecutter context.\n \"\"\"\n try:\n for dont_render in context['cookiecutter']['_copy_without_render']:\n if fnmatch.fnmatch(path, dont_render):\n return True\n except KeyError:\n return False\n\n return False\n\n\ndef apply_overwrites_to_context(context, overwrite_context):\n \"\"\"Modify the given context in place based on the overwrite_context.\"\"\"\n for variable, overwrite in overwrite_context.items():\n if variable not in context:\n # Do not include variables which are not used in the template\n continue\n\n context_value = context[variable]\n\n if isinstance(context_value, list):\n # We are dealing with a choice variable\n if overwrite in context_value:\n # This overwrite is actually valid for the given context\n # Let's set it as default (by definition first item in list)\n # see ``cookiecutter.prompt.prompt_choice_for_config``\n context_value.remove(overwrite)\n context_value.insert(0, overwrite)\n 
else:\n # Simply overwrite the value for this variable\n context[variable] = overwrite\n\n\ndef generate_context(context_file='cookiecutter.json', default_context=None,\n extra_context=None):\n \"\"\"\n Generates the context for a Cookiecutter project template.\n Loads the JSON file as a Python object, with key being the JSON filename.\n\n :param context_file: JSON file containing key/value pairs for populating\n the cookiecutter's variables.\n :param default_context: Dictionary containing config to take into account.\n :param extra_context: Dictionary containing configuration overrides\n \"\"\"\n\n context = {}\n\n file_handle = open(context_file)\n try:\n obj = json.load(file_handle, object_pairs_hook=OrderedDict)\n except ValueError as e:\n # JSON decoding error. Let's throw a new exception that is more\n # friendly for the developer or user.\n full_fpath = os.path.abspath(context_file)\n json_exc_message = str(e)\n our_exc_message = (\n 'JSON decoding error while loading \"{0}\". Decoding'\n ' error details: \"{1}\"'.format(full_fpath, json_exc_message))\n raise ContextDecodingException(our_exc_message)\n\n # Add the Python object to the context dictionary\n file_name = os.path.split(context_file)[1]\n file_stem = file_name.split('.')[0]\n context[file_stem] = obj\n\n # Overwrite context variable defaults with the default context from the\n # user's global config, if available\n if default_context:\n apply_overwrites_to_context(obj, default_context)\n if extra_context:\n apply_overwrites_to_context(obj, extra_context)\n\n logging.debug('Context generated is {0}'.format(context))\n return context\n\n\ndef generate_file(project_dir, infile, context, env):\n \"\"\"\n 1. Render the filename of infile as the name of outfile.\n 2. Deal with infile appropriately:\n\n a. If infile is a binary file, copy it over without rendering.\n b. If infile is a text file, render its contents and write the\n rendered infile to outfile.\n\n Precondition:\n\n When calling `generate_file()`, the root template dir must be the\n current working directory. Using `utils.work_in()` is the recommended\n way to perform this directory change.\n\n :param project_dir: Absolute path to the resulting generated project.\n :param infile: Input file to generate the file from. Relative to the root\n template dir.\n :param context: Dict for populating the cookiecutter's variables.\n :param env: Jinja2 template execution environment.\n \"\"\"\n\n logging.debug('Generating file {0}'.format(infile))\n\n # Render the path to the output file (not including the root project dir)\n outfile_tmpl = Template(infile)\n\n outfile = os.path.join(project_dir, outfile_tmpl.render(**context))\n file_name_is_empty = os.path.isdir(outfile)\n if file_name_is_empty:\n logging.debug('The resulting file name is empty: {0}'.format(outfile))\n return\n\n logging.debug('outfile is {0}'.format(outfile))\n\n # Just copy over binary files. 
Don't render.\n logging.debug(\"Check {0} to see if it's a binary\".format(infile))\n if is_binary(infile):\n logging.debug('Copying binary {0} to {1} without rendering'\n .format(infile, outfile))\n shutil.copyfile(infile, outfile)\n else:\n # Force fwd slashes on Windows for get_template\n # This is a by-design Jinja issue\n infile_fwd_slashes = infile.replace(os.path.sep, '/')\n\n # Render the file\n try:\n tmpl = env.get_template(infile_fwd_slashes)\n except TemplateSyntaxError as exception:\n # Disable translated so that printed exception contains verbose\n # information about syntax error location\n exception.translated = False\n raise\n rendered_file = tmpl.render(**context)\n\n logging.debug('Writing {0}'.format(outfile))\n\n with io.open(outfile, 'w', encoding='utf-8') as fh:\n fh.write(rendered_file)\n\n # Apply file permissions to output file\n shutil.copymode(infile, outfile)\n\n\ndef render_and_create_dir(dirname, context, output_dir,\n overwrite_if_exists=False):\n \"\"\"\n Renders the name of a directory, creates the directory, and\n returns its path.\n \"\"\"\n\n name_tmpl = Template(dirname)\n rendered_dirname = name_tmpl.render(**context)\n logging.debug('Rendered dir {0} must exist in output_dir {1}'.format(\n rendered_dirname,\n output_dir\n ))\n dir_to_create = os.path.normpath(\n os.path.join(output_dir, rendered_dirname)\n )\n\n output_dir_exists = os.path.exists(dir_to_create)\n\n if overwrite_if_exists:\n if output_dir_exists:\n logging.debug('Output directory {} already exists,'\n 'overwriting it'.format(dir_to_create))\n else:\n if output_dir_exists:\n msg = 'Error: \"{}\" directory already exists'.format(dir_to_create)\n raise OutputDirExistsException(msg)\n\n make_sure_path_exists(dir_to_create)\n return dir_to_create\n\n\ndef ensure_dir_is_templated(dirname):\n \"\"\"\n Ensures that dirname is a templated directory name.\n \"\"\"\n if '{{' in dirname and '}}' in dirname:\n return True\n else:\n raise NonTemplatedInputDirException\n\n\ndef _run_hook_from_repo_dir(repo_dir, hook_name, project_dir, context):\n \"\"\"\n Run hook from repo directory, cleaning up project directory if hook fails\n \"\"\"\n with work_in(repo_dir):\n try:\n run_hook(hook_name, project_dir, context)\n except FailedHookException:\n rmtree(project_dir)\n logging.error(\"Stopping generation because %s\"\n \" hook script didn't exit sucessfully\" % hook_name)\n raise\n\n\ndef generate_files(repo_dir, context=None, output_dir='.',\n overwrite_if_exists=False):\n \"\"\"\n Renders the templates and saves them to files.\n\n :param repo_dir: Project template input directory.\n :param context: Dict for populating the template's variables.\n :param output_dir: Where to output the generated project dir into.\n :param overwrite_if_exists: Overwrite the contents of the output directory\n if it exists\n \"\"\"\n\n template_dir = find_template(repo_dir)\n logging.debug('Generating project from {0}...'.format(template_dir))\n context = context or {}\n\n unrendered_dir = os.path.split(template_dir)[1]\n ensure_dir_is_templated(unrendered_dir)\n project_dir = render_and_create_dir(unrendered_dir,\n context,\n output_dir,\n overwrite_if_exists)\n\n # We want the Jinja path and the OS paths to match. 
Consequently, we'll:\n # + CD to the template folder\n # + Set Jinja's path to '.'\n #\n # In order to build our files to the correct folder(s), we'll use an\n # absolute path for the target folder (project_dir)\n\n project_dir = os.path.abspath(project_dir)\n logging.debug('project_dir is {0}'.format(project_dir))\n\n _run_hook_from_repo_dir(repo_dir, 'pre_gen_project', project_dir, context)\n\n with work_in(template_dir):\n env = Environment(keep_trailing_newline=True)\n env.loader = FileSystemLoader('.')\n\n for root, dirs, files in os.walk('.'):\n # We must separate the two types of dirs into different lists.\n # The reason is that we don't want ``os.walk`` to go through the\n # unrendered directories, since they will just be copied.\n copy_dirs = []\n render_dirs = []\n\n for d in dirs:\n d_ = os.path.normpath(os.path.join(root, d))\n # We check the full path, because that's how it can be\n # specified in the ``_copy_without_render`` setting, but\n # we store just the dir name\n if copy_without_render(d_, context):\n copy_dirs.append(d)\n else:\n render_dirs.append(d)\n\n for copy_dir in copy_dirs:\n indir = os.path.normpath(os.path.join(root, copy_dir))\n outdir = os.path.normpath(os.path.join(project_dir, indir))\n logging.debug(\n 'Copying dir {0} to {1} without rendering'\n ''.format(indir, outdir)\n )\n shutil.copytree(indir, outdir)\n\n # We mutate ``dirs``, because we only want to go through these dirs\n # recursively\n dirs[:] = render_dirs\n for d in dirs:\n unrendered_dir = os.path.join(project_dir, root, d)\n render_and_create_dir(unrendered_dir, context, output_dir,\n overwrite_if_exists)\n\n for f in files:\n infile = os.path.normpath(os.path.join(root, f))\n if copy_without_render(infile, context):\n outfile_tmpl = Template(infile)\n outfile_rendered = outfile_tmpl.render(**context)\n outfile = os.path.join(project_dir, outfile_rendered)\n logging.debug(\n 'Copying file {0} to {1} without rendering'\n ''.format(infile, outfile)\n )\n shutil.copyfile(infile, outfile)\n shutil.copymode(infile, outfile)\n continue\n logging.debug('f is {0}'.format(f))\n generate_file(project_dir, infile, context, env)\n\n _run_hook_from_repo_dir(repo_dir, 'post_gen_project', project_dir, context)\n\n return project_dir\n", "path": "cookiecutter/generate.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.generate\n---------------------\n\nFunctions for generating a project from a project template.\n\"\"\"\nfrom __future__ import unicode_literals\nfrom collections import OrderedDict\nimport fnmatch\nimport io\nimport json\nimport logging\nimport os\nimport shutil\n\nfrom jinja2 import FileSystemLoader, Template\nfrom jinja2.environment import Environment\nfrom jinja2.exceptions import TemplateSyntaxError\nfrom binaryornot.check import is_binary\n\nfrom .exceptions import (\n NonTemplatedInputDirException,\n ContextDecodingException,\n FailedHookException,\n OutputDirExistsException\n)\nfrom .find import find_template\nfrom .utils import make_sure_path_exists, work_in, rmtree\nfrom .hooks import run_hook\n\n\ndef copy_without_render(path, context):\n \"\"\"\n Returns True if `path` matches some pattern in the\n `_copy_without_render` context setting.\n\n :param path: A file-system path referring to a file or dir that\n should be rendered or just copied.\n :param context: cookiecutter context.\n \"\"\"\n try:\n for dont_render in context['cookiecutter']['_copy_without_render']:\n if fnmatch.fnmatch(path, dont_render):\n return True\n 
except KeyError:\n return False\n\n return False\n\n\ndef apply_overwrites_to_context(context, overwrite_context):\n \"\"\"Modify the given context in place based on the overwrite_context.\"\"\"\n for variable, overwrite in overwrite_context.items():\n if variable not in context:\n # Do not include variables which are not used in the template\n continue\n\n context_value = context[variable]\n\n if isinstance(context_value, list):\n # We are dealing with a choice variable\n if overwrite in context_value:\n # This overwrite is actually valid for the given context\n # Let's set it as default (by definition first item in list)\n # see ``cookiecutter.prompt.prompt_choice_for_config``\n context_value.remove(overwrite)\n context_value.insert(0, overwrite)\n else:\n # Simply overwrite the value for this variable\n context[variable] = overwrite\n\n\ndef generate_context(context_file='cookiecutter.json', default_context=None,\n extra_context=None):\n \"\"\"\n Generates the context for a Cookiecutter project template.\n Loads the JSON file as a Python object, with key being the JSON filename.\n\n :param context_file: JSON file containing key/value pairs for populating\n the cookiecutter's variables.\n :param default_context: Dictionary containing config to take into account.\n :param extra_context: Dictionary containing configuration overrides\n \"\"\"\n\n context = {}\n\n try:\n with open(context_file) as file_handle:\n obj = json.load(file_handle, object_pairs_hook=OrderedDict)\n except ValueError as e:\n # JSON decoding error. Let's throw a new exception that is more\n # friendly for the developer or user.\n full_fpath = os.path.abspath(context_file)\n json_exc_message = str(e)\n our_exc_message = (\n 'JSON decoding error while loading \"{0}\". Decoding'\n ' error details: \"{1}\"'.format(full_fpath, json_exc_message))\n raise ContextDecodingException(our_exc_message)\n\n # Add the Python object to the context dictionary\n file_name = os.path.split(context_file)[1]\n file_stem = file_name.split('.')[0]\n context[file_stem] = obj\n\n # Overwrite context variable defaults with the default context from the\n # user's global config, if available\n if default_context:\n apply_overwrites_to_context(obj, default_context)\n if extra_context:\n apply_overwrites_to_context(obj, extra_context)\n\n logging.debug('Context generated is {0}'.format(context))\n return context\n\n\ndef generate_file(project_dir, infile, context, env):\n \"\"\"\n 1. Render the filename of infile as the name of outfile.\n 2. Deal with infile appropriately:\n\n a. If infile is a binary file, copy it over without rendering.\n b. If infile is a text file, render its contents and write the\n rendered infile to outfile.\n\n Precondition:\n\n When calling `generate_file()`, the root template dir must be the\n current working directory. Using `utils.work_in()` is the recommended\n way to perform this directory change.\n\n :param project_dir: Absolute path to the resulting generated project.\n :param infile: Input file to generate the file from. 
Relative to the root\n template dir.\n :param context: Dict for populating the cookiecutter's variables.\n :param env: Jinja2 template execution environment.\n \"\"\"\n\n logging.debug('Generating file {0}'.format(infile))\n\n # Render the path to the output file (not including the root project dir)\n outfile_tmpl = Template(infile)\n\n outfile = os.path.join(project_dir, outfile_tmpl.render(**context))\n file_name_is_empty = os.path.isdir(outfile)\n if file_name_is_empty:\n logging.debug('The resulting file name is empty: {0}'.format(outfile))\n return\n\n logging.debug('outfile is {0}'.format(outfile))\n\n # Just copy over binary files. Don't render.\n logging.debug(\"Check {0} to see if it's a binary\".format(infile))\n if is_binary(infile):\n logging.debug('Copying binary {0} to {1} without rendering'\n .format(infile, outfile))\n shutil.copyfile(infile, outfile)\n else:\n # Force fwd slashes on Windows for get_template\n # This is a by-design Jinja issue\n infile_fwd_slashes = infile.replace(os.path.sep, '/')\n\n # Render the file\n try:\n tmpl = env.get_template(infile_fwd_slashes)\n except TemplateSyntaxError as exception:\n # Disable translated so that printed exception contains verbose\n # information about syntax error location\n exception.translated = False\n raise\n rendered_file = tmpl.render(**context)\n\n logging.debug('Writing {0}'.format(outfile))\n\n with io.open(outfile, 'w', encoding='utf-8') as fh:\n fh.write(rendered_file)\n\n # Apply file permissions to output file\n shutil.copymode(infile, outfile)\n\n\ndef render_and_create_dir(dirname, context, output_dir,\n overwrite_if_exists=False):\n \"\"\"\n Renders the name of a directory, creates the directory, and\n returns its path.\n \"\"\"\n\n name_tmpl = Template(dirname)\n rendered_dirname = name_tmpl.render(**context)\n logging.debug('Rendered dir {0} must exist in output_dir {1}'.format(\n rendered_dirname,\n output_dir\n ))\n dir_to_create = os.path.normpath(\n os.path.join(output_dir, rendered_dirname)\n )\n\n output_dir_exists = os.path.exists(dir_to_create)\n\n if overwrite_if_exists:\n if output_dir_exists:\n logging.debug('Output directory {} already exists,'\n 'overwriting it'.format(dir_to_create))\n else:\n if output_dir_exists:\n msg = 'Error: \"{}\" directory already exists'.format(dir_to_create)\n raise OutputDirExistsException(msg)\n\n make_sure_path_exists(dir_to_create)\n return dir_to_create\n\n\ndef ensure_dir_is_templated(dirname):\n \"\"\"\n Ensures that dirname is a templated directory name.\n \"\"\"\n if '{{' in dirname and '}}' in dirname:\n return True\n else:\n raise NonTemplatedInputDirException\n\n\ndef _run_hook_from_repo_dir(repo_dir, hook_name, project_dir, context):\n \"\"\"\n Run hook from repo directory, cleaning up project directory if hook fails\n \"\"\"\n with work_in(repo_dir):\n try:\n run_hook(hook_name, project_dir, context)\n except FailedHookException:\n rmtree(project_dir)\n logging.error(\"Stopping generation because %s\"\n \" hook script didn't exit sucessfully\" % hook_name)\n raise\n\n\ndef generate_files(repo_dir, context=None, output_dir='.',\n overwrite_if_exists=False):\n \"\"\"\n Renders the templates and saves them to files.\n\n :param repo_dir: Project template input directory.\n :param context: Dict for populating the template's variables.\n :param output_dir: Where to output the generated project dir into.\n :param overwrite_if_exists: Overwrite the contents of the output directory\n if it exists\n \"\"\"\n\n template_dir = find_template(repo_dir)\n 
logging.debug('Generating project from {0}...'.format(template_dir))\n context = context or {}\n\n unrendered_dir = os.path.split(template_dir)[1]\n ensure_dir_is_templated(unrendered_dir)\n project_dir = render_and_create_dir(unrendered_dir,\n context,\n output_dir,\n overwrite_if_exists)\n\n # We want the Jinja path and the OS paths to match. Consequently, we'll:\n # + CD to the template folder\n # + Set Jinja's path to '.'\n #\n # In order to build our files to the correct folder(s), we'll use an\n # absolute path for the target folder (project_dir)\n\n project_dir = os.path.abspath(project_dir)\n logging.debug('project_dir is {0}'.format(project_dir))\n\n _run_hook_from_repo_dir(repo_dir, 'pre_gen_project', project_dir, context)\n\n with work_in(template_dir):\n env = Environment(keep_trailing_newline=True)\n env.loader = FileSystemLoader('.')\n\n for root, dirs, files in os.walk('.'):\n # We must separate the two types of dirs into different lists.\n # The reason is that we don't want ``os.walk`` to go through the\n # unrendered directories, since they will just be copied.\n copy_dirs = []\n render_dirs = []\n\n for d in dirs:\n d_ = os.path.normpath(os.path.join(root, d))\n # We check the full path, because that's how it can be\n # specified in the ``_copy_without_render`` setting, but\n # we store just the dir name\n if copy_without_render(d_, context):\n copy_dirs.append(d)\n else:\n render_dirs.append(d)\n\n for copy_dir in copy_dirs:\n indir = os.path.normpath(os.path.join(root, copy_dir))\n outdir = os.path.normpath(os.path.join(project_dir, indir))\n logging.debug(\n 'Copying dir {0} to {1} without rendering'\n ''.format(indir, outdir)\n )\n shutil.copytree(indir, outdir)\n\n # We mutate ``dirs``, because we only want to go through these dirs\n # recursively\n dirs[:] = render_dirs\n for d in dirs:\n unrendered_dir = os.path.join(project_dir, root, d)\n render_and_create_dir(unrendered_dir, context, output_dir,\n overwrite_if_exists)\n\n for f in files:\n infile = os.path.normpath(os.path.join(root, f))\n if copy_without_render(infile, context):\n outfile_tmpl = Template(infile)\n outfile_rendered = outfile_tmpl.render(**context)\n outfile = os.path.join(project_dir, outfile_rendered)\n logging.debug(\n 'Copying file {0} to {1} without rendering'\n ''.format(infile, outfile)\n )\n shutil.copyfile(infile, outfile)\n shutil.copymode(infile, outfile)\n continue\n logging.debug('f is {0}'.format(f))\n generate_file(project_dir, infile, context, env)\n\n _run_hook_from_repo_dir(repo_dir, 'post_gen_project', project_dir, context)\n\n return project_dir\n", "path": "cookiecutter/generate.py"}]} | 3,918 | 145 |
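The fix for the row above wraps `json.load` in a `with open(...)` block so the context file's handle is released deterministically, even when decoding raises. That matters on Windows, where a lingering open handle inside a temporary template directory makes `shutil.rmtree` fail with "The process cannot access the file". The snippet below is a generic, self-contained demonstration of the same pattern, not cookiecutter's code.

```python
# Generic demonstration: close the file on every exit path so later cleanup
# (e.g. shutil.rmtree on Windows) is not blocked by an open handle.
# This is not cookiecutter's actual code.
import json
import os
import shutil
import tempfile
from collections import OrderedDict


def load_context(context_file):
    try:
        with open(context_file) as file_handle:  # closed even if json.load raises
            return json.load(file_handle, object_pairs_hook=OrderedDict)
    except ValueError as exc:
        # Re-raise with a friendlier message; the handle is already closed,
        # so the caller can safely remove the temporary template directory.
        raise ValueError(
            f"JSON decoding error in {os.path.abspath(context_file)}: {exc}"
        )


tmp_dir = tempfile.mkdtemp()
path = os.path.join(tmp_dir, "cookiecutter.json")
with open(path, "w") as fh:
    fh.write('{"project_name": "demo"')  # intentionally invalid JSON

try:
    load_context(path)
except ValueError:
    pass

shutil.rmtree(tmp_dir)  # succeeds because no handle is left open
```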
gh_patches_debug_18728 | rasdani/github-patches | git_diff | TheAlgorithms__Python-6190 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[suggestion] use relative path in DIRECTORY.md
when openning DIRECTORY.md in local machine, the links in it refers to https://github.com/TheAlgorithms/Python/blob/master/xxx.
it's not convinient for reading locally.
I suggest to make a new file "TOC.md", which removes "https://github.com/TheAlgorithms/Python/blob/master/" in every link.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/build_directory_md.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import os
4 from typing import Iterator
5
6 URL_BASE = "https://github.com/TheAlgorithms/Python/blob/master"
7
8
9 def good_file_paths(top_dir: str = ".") -> Iterator[str]:
10 for dir_path, dir_names, filenames in os.walk(top_dir):
11 dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
12 for filename in filenames:
13 if filename == "__init__.py":
14 continue
15 if os.path.splitext(filename)[1] in (".py", ".ipynb"):
16 yield os.path.join(dir_path, filename).lstrip("./")
17
18
19 def md_prefix(i):
20 return f"{i * ' '}*" if i else "\n##"
21
22
23 def print_path(old_path: str, new_path: str) -> str:
24 old_parts = old_path.split(os.sep)
25 for i, new_part in enumerate(new_path.split(os.sep)):
26 if i + 1 > len(old_parts) or old_parts[i] != new_part:
27 if new_part:
28 print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
29 return new_path
30
31
32 def print_directory_md(top_dir: str = ".") -> None:
33 old_path = ""
34 for filepath in sorted(good_file_paths(top_dir)):
35 filepath, filename = os.path.split(filepath)
36 if filepath != old_path:
37 old_path = print_path(old_path, filepath)
38 indent = (filepath.count(os.sep) + 1) if filepath else 0
39 url = "/".join((URL_BASE, filepath, filename)).replace(" ", "%20")
40 filename = os.path.splitext(filename.replace("_", " ").title())[0]
41 print(f"{md_prefix(indent)} [{filename}]({url})")
42
43
44 if __name__ == "__main__":
45 print_directory_md(".")
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/build_directory_md.py b/scripts/build_directory_md.py
--- a/scripts/build_directory_md.py
+++ b/scripts/build_directory_md.py
@@ -3,8 +3,6 @@
import os
from typing import Iterator
-URL_BASE = "https://github.com/TheAlgorithms/Python/blob/master"
-
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(top_dir):
@@ -36,7 +34,7 @@
if filepath != old_path:
old_path = print_path(old_path, filepath)
indent = (filepath.count(os.sep) + 1) if filepath else 0
- url = "/".join((URL_BASE, filepath, filename)).replace(" ", "%20")
+ url = "/".join((filepath, filename)).replace(" ", "%20")
filename = os.path.splitext(filename.replace("_", " ").title())[0]
print(f"{md_prefix(indent)} [{filename}]({url})")
| {"golden_diff": "diff --git a/scripts/build_directory_md.py b/scripts/build_directory_md.py\n--- a/scripts/build_directory_md.py\n+++ b/scripts/build_directory_md.py\n@@ -3,8 +3,6 @@\n import os\n from typing import Iterator\n \n-URL_BASE = \"https://github.com/TheAlgorithms/Python/blob/master\"\n-\n \n def good_file_paths(top_dir: str = \".\") -> Iterator[str]:\n for dir_path, dir_names, filenames in os.walk(top_dir):\n@@ -36,7 +34,7 @@\n if filepath != old_path:\n old_path = print_path(old_path, filepath)\n indent = (filepath.count(os.sep) + 1) if filepath else 0\n- url = \"/\".join((URL_BASE, filepath, filename)).replace(\" \", \"%20\")\n+ url = \"/\".join((filepath, filename)).replace(\" \", \"%20\")\n filename = os.path.splitext(filename.replace(\"_\", \" \").title())[0]\n print(f\"{md_prefix(indent)} [{filename}]({url})\")\n", "issue": "[suggestion] use relative path in DIRECTORY.md\nwhen openning DIRECTORY.md in local machine, the links in it refers to https://github.com/TheAlgorithms/Python/blob/master/xxx.\r\n\r\nit's not convinient for reading locally.\r\n\r\nI suggest to make a new file \"TOC.md\", which removes \"https://github.com/TheAlgorithms/Python/blob/master/\" in every link.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport os\nfrom typing import Iterator\n\nURL_BASE = \"https://github.com/TheAlgorithms/Python/blob/master\"\n\n\ndef good_file_paths(top_dir: str = \".\") -> Iterator[str]:\n for dir_path, dir_names, filenames in os.walk(top_dir):\n dir_names[:] = [d for d in dir_names if d != \"scripts\" and d[0] not in \"._\"]\n for filename in filenames:\n if filename == \"__init__.py\":\n continue\n if os.path.splitext(filename)[1] in (\".py\", \".ipynb\"):\n yield os.path.join(dir_path, filename).lstrip(\"./\")\n\n\ndef md_prefix(i):\n return f\"{i * ' '}*\" if i else \"\\n##\"\n\n\ndef print_path(old_path: str, new_path: str) -> str:\n old_parts = old_path.split(os.sep)\n for i, new_part in enumerate(new_path.split(os.sep)):\n if i + 1 > len(old_parts) or old_parts[i] != new_part:\n if new_part:\n print(f\"{md_prefix(i)} {new_part.replace('_', ' ').title()}\")\n return new_path\n\n\ndef print_directory_md(top_dir: str = \".\") -> None:\n old_path = \"\"\n for filepath in sorted(good_file_paths(top_dir)):\n filepath, filename = os.path.split(filepath)\n if filepath != old_path:\n old_path = print_path(old_path, filepath)\n indent = (filepath.count(os.sep) + 1) if filepath else 0\n url = \"/\".join((URL_BASE, filepath, filename)).replace(\" \", \"%20\")\n filename = os.path.splitext(filename.replace(\"_\", \" \").title())[0]\n print(f\"{md_prefix(indent)} [{filename}]({url})\")\n\n\nif __name__ == \"__main__\":\n print_directory_md(\".\")\n", "path": "scripts/build_directory_md.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport os\nfrom typing import Iterator\n\n\ndef good_file_paths(top_dir: str = \".\") -> Iterator[str]:\n for dir_path, dir_names, filenames in os.walk(top_dir):\n dir_names[:] = [d for d in dir_names if d != \"scripts\" and d[0] not in \"._\"]\n for filename in filenames:\n if filename == \"__init__.py\":\n continue\n if os.path.splitext(filename)[1] in (\".py\", \".ipynb\"):\n yield os.path.join(dir_path, filename).lstrip(\"./\")\n\n\ndef md_prefix(i):\n return f\"{i * ' '}*\" if i else \"\\n##\"\n\n\ndef print_path(old_path: str, new_path: str) -> str:\n old_parts = old_path.split(os.sep)\n for i, new_part in enumerate(new_path.split(os.sep)):\n if i + 1 > len(old_parts) or old_parts[i] != new_part:\n if 
new_part:\n print(f\"{md_prefix(i)} {new_part.replace('_', ' ').title()}\")\n return new_path\n\n\ndef print_directory_md(top_dir: str = \".\") -> None:\n old_path = \"\"\n for filepath in sorted(good_file_paths(top_dir)):\n filepath, filename = os.path.split(filepath)\n if filepath != old_path:\n old_path = print_path(old_path, filepath)\n indent = (filepath.count(os.sep) + 1) if filepath else 0\n url = \"/\".join((filepath, filename)).replace(\" \", \"%20\")\n filename = os.path.splitext(filename.replace(\"_\", \" \").title())[0]\n print(f\"{md_prefix(indent)} [{filename}]({url})\")\n\n\nif __name__ == \"__main__\":\n print_directory_md(\".\")\n", "path": "scripts/build_directory_md.py"}]} | 828 | 220 |
gh_patches_debug_37070 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-935 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Page filter subpages not shown
### Describe the Bug
When filtering for pages which are not root-pages, these subpages will not be shown in the page tree view.
### Steps to Reproduce
1. Filter on the page view for any page which is not a root-page
### Expected Behavior
All matching pages are shown
### Actual Behavior
Only the root pages (and matching subpages of the root pages) get shown
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cms/templatetags/page_filters.py`
Content:
```
1 """
2 This is a collection of tags and filters for :class:`~cms.models.pages.page.Page` objects.
3 """
4 from django import template
5
6 register = template.Library()
7
8
9 @register.simple_tag
10 def get_last_root_page(pages):
11 """
12 This tag returns the last page on the root level.
13
14 :param pages: The requested page tree
15 :type pages: list [ ~cms.models.pages.page.Page ]
16
17 :return: The last root page of the given page list
18 :rtype: ~cms.models.pages.page.Page
19 """
20 root_pages = list(filter(lambda p: not p.parent, pages))
21 return root_pages[-1] if root_pages else None
22
```
Path: `src/cms/templatetags/tree_filters.py`
Content:
```
1 """
2 This is a collection of tags and filters for models which inherit from the MPTT model
3 (:class:`~cms.models.pages.page.Page` and :class:`~cms.models.languages.language_tree_node.LanguageTreeNode`).
4 """
5 from django import template
6
7 register = template.Library()
8
9
10 @register.filter
11 def get_descendants(node):
12 """
13 This filter returns the ids of all the node's descendants.
14
15 :param node: The requested node
16 :type node: ~cms.models.pages.page.Page or ~cms.models.languages.language_tree_node.LanguageTreeNode
17
18 :return: The list of all the node's descendants' ids
19 :rtype: list [ int ]
20 """
21 return [descendant.id for descendant in node.get_descendants(include_self=True)]
22
23
24 @register.filter
25 def get_children(node):
26 """
27 This filter returns the ids of all the node's direct children.
28
29 :param node: The requested node
30 :type node: ~cms.models.pages.page.Page or ~cms.models.languages.language_tree_node.LanguageTreeNode
31
32 :return: The list of all the node's children's ids
33 :rtype: list [ int ]
34 """
35 return [child.id for child in node.children.all()]
36
```
Path: `src/cms/views/pages/page_tree_view.py`
Content:
```
1 import logging
2
3 from django.contrib import messages
4 from django.contrib.auth.decorators import login_required
5 from django.shortcuts import render, redirect
6 from django.utils.decorators import method_decorator
7 from django.utils.translation import ugettext as _
8 from django.views.generic import TemplateView
9
10 from ...constants import translation_status
11 from ...decorators import region_permission_required, permission_required
12 from ...forms import PageFilterForm
13 from ...models import Region, Language
14 from .page_context_mixin import PageContextMixin
15
16 logger = logging.getLogger(__name__)
17
18
19 @method_decorator(login_required, name="dispatch")
20 @method_decorator(region_permission_required, name="dispatch")
21 @method_decorator(permission_required("cms.view_page"), name="dispatch")
22 class PageTreeView(TemplateView, PageContextMixin):
23 """
24 View for showing the page tree
25 """
26
27 #: Template for list of non-archived pages
28 template = "pages/page_tree.html"
29 #: Template for list of archived pages
30 template_archived = "pages/page_tree_archived.html"
31 #: Whether or not to show archived pages
32 archived = False
33
34 @property
35 def template_name(self):
36 """
37 Select correct HTML template, depending on :attr:`~cms.views.pages.page_tree_view.PageTreeView.archived` flag
38 (see :class:`~django.views.generic.base.TemplateResponseMixin`)
39
40 :return: Path to HTML template
41 :rtype: str
42 """
43
44 return self.template_archived if self.archived else self.template
45
46 # pylint: disable=too-many-locals
47 def get(self, request, *args, **kwargs):
48 """
49 Render page tree
50
51 :param request: The current request
52 :type request: ~django.http.HttpResponse
53
54 :param args: The supplied arguments
55 :type args: list
56
57 :param kwargs: The supplied keyword arguments
58 :type kwargs: dict
59
60 :return: The rendered template response
61 :rtype: ~django.template.response.TemplateResponse
62 """
63
64 # current region
65 region_slug = kwargs.get("region_slug")
66 region = Region.get_current_region(request)
67
68 # current language
69 language_slug = kwargs.get("language_slug")
70 if language_slug:
71 language = Language.objects.get(slug=language_slug)
72 elif region.default_language:
73 return redirect(
74 "pages",
75 **{
76 "region_slug": region_slug,
77 "language_slug": region.default_language.slug,
78 }
79 )
80 else:
81 messages.error(
82 request,
83 _("Please create at least one language node before creating pages."),
84 )
85 return redirect(
86 "language_tree",
87 **{
88 "region_slug": region_slug,
89 }
90 )
91
92 if not request.user.has_perm("cms.change_page"):
93 messages.warning(
94 request, _("You don't have the permission to edit or create pages.")
95 )
96 context = self.get_context_data(**kwargs)
97
98 pages = region.get_pages(archived=self.archived)
99 enable_drag_and_drop = True
100 # Filter pages according to given filters, if any
101 filter_data = kwargs.get("filter_data")
102 if filter_data:
103 # Set data for filter form rendering
104 filter_form = PageFilterForm(data=filter_data)
105 if filter_form.is_valid():
106 selected_status = filter_form.cleaned_data["translation_status"]
107 # only filter if at least one checkbox but not all are checked
108 if 0 < len(selected_status) < len(translation_status.CHOICES):
109 enable_drag_and_drop = False
110
111 def page_filter(page):
112 translation = page.get_translation(language_slug)
113 if not translation:
114 return translation_status.MISSING in selected_status
115 if translation.currently_in_translation:
116 return translation_status.IN_TRANSLATION in selected_status
117 if translation.is_outdated:
118 return translation_status.OUTDATED in selected_status
119 return translation_status.UP_TO_DATE in selected_status
120
121 pages = list(filter(page_filter, pages))
122 else:
123 filter_form = PageFilterForm()
124 filter_form.changed_data.clear()
125
126 return render(
127 request,
128 self.template_name,
129 {
130 **context,
131 "current_menu_item": "pages",
132 "pages": pages,
133 "archived_count": region.get_pages(archived=True).count(),
134 "language": language,
135 "languages": region.languages,
136 "filter_form": filter_form,
137 "enable_drag_and_drop": enable_drag_and_drop,
138 },
139 )
140
141 def post(self, request, *args, **kwargs):
142 """
143 Apply page filters and render page tree
144
145 :param request: The current request
146 :type request: ~django.http.HttpResponse
147
148 :param args: The supplied arguments
149 :type args: list
150
151 :param kwargs: The supplied keyword arguments
152 :type kwargs: dict
153
154 :return: The rendered template response
155 :rtype: ~django.template.response.TemplateResponse
156 """
157 return self.get(request, *args, **kwargs, filter_data=request.POST)
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cms/templatetags/page_filters.py b/src/cms/templatetags/page_filters.py
--- a/src/cms/templatetags/page_filters.py
+++ b/src/cms/templatetags/page_filters.py
@@ -19,3 +19,40 @@
"""
root_pages = list(filter(lambda p: not p.parent, pages))
return root_pages[-1] if root_pages else None
+
+
[email protected]_tag
+def get_depth_in(node, pageset):
+ """
+ This tag returns the depth of node whithin the tree/pages in pageset.
+
+ :param node : the page
+ :type node : ~cms.models.pages.page.Page
+
+ :param pageset: The pages (all pages or pages chosen by filter)
+ :type pageset: list [ ~cms.models.pages.page.Page ]
+
+ :return: the depth of node whithin the tree/pages in pageset
+ :rtype: int
+ """
+ if not node.parent in pageset:
+ return 0
+ return node.depth - get_highest_anscentor_in(node, pageset).depth
+
+
+def get_highest_anscentor_in(node, pageset):
+ """
+ This tag returns the highest (farthest) anscestor of node whithin the tree/pages in pageset.
+
+ :param node : the page
+ :type node : ~cms.models.pages.page.Page
+
+ :param pageset: The pages (all pages or pages chosen by filter)
+ :type pageset: list [ ~cms.models.pages.page.Page ]
+
+ :return: the highest (farthest) anscestor of node whithin the tree/pages in pageset
+ :rtype: ~cms.models.pages.page.Page
+ """
+ if node.parent in pageset:
+ return get_highest_anscentor_in(node.parent, pageset)
+ return node
diff --git a/src/cms/templatetags/tree_filters.py b/src/cms/templatetags/tree_filters.py
--- a/src/cms/templatetags/tree_filters.py
+++ b/src/cms/templatetags/tree_filters.py
@@ -32,4 +32,4 @@
:return: The list of all the node's children's ids
:rtype: list [ int ]
"""
- return [child.id for child in node.children.all()]
+ return [child.id for child in node.get_children()]
diff --git a/src/cms/views/pages/page_tree_view.py b/src/cms/views/pages/page_tree_view.py
--- a/src/cms/views/pages/page_tree_view.py
+++ b/src/cms/views/pages/page_tree_view.py
@@ -10,7 +10,7 @@
from ...constants import translation_status
from ...decorators import region_permission_required, permission_required
from ...forms import PageFilterForm
-from ...models import Region, Language
+from ...models import Region, Language, Page
from .page_context_mixin import PageContextMixin
logger = logging.getLogger(__name__)
@@ -118,7 +118,8 @@
return translation_status.OUTDATED in selected_status
return translation_status.UP_TO_DATE in selected_status
- pages = list(filter(page_filter, pages))
+ pages = map(lambda p: p.id, list(filter(page_filter, pages)))
+ pages = Page.objects.filter(id__in=pages).order_by()
else:
filter_form = PageFilterForm()
filter_form.changed_data.clear()
| {"golden_diff": "diff --git a/src/cms/templatetags/page_filters.py b/src/cms/templatetags/page_filters.py\n--- a/src/cms/templatetags/page_filters.py\n+++ b/src/cms/templatetags/page_filters.py\n@@ -19,3 +19,40 @@\n \"\"\"\n root_pages = list(filter(lambda p: not p.parent, pages))\n return root_pages[-1] if root_pages else None\n+\n+\[email protected]_tag\n+def get_depth_in(node, pageset):\n+ \"\"\"\n+ This tag returns the depth of node whithin the tree/pages in pageset.\n+\n+ :param node : the page\n+ :type node : ~cms.models.pages.page.Page\n+\n+ :param pageset: The pages (all pages or pages chosen by filter)\n+ :type pageset: list [ ~cms.models.pages.page.Page ]\n+\n+ :return: the depth of node whithin the tree/pages in pageset\n+ :rtype: int\n+ \"\"\"\n+ if not node.parent in pageset:\n+ return 0\n+ return node.depth - get_highest_anscentor_in(node, pageset).depth\n+\n+\n+def get_highest_anscentor_in(node, pageset):\n+ \"\"\"\n+ This tag returns the highest (farthest) anscestor of node whithin the tree/pages in pageset.\n+\n+ :param node : the page\n+ :type node : ~cms.models.pages.page.Page\n+\n+ :param pageset: The pages (all pages or pages chosen by filter)\n+ :type pageset: list [ ~cms.models.pages.page.Page ]\n+\n+ :return: the highest (farthest) anscestor of node whithin the tree/pages in pageset\n+ :rtype: ~cms.models.pages.page.Page\n+ \"\"\"\n+ if node.parent in pageset:\n+ return get_highest_anscentor_in(node.parent, pageset)\n+ return node\ndiff --git a/src/cms/templatetags/tree_filters.py b/src/cms/templatetags/tree_filters.py\n--- a/src/cms/templatetags/tree_filters.py\n+++ b/src/cms/templatetags/tree_filters.py\n@@ -32,4 +32,4 @@\n :return: The list of all the node's children's ids\n :rtype: list [ int ]\n \"\"\"\n- return [child.id for child in node.children.all()]\n+ return [child.id for child in node.get_children()]\ndiff --git a/src/cms/views/pages/page_tree_view.py b/src/cms/views/pages/page_tree_view.py\n--- a/src/cms/views/pages/page_tree_view.py\n+++ b/src/cms/views/pages/page_tree_view.py\n@@ -10,7 +10,7 @@\n from ...constants import translation_status\n from ...decorators import region_permission_required, permission_required\n from ...forms import PageFilterForm\n-from ...models import Region, Language\n+from ...models import Region, Language, Page\n from .page_context_mixin import PageContextMixin\n \n logger = logging.getLogger(__name__)\n@@ -118,7 +118,8 @@\n return translation_status.OUTDATED in selected_status\n return translation_status.UP_TO_DATE in selected_status\n \n- pages = list(filter(page_filter, pages))\n+ pages = map(lambda p: p.id, list(filter(page_filter, pages)))\n+ pages = Page.objects.filter(id__in=pages).order_by()\n else:\n filter_form = PageFilterForm()\n filter_form.changed_data.clear()\n", "issue": "Page filter subpages not shown\n### Describe the Bug\r\nWhen filtering for pages which are not root-pages, these subpages will not be shown in the page tree view.\r\n\r\n\r\n### Steps to Reproduce\r\n1. 
Filter on the page view for any page which is not a root-page\r\n\r\n### Expected Behavior\r\nAll matching pages are shown\r\n\r\n\r\n### Actual Behavior\r\nOnly the root pages (and matching subpages of the root pages) get shown\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThis is a collection of tags and filters for :class:`~cms.models.pages.page.Page` objects.\n\"\"\"\nfrom django import template\n\nregister = template.Library()\n\n\[email protected]_tag\ndef get_last_root_page(pages):\n \"\"\"\n This tag returns the last page on the root level.\n\n :param pages: The requested page tree\n :type pages: list [ ~cms.models.pages.page.Page ]\n\n :return: The last root page of the given page list\n :rtype: ~cms.models.pages.page.Page\n \"\"\"\n root_pages = list(filter(lambda p: not p.parent, pages))\n return root_pages[-1] if root_pages else None\n", "path": "src/cms/templatetags/page_filters.py"}, {"content": "\"\"\"\nThis is a collection of tags and filters for models which inherit from the MPTT model\n(:class:`~cms.models.pages.page.Page` and :class:`~cms.models.languages.language_tree_node.LanguageTreeNode`).\n\"\"\"\nfrom django import template\n\nregister = template.Library()\n\n\[email protected]\ndef get_descendants(node):\n \"\"\"\n This filter returns the ids of all the node's descendants.\n\n :param node: The requested node\n :type node: ~cms.models.pages.page.Page or ~cms.models.languages.language_tree_node.LanguageTreeNode\n\n :return: The list of all the node's descendants' ids\n :rtype: list [ int ]\n \"\"\"\n return [descendant.id for descendant in node.get_descendants(include_self=True)]\n\n\[email protected]\ndef get_children(node):\n \"\"\"\n This filter returns the ids of all the node's direct children.\n\n :param node: The requested node\n :type node: ~cms.models.pages.page.Page or ~cms.models.languages.language_tree_node.LanguageTreeNode\n\n :return: The list of all the node's children's ids\n :rtype: list [ int ]\n \"\"\"\n return [child.id for child in node.children.all()]\n", "path": "src/cms/templatetags/tree_filters.py"}, {"content": "import logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\nfrom ...constants import translation_status\nfrom ...decorators import region_permission_required, permission_required\nfrom ...forms import PageFilterForm\nfrom ...models import Region, Language\nfrom .page_context_mixin import PageContextMixin\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(region_permission_required, name=\"dispatch\")\n@method_decorator(permission_required(\"cms.view_page\"), name=\"dispatch\")\nclass PageTreeView(TemplateView, PageContextMixin):\n \"\"\"\n View for showing the page tree\n \"\"\"\n\n #: Template for list of non-archived pages\n template = \"pages/page_tree.html\"\n #: Template for list of archived pages\n template_archived = \"pages/page_tree_archived.html\"\n #: Whether or not to show archived pages\n archived = False\n\n @property\n def template_name(self):\n \"\"\"\n Select correct HTML template, depending on :attr:`~cms.views.pages.page_tree_view.PageTreeView.archived` flag\n (see :class:`~django.views.generic.base.TemplateResponseMixin`)\n\n :return: Path to HTML template\n :rtype: str\n \"\"\"\n\n return 
self.template_archived if self.archived else self.template\n\n # pylint: disable=too-many-locals\n def get(self, request, *args, **kwargs):\n \"\"\"\n Render page tree\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param args: The supplied arguments\n :type args: list\n\n :param kwargs: The supplied keyword arguments\n :type kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n\n # current region\n region_slug = kwargs.get(\"region_slug\")\n region = Region.get_current_region(request)\n\n # current language\n language_slug = kwargs.get(\"language_slug\")\n if language_slug:\n language = Language.objects.get(slug=language_slug)\n elif region.default_language:\n return redirect(\n \"pages\",\n **{\n \"region_slug\": region_slug,\n \"language_slug\": region.default_language.slug,\n }\n )\n else:\n messages.error(\n request,\n _(\"Please create at least one language node before creating pages.\"),\n )\n return redirect(\n \"language_tree\",\n **{\n \"region_slug\": region_slug,\n }\n )\n\n if not request.user.has_perm(\"cms.change_page\"):\n messages.warning(\n request, _(\"You don't have the permission to edit or create pages.\")\n )\n context = self.get_context_data(**kwargs)\n\n pages = region.get_pages(archived=self.archived)\n enable_drag_and_drop = True\n # Filter pages according to given filters, if any\n filter_data = kwargs.get(\"filter_data\")\n if filter_data:\n # Set data for filter form rendering\n filter_form = PageFilterForm(data=filter_data)\n if filter_form.is_valid():\n selected_status = filter_form.cleaned_data[\"translation_status\"]\n # only filter if at least one checkbox but not all are checked\n if 0 < len(selected_status) < len(translation_status.CHOICES):\n enable_drag_and_drop = False\n\n def page_filter(page):\n translation = page.get_translation(language_slug)\n if not translation:\n return translation_status.MISSING in selected_status\n if translation.currently_in_translation:\n return translation_status.IN_TRANSLATION in selected_status\n if translation.is_outdated:\n return translation_status.OUTDATED in selected_status\n return translation_status.UP_TO_DATE in selected_status\n\n pages = list(filter(page_filter, pages))\n else:\n filter_form = PageFilterForm()\n filter_form.changed_data.clear()\n\n return render(\n request,\n self.template_name,\n {\n **context,\n \"current_menu_item\": \"pages\",\n \"pages\": pages,\n \"archived_count\": region.get_pages(archived=True).count(),\n \"language\": language,\n \"languages\": region.languages,\n \"filter_form\": filter_form,\n \"enable_drag_and_drop\": enable_drag_and_drop,\n },\n )\n\n def post(self, request, *args, **kwargs):\n \"\"\"\n Apply page filters and render page tree\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param args: The supplied arguments\n :type args: list\n\n :param kwargs: The supplied keyword arguments\n :type kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n return self.get(request, *args, **kwargs, filter_data=request.POST)\n", "path": "src/cms/views/pages/page_tree_view.py"}], "after_files": [{"content": "\"\"\"\nThis is a collection of tags and filters for :class:`~cms.models.pages.page.Page` objects.\n\"\"\"\nfrom django import template\n\nregister = template.Library()\n\n\[email protected]_tag\ndef get_last_root_page(pages):\n \"\"\"\n This tag returns the last page on 
the root level.\n\n :param pages: The requested page tree\n :type pages: list [ ~cms.models.pages.page.Page ]\n\n :return: The last root page of the given page list\n :rtype: ~cms.models.pages.page.Page\n \"\"\"\n root_pages = list(filter(lambda p: not p.parent, pages))\n return root_pages[-1] if root_pages else None\n\n\[email protected]_tag\ndef get_depth_in(node, pageset):\n \"\"\"\n This tag returns the depth of node whithin the tree/pages in pageset.\n\n :param node : the page\n :type node : ~cms.models.pages.page.Page\n\n :param pageset: The pages (all pages or pages chosen by filter)\n :type pageset: list [ ~cms.models.pages.page.Page ]\n\n :return: the depth of node whithin the tree/pages in pageset\n :rtype: int\n \"\"\"\n if not node.parent in pageset:\n return 0\n return node.depth - get_highest_anscentor_in(node, pageset).depth\n\n\ndef get_highest_anscentor_in(node, pageset):\n \"\"\"\n This tag returns the highest (farthest) anscestor of node whithin the tree/pages in pageset.\n\n :param node : the page\n :type node : ~cms.models.pages.page.Page\n\n :param pageset: The pages (all pages or pages chosen by filter)\n :type pageset: list [ ~cms.models.pages.page.Page ]\n\n :return: the highest (farthest) anscestor of node whithin the tree/pages in pageset\n :rtype: ~cms.models.pages.page.Page\n \"\"\"\n if node.parent in pageset:\n return get_highest_anscentor_in(node.parent, pageset)\n return node\n", "path": "src/cms/templatetags/page_filters.py"}, {"content": "\"\"\"\nThis is a collection of tags and filters for models which inherit from the MPTT model\n(:class:`~cms.models.pages.page.Page` and :class:`~cms.models.languages.language_tree_node.LanguageTreeNode`).\n\"\"\"\nfrom django import template\n\nregister = template.Library()\n\n\[email protected]\ndef get_descendants(node):\n \"\"\"\n This filter returns the ids of all the node's descendants.\n\n :param node: The requested node\n :type node: ~cms.models.pages.page.Page or ~cms.models.languages.language_tree_node.LanguageTreeNode\n\n :return: The list of all the node's descendants' ids\n :rtype: list [ int ]\n \"\"\"\n return [descendant.id for descendant in node.get_descendants(include_self=True)]\n\n\[email protected]\ndef get_children(node):\n \"\"\"\n This filter returns the ids of all the node's direct children.\n\n :param node: The requested node\n :type node: ~cms.models.pages.page.Page or ~cms.models.languages.language_tree_node.LanguageTreeNode\n\n :return: The list of all the node's children's ids\n :rtype: list [ int ]\n \"\"\"\n return [child.id for child in node.get_children()]\n", "path": "src/cms/templatetags/tree_filters.py"}, {"content": "import logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\nfrom ...constants import translation_status\nfrom ...decorators import region_permission_required, permission_required\nfrom ...forms import PageFilterForm\nfrom ...models import Region, Language, Page\nfrom .page_context_mixin import PageContextMixin\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(region_permission_required, name=\"dispatch\")\n@method_decorator(permission_required(\"cms.view_page\"), name=\"dispatch\")\nclass PageTreeView(TemplateView, PageContextMixin):\n \"\"\"\n View for 
showing the page tree\n \"\"\"\n\n #: Template for list of non-archived pages\n template = \"pages/page_tree.html\"\n #: Template for list of archived pages\n template_archived = \"pages/page_tree_archived.html\"\n #: Whether or not to show archived pages\n archived = False\n\n @property\n def template_name(self):\n \"\"\"\n Select correct HTML template, depending on :attr:`~cms.views.pages.page_tree_view.PageTreeView.archived` flag\n (see :class:`~django.views.generic.base.TemplateResponseMixin`)\n\n :return: Path to HTML template\n :rtype: str\n \"\"\"\n\n return self.template_archived if self.archived else self.template\n\n # pylint: disable=too-many-locals\n def get(self, request, *args, **kwargs):\n \"\"\"\n Render page tree\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param args: The supplied arguments\n :type args: list\n\n :param kwargs: The supplied keyword arguments\n :type kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n\n # current region\n region_slug = kwargs.get(\"region_slug\")\n region = Region.get_current_region(request)\n\n # current language\n language_slug = kwargs.get(\"language_slug\")\n if language_slug:\n language = Language.objects.get(slug=language_slug)\n elif region.default_language:\n return redirect(\n \"pages\",\n **{\n \"region_slug\": region_slug,\n \"language_slug\": region.default_language.slug,\n }\n )\n else:\n messages.error(\n request,\n _(\"Please create at least one language node before creating pages.\"),\n )\n return redirect(\n \"language_tree\",\n **{\n \"region_slug\": region_slug,\n }\n )\n\n if not request.user.has_perm(\"cms.change_page\"):\n messages.warning(\n request, _(\"You don't have the permission to edit or create pages.\")\n )\n context = self.get_context_data(**kwargs)\n\n pages = region.get_pages(archived=self.archived)\n enable_drag_and_drop = True\n # Filter pages according to given filters, if any\n filter_data = kwargs.get(\"filter_data\")\n if filter_data:\n # Set data for filter form rendering\n filter_form = PageFilterForm(data=filter_data)\n if filter_form.is_valid():\n selected_status = filter_form.cleaned_data[\"translation_status\"]\n # only filter if at least one checkbox but not all are checked\n if 0 < len(selected_status) < len(translation_status.CHOICES):\n enable_drag_and_drop = False\n\n def page_filter(page):\n translation = page.get_translation(language_slug)\n if not translation:\n return translation_status.MISSING in selected_status\n if translation.currently_in_translation:\n return translation_status.IN_TRANSLATION in selected_status\n if translation.is_outdated:\n return translation_status.OUTDATED in selected_status\n return translation_status.UP_TO_DATE in selected_status\n\n pages = map(lambda p: p.id, list(filter(page_filter, pages)))\n pages = Page.objects.filter(id__in=pages).order_by()\n else:\n filter_form = PageFilterForm()\n filter_form.changed_data.clear()\n\n return render(\n request,\n self.template_name,\n {\n **context,\n \"current_menu_item\": \"pages\",\n \"pages\": pages,\n \"archived_count\": region.get_pages(archived=True).count(),\n \"language\": language,\n \"languages\": region.languages,\n \"filter_form\": filter_form,\n \"enable_drag_and_drop\": enable_drag_and_drop,\n },\n )\n\n def post(self, request, *args, **kwargs):\n \"\"\"\n Apply page filters and render page tree\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param 
args: The supplied arguments\n :type args: list\n\n :param kwargs: The supplied keyword arguments\n :type kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n return self.get(request, *args, **kwargs, filter_data=request.POST)\n", "path": "src/cms/views/pages/page_tree_view.py"}]} | 2,312 | 765 |
gh_patches_debug_13599 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-4356 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CookieError Illegal key %r, Jupyterhub not sanitizing header keys properly
<!-- Thank you for contributing. These HTML comments will not render in the issue, but you can delete them once you've read them if you prefer! -->
### Bug description
<!-- Use this section to clearly and concisely describe the bug. -->
End users occasionally are receiving timeouts on **Jupyterhub 3.1.0**. On review, the error log `/var/log/jupyterhub.log` contains many unexpected references to an Illegal Key in HTTPCookie.
#### Expected behaviour
<!-- Tell us what you thought would happen. -->
End users running R kernel ipynbs should not get unexpected timeouts due to invalid key in HTTP Cookies when authenticating to the hub via GitHub OAuth.
#### Actual behaviour
<!-- Tell us what actually happens. -->
End users occasionally get timeouts. Below is a reference of the actual error log, the first hit. Interestingly the error log has thousands of hits for that same illegal key value, which I believe is a Google Analytics tag & measurement ID.
```python
[E 2023-01-24 00:12:16.024 JupyterHub web:1798] Uncaught exception GET / (::ffff:xxx.xxx.xxx.xxx)
HTTPServerRequest(protocol='https', host='commjhub.asc.upenn.edu', method='GET', uri='/', version='HTTP/1.1', remote_ip='::ffff:xxx.xxx.xxx.xxx')
Traceback (most recent call last):
File "/usr/local/anaconda3/lib/python3.8/site-packages/tornado/web.py", line 1711, in _execute
result = method(*self.path_args, **self.path_kwargs)
File "/usr/local/anaconda3/lib/python3.8/site-packages/jupyterhub/handlers/base.py", line 1391, in get
self.redirect(url_path_join(self.hub.base_url, path), permanent=False)
File "/usr/local/anaconda3/lib/python3.8/site-packages/tornado/web.py", line 816, in redirect
self.finish()
File "/usr/local/anaconda3/lib/python3.8/site-packages/jupyterhub/handlers/base.py", line 184, in finish
super().finish(*args, **kwargs)
File "/usr/local/anaconda3/lib/python3.8/site-packages/tornado/web.py", line 1161, in finish
self._log()
File "/usr/local/anaconda3/lib/python3.8/site-packages/tornado/web.py", line 1746, in _log
self.application.log_request(self)
File "/usr/local/anaconda3/lib/python3.8/site-packages/tornado/web.py", line 2262, in log_request
self.settings["log_function"](handler)
File "/usr/local/anaconda3/lib/python3.8/site-packages/jupyterhub/log.py", line 143, in log_request
headers = _scrub_headers(request.headers)
File "/usr/local/anaconda3/lib/python3.8/site-packages/jupyterhub/log.py", line 108, in _scrub_headers
c = SimpleCookie(headers['Cookie'])
File "/usr/local/anaconda3/lib/python3.8/http/cookies.py", line 480, in __init__
self.load(input)
File "/usr/local/anaconda3/lib/python3.8/http/cookies.py", line 529, in load
self.__parse_string(rawdata)
File "/usr/local/anaconda3/lib/python3.8/http/cookies.py", line 593, in __parse_string
self.__set(key, rval, cval)
File "/usr/local/anaconda3/lib/python3.8/http/cookies.py", line 485, in __set
M.set(key, real_value, coded_value)
File "/usr/local/anaconda3/lib/python3.8/http/cookies.py", line 352, in set
raise CookieError('Illegal key %r' % (key,))
http.cookies.CookieError: Illegal key '_ga_SR2P22QVQQ,G-0HYE8YG0M6'
```
### How to reproduce
<!-- Use this section to describe the steps that a user would take to experience this bug. -->
Pass an invalid key in the HTTP `Cookie` header. In my case, I think the culprit is the comma in the Google Analytics cookie name (`http.cookies.CookieError: Illegal key '_ga_SR2P22QVQQ,G-0HYE8YG0M6'`), based on the information here: https://docs.python.org/3/library/http.cookies.html#module-http.cookies
> Note: On encountering an invalid cookie, CookieError is raised, so if your cookie data comes from a browser you should always prepare for invalid data and catch CookieError on parsing.
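A minimal sketch of the failure (assuming Python 3.8's `http.cookies`, matching the traceback above):

```python
from http.cookies import CookieError, SimpleCookie

try:
    # A comma is not a legal character in a cookie name, so parsing raises.
    SimpleCookie("_ga_SR2P22QVQQ,G-0HYE8YG0M6=GS1.1")
except CookieError as exc:
    print(exc)  # Illegal key '_ga_SR2P22QVQQ,G-0HYE8YG0M6'
```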
### Your personal set up
<!--
Tell us a little about the system you're using.
Please include information about how you installed,
e.g. are you using a distribution such as zero-to-jupyterhub or the-littlest-jupyterhub.
-->
- OS: Ubuntu 20.04.5 LTS
<!-- [e.g. ubuntu 20.04, macOS 11.0] -->
- Version(s):
- Jupyterhub 3.1.0
- Python 3.8.8
<!-- e.g. jupyterhub --version, python --version --->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jupyterhub/log.py`
Content:
```
1 """logging utilities"""
2 # Copyright (c) Jupyter Development Team.
3 # Distributed under the terms of the Modified BSD License.
4 import json
5 import logging
6 import traceback
7 from functools import partial
8 from http.cookies import SimpleCookie
9 from urllib.parse import urlparse, urlunparse
10
11 from tornado.log import LogFormatter, access_log
12 from tornado.web import HTTPError, StaticFileHandler
13
14 from .handlers.pages import HealthCheckHandler
15 from .metrics import prometheus_log_method
16
17
18 def coroutine_frames(all_frames):
19 """Extract coroutine boilerplate frames from a frame list
20
21 for better stack/traceback printing of coroutines
22 """
23 useful_frames = []
24 for frame in all_frames:
25 if frame[0] == '<string>' and frame[2] == 'raise_exc_info':
26 continue
27 # start out conservative with filename + function matching
28 # maybe just filename matching would be sufficient
29 elif frame[0].endswith('tornado/gen.py') and frame[2] in {
30 'run',
31 'wrapper',
32 '__init__',
33 }:
34 continue
35 elif frame[0].endswith('tornado/concurrent.py') and frame[2] == 'result':
36 continue
37 useful_frames.append(frame)
38 return useful_frames
39
40
41 def coroutine_traceback(typ, value, tb):
42 """Scrub coroutine frames from a traceback
43
44 Coroutine tracebacks have a bunch of identical uninformative frames at each yield point.
45 This removes those extra frames, so tracebacks should be easier to read.
46 This might be a horrible idea.
47
48 Returns a list of strings (like traceback.format_tb)
49 """
50 all_frames = traceback.extract_tb(tb)
51 useful_frames = coroutine_frames(all_frames)
52
53 tb_list = ['Traceback (most recent call last):\n']
54 tb_list.extend(traceback.format_list(useful_frames))
55 tb_list.extend(traceback.format_exception_only(typ, value))
56 return tb_list
57
58
59 class CoroutineLogFormatter(LogFormatter):
60 """Log formatter that scrubs coroutine frames"""
61
62 def formatException(self, exc_info):
63 return ''.join(coroutine_traceback(*exc_info))
64
65
66 # url params to be scrubbed if seen
67 # any url param that *contains* one of these
68 # will be scrubbed from logs
69 SCRUB_PARAM_KEYS = ('token', 'auth', 'key', 'code', 'state', '_xsrf')
70
71
72 def _scrub_uri(uri):
73 """scrub auth info from uri"""
74 if '/api/authorizations/cookie/' in uri or '/api/authorizations/token/' in uri:
75 uri = uri.rsplit('/', 1)[0] + '/[secret]'
76 parsed = urlparse(uri)
77 if parsed.query:
78 # check for potentially sensitive url params
79 # use manual list + split rather than parsing
80 # to minimally perturb original
81 parts = parsed.query.split('&')
82 changed = False
83 for i, s in enumerate(parts):
84 if '=' in s:
85 key, value = s.split('=', 1)
86 for substring in SCRUB_PARAM_KEYS:
87 if substring in key:
88 parts[i] = key + '=[secret]'
89 changed = True
90 if changed:
91 parsed = parsed._replace(query='&'.join(parts))
92 return urlunparse(parsed)
93 return uri
94
95
96 def _scrub_headers(headers):
97 """scrub auth info from headers"""
98 headers = dict(headers)
99 if 'Authorization' in headers:
100 auth = headers['Authorization']
101 if ' ' in auth:
102 auth_type = auth.split(' ', 1)[0]
103 else:
104 # no space, hide the whole thing in case there was a mistake
105 auth_type = ''
106 headers['Authorization'] = f'{auth_type} [secret]'
107 if 'Cookie' in headers:
108 c = SimpleCookie(headers['Cookie'])
109 redacted = []
110 for name in c.keys():
111 redacted.append(f"{name}=[secret]")
112 headers['Cookie'] = '; '.join(redacted)
113 return headers
114
115
116 # log_request adapted from IPython (BSD)
117
118
119 def log_request(handler):
120 """log a bit more information about each request than tornado's default
121
122 - move static file get success to debug-level (reduces noise)
123 - get proxied IP instead of proxy IP
124 - log referer for redirect and failed requests
125 - log user-agent for failed requests
126 - record per-request metrics in prometheus
127 """
128 status = handler.get_status()
129 request = handler.request
130 if status == 304 or (
131 status < 300 and isinstance(handler, (StaticFileHandler, HealthCheckHandler))
132 ):
133 # static-file success and 304 Found are debug-level
134 log_level = logging.DEBUG
135 elif status < 400:
136 log_level = logging.INFO
137 elif status < 500:
138 log_level = logging.WARNING
139 else:
140 log_level = logging.ERROR
141
142 uri = _scrub_uri(request.uri)
143 headers = _scrub_headers(request.headers)
144
145 request_time = 1000.0 * handler.request.request_time()
146
147 # always log slow responses (longer than 1s) at least info-level
148 if request_time >= 1000 and log_level < logging.INFO:
149 log_level = logging.INFO
150
151 log_method = partial(access_log.log, log_level)
152
153 try:
154 user = handler.current_user
155 except (HTTPError, RuntimeError):
156 username = ''
157 else:
158 if user is None:
159 username = ''
160 elif isinstance(user, str):
161 username = user
162 elif isinstance(user, dict):
163 username = user['name']
164 else:
165 username = user.name
166
167 ns = dict(
168 status=status,
169 method=request.method,
170 ip=request.remote_ip,
171 uri=uri,
172 request_time=request_time,
173 user=username,
174 location='',
175 )
176 msg = "{status} {method} {uri}{location} ({user}@{ip}) {request_time:.2f}ms"
177 if status >= 500 and status not in {502, 503}:
178 log_method(json.dumps(headers, indent=2))
179 elif status in {301, 302}:
180 # log redirect targets
181 # FIXME: _headers is private, but there doesn't appear to be a public way
182 # to get headers from tornado
183 location = handler._headers.get('Location')
184 if location:
185 ns['location'] = f' -> {_scrub_uri(location)}'
186 log_method(msg.format(**ns))
187 prometheus_log_method(handler)
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/jupyterhub/log.py b/jupyterhub/log.py
--- a/jupyterhub/log.py
+++ b/jupyterhub/log.py
@@ -105,11 +105,16 @@
auth_type = ''
headers['Authorization'] = f'{auth_type} [secret]'
if 'Cookie' in headers:
- c = SimpleCookie(headers['Cookie'])
- redacted = []
- for name in c.keys():
- redacted.append(f"{name}=[secret]")
- headers['Cookie'] = '; '.join(redacted)
+ try:
+ c = SimpleCookie(headers['Cookie'])
+ except Exception as e:
+ # it's possible for browsers to send invalid cookies
+ headers['Cookie'] = f"Invalid Cookie: {e}"
+ else:
+ redacted = []
+ for name in c.keys():
+ redacted.append(f"{name}=[secret]")
+ headers['Cookie'] = '; '.join(redacted)
return headers
| {"golden_diff": "diff --git a/jupyterhub/log.py b/jupyterhub/log.py\n--- a/jupyterhub/log.py\n+++ b/jupyterhub/log.py\n@@ -105,11 +105,16 @@\n auth_type = ''\n headers['Authorization'] = f'{auth_type} [secret]'\n if 'Cookie' in headers:\n- c = SimpleCookie(headers['Cookie'])\n- redacted = []\n- for name in c.keys():\n- redacted.append(f\"{name}=[secret]\")\n- headers['Cookie'] = '; '.join(redacted)\n+ try:\n+ c = SimpleCookie(headers['Cookie'])\n+ except Exception as e:\n+ # it's possible for browsers to send invalid cookies\n+ headers['Cookie'] = f\"Invalid Cookie: {e}\"\n+ else:\n+ redacted = []\n+ for name in c.keys():\n+ redacted.append(f\"{name}=[secret]\")\n+ headers['Cookie'] = '; '.join(redacted)\n return headers\n", "issue": "CookieError Ilegal key %r, Jupyterhub not sanitizing header keys properly\n<!-- Thank you for contributing. These HTML comments will not render in the issue, but you can delete them once you've read them if you prefer! -->\r\n\r\n### Bug description\r\n\r\n<!-- Use this section to clearly and concisely describe the bug. -->\r\n\r\nEnd users occasionally are receiving timeouts on **Jupyterhub 3.1.0**. On review, the error log `/var/log/jupyterhub.log` contains many unexpected references to an Illegal Key in HTTPCookie.\r\n\r\n#### Expected behaviour\r\n\r\n<!-- Tell us what you thought would happen. -->\r\n\r\nEnd users running R kernel ipynbs should not get unexpected timeouts due to invalid key in HTTP Cookies when authenticating to the hub via GitHub OAuth.\r\n\r\n#### Actual behaviour\r\n\r\n<!-- Tell us what actually happens. -->\r\n\r\nEnd users occasionally get timeouts. Below is a reference of the actual error log, the first hit. Interestingly the error log has thousands of hits for that same illegal key value, which I believe is a Google Analytics tag & measurement ID.\r\n\r\n```python\r\n[E 2023-01-24 00:12:16.024 JupyterHub web:1798] Uncaught exception GET / (::ffff:xxx.xxx.xxx.xxx)\r\n HTTPServerRequest(protocol='https', host='commjhub.asc.upenn.edu', method='GET', uri='/', version='HTTP/1.1', remote_ip='::ffff:xxx.xxx.xxx.xxx')\r\n Traceback (most recent call last):\r\n File \"/usr/local/anaconda3/lib/python3.8/site-packages/tornado/web.py\", line 1711, in _execute\r\n result = method(*self.path_args, **self.path_kwargs)\r\n File \"/usr/local/anaconda3/lib/python3.8/site-packages/jupyterhub/handlers/base.py\", line 1391, in get\r\n self.redirect(url_path_join(self.hub.base_url, path), permanent=False)\r\n File \"/usr/local/anaconda3/lib/python3.8/site-packages/tornado/web.py\", line 816, in redirect\r\n self.finish()\r\n File \"/usr/local/anaconda3/lib/python3.8/site-packages/jupyterhub/handlers/base.py\", line 184, in finish\r\n super().finish(*args, **kwargs)\r\n File \"/usr/local/anaconda3/lib/python3.8/site-packages/tornado/web.py\", line 1161, in finish\r\n self._log()\r\n File \"/usr/local/anaconda3/lib/python3.8/site-packages/tornado/web.py\", line 1746, in _log\r\n self.application.log_request(self)\r\n File \"/usr/local/anaconda3/lib/python3.8/site-packages/tornado/web.py\", line 2262, in log_request\r\n self.settings[\"log_function\"](handler)\r\n File \"/usr/local/anaconda3/lib/python3.8/site-packages/jupyterhub/log.py\", line 143, in log_request\r\n headers = _scrub_headers(request.headers)\r\n File \"/usr/local/anaconda3/lib/python3.8/site-packages/jupyterhub/log.py\", line 108, in _scrub_headers\r\n c = SimpleCookie(headers['Cookie'])\r\n File \"/usr/local/anaconda3/lib/python3.8/http/cookies.py\", line 480, in __init__\r\n 
self.load(input)\r\n File \"/usr/local/anaconda3/lib/python3.8/http/cookies.py\", line 529, in load\r\n self.__parse_string(rawdata)\r\n File \"/usr/local/anaconda3/lib/python3.8/http/cookies.py\", line 593, in __parse_string\r\n self.__set(key, rval, cval)\r\n File \"/usr/local/anaconda3/lib/python3.8/http/cookies.py\", line 485, in __set\r\n M.set(key, real_value, coded_value)\r\n File \"/usr/local/anaconda3/lib/python3.8/http/cookies.py\", line 352, in set\r\n raise CookieError('Illegal key %r' % (key,))\r\n http.cookies.CookieError: Illegal key '_ga_SR2P22QVQQ,G-0HYE8YG0M6'\r\n```\r\n\r\n### How to reproduce\r\n\r\n<!-- Use this section to describe the steps that a user would take to experience this bug. -->\r\n\r\nPass an invalid key for HTTP cookies. In my case, I think it's the comma value in the GA `http.cookies.CookieError: Illegal key '_ga_SR2P22QVQQ,G-0HYE8YG0M6'` based on information here: https://docs.python.org/3/library/http.cookies.html#module-http.cookies\r\n> Note: On encountering an invalid cookie, CookieError is raised, so if your cookie data comes from a browser you should always prepare for invalid data and catch CookieError on parsing.\r\n\r\n### Your personal set up\r\n\r\n<!--\r\nTell us a little about the system you're using.\r\nPlease include information about how you installed,\r\ne.g. are you using a distribution such as zero-to-jupyterhub or the-littlest-jupyterhub.\r\n -->\r\n\r\n - OS: Ubuntu 20.04.5 LTS\r\n <!-- [e.g. ubuntu 20.04, macOS 11.0] -->\r\n - Version(s): \r\n - Jupyterhub 3.1.0\r\n - Python 3.8.8 \r\n <!-- e.g. jupyterhub --version, python --version --->\r\n\r\n\n", "before_files": [{"content": "\"\"\"logging utilities\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nimport json\nimport logging\nimport traceback\nfrom functools import partial\nfrom http.cookies import SimpleCookie\nfrom urllib.parse import urlparse, urlunparse\n\nfrom tornado.log import LogFormatter, access_log\nfrom tornado.web import HTTPError, StaticFileHandler\n\nfrom .handlers.pages import HealthCheckHandler\nfrom .metrics import prometheus_log_method\n\n\ndef coroutine_frames(all_frames):\n \"\"\"Extract coroutine boilerplate frames from a frame list\n\n for better stack/traceback printing of coroutines\n \"\"\"\n useful_frames = []\n for frame in all_frames:\n if frame[0] == '<string>' and frame[2] == 'raise_exc_info':\n continue\n # start out conservative with filename + function matching\n # maybe just filename matching would be sufficient\n elif frame[0].endswith('tornado/gen.py') and frame[2] in {\n 'run',\n 'wrapper',\n '__init__',\n }:\n continue\n elif frame[0].endswith('tornado/concurrent.py') and frame[2] == 'result':\n continue\n useful_frames.append(frame)\n return useful_frames\n\n\ndef coroutine_traceback(typ, value, tb):\n \"\"\"Scrub coroutine frames from a traceback\n\n Coroutine tracebacks have a bunch of identical uninformative frames at each yield point.\n This removes those extra frames, so tracebacks should be easier to read.\n This might be a horrible idea.\n\n Returns a list of strings (like traceback.format_tb)\n \"\"\"\n all_frames = traceback.extract_tb(tb)\n useful_frames = coroutine_frames(all_frames)\n\n tb_list = ['Traceback (most recent call last):\\n']\n tb_list.extend(traceback.format_list(useful_frames))\n tb_list.extend(traceback.format_exception_only(typ, value))\n return tb_list\n\n\nclass CoroutineLogFormatter(LogFormatter):\n \"\"\"Log formatter that scrubs coroutine 
gh_patches_debug_14360 | rasdani/github-patches | git_diff | huggingface__dataset-viewer-2811 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
opus decoding error
see https://huggingface.co/datasets/stable-speech/mls_eng_10k/discussions/1#65ef6e9d440a5fc3d94a40ad
To fix this, maybe we should pin the `soundfile` library to `>=1.0.31` (the first version that supported opus), like [we do in the `datasets` library](https://github.com/huggingface/datasets/blob/main/src/datasets/config.py#L144).
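
For illustration only, here is a hedged runtime guard expressing that version floor. The `>=1.0.31` value is copied from this issue rather than independently verified, and `packaging` is assumed to be installed:

```python
# Hypothetical sanity check: fail fast if the installed soundfile predates opus support.
# The "1.0.31" floor comes from the issue text above, not from soundfile's changelog.
import soundfile
from packaging.version import Version

if Version(soundfile.__version__) < Version("1.0.31"):
    raise RuntimeError(f"soundfile {soundfile.__version__} is too old to decode opus")
```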
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libs/libcommon/src/libcommon/viewer_utils/asset.py`
Content:
```
1 # SPDX-License-Identifier: Apache-2.0
2 # Copyright 2022 The HuggingFace Authors.
3
4 from io import BytesIO
5 from pathlib import Path
6 from tempfile import NamedTemporaryFile
7 from typing import Optional, TypedDict
8 from urllib import parse
9
10 from PIL import Image, ImageOps
11 from pydub import AudioSegment # type:ignore
12
13 from libcommon.constants import DATASET_SEPARATOR
14 from libcommon.storage import StrPath, remove_dir
15 from libcommon.storage_client import StorageClient
16
17 SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE = {".wav": "audio/wav", ".mp3": "audio/mpeg"}
18
19
20 def delete_asset_dir(dataset: str, directory: StrPath) -> None:
21 dir_path = Path(directory).resolve() / dataset
22 remove_dir(dir_path)
23
24
25 class ImageSource(TypedDict):
26 src: str
27 height: int
28 width: int
29
30
31 class AudioSource(TypedDict):
32 src: str
33 type: str
34
35
36 def generate_object_key(
37 dataset: str, revision: str, config: str, split: str, row_idx: int, column: str, filename: str
38 ) -> str:
39 return f"{parse.quote(dataset)}/{DATASET_SEPARATOR}/{revision}/{DATASET_SEPARATOR}/{parse.quote(config)}/{parse.quote(split)}/{str(row_idx)}/{parse.quote(column)}/{filename}"
40
41
42 def create_image_file(
43 dataset: str,
44 revision: str,
45 config: str,
46 split: str,
47 row_idx: int,
48 column: str,
49 filename: str,
50 image: Image.Image,
51 format: str,
52 storage_client: StorageClient,
53 ) -> ImageSource:
54 object_key = generate_object_key(
55 dataset=dataset,
56 revision=revision,
57 config=config,
58 split=split,
59 row_idx=row_idx,
60 column=column,
61 filename=filename,
62 )
63 if storage_client.overwrite or not storage_client.exists(object_key):
64 image = ImageOps.exif_transpose(image) # type: ignore[assignment]
65 buffer = BytesIO()
66 image.save(fp=buffer, format=format)
67 buffer.seek(0)
68 with storage_client._fs.open(storage_client.get_full_path(object_key), "wb") as f:
69 f.write(buffer.read())
70 return ImageSource(src=storage_client.get_url(object_key), height=image.height, width=image.width)
71
72
73 def create_audio_file(
74 dataset: str,
75 revision: str,
76 config: str,
77 split: str,
78 row_idx: int,
79 column: str,
80 audio_file_bytes: bytes,
81 audio_file_extension: Optional[str],
82 filename: str,
83 storage_client: StorageClient,
84 ) -> list[AudioSource]:
85 object_key = generate_object_key(
86 dataset=dataset,
87 revision=revision,
88 config=config,
89 split=split,
90 row_idx=row_idx,
91 column=column,
92 filename=filename,
93 )
94 suffix = f".{filename.split('.')[-1]}"
95 if suffix not in SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE:
96 raise ValueError(
97 f"Audio format {suffix} is not supported. Supported formats are"
98 f" {','.join(SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE)}."
99 )
100 media_type = SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE[suffix]
101
102 if storage_client.overwrite or not storage_client.exists(object_key):
103 audio_path = storage_client.get_full_path(object_key)
104 if audio_file_extension == suffix:
105 with storage_client._fs.open(audio_path, "wb") as f:
106 f.write(audio_file_bytes)
107 else: # we need to convert
108 # might spawn a process to convert the audio file using ffmpeg
109 with NamedTemporaryFile("wb", suffix=audio_file_extension) as tmpfile:
110 tmpfile.write(audio_file_bytes)
111 segment: AudioSegment = AudioSegment.from_file(
112 tmpfile.name, audio_file_extension[1:] if audio_file_extension else None
113 )
114 buffer = BytesIO()
115 segment.export(buffer, format=suffix[1:])
116 buffer.seek(0)
117 with storage_client._fs.open(audio_path, "wb") as f:
118 f.write(buffer.read())
119 return [AudioSource(src=storage_client.get_url(object_key), type=media_type)]
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libs/libcommon/src/libcommon/viewer_utils/asset.py b/libs/libcommon/src/libcommon/viewer_utils/asset.py
--- a/libs/libcommon/src/libcommon/viewer_utils/asset.py
+++ b/libs/libcommon/src/libcommon/viewer_utils/asset.py
@@ -108,9 +108,7 @@
# might spawn a process to convert the audio file using ffmpeg
with NamedTemporaryFile("wb", suffix=audio_file_extension) as tmpfile:
tmpfile.write(audio_file_bytes)
- segment: AudioSegment = AudioSegment.from_file(
- tmpfile.name, audio_file_extension[1:] if audio_file_extension else None
- )
+ segment: AudioSegment = AudioSegment.from_file(tmpfile.name)
buffer = BytesIO()
segment.export(buffer, format=suffix[1:])
buffer.seek(0)
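
As a side note, here is a minimal sketch of the conversion path this patch settles on, letting pydub/ffmpeg detect the input container instead of forcing the extension-derived format. It assumes an ffmpeg build with opus support and a local `sample.opus` file; both are illustrative assumptions, not part of the patch:

```python
from io import BytesIO

from pydub import AudioSegment  # requires ffmpeg on PATH

# No explicit format argument: ffmpeg auto-detects the opus container.
segment = AudioSegment.from_file("sample.opus")
buffer = BytesIO()
segment.export(buffer, format="wav")  # ".wav" is one of the supported output suffixes
buffer.seek(0)
print(f"decoded {len(buffer.getvalue())} bytes of WAV audio")
```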
gh_patches_debug_6861 | rasdani/github-patches | git_diff | jazzband__pip-tools-1788 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unsafe requirements are no longer included in the requirements.txt generated by pip-compile
After v6.12.0, the unsafe packages pip and setuptools are no longer included in the requirements.txt when `--allow-unsafe` is used in pip-compile.
There was a change made in writer.py (https://github.com/jazzband/pip-tools/pull/1766).
I think this line should use unsafe_packages when the user has specified allow_unsafe, rather than when they have not:
unsafe_packages = unsafe_packages if **not** self.allow_unsafe else set()
I think this should be:
unsafe_packages = unsafe_packages if self.allow_unsafe else set()
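
For illustration, a tiny self-contained sketch of the inverted condition (`allow_unsafe` and `unsafe_packages` are stand-in local names for the attributes referenced above):

```python
# Simulate both expressions for a user who passed --allow-unsafe.
allow_unsafe = True
unsafe_packages = {"pip", "setuptools"}

current = unsafe_packages if not allow_unsafe else set()   # set(): the unsafe pins are dropped
proposed = unsafe_packages if allow_unsafe else set()      # {"pip", "setuptools"}: the pins are kept
print(current, proposed)
```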
#### Environment Versions
1. Ubuntu 18
1. Python version: 3.7.11
1. pip version: 22.3.1
1. pip-tools version: 6.12.1
#### Steps to replicate
Ensure requirements.in file includes pip and then run:
pip-compile --verbose --allow-unsafe --output-file requirements.txt requirements.in
#### Expected result
requirements.txt should end with this:

```
# The following packages are considered to be unsafe in a requirements file:
pip==22.3.1
    # via -r requirements.in
```
#### Actual result
The unsafe packages are not listed in the requirements.txt at all
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `piptools/writer.py`
Content:
```
1 from __future__ import annotations
2
3 import io
4 import os
5 import re
6 import sys
7 from itertools import chain
8 from typing import BinaryIO, Iterable, Iterator, cast
9
10 from click import unstyle
11 from click.core import Context
12 from pip._internal.models.format_control import FormatControl
13 from pip._internal.req.req_install import InstallRequirement
14 from pip._vendor.packaging.markers import Marker
15 from pip._vendor.packaging.utils import canonicalize_name
16
17 from .logging import log
18 from .utils import (
19 comment,
20 dedup,
21 format_requirement,
22 get_compile_command,
23 key_from_ireq,
24 strip_extras,
25 )
26
27 MESSAGE_UNHASHED_PACKAGE = comment(
28 "# WARNING: pip install will require the following package to be hashed."
29 "\n# Consider using a hashable URL like "
30 "https://github.com/jazzband/pip-tools/archive/SOMECOMMIT.zip"
31 )
32
33 MESSAGE_UNSAFE_PACKAGES_UNPINNED = comment(
34 "# WARNING: The following packages were not pinned, but pip requires them to be"
35 "\n# pinned when the requirements file includes hashes. "
36 "Consider using the --allow-unsafe flag."
37 )
38
39 MESSAGE_UNSAFE_PACKAGES = comment(
40 "# The following packages are considered to be unsafe in a requirements file:"
41 )
42
43 MESSAGE_UNINSTALLABLE = (
44 "The generated requirements file may be rejected by pip install. "
45 "See # WARNING lines for details."
46 )
47
48
49 strip_comes_from_line_re = re.compile(r" \(line \d+\)$")
50
51
52 def _comes_from_as_string(comes_from: str | InstallRequirement) -> str:
53 if isinstance(comes_from, str):
54 return strip_comes_from_line_re.sub("", comes_from)
55 return cast(str, canonicalize_name(key_from_ireq(comes_from)))
56
57
58 def annotation_style_split(required_by: set[str]) -> str:
59 sorted_required_by = sorted(required_by)
60 if len(sorted_required_by) == 1:
61 source = sorted_required_by[0]
62 annotation = "# via " + source
63 else:
64 annotation_lines = ["# via"]
65 for source in sorted_required_by:
66 annotation_lines.append(" # " + source)
67 annotation = "\n".join(annotation_lines)
68 return annotation
69
70
71 def annotation_style_line(required_by: set[str]) -> str:
72 return f"# via {', '.join(sorted(required_by))}"
73
74
75 class OutputWriter:
76 def __init__(
77 self,
78 dst_file: BinaryIO,
79 click_ctx: Context,
80 dry_run: bool,
81 emit_header: bool,
82 emit_index_url: bool,
83 emit_trusted_host: bool,
84 annotate: bool,
85 annotation_style: str,
86 strip_extras: bool,
87 generate_hashes: bool,
88 default_index_url: str,
89 index_urls: Iterable[str],
90 trusted_hosts: Iterable[str],
91 format_control: FormatControl,
92 linesep: str,
93 allow_unsafe: bool,
94 find_links: list[str],
95 emit_find_links: bool,
96 emit_options: bool,
97 ) -> None:
98 self.dst_file = dst_file
99 self.click_ctx = click_ctx
100 self.dry_run = dry_run
101 self.emit_header = emit_header
102 self.emit_index_url = emit_index_url
103 self.emit_trusted_host = emit_trusted_host
104 self.annotate = annotate
105 self.annotation_style = annotation_style
106 self.strip_extras = strip_extras
107 self.generate_hashes = generate_hashes
108 self.default_index_url = default_index_url
109 self.index_urls = index_urls
110 self.trusted_hosts = trusted_hosts
111 self.format_control = format_control
112 self.linesep = linesep
113 self.allow_unsafe = allow_unsafe
114 self.find_links = find_links
115 self.emit_find_links = emit_find_links
116 self.emit_options = emit_options
117
118 def _sort_key(self, ireq: InstallRequirement) -> tuple[bool, str]:
119 return (not ireq.editable, key_from_ireq(ireq))
120
121 def write_header(self) -> Iterator[str]:
122 if self.emit_header:
123 yield comment("#")
124 yield comment(
125 "# This file is autogenerated by pip-compile with Python "
126 f"{sys.version_info.major}.{sys.version_info.minor}"
127 )
128 yield comment("# by the following command:")
129 yield comment("#")
130 compile_command = os.environ.get(
131 "CUSTOM_COMPILE_COMMAND"
132 ) or get_compile_command(self.click_ctx)
133 yield comment(f"# {compile_command}")
134 yield comment("#")
135
136 def write_index_options(self) -> Iterator[str]:
137 if self.emit_index_url:
138 for index, index_url in enumerate(dedup(self.index_urls)):
139 if index == 0 and index_url.rstrip("/") == self.default_index_url:
140 continue
141 flag = "--index-url" if index == 0 else "--extra-index-url"
142 yield f"{flag} {index_url}"
143
144 def write_trusted_hosts(self) -> Iterator[str]:
145 if self.emit_trusted_host:
146 for trusted_host in dedup(self.trusted_hosts):
147 yield f"--trusted-host {trusted_host}"
148
149 def write_format_controls(self) -> Iterator[str]:
150 for nb in dedup(sorted(self.format_control.no_binary)):
151 yield f"--no-binary {nb}"
152 for ob in dedup(sorted(self.format_control.only_binary)):
153 yield f"--only-binary {ob}"
154
155 def write_find_links(self) -> Iterator[str]:
156 if self.emit_find_links:
157 for find_link in dedup(self.find_links):
158 yield f"--find-links {find_link}"
159
160 def write_flags(self) -> Iterator[str]:
161 if not self.emit_options:
162 return
163 emitted = False
164 for line in chain(
165 self.write_index_options(),
166 self.write_find_links(),
167 self.write_trusted_hosts(),
168 self.write_format_controls(),
169 ):
170 emitted = True
171 yield line
172 if emitted:
173 yield ""
174
175 def _iter_lines(
176 self,
177 results: set[InstallRequirement],
178 unsafe_requirements: set[InstallRequirement],
179 unsafe_packages: set[str],
180 markers: dict[str, Marker],
181 hashes: dict[InstallRequirement, set[str]] | None = None,
182 ) -> Iterator[str]:
183 # default values
184 unsafe_packages = unsafe_packages if not self.allow_unsafe else set()
185 hashes = hashes or {}
186
187 # Check for unhashed or unpinned packages if at least one package does have
188 # hashes, which will trigger pip install's --require-hashes mode.
189 warn_uninstallable = False
190 has_hashes = hashes and any(hash for hash in hashes.values())
191
192 yielded = False
193
194 for line in self.write_header():
195 yield line
196 yielded = True
197 for line in self.write_flags():
198 yield line
199 yielded = True
200
201 unsafe_requirements = unsafe_requirements or {
202 r for r in results if r.name in unsafe_packages
203 }
204 packages = {r for r in results if r.name not in unsafe_packages}
205
206 if packages:
207 for ireq in sorted(packages, key=self._sort_key):
208 if has_hashes and not hashes.get(ireq):
209 yield MESSAGE_UNHASHED_PACKAGE
210 warn_uninstallable = True
211 line = self._format_requirement(
212 ireq, markers.get(key_from_ireq(ireq)), hashes=hashes
213 )
214 yield line
215 yielded = True
216
217 if unsafe_requirements:
218 yield ""
219 yielded = True
220 if has_hashes and not self.allow_unsafe:
221 yield MESSAGE_UNSAFE_PACKAGES_UNPINNED
222 warn_uninstallable = True
223 else:
224 yield MESSAGE_UNSAFE_PACKAGES
225
226 for ireq in sorted(unsafe_requirements, key=self._sort_key):
227 ireq_key = key_from_ireq(ireq)
228 if not self.allow_unsafe:
229 yield comment(f"# {ireq_key}")
230 else:
231 line = self._format_requirement(
232 ireq, marker=markers.get(ireq_key), hashes=hashes
233 )
234 yield line
235
236 # Yield even when there's no real content, so that blank files are written
237 if not yielded:
238 yield ""
239
240 if warn_uninstallable:
241 log.warning(MESSAGE_UNINSTALLABLE)
242
243 def write(
244 self,
245 results: set[InstallRequirement],
246 unsafe_requirements: set[InstallRequirement],
247 unsafe_packages: set[str],
248 markers: dict[str, Marker],
249 hashes: dict[InstallRequirement, set[str]] | None,
250 ) -> None:
251
252 if not self.dry_run:
253 dst_file = io.TextIOWrapper(
254 self.dst_file,
255 encoding="utf8",
256 newline=self.linesep,
257 line_buffering=True,
258 )
259 try:
260 for line in self._iter_lines(
261 results, unsafe_requirements, unsafe_packages, markers, hashes
262 ):
263 if self.dry_run:
264 # Bypass the log level to always print this during a dry run
265 log.log(line)
266 else:
267 log.info(line)
268 dst_file.write(unstyle(line))
269 dst_file.write("\n")
270 finally:
271 if not self.dry_run:
272 dst_file.detach()
273
274 def _format_requirement(
275 self,
276 ireq: InstallRequirement,
277 marker: Marker | None = None,
278 hashes: dict[InstallRequirement, set[str]] | None = None,
279 ) -> str:
280 ireq_hashes = (hashes if hashes is not None else {}).get(ireq)
281
282 line = format_requirement(ireq, marker=marker, hashes=ireq_hashes)
283 if self.strip_extras:
284 line = strip_extras(line)
285
286 if not self.annotate:
287 return line
288
289 # Annotate what packages or reqs-ins this package is required by
290 required_by = set()
291 if hasattr(ireq, "_source_ireqs"):
292 required_by |= {
293 _comes_from_as_string(src_ireq.comes_from)
294 for src_ireq in ireq._source_ireqs
295 if src_ireq.comes_from
296 }
297
298 if ireq.comes_from:
299 required_by.add(_comes_from_as_string(ireq.comes_from))
300
301 required_by |= set(getattr(ireq, "_required_by", set()))
302
303 if required_by:
304 if self.annotation_style == "split":
305 annotation = annotation_style_split(required_by)
306 sep = "\n "
307 elif self.annotation_style == "line":
308 annotation = annotation_style_line(required_by)
309 sep = "\n " if ireq_hashes else " "
310 else: # pragma: no cover
311 raise ValueError("Invalid value for annotation style")
312 if self.strip_extras:
313 annotation = strip_extras(annotation)
314 # 24 is one reasonable column size to use here, that we've used in the past
315 lines = f"{line:24}{sep}{comment(annotation)}".splitlines()
316 line = "\n".join(ln.rstrip() for ln in lines)
317
318 return line
319
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/piptools/writer.py b/piptools/writer.py
--- a/piptools/writer.py
+++ b/piptools/writer.py
@@ -181,7 +181,7 @@
hashes: dict[InstallRequirement, set[str]] | None = None,
) -> Iterator[str]:
# default values
- unsafe_packages = unsafe_packages if not self.allow_unsafe else set()
+ unsafe_packages = unsafe_packages if self.allow_unsafe else set()
hashes = hashes or {}
# Check for unhashed or unpinned packages if at least one package does have