problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff |
---|---|---|---|---|---|---|---|---|
stringlengths 18–22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13–58 | stringlengths 1.71k–18.9k | stringlengths 145–5.13k | stringlengths 465–23.6k | int64 556–4.1k | int64 47–1.02k |

gh_patches_debug_24500 | rasdani/github-patches | git_diff | mirumee__ariadne-23 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create `resolve_to` utility
Python variables are usually named using `snake_case`, whereas the majority of JS written today uses `camelCase`. Ariadne will need to provide a way to resolve one to the other.
The idea is to create a `resolve_to` function taking a single `name` argument and returning a resolver for the specified attr/key name, enabling **explicit** mapping as such:
```
resolvers_map = {
"User": {
"lastVisitedOn": resolve_to("last_visited_on")
}
}
```
Another advantage of having such a utility would be arbitrary field names:
```
resolvers_map = {
"User": {
"lastVisitedOn": resolve_to("last_visit")
}
}
```
</issue>
<code>
[start of ariadne/resolvers.py]
1 from graphql import GraphQLObjectType, GraphQLScalarType, GraphQLSchema
2 from graphql.execution.base import ResolveInfo
3
4
5 def default_resolver(parent, info: ResolveInfo):
6 if isinstance(parent, dict):
7 return parent.get(info.field_name)
8 return getattr(parent, info.field_name, None)
9
10
11 def add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict):
12 for type_name, type_object in schema.get_type_map().items():
13 if isinstance(type_object, GraphQLObjectType):
14 add_resolve_functions_to_object(type_name, type_object, resolvers)
15 if isinstance(type_object, GraphQLScalarType):
16 add_resolve_function_to_scalar(type_name, type_object, resolvers)
17
18
19 def add_resolve_functions_to_object(name: str, obj: GraphQLObjectType, resolvers: dict):
20 type_resolver = resolvers.get(name, {})
21 for field_name, field_object in obj.fields.items():
22 field_resolver = type_resolver.get(field_name, default_resolver)
23 field_object.resolver = field_resolver
24
25
26 def add_resolve_function_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict):
27 serializer = resolvers.get(name, obj.serialize)
28 obj.serialize = serializer
29
[end of ariadne/resolvers.py]
[start of ariadne/__init__.py]
1 from .build_schema import build_schema_from_type_definitions
2 from .executable_schema import make_executable_schema
3 from .resolvers import add_resolve_functions_to_schema, default_resolver
4
5 __all__ = [
6 "add_resolve_functions_to_schema",
7 "build_schema_from_type_definitions",
8 "default_resolver",
9 "make_executable_schema",
10 ]
11
[end of ariadne/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ariadne/__init__.py b/ariadne/__init__.py
--- a/ariadne/__init__.py
+++ b/ariadne/__init__.py
@@ -1,10 +1,11 @@
from .build_schema import build_schema_from_type_definitions
from .executable_schema import make_executable_schema
-from .resolvers import add_resolve_functions_to_schema, default_resolver
+from .resolvers import add_resolve_functions_to_schema, default_resolver, resolve_to
__all__ = [
"add_resolve_functions_to_schema",
"build_schema_from_type_definitions",
"default_resolver",
"make_executable_schema",
+ "resolve_to",
]
diff --git a/ariadne/resolvers.py b/ariadne/resolvers.py
--- a/ariadne/resolvers.py
+++ b/ariadne/resolvers.py
@@ -2,10 +2,21 @@
from graphql.execution.base import ResolveInfo
-def default_resolver(parent, info: ResolveInfo):
+def resolve_parent_field(parent, name: str):
if isinstance(parent, dict):
- return parent.get(info.field_name)
- return getattr(parent, info.field_name, None)
+ return parent.get(name)
+ return getattr(parent, name, None)
+
+
+def default_resolver(parent, info: ResolveInfo):
+ return resolve_parent_field(parent, info.field_name)
+
+
+def resolve_to(name: str):
+ def resolver(parent, *_):
+ return resolve_parent_field(parent, name)
+
+ return resolver
def add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict):
|
{"golden_diff": "diff --git a/ariadne/__init__.py b/ariadne/__init__.py\n--- a/ariadne/__init__.py\n+++ b/ariadne/__init__.py\n@@ -1,10 +1,11 @@\n from .build_schema import build_schema_from_type_definitions\n from .executable_schema import make_executable_schema\n-from .resolvers import add_resolve_functions_to_schema, default_resolver\n+from .resolvers import add_resolve_functions_to_schema, default_resolver, resolve_to\n \n __all__ = [\n \"add_resolve_functions_to_schema\",\n \"build_schema_from_type_definitions\",\n \"default_resolver\",\n \"make_executable_schema\",\n+ \"resolve_to\",\n ]\ndiff --git a/ariadne/resolvers.py b/ariadne/resolvers.py\n--- a/ariadne/resolvers.py\n+++ b/ariadne/resolvers.py\n@@ -2,10 +2,21 @@\n from graphql.execution.base import ResolveInfo\n \n \n-def default_resolver(parent, info: ResolveInfo):\n+def resolve_parent_field(parent, name: str):\n if isinstance(parent, dict):\n- return parent.get(info.field_name)\n- return getattr(parent, info.field_name, None)\n+ return parent.get(name)\n+ return getattr(parent, name, None)\n+\n+\n+def default_resolver(parent, info: ResolveInfo):\n+ return resolve_parent_field(parent, info.field_name)\n+\n+\n+def resolve_to(name: str):\n+ def resolver(parent, *_):\n+ return resolve_parent_field(parent, name)\n+\n+ return resolver\n \n \n def add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict):\n", "issue": "Create `resolve_to` utility\nPython variables are usually named using the `snake_case`, whereas majority of JS written today uses the `camelCase`. Ariadne will need to provide a way to resolve one to another.\r\n\r\nThe idea is to create `resolver_for` function taking single `name` argument and returning resolver for specified attr/key name, enabling **explicit** mapping as such:\r\n\r\n```\r\nresolvers_map = {\r\n \"User\": {\r\n \"lastVisitedOn\": resolve_to(\"last_visited_on\")\r\n }\r\n}\r\n```\r\n\r\nOther advantage of having such utility would be arbitrary field names:\r\n\r\n```\r\nresolvers_map = {\r\n \"User\": {\r\n \"lastVisitedOn\": resolve_to(\"last_visit\")\r\n }\r\n}\r\n```\n", "before_files": [{"content": "from graphql import GraphQLObjectType, GraphQLScalarType, GraphQLSchema\nfrom graphql.execution.base import ResolveInfo\n\n\ndef default_resolver(parent, info: ResolveInfo):\n if isinstance(parent, dict):\n return parent.get(info.field_name)\n return getattr(parent, info.field_name, None)\n\n\ndef add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict):\n for type_name, type_object in schema.get_type_map().items():\n if isinstance(type_object, GraphQLObjectType):\n add_resolve_functions_to_object(type_name, type_object, resolvers)\n if isinstance(type_object, GraphQLScalarType):\n add_resolve_function_to_scalar(type_name, type_object, resolvers)\n\n\ndef add_resolve_functions_to_object(name: str, obj: GraphQLObjectType, resolvers: dict):\n type_resolver = resolvers.get(name, {})\n for field_name, field_object in obj.fields.items():\n field_resolver = type_resolver.get(field_name, default_resolver)\n field_object.resolver = field_resolver\n\n\ndef add_resolve_function_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict):\n serializer = resolvers.get(name, obj.serialize)\n obj.serialize = serializer\n", "path": "ariadne/resolvers.py"}, {"content": "from .build_schema import build_schema_from_type_definitions\nfrom .executable_schema import make_executable_schema\nfrom .resolvers import add_resolve_functions_to_schema, default_resolver\n\n__all__ = [\n 
\"add_resolve_functions_to_schema\",\n \"build_schema_from_type_definitions\",\n \"default_resolver\",\n \"make_executable_schema\",\n]\n", "path": "ariadne/__init__.py"}]}
| 1,085 | 352 |
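As a self-contained sketch of the behaviour the golden diff above implements (the `User` class and the dates are invented for illustration, not taken from the project), `resolve_to(name)` returns a resolver that ignores GraphQL's `info` argument and always reads one fixed attribute or key from the parent dict or object:

```python
# Sketch mirroring the resolvers added in the golden diff; example data is made up.
def resolve_parent_field(parent, name):
    if isinstance(parent, dict):
        return parent.get(name)
    return getattr(parent, name, None)


def resolve_to(name):
    def resolver(parent, *_):
        return resolve_parent_field(parent, name)

    return resolver


class User:
    last_visited_on = "2018-06-01"


resolvers_map = {"User": {"lastVisitedOn": resolve_to("last_visited_on")}}
resolver = resolvers_map["User"]["lastVisitedOn"]

print(resolver({"last_visited_on": "2018-05-31"}, None))  # dict parent   -> 2018-05-31
print(resolver(User(), None))                             # object parent -> 2018-06-01
```

The same factory also covers the issue's second case (mapping `lastVisitedOn` to an arbitrary key such as `last_visit`), since the resolver simply reads whatever name it was given.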
gh_patches_debug_5210 | rasdani/github-patches | git_diff | fedora-infra__bodhi-3464 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Greenwave consumer should not process the compose results
Currently the greenwave consumer consumes all the messages from `topic=org.fedoraproject.prod.greenwave.decision.update`, but some messages on this topic are related to compose results and not koji builds.
The consumer should filter the messages based on the `subject_type`, and only process the `koji_build` type.
See a koji_build example https://apps.fedoraproject.org/datagrepper/id?id=2019-efb310ed-6acf-4742-809f-90a40c2a5855&is_raw=true&size=extra-large
And a compose example https://apps.fedoraproject.org/datagrepper/id?id=2019-33002e05-241c-4180-8953-5a29cea4443d&is_raw=true&size=extra-large
This is currently raising the following exception in production:
```
2019-07-26 15:16:55,501 ERROR [bodhi.server][PoolThread-twisted.internet.reactor-0] Bodhi failed to send POST request to Greenwave at the following URL "https://greenwave-web-greenwave.app.os.fedoraproject.org/api/v1.0/decision". The status code was "502". The error was "{'message': 'Failed to retrieve SCM URL from Koji build "Fedora-Workstation-Rawhide-20190726.n.0" at "https://koji.fedoraproject.org/kojihub" (expected SCM URL in "source" attribute)'}".
```
</issue>
<code>
[start of bodhi/server/consumers/greenwave.py]
1 # Copyright © 2019 Red Hat, Inc.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License along with
16 # this program; if not, write to the Free Software Foundation, Inc., 51
17 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 """
19 The "greenwave handler".
20
21 This module is responsible for listening for messages from greenwave.
22 It then updates the policies of the build that greenwave checked.
23 """
24
25 import logging
26
27 import fedora_messaging
28
29 from bodhi.server.models import Build
30 from bodhi.server.util import transactional_session_maker
31
32 log = logging.getLogger(__name__)
33
34
35 class GreenwaveHandler:
36 """
37 The Bodhi Greenwave Handler.
38
39 A fedora-messaging listener waiting for messages from greenwave about enforced policies.
40 """
41
42 def __init__(self):
43 """Initialize the GreenwaveHandler."""
44 self.db_factory = transactional_session_maker()
45
46 def __call__(self, message: fedora_messaging.api.Message):
47 """Handle messages arriving with the configured topic."""
48 msg = message.body
49 if not msg:
50 log.debug("Ignoring message without body.")
51 return
52
53 subject_identifier = msg.get("subject_identifier")
54
55 if subject_identifier is None:
56 log.debug("Couldn't find subject_identifier in Greenwave message")
57 return
58
59 with self.db_factory():
60
61 build = Build.get(subject_identifier)
62 if build is None:
63 log.debug(f"Couldn't find build {subject_identifier} in DB")
64 return
65
66 log.info(f"Updating the test_gating_status for: {build.update.alias}")
67 build.update.update_test_gating_status()
68
[end of bodhi/server/consumers/greenwave.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bodhi/server/consumers/greenwave.py b/bodhi/server/consumers/greenwave.py
--- a/bodhi/server/consumers/greenwave.py
+++ b/bodhi/server/consumers/greenwave.py
@@ -56,6 +56,11 @@
log.debug("Couldn't find subject_identifier in Greenwave message")
return
+ subject_type = msg.get("subject_type")
+ if subject_type == "compose":
+ log.debug("Not requesting a decision for a compose")
+ return
+
with self.db_factory():
build = Build.get(subject_identifier)
|
{"golden_diff": "diff --git a/bodhi/server/consumers/greenwave.py b/bodhi/server/consumers/greenwave.py\n--- a/bodhi/server/consumers/greenwave.py\n+++ b/bodhi/server/consumers/greenwave.py\n@@ -56,6 +56,11 @@\n log.debug(\"Couldn't find subject_identifier in Greenwave message\")\n return\n \n+ subject_type = msg.get(\"subject_type\")\n+ if subject_type == \"compose\":\n+ log.debug(\"Not requesting a decision for a compose\")\n+ return\n+\n with self.db_factory():\n \n build = Build.get(subject_identifier)\n", "issue": "Greenwave consumer should not process the compose results\nCurrently the greenwave consumer consumes all the messages from `topic=org.fedoraproject.prod.greenwave.decision.update` but some messages on this topic are related to compose results and not koji build.\r\n\r\nThe consumer should filter the messages based on the `subject_type`, and only process `koji_build` type.\r\n\r\nSee a koji_build example https://apps.fedoraproject.org/datagrepper/id?id=2019-efb310ed-6acf-4742-809f-90a40c2a5855&is_raw=true&size=extra-large\r\n\r\nAnd a compose example https://apps.fedoraproject.org/datagrepper/id?id=2019-33002e05-241c-4180-8953-5a29cea4443d&is_raw=true&size=extra-large\r\n\r\nThis is currently raising the following exception in production : \r\n``` 2019-07-26 15:16:55,501 ERROR [bodhi.server][PoolThread-twisted.internet.reactor-0] Bodhi failed to send POST request to Greenwave at the following URL \"https://greenwave-web-greenwave.app.os.fedoraproject.org/api/v1.0/decision\". The status code was \"502\". The error was \"{'message': 'Failed to retrieve SCM URL from Koji build \"Fedora-Workstation-Rawhide-20190726.n.0\" at \"https://koji.fedoraproject.org/kojihub\" (expected SCM URL in \"source\" attribute)'}\". ```\n", "before_files": [{"content": "# Copyright \u00a9 2019 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe \"greenwave handler\".\n\nThis module is responsible for listening for messages from greenwave.\nIt then updates the policies of the build that greenwave checked.\n\"\"\"\n\nimport logging\n\nimport fedora_messaging\n\nfrom bodhi.server.models import Build\nfrom bodhi.server.util import transactional_session_maker\n\nlog = logging.getLogger(__name__)\n\n\nclass GreenwaveHandler:\n \"\"\"\n The Bodhi Greenwave Handler.\n\n A fedora-messaging listener waiting for messages from greenwave about enforced policies.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the GreenwaveHandler.\"\"\"\n self.db_factory = transactional_session_maker()\n\n def __call__(self, message: fedora_messaging.api.Message):\n \"\"\"Handle messages arriving with the configured topic.\"\"\"\n msg = message.body\n if not msg:\n log.debug(\"Ignoring message without body.\")\n return\n\n subject_identifier = msg.get(\"subject_identifier\")\n\n if subject_identifier is None:\n log.debug(\"Couldn't find subject_identifier in Greenwave message\")\n return\n\n with self.db_factory():\n\n build = Build.get(subject_identifier)\n if build is None:\n log.debug(f\"Couldn't find build {subject_identifier} in DB\")\n return\n\n log.info(f\"Updating the test_gating_status for: {build.update.alias}\")\n build.update.update_test_gating_status()\n", "path": "bodhi/server/consumers/greenwave.py"}]}
| 1,533 | 139 |
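As a plain-Python illustration of the rule the golden diff above adds (this is not the Bodhi codebase; the `koji_build` subject identifier is a made-up NVR, while the compose identifier comes from the error log in the issue), a Greenwave message should only trigger a gating-status update when it carries a subject identifier and its `subject_type` is not `compose`:

```python
# Standalone sketch of the message filtering added in the golden diff.
def should_process(msg: dict) -> bool:
    if not msg:
        return False                           # empty body
    if msg.get("subject_identifier") is None:
        return False                           # nothing to look up in the database
    if msg.get("subject_type") == "compose":
        return False                           # compose results are not koji builds
    return True


koji_msg = {"subject_type": "koji_build",
            "subject_identifier": "python-3.8.0-1.fc31"}  # hypothetical build NVR
compose_msg = {"subject_type": "compose",
               "subject_identifier": "Fedora-Workstation-Rawhide-20190726.n.0"}

print(should_process(koji_msg))     # True  -> update test_gating_status
print(should_process(compose_msg))  # False -> skip, as requested in the issue
```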
gh_patches_debug_36513 | rasdani/github-patches | git_diff | python-discord__bot-692 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mods+ should be permitted to use any command in #checkpoint
Currently the bot will ignore all commands inside #checkpoint. However, there are cases where this is not ideal, for example if we want to clean the channel up with the !clean command.
Write a fix that allows moderators, admins and owners to use any command in this channel, but still ignores everything but `!accept` from anyone else.
Make the bot delete any message it posts in #checkpoint after a while
Currently, messages posted in #checkpoint by the bot will usually be posted with a `delete_after` of 10 seconds or so, so they will automatically be removed. This helps the channel stay clean so that the important messages at the top of it will stay there. The bot also automatically deletes any messages posted in this channel by other users.
However, when the bot posts messages in this channel that are not related to the Verification cog, they will never get deleted. For example, if a user in this channel triggers the spam filter, the bot will post a message about muting the user which will never be deleted.
We want this channel to stay clean, so let's implement some logic that ensures that **any message posted by the bot** in this channel will be deleted after a short time, like 10 seconds.
</issue>
<code>
[start of bot/cogs/verification.py]
1 import logging
2 from datetime import datetime
3
4 from discord import Colour, Message, NotFound, Object
5 from discord.ext import tasks
6 from discord.ext.commands import Cog, Context, command
7
8 from bot.bot import Bot
9 from bot.cogs.moderation import ModLog
10 from bot.constants import (
11 Bot as BotConfig,
12 Channels, Colours, Event,
13 Filter, Icons, Roles
14 )
15 from bot.decorators import InChannelCheckFailure, in_channel, without_role
16
17 log = logging.getLogger(__name__)
18
19 WELCOME_MESSAGE = f"""
20 Hello! Welcome to the server, and thanks for verifying yourself!
21
22 For your records, these are the documents you accepted:
23
24 `1)` Our rules, here: <https://pythondiscord.com/pages/rules>
25 `2)` Our privacy policy, here: <https://pythondiscord.com/pages/privacy> - you can find information on how to have \
26 your information removed here as well.
27
28 Feel free to review them at any point!
29
30 Additionally, if you'd like to receive notifications for the announcements we post in <#{Channels.announcements}> \
31 from time to time, you can send `!subscribe` to <#{Channels.bot}> at any time to assign yourself the \
32 **Announcements** role. We'll mention this role every time we make an announcement.
33
34 If you'd like to unsubscribe from the announcement notifications, simply send `!unsubscribe` to <#{Channels.bot}>.
35 """
36
37 PERIODIC_PING = (
38 f"@everyone To verify that you have read our rules, please type `{BotConfig.prefix}accept`."
39 f" If you encounter any problems during the verification process, ping the <@&{Roles.admin}> role in this channel."
40 )
41
42
43 class Verification(Cog):
44 """User verification and role self-management."""
45
46 def __init__(self, bot: Bot):
47 self.bot = bot
48 self.periodic_ping.start()
49
50 @property
51 def mod_log(self) -> ModLog:
52 """Get currently loaded ModLog cog instance."""
53 return self.bot.get_cog("ModLog")
54
55 @Cog.listener()
56 async def on_message(self, message: Message) -> None:
57 """Check new message event for messages to the checkpoint channel & process."""
58 if message.author.bot:
59 return # They're a bot, ignore
60
61 if message.channel.id != Channels.verification:
62 return # Only listen for #checkpoint messages
63
64 # if a user mentions a role or guild member
65 # alert the mods in mod-alerts channel
66 if message.mentions or message.role_mentions:
67 log.debug(
68 f"{message.author} mentioned one or more users "
69 f"and/or roles in {message.channel.name}"
70 )
71
72 embed_text = (
73 f"{message.author.mention} sent a message in "
74 f"{message.channel.mention} that contained user and/or role mentions."
75 f"\n\n**Original message:**\n>>> {message.content}"
76 )
77
78 # Send pretty mod log embed to mod-alerts
79 await self.mod_log.send_log_message(
80 icon_url=Icons.filtering,
81 colour=Colour(Colours.soft_red),
82 title=f"User/Role mentioned in {message.channel.name}",
83 text=embed_text,
84 thumbnail=message.author.avatar_url_as(static_format="png"),
85 channel_id=Channels.mod_alerts,
86 ping_everyone=Filter.ping_everyone,
87 )
88
89 ctx = await self.bot.get_context(message) # type: Context
90
91 if ctx.command is not None and ctx.command.name == "accept":
92 return # They used the accept command
93
94 for role in ctx.author.roles:
95 if role.id == Roles.verified:
96 log.warning(f"{ctx.author} posted '{ctx.message.content}' "
97 "in the verification channel, but is already verified.")
98 return # They're already verified
99
100 log.debug(f"{ctx.author} posted '{ctx.message.content}' in the verification "
101 "channel. We are providing instructions how to verify.")
102 await ctx.send(
103 f"{ctx.author.mention} Please type `!accept` to verify that you accept our rules, "
104 f"and gain access to the rest of the server.",
105 delete_after=20
106 )
107
108 log.trace(f"Deleting the message posted by {ctx.author}")
109
110 try:
111 await ctx.message.delete()
112 except NotFound:
113 log.trace("No message found, it must have been deleted by another bot.")
114
115 @command(name='accept', aliases=('verify', 'verified', 'accepted'), hidden=True)
116 @without_role(Roles.verified)
117 @in_channel(Channels.verification)
118 async def accept_command(self, ctx: Context, *_) -> None: # We don't actually care about the args
119 """Accept our rules and gain access to the rest of the server."""
120 log.debug(f"{ctx.author} called !accept. Assigning the 'Developer' role.")
121 await ctx.author.add_roles(Object(Roles.verified), reason="Accepted the rules")
122 try:
123 await ctx.author.send(WELCOME_MESSAGE)
124 except Exception:
125 # Catch the exception, in case they have DMs off or something
126 log.exception(f"Unable to send welcome message to user {ctx.author}.")
127
128 log.trace(f"Deleting the message posted by {ctx.author}.")
129
130 try:
131 self.mod_log.ignore(Event.message_delete, ctx.message.id)
132 await ctx.message.delete()
133 except NotFound:
134 log.trace("No message found, it must have been deleted by another bot.")
135
136 @command(name='subscribe')
137 @in_channel(Channels.bot)
138 async def subscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args
139 """Subscribe to announcement notifications by assigning yourself the role."""
140 has_role = False
141
142 for role in ctx.author.roles:
143 if role.id == Roles.announcements:
144 has_role = True
145 break
146
147 if has_role:
148 await ctx.send(f"{ctx.author.mention} You're already subscribed!")
149 return
150
151 log.debug(f"{ctx.author} called !subscribe. Assigning the 'Announcements' role.")
152 await ctx.author.add_roles(Object(Roles.announcements), reason="Subscribed to announcements")
153
154 log.trace(f"Deleting the message posted by {ctx.author}.")
155
156 await ctx.send(
157 f"{ctx.author.mention} Subscribed to <#{Channels.announcements}> notifications.",
158 )
159
160 @command(name='unsubscribe')
161 @in_channel(Channels.bot)
162 async def unsubscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args
163 """Unsubscribe from announcement notifications by removing the role from yourself."""
164 has_role = False
165
166 for role in ctx.author.roles:
167 if role.id == Roles.announcements:
168 has_role = True
169 break
170
171 if not has_role:
172 await ctx.send(f"{ctx.author.mention} You're already unsubscribed!")
173 return
174
175 log.debug(f"{ctx.author} called !unsubscribe. Removing the 'Announcements' role.")
176 await ctx.author.remove_roles(Object(Roles.announcements), reason="Unsubscribed from announcements")
177
178 log.trace(f"Deleting the message posted by {ctx.author}.")
179
180 await ctx.send(
181 f"{ctx.author.mention} Unsubscribed from <#{Channels.announcements}> notifications."
182 )
183
184 # This cannot be static (must have a __func__ attribute).
185 async def cog_command_error(self, ctx: Context, error: Exception) -> None:
186 """Check for & ignore any InChannelCheckFailure."""
187 if isinstance(error, InChannelCheckFailure):
188 error.handled = True
189
190 @staticmethod
191 def bot_check(ctx: Context) -> bool:
192 """Block any command within the verification channel that is not !accept."""
193 if ctx.channel.id == Channels.verification:
194 return ctx.command.name == "accept"
195 else:
196 return True
197
198 @tasks.loop(hours=12)
199 async def periodic_ping(self) -> None:
200 """Every week, mention @everyone to remind them to verify."""
201 messages = self.bot.get_channel(Channels.verification).history(limit=10)
202 need_to_post = True # True if a new message needs to be sent.
203
204 async for message in messages:
205 if message.author == self.bot.user and message.content == PERIODIC_PING:
206 delta = datetime.utcnow() - message.created_at # Time since last message.
207 if delta.days >= 7: # Message is older than a week.
208 await message.delete()
209 else:
210 need_to_post = False
211
212 break
213
214 if need_to_post:
215 await self.bot.get_channel(Channels.verification).send(PERIODIC_PING)
216
217 @periodic_ping.before_loop
218 async def before_ping(self) -> None:
219 """Only start the loop when the bot is ready."""
220 await self.bot.wait_until_ready()
221
222 def cog_unload(self) -> None:
223 """Cancel the periodic ping task when the cog is unloaded."""
224 self.periodic_ping.cancel()
225
226
227 def setup(bot: Bot) -> None:
228 """Load the Verification cog."""
229 bot.add_cog(Verification(bot))
230
[end of bot/cogs/verification.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bot/cogs/verification.py b/bot/cogs/verification.py
--- a/bot/cogs/verification.py
+++ b/bot/cogs/verification.py
@@ -10,9 +10,10 @@
from bot.constants import (
Bot as BotConfig,
Channels, Colours, Event,
- Filter, Icons, Roles
+ Filter, Icons, MODERATION_ROLES, Roles
)
from bot.decorators import InChannelCheckFailure, in_channel, without_role
+from bot.utils.checks import without_role_check
log = logging.getLogger(__name__)
@@ -38,6 +39,7 @@
f"@everyone To verify that you have read our rules, please type `{BotConfig.prefix}accept`."
f" If you encounter any problems during the verification process, ping the <@&{Roles.admin}> role in this channel."
)
+BOT_MESSAGE_DELETE_DELAY = 10
class Verification(Cog):
@@ -55,12 +57,16 @@
@Cog.listener()
async def on_message(self, message: Message) -> None:
"""Check new message event for messages to the checkpoint channel & process."""
- if message.author.bot:
- return # They're a bot, ignore
-
if message.channel.id != Channels.verification:
return # Only listen for #checkpoint messages
+ if message.author.bot:
+ # They're a bot, delete their message after the delay.
+ # But not the periodic ping; we like that one.
+ if message.content != PERIODIC_PING:
+ await message.delete(delay=BOT_MESSAGE_DELETE_DELAY)
+ return
+
# if a user mentions a role or guild member
# alert the mods in mod-alerts channel
if message.mentions or message.role_mentions:
@@ -190,7 +196,7 @@
@staticmethod
def bot_check(ctx: Context) -> bool:
"""Block any command within the verification channel that is not !accept."""
- if ctx.channel.id == Channels.verification:
+ if ctx.channel.id == Channels.verification and without_role_check(ctx, *MODERATION_ROLES):
return ctx.command.name == "accept"
else:
return True
|
{"golden_diff": "diff --git a/bot/cogs/verification.py b/bot/cogs/verification.py\n--- a/bot/cogs/verification.py\n+++ b/bot/cogs/verification.py\n@@ -10,9 +10,10 @@\n from bot.constants import (\n Bot as BotConfig,\n Channels, Colours, Event,\n- Filter, Icons, Roles\n+ Filter, Icons, MODERATION_ROLES, Roles\n )\n from bot.decorators import InChannelCheckFailure, in_channel, without_role\n+from bot.utils.checks import without_role_check\n \n log = logging.getLogger(__name__)\n \n@@ -38,6 +39,7 @@\n f\"@everyone To verify that you have read our rules, please type `{BotConfig.prefix}accept`.\"\n f\" If you encounter any problems during the verification process, ping the <@&{Roles.admin}> role in this channel.\"\n )\n+BOT_MESSAGE_DELETE_DELAY = 10\n \n \n class Verification(Cog):\n@@ -55,12 +57,16 @@\n @Cog.listener()\n async def on_message(self, message: Message) -> None:\n \"\"\"Check new message event for messages to the checkpoint channel & process.\"\"\"\n- if message.author.bot:\n- return # They're a bot, ignore\n-\n if message.channel.id != Channels.verification:\n return # Only listen for #checkpoint messages\n \n+ if message.author.bot:\n+ # They're a bot, delete their message after the delay.\n+ # But not the periodic ping; we like that one.\n+ if message.content != PERIODIC_PING:\n+ await message.delete(delay=BOT_MESSAGE_DELETE_DELAY)\n+ return\n+\n # if a user mentions a role or guild member\n # alert the mods in mod-alerts channel\n if message.mentions or message.role_mentions:\n@@ -190,7 +196,7 @@\n @staticmethod\n def bot_check(ctx: Context) -> bool:\n \"\"\"Block any command within the verification channel that is not !accept.\"\"\"\n- if ctx.channel.id == Channels.verification:\n+ if ctx.channel.id == Channels.verification and without_role_check(ctx, *MODERATION_ROLES):\n return ctx.command.name == \"accept\"\n else:\n return True\n", "issue": "Mods+ should be permitted to use any command in #checkpoint\nCurrently the bot will ignore all commands inside #checkpoint. However, there are cases where this is not ideal, for example if we want to clean the channel up with the !clean command.\r\n\r\nWrite a fix that allows moderators, admins and owners to use any command in this channel, but still ignores everything but `!accept` from anyone else.\nMake the bot delete any message it posts in #checkpoint after a while\nCurrently, messages posted in #checkpoint by the bot will usually be posted with a `delete_after` of 10 seconds or so, so they will automatically be removed. This helps the channel stay clean so that the important messages at the top of it will stay there. The bot also automatically deletes any messages posted in this channel by other users.\r\n\r\nHowever, when the bot posts messages in this channel that are not related to the Verification cog, they will never get deleted. 
For example, if a user in this channel triggers the spam filter, the bot will post a message about muting the user which will never be deleted.\r\n\r\nWe want this channel to stay clean, so let's implement some logics that ensure that **any message posted by the bot** in this channel will be deleted after a short time, like 10 seconds.\n", "before_files": [{"content": "import logging\nfrom datetime import datetime\n\nfrom discord import Colour, Message, NotFound, Object\nfrom discord.ext import tasks\nfrom discord.ext.commands import Cog, Context, command\n\nfrom bot.bot import Bot\nfrom bot.cogs.moderation import ModLog\nfrom bot.constants import (\n Bot as BotConfig,\n Channels, Colours, Event,\n Filter, Icons, Roles\n)\nfrom bot.decorators import InChannelCheckFailure, in_channel, without_role\n\nlog = logging.getLogger(__name__)\n\nWELCOME_MESSAGE = f\"\"\"\nHello! Welcome to the server, and thanks for verifying yourself!\n\nFor your records, these are the documents you accepted:\n\n`1)` Our rules, here: <https://pythondiscord.com/pages/rules>\n`2)` Our privacy policy, here: <https://pythondiscord.com/pages/privacy> - you can find information on how to have \\\nyour information removed here as well.\n\nFeel free to review them at any point!\n\nAdditionally, if you'd like to receive notifications for the announcements we post in <#{Channels.announcements}> \\\nfrom time to time, you can send `!subscribe` to <#{Channels.bot}> at any time to assign yourself the \\\n**Announcements** role. We'll mention this role every time we make an announcement.\n\nIf you'd like to unsubscribe from the announcement notifications, simply send `!unsubscribe` to <#{Channels.bot}>.\n\"\"\"\n\nPERIODIC_PING = (\n f\"@everyone To verify that you have read our rules, please type `{BotConfig.prefix}accept`.\"\n f\" If you encounter any problems during the verification process, ping the <@&{Roles.admin}> role in this channel.\"\n)\n\n\nclass Verification(Cog):\n \"\"\"User verification and role self-management.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.periodic_ping.start()\n\n @property\n def mod_log(self) -> ModLog:\n \"\"\"Get currently loaded ModLog cog instance.\"\"\"\n return self.bot.get_cog(\"ModLog\")\n\n @Cog.listener()\n async def on_message(self, message: Message) -> None:\n \"\"\"Check new message event for messages to the checkpoint channel & process.\"\"\"\n if message.author.bot:\n return # They're a bot, ignore\n\n if message.channel.id != Channels.verification:\n return # Only listen for #checkpoint messages\n\n # if a user mentions a role or guild member\n # alert the mods in mod-alerts channel\n if message.mentions or message.role_mentions:\n log.debug(\n f\"{message.author} mentioned one or more users \"\n f\"and/or roles in {message.channel.name}\"\n )\n\n embed_text = (\n f\"{message.author.mention} sent a message in \"\n f\"{message.channel.mention} that contained user and/or role mentions.\"\n f\"\\n\\n**Original message:**\\n>>> {message.content}\"\n )\n\n # Send pretty mod log embed to mod-alerts\n await self.mod_log.send_log_message(\n icon_url=Icons.filtering,\n colour=Colour(Colours.soft_red),\n title=f\"User/Role mentioned in {message.channel.name}\",\n text=embed_text,\n thumbnail=message.author.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.mod_alerts,\n ping_everyone=Filter.ping_everyone,\n )\n\n ctx = await self.bot.get_context(message) # type: Context\n\n if ctx.command is not None and ctx.command.name == \"accept\":\n return # They used the 
accept command\n\n for role in ctx.author.roles:\n if role.id == Roles.verified:\n log.warning(f\"{ctx.author} posted '{ctx.message.content}' \"\n \"in the verification channel, but is already verified.\")\n return # They're already verified\n\n log.debug(f\"{ctx.author} posted '{ctx.message.content}' in the verification \"\n \"channel. We are providing instructions how to verify.\")\n await ctx.send(\n f\"{ctx.author.mention} Please type `!accept` to verify that you accept our rules, \"\n f\"and gain access to the rest of the server.\",\n delete_after=20\n )\n\n log.trace(f\"Deleting the message posted by {ctx.author}\")\n\n try:\n await ctx.message.delete()\n except NotFound:\n log.trace(\"No message found, it must have been deleted by another bot.\")\n\n @command(name='accept', aliases=('verify', 'verified', 'accepted'), hidden=True)\n @without_role(Roles.verified)\n @in_channel(Channels.verification)\n async def accept_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Accept our rules and gain access to the rest of the server.\"\"\"\n log.debug(f\"{ctx.author} called !accept. Assigning the 'Developer' role.\")\n await ctx.author.add_roles(Object(Roles.verified), reason=\"Accepted the rules\")\n try:\n await ctx.author.send(WELCOME_MESSAGE)\n except Exception:\n # Catch the exception, in case they have DMs off or something\n log.exception(f\"Unable to send welcome message to user {ctx.author}.\")\n\n log.trace(f\"Deleting the message posted by {ctx.author}.\")\n\n try:\n self.mod_log.ignore(Event.message_delete, ctx.message.id)\n await ctx.message.delete()\n except NotFound:\n log.trace(\"No message found, it must have been deleted by another bot.\")\n\n @command(name='subscribe')\n @in_channel(Channels.bot)\n async def subscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Subscribe to announcement notifications by assigning yourself the role.\"\"\"\n has_role = False\n\n for role in ctx.author.roles:\n if role.id == Roles.announcements:\n has_role = True\n break\n\n if has_role:\n await ctx.send(f\"{ctx.author.mention} You're already subscribed!\")\n return\n\n log.debug(f\"{ctx.author} called !subscribe. Assigning the 'Announcements' role.\")\n await ctx.author.add_roles(Object(Roles.announcements), reason=\"Subscribed to announcements\")\n\n log.trace(f\"Deleting the message posted by {ctx.author}.\")\n\n await ctx.send(\n f\"{ctx.author.mention} Subscribed to <#{Channels.announcements}> notifications.\",\n )\n\n @command(name='unsubscribe')\n @in_channel(Channels.bot)\n async def unsubscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Unsubscribe from announcement notifications by removing the role from yourself.\"\"\"\n has_role = False\n\n for role in ctx.author.roles:\n if role.id == Roles.announcements:\n has_role = True\n break\n\n if not has_role:\n await ctx.send(f\"{ctx.author.mention} You're already unsubscribed!\")\n return\n\n log.debug(f\"{ctx.author} called !unsubscribe. 
Removing the 'Announcements' role.\")\n await ctx.author.remove_roles(Object(Roles.announcements), reason=\"Unsubscribed from announcements\")\n\n log.trace(f\"Deleting the message posted by {ctx.author}.\")\n\n await ctx.send(\n f\"{ctx.author.mention} Unsubscribed from <#{Channels.announcements}> notifications.\"\n )\n\n # This cannot be static (must have a __func__ attribute).\n async def cog_command_error(self, ctx: Context, error: Exception) -> None:\n \"\"\"Check for & ignore any InChannelCheckFailure.\"\"\"\n if isinstance(error, InChannelCheckFailure):\n error.handled = True\n\n @staticmethod\n def bot_check(ctx: Context) -> bool:\n \"\"\"Block any command within the verification channel that is not !accept.\"\"\"\n if ctx.channel.id == Channels.verification:\n return ctx.command.name == \"accept\"\n else:\n return True\n\n @tasks.loop(hours=12)\n async def periodic_ping(self) -> None:\n \"\"\"Every week, mention @everyone to remind them to verify.\"\"\"\n messages = self.bot.get_channel(Channels.verification).history(limit=10)\n need_to_post = True # True if a new message needs to be sent.\n\n async for message in messages:\n if message.author == self.bot.user and message.content == PERIODIC_PING:\n delta = datetime.utcnow() - message.created_at # Time since last message.\n if delta.days >= 7: # Message is older than a week.\n await message.delete()\n else:\n need_to_post = False\n\n break\n\n if need_to_post:\n await self.bot.get_channel(Channels.verification).send(PERIODIC_PING)\n\n @periodic_ping.before_loop\n async def before_ping(self) -> None:\n \"\"\"Only start the loop when the bot is ready.\"\"\"\n await self.bot.wait_until_ready()\n\n def cog_unload(self) -> None:\n \"\"\"Cancel the periodic ping task when the cog is unloaded.\"\"\"\n self.periodic_ping.cancel()\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Verification cog.\"\"\"\n bot.add_cog(Verification(bot))\n", "path": "bot/cogs/verification.py"}]}
| 3,353 | 489 |
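As a rough decision-table sketch of the first half of the diff above (plain Python without discord.py; the role and channel names are placeholders rather than the project's real constants from `bot.constants`), moderators and above bypass the `!accept`-only restriction in #checkpoint while everyone else stays limited to `!accept`:

```python
# Placeholder constants; the real bot compares role and channel IDs, not names.
MODERATION_ROLES = {"moderator", "admin", "owner"}
VERIFICATION_CHANNEL = "checkpoint"


def command_allowed(channel: str, author_roles: set, command_name: str) -> bool:
    if channel != VERIFICATION_CHANNEL:
        return True                       # other channels are unaffected
    if author_roles & MODERATION_ROLES:
        return True                       # mods+ may run any command in #checkpoint
    return command_name == "accept"       # everyone else: only !accept


print(command_allowed("checkpoint", {"member"}, "clean"))     # False
print(command_allowed("checkpoint", {"moderator"}, "clean"))  # True
print(command_allowed("checkpoint", {"member"}, "accept"))    # True
```

The second half of the diff addresses the clean-up request by deleting any bot message other than the periodic ping with `message.delete(delay=BOT_MESSAGE_DELETE_DELAY)`, where the delay is 10 seconds.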
gh_patches_debug_25905 | rasdani/github-patches | git_diff | yt-dlp__yt-dlp-3857 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bloomberg] Extracter does not work.
### Checklist
- [X] I'm reporting a broken site
- [X] I've verified that I'm running yt-dlp version **2022.05.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
USA
### Description
Bloomberg extractor complains about being unable to extract ID, same for any video.
I used "https://www.bloomberg.com/news/videos/2022-05-19/bloomberg-studio-1-0-roblox-ceo-david-baszucki-video" as an example.
### Verbose log
```shell
[debug] Command-line config: ['-vU', 'https://www.bloomberg.com/news/videos/2022-05-19/bloomberg-studio-1-0-roblox-ceo-david-baszucki-video']
[debug] Encodings: locale cp1252, fs utf-8, pref cp1252, out utf-8, error utf-8, screen utf-8
[debug] Lazy loading extractors is disabled
[debug] Python version 3.10.4 (CPython 64bit) - Windows-10-10.0.19044-SP0
[debug] Checking exe version: ffprobe -bsfs
[debug] Checking exe version: ffmpeg -bsfs
[debug] exe versions: ffmpeg N-106452-g7fbf1dd47e-20220329 (setts), ffprobe N-106452-g7fbf1dd47e-20220329
[debug] Optional libraries: Cryptodome-3.14.1, brotli-1.0.9, certifi-2022.05.18, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {}
Latest version: 2022.05.18, Current version: 2022.05.18
yt-dlp is up to date (2022.05.18)
[debug] [Bloomberg] Extracting URL: https://www.bloomberg.com/news/videos/2022-05-19/bloomberg-studio-1-0-roblox-ceo-david-baszucki-video
[Bloomberg] bloomberg-studio-1-0-roblox-ceo-david-baszucki-video: Downloading webpage
ERROR: [Bloomberg] bloomberg-studio-1-0-roblox-ceo-david-baszucki-video: Unable to extract id; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U
File "C:\Users\mystu\AppData\Local\Programs\Python\Python310\lib\site-packages\yt_dlp\extractor\common.py", line 642, in extract
ie_result = self._real_extract(url)
File "C:\Users\mystu\AppData\Local\Programs\Python\Python310\lib\site-packages\yt_dlp\extractor\bloomberg.py", line 54, in _real_extract
bplayer_data = self._parse_json(self._search_regex(
File "C:\Users\mystu\AppData\Local\Programs\Python\Python310\lib\site-packages\yt_dlp\extractor\common.py", line 1226, in _search_regex
raise RegexNotFoundError('Unable to extract %s' % _name)
```
</issue>
<code>
[start of yt_dlp/extractor/bloomberg.py]
1 import re
2
3 from .common import InfoExtractor
4
5
6 class BloombergIE(InfoExtractor):
7 _VALID_URL = r'https?://(?:www\.)?bloomberg\.com/(?:[^/]+/)*(?P<id>[^/?#]+)'
8
9 _TESTS = [{
10 'url': 'http://www.bloomberg.com/news/videos/b/aaeae121-5949-481e-a1ce-4562db6f5df2',
11 # The md5 checksum changes
12 'info_dict': {
13 'id': 'qurhIVlJSB6hzkVi229d8g',
14 'ext': 'flv',
15 'title': 'Shah\'s Presentation on Foreign-Exchange Strategies',
16 'description': 'md5:a8ba0302912d03d246979735c17d2761',
17 },
18 'params': {
19 'format': 'best[format_id^=hds]',
20 },
21 }, {
22 # video ID in BPlayer(...)
23 'url': 'http://www.bloomberg.com/features/2016-hello-world-new-zealand/',
24 'info_dict': {
25 'id': '938c7e72-3f25-4ddb-8b85-a9be731baa74',
26 'ext': 'flv',
27 'title': 'Meet the Real-Life Tech Wizards of Middle Earth',
28 'description': 'Hello World, Episode 1: New Zealand’s freaky AI babies, robot exoskeletons, and a virtual you.',
29 },
30 'params': {
31 'format': 'best[format_id^=hds]',
32 },
33 }, {
34 # data-bmmrid=
35 'url': 'https://www.bloomberg.com/politics/articles/2017-02-08/le-pen-aide-briefed-french-central-banker-on-plan-to-print-money',
36 'only_matching': True,
37 }, {
38 'url': 'http://www.bloomberg.com/news/articles/2015-11-12/five-strange-things-that-have-been-happening-in-financial-markets',
39 'only_matching': True,
40 }, {
41 'url': 'http://www.bloomberg.com/politics/videos/2015-11-25/karl-rove-on-jeb-bush-s-struggles-stopping-trump',
42 'only_matching': True,
43 }]
44
45 def _real_extract(self, url):
46 name = self._match_id(url)
47 webpage = self._download_webpage(url, name)
48 video_id = self._search_regex(
49 (r'["\']bmmrId["\']\s*:\s*(["\'])(?P<id>(?:(?!\1).)+)\1',
50 r'videoId\s*:\s*(["\'])(?P<id>(?:(?!\1).)+)\1',
51 r'data-bmmrid=(["\'])(?P<id>(?:(?!\1).)+)\1'),
52 webpage, 'id', group='id', default=None)
53 if not video_id:
54 bplayer_data = self._parse_json(self._search_regex(
55 r'BPlayer\(null,\s*({[^;]+})\);', webpage, 'id'), name)
56 video_id = bplayer_data['id']
57 title = re.sub(': Video$', '', self._og_search_title(webpage))
58
59 embed_info = self._download_json(
60 'http://www.bloomberg.com/api/embed?id=%s' % video_id, video_id)
61 formats = []
62 for stream in embed_info['streams']:
63 stream_url = stream.get('url')
64 if not stream_url:
65 continue
66 if stream['muxing_format'] == 'TS':
67 formats.extend(self._extract_m3u8_formats(
68 stream_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
69 else:
70 formats.extend(self._extract_f4m_formats(
71 stream_url, video_id, f4m_id='hds', fatal=False))
72 self._sort_formats(formats)
73
74 return {
75 'id': video_id,
76 'title': title,
77 'formats': formats,
78 'description': self._og_search_description(webpage),
79 'thumbnail': self._og_search_thumbnail(webpage),
80 }
81
[end of yt_dlp/extractor/bloomberg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/yt_dlp/extractor/bloomberg.py b/yt_dlp/extractor/bloomberg.py
--- a/yt_dlp/extractor/bloomberg.py
+++ b/yt_dlp/extractor/bloomberg.py
@@ -7,13 +7,11 @@
_VALID_URL = r'https?://(?:www\.)?bloomberg\.com/(?:[^/]+/)*(?P<id>[^/?#]+)'
_TESTS = [{
- 'url': 'http://www.bloomberg.com/news/videos/b/aaeae121-5949-481e-a1ce-4562db6f5df2',
- # The md5 checksum changes
+ 'url': 'https://www.bloomberg.com/news/videos/2021-09-14/apple-unveils-the-new-iphone-13-stock-doesn-t-move-much-video',
'info_dict': {
- 'id': 'qurhIVlJSB6hzkVi229d8g',
+ 'id': 'V8cFcYMxTHaMcEiiYVr39A',
'ext': 'flv',
- 'title': 'Shah\'s Presentation on Foreign-Exchange Strategies',
- 'description': 'md5:a8ba0302912d03d246979735c17d2761',
+ 'title': 'Apple Unveils the New IPhone 13, Stock Doesn\'t Move Much',
},
'params': {
'format': 'best[format_id^=hds]',
@@ -57,7 +55,7 @@
title = re.sub(': Video$', '', self._og_search_title(webpage))
embed_info = self._download_json(
- 'http://www.bloomberg.com/api/embed?id=%s' % video_id, video_id)
+ 'http://www.bloomberg.com/multimedia/api/embed?id=%s' % video_id, video_id)
formats = []
for stream in embed_info['streams']:
stream_url = stream.get('url')
|
{"golden_diff": "diff --git a/yt_dlp/extractor/bloomberg.py b/yt_dlp/extractor/bloomberg.py\n--- a/yt_dlp/extractor/bloomberg.py\n+++ b/yt_dlp/extractor/bloomberg.py\n@@ -7,13 +7,11 @@\n _VALID_URL = r'https?://(?:www\\.)?bloomberg\\.com/(?:[^/]+/)*(?P<id>[^/?#]+)'\n \n _TESTS = [{\n- 'url': 'http://www.bloomberg.com/news/videos/b/aaeae121-5949-481e-a1ce-4562db6f5df2',\n- # The md5 checksum changes\n+ 'url': 'https://www.bloomberg.com/news/videos/2021-09-14/apple-unveils-the-new-iphone-13-stock-doesn-t-move-much-video',\n 'info_dict': {\n- 'id': 'qurhIVlJSB6hzkVi229d8g',\n+ 'id': 'V8cFcYMxTHaMcEiiYVr39A',\n 'ext': 'flv',\n- 'title': 'Shah\\'s Presentation on Foreign-Exchange Strategies',\n- 'description': 'md5:a8ba0302912d03d246979735c17d2761',\n+ 'title': 'Apple Unveils the New IPhone 13, Stock Doesn\\'t Move Much',\n },\n 'params': {\n 'format': 'best[format_id^=hds]',\n@@ -57,7 +55,7 @@\n title = re.sub(': Video$', '', self._og_search_title(webpage))\n \n embed_info = self._download_json(\n- 'http://www.bloomberg.com/api/embed?id=%s' % video_id, video_id)\n+ 'http://www.bloomberg.com/multimedia/api/embed?id=%s' % video_id, video_id)\n formats = []\n for stream in embed_info['streams']:\n stream_url = stream.get('url')\n", "issue": "[Bloomberg] Extracter does not work.\n### Checklist\n\n- [X] I'm reporting a broken site\n- [X] I've verified that I'm running yt-dlp version **2022.05.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\n- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. 
DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nUSA\n\n### Description\n\nBloomberg extractor complains about being unable to extract ID, same for any video.\r\nI used \"https://www.bloomberg.com/news/videos/2022-05-19/bloomberg-studio-1-0-roblox-ceo-david-baszucki-video\" as an example.\n\n### Verbose log\n\n```shell\n[debug] Command-line config: ['-vU', 'https://www.bloomberg.com/news/videos/2022-05-19/bloomberg-studio-1-0-roblox-ceo-david-baszucki-video']\r\n[debug] Encodings: locale cp1252, fs utf-8, pref cp1252, out utf-8, error utf-8, screen utf-8\r\n[debug] Lazy loading extractors is disabled\r\n[debug] Python version 3.10.4 (CPython 64bit) - Windows-10-10.0.19044-SP0\r\n[debug] Checking exe version: ffprobe -bsfs\r\n[debug] Checking exe version: ffmpeg -bsfs\r\n[debug] exe versions: ffmpeg N-106452-g7fbf1dd47e-20220329 (setts), ffprobe N-106452-g7fbf1dd47e-20220329\r\n[debug] Optional libraries: Cryptodome-3.14.1, brotli-1.0.9, certifi-2022.05.18, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3\r\n[debug] Proxy map: {}\r\nLatest version: 2022.05.18, Current version: 2022.05.18\r\nyt-dlp is up to date (2022.05.18)\r\n[debug] [Bloomberg] Extracting URL: https://www.bloomberg.com/news/videos/2022-05-19/bloomberg-studio-1-0-roblox-ceo-david-baszucki-video\r\n[Bloomberg] bloomberg-studio-1-0-roblox-ceo-david-baszucki-video: Downloading webpage\r\nERROR: [Bloomberg] bloomberg-studio-1-0-roblox-ceo-david-baszucki-video: Unable to extract id; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. 
Confirm you are on the latest version using yt-dlp -U\r\n File \"C:\\Users\\mystu\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\yt_dlp\\extractor\\common.py\", line 642, in extract\r\n ie_result = self._real_extract(url)\r\n File \"C:\\Users\\mystu\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\yt_dlp\\extractor\\bloomberg.py\", line 54, in _real_extract\r\n bplayer_data = self._parse_json(self._search_regex(\r\n File \"C:\\Users\\mystu\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\yt_dlp\\extractor\\common.py\", line 1226, in _search_regex\r\n raise RegexNotFoundError('Unable to extract %s' % _name)\n```\n\n", "before_files": [{"content": "import re\n\nfrom .common import InfoExtractor\n\n\nclass BloombergIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?bloomberg\\.com/(?:[^/]+/)*(?P<id>[^/?#]+)'\n\n _TESTS = [{\n 'url': 'http://www.bloomberg.com/news/videos/b/aaeae121-5949-481e-a1ce-4562db6f5df2',\n # The md5 checksum changes\n 'info_dict': {\n 'id': 'qurhIVlJSB6hzkVi229d8g',\n 'ext': 'flv',\n 'title': 'Shah\\'s Presentation on Foreign-Exchange Strategies',\n 'description': 'md5:a8ba0302912d03d246979735c17d2761',\n },\n 'params': {\n 'format': 'best[format_id^=hds]',\n },\n }, {\n # video ID in BPlayer(...)\n 'url': 'http://www.bloomberg.com/features/2016-hello-world-new-zealand/',\n 'info_dict': {\n 'id': '938c7e72-3f25-4ddb-8b85-a9be731baa74',\n 'ext': 'flv',\n 'title': 'Meet the Real-Life Tech Wizards of Middle Earth',\n 'description': 'Hello World, Episode 1: New Zealand\u2019s freaky AI babies, robot exoskeletons, and a virtual you.',\n },\n 'params': {\n 'format': 'best[format_id^=hds]',\n },\n }, {\n # data-bmmrid=\n 'url': 'https://www.bloomberg.com/politics/articles/2017-02-08/le-pen-aide-briefed-french-central-banker-on-plan-to-print-money',\n 'only_matching': True,\n }, {\n 'url': 'http://www.bloomberg.com/news/articles/2015-11-12/five-strange-things-that-have-been-happening-in-financial-markets',\n 'only_matching': True,\n }, {\n 'url': 'http://www.bloomberg.com/politics/videos/2015-11-25/karl-rove-on-jeb-bush-s-struggles-stopping-trump',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n name = self._match_id(url)\n webpage = self._download_webpage(url, name)\n video_id = self._search_regex(\n (r'[\"\\']bmmrId[\"\\']\\s*:\\s*([\"\\'])(?P<id>(?:(?!\\1).)+)\\1',\n r'videoId\\s*:\\s*([\"\\'])(?P<id>(?:(?!\\1).)+)\\1',\n r'data-bmmrid=([\"\\'])(?P<id>(?:(?!\\1).)+)\\1'),\n webpage, 'id', group='id', default=None)\n if not video_id:\n bplayer_data = self._parse_json(self._search_regex(\n r'BPlayer\\(null,\\s*({[^;]+})\\);', webpage, 'id'), name)\n video_id = bplayer_data['id']\n title = re.sub(': Video$', '', self._og_search_title(webpage))\n\n embed_info = self._download_json(\n 'http://www.bloomberg.com/api/embed?id=%s' % video_id, video_id)\n formats = []\n for stream in embed_info['streams']:\n stream_url = stream.get('url')\n if not stream_url:\n continue\n if stream['muxing_format'] == 'TS':\n formats.extend(self._extract_m3u8_formats(\n stream_url, video_id, 'mp4', m3u8_id='hls', fatal=False))\n else:\n formats.extend(self._extract_f4m_formats(\n stream_url, video_id, f4m_id='hds', fatal=False))\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': title,\n 'formats': formats,\n 'description': self._og_search_description(webpage),\n 'thumbnail': self._og_search_thumbnail(webpage),\n }\n", "path": "yt_dlp/extractor/bloomberg.py"}]}
| 2,761 | 481 |
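The traceback in the row above shows `BloombergIE._real_extract` falling through every inline id pattern and then failing on the legacy `BPlayer(null, {...})` fallback. That lookup order can be reproduced outside yt-dlp with only the standard library; the page fragments below are hypothetical stand-ins for illustration, not markup captured from bloomberg.com.

```python
import re

# Patterns copied verbatim from BloombergIE._real_extract above.
ID_PATTERNS = [
    r'["\']bmmrId["\']\s*:\s*(["\'])(?P<id>(?:(?!\1).)+)\1',
    r'videoId\s*:\s*(["\'])(?P<id>(?:(?!\1).)+)\1',
    r'data-bmmrid=(["\'])(?P<id>(?:(?!\1).)+)\1',
]
BPLAYER_PATTERN = r'BPlayer\(null,\s*({[^;]+})\);'  # last-resort fallback


def find_video_id(webpage):
    """Mirror the extractor's id lookup without the yt-dlp plumbing."""
    for pattern in ID_PATTERNS:
        match = re.search(pattern, webpage)
        if match:
            return match.group('id')
    match = re.search(BPLAYER_PATTERN, webpage)
    if match:
        return match.group(1)  # the extractor JSON-decodes this and reads ['id']
    return None  # at this point the real extractor raises RegexNotFoundError


# Hypothetical page fragments, for illustration only.
print(find_video_id('"bmmrId":"qurhIVlJSB6hzkVi229d8g"'))  # qurhIVlJSB6hzkVi229d8g
print(find_video_id("videoId: 'abc123'"))                   # abc123
print(find_video_id('<div class="player"></div>'))          # None, as in the bug report
```

The verbose log shows the webpage itself downloading fine, so the failure appears to be simply that none of these markers exist in the current page markup, which is why the report ends in "Unable to extract id" rather than a download error.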
gh_patches_debug_36476
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-3578
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DockerLatentWorker: pull specified image if it is not yet on the system
This is merely an enhancement, but it would be nice if the specified image is pulled from Docker Hub if it is not yet on the host system.
This should be pretty easy to accomplish using docker-py, but I don't know if this is a desired feature.
</issue>
<code>
[start of master/buildbot/worker/docker.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 from __future__ import absolute_import
17 from __future__ import division
18 from __future__ import print_function
19
20 import hashlib
21 import json
22 import socket
23 from io import BytesIO
24
25 from twisted.internet import defer
26 from twisted.internet import threads
27 from twisted.python import log
28
29 from buildbot import config
30 from buildbot.interfaces import LatentWorkerFailedToSubstantiate
31 from buildbot.util import unicode2bytes
32 from buildbot.worker import AbstractLatentWorker
33
34 try:
35 import docker
36 from docker import client
37 from docker.errors import NotFound
38 _hush_pyflakes = [docker, client]
39 except ImportError:
40 docker = None
41 client = None
42
43
44 def _handle_stream_line(line):
45 """\
46 Input is the json representation of: {'stream': "Content\ncontent"}
47 Output is a generator yield "Content", and then "content"
48 """
49 # XXX This necessary processing is probably a bug from docker-py,
50 # hence, might break if the bug is fixed, i.e. we should get decoded JSON
51 # directly from the API.
52 line = json.loads(line)
53 if 'error' in line:
54 content = "ERROR: " + line['error']
55 else:
56 content = line.get('stream', '')
57 for streamline in content.split('\n'):
58 if streamline:
59 yield streamline
60
61
62 class DockerBaseWorker(AbstractLatentWorker):
63
64 def checkConfig(self, name, password=None, image=None,
65 masterFQDN=None, **kwargs):
66
67 # Set build_wait_timeout to 0 if not explicitly set: Starting a
68 # container is almost immediate, we can afford doing so for each build.
69 if 'build_wait_timeout' not in kwargs:
70 kwargs['build_wait_timeout'] = 0
71 if image is not None and not isinstance(image, str):
72 if not hasattr(image, 'getRenderingFor'):
73 config.error("image must be a string")
74
75 AbstractLatentWorker.checkConfig(self, name, password, **kwargs)
76
77 def reconfigService(self, name, password=None, image=None,
78 masterFQDN=None, **kwargs):
79 # Set build_wait_timeout to 0 if not explicitly set: Starting a
80 # container is almost immediate, we can afford doing so for each build.
81 if 'build_wait_timeout' not in kwargs:
82 kwargs['build_wait_timeout'] = 0
83 if password is None:
84 password = self.getRandomPass()
85 if masterFQDN is None:
86 masterFQDN = socket.getfqdn()
87 self.masterFQDN = masterFQDN
88 self.image = image
89 masterName = unicode2bytes(self.master.name)
90 self.masterhash = hashlib.sha1(masterName).hexdigest()[:6]
91 return AbstractLatentWorker.reconfigService(self, name, password, **kwargs)
92
93 def getContainerName(self):
94 return ('%s-%s' % ('buildbot' + self.masterhash, self.workername)).replace("_", "-")
95
96 @property
97 def shortid(self):
98 if self.instance is None:
99 return None
100 return self.instance['Id'][:6]
101
102 def createEnvironment(self):
103 result = {
104 "BUILDMASTER": self.masterFQDN,
105 "WORKERNAME": self.name,
106 "WORKERPASS": self.password
107 }
108 if self.registration is not None:
109 result["BUILDMASTER_PORT"] = str(self.registration.getPBPort())
110 if ":" in self.masterFQDN:
111 result["BUILDMASTER"], result["BUILDMASTER_PORT"] = self.masterFQDN.split(":")
112 return result
113
114
115 class DockerLatentWorker(DockerBaseWorker):
116 instance = None
117
118 def checkConfig(self, name, password, docker_host, image=None, command=None,
119 volumes=None, dockerfile=None, version=None, tls=None, followStartupLogs=False,
120 masterFQDN=None, hostconfig=None, **kwargs):
121
122 DockerBaseWorker.checkConfig(self, name, password, image, masterFQDN, **kwargs)
123
124 if not client:
125 config.error("The python module 'docker-py>=1.4' is needed to use a"
126 " DockerLatentWorker")
127 if not image and not dockerfile:
128 config.error("DockerLatentWorker: You need to specify at least"
129 " an image name, or a dockerfile")
130
131 # Following block is only for checking config errors,
132 # actual parsing happens in self.parse_volumes()
133 # Renderables can be direct volumes definition or list member
134 if isinstance(volumes, list):
135 for volume_string in (volumes or []):
136 if not isinstance(volume_string, str):
137 continue
138 try:
139 bind, volume = volume_string.split(":", 1)
140 except ValueError:
141 config.error("Invalid volume definition for docker "
142 "%s. Skipping..." % volume_string)
143 continue
144
145 @defer.inlineCallbacks
146 def reconfigService(self, name, password, docker_host, image=None, command=None,
147 volumes=None, dockerfile=None, version=None, tls=None, followStartupLogs=False,
148 masterFQDN=None, hostconfig=None, **kwargs):
149
150 yield DockerBaseWorker.reconfigService(self, name, password, image, masterFQDN, **kwargs)
151 self.volumes = volumes or []
152 self.followStartupLogs = followStartupLogs
153
154 self.command = command or []
155 self.dockerfile = dockerfile
156 self.hostconfig = hostconfig or {}
157 # Prepare the parameters for the Docker Client object.
158 self.client_args = {'base_url': docker_host}
159 if version is not None:
160 self.client_args['version'] = version
161 if tls is not None:
162 self.client_args['tls'] = tls
163
164 def _thd_parse_volumes(self, volumes):
165 volume_list = []
166 binds = {}
167 for volume_string in (volumes or []):
168 try:
169 bind, volume = volume_string.split(":", 1)
170 except ValueError:
171 config.error("Invalid volume definition for docker "
172 "%s. Skipping..." % volume_string)
173 continue
174
175 ro = False
176 if volume.endswith(':ro') or volume.endswith(':rw'):
177 ro = volume[-2:] == 'ro'
178 volume = volume[:-3]
179
180 volume_list.append(volume)
181 binds[bind] = {'bind': volume, 'ro': ro}
182 return volume_list, binds
183
184 def _getDockerClient(self):
185 if docker.version[0] == '1':
186 docker_client = client.Client(**self.client_args)
187 else:
188 docker_client = client.APIClient(**self.client_args)
189 return docker_client
190
191 @defer.inlineCallbacks
192 def start_instance(self, build):
193 if self.instance is not None:
194 raise ValueError('instance active')
195 image = yield build.render(self.image)
196 dockerfile = yield build.render(self.dockerfile)
197 volumes = yield build.render(self.volumes)
198 res = yield threads.deferToThread(self._thd_start_instance, image, dockerfile, volumes)
199 defer.returnValue(res)
200
201 def _image_exists(self, client, name):
202 # Make sure the image exists
203 for image in client.images():
204 for tag in image['RepoTags'] or []:
205 if ':' in name and tag == name:
206 return True
207 if tag.startswith(name + ':'):
208 return True
209 return False
210
211 def _thd_start_instance(self, image, dockerfile, volumes):
212 docker_client = self._getDockerClient()
213 # cleanup the old instances
214 instances = docker_client.containers(
215 all=1,
216 filters=dict(name=self.getContainerName()))
217 for instance in instances:
218 try:
219 docker_client.remove_container(instance['Id'], v=True, force=True)
220 except NotFound:
221 pass # that's a race condition
222
223 found = False
224 if image is not None:
225 found = self._image_exists(docker_client, image)
226 else:
227 image = '%s_%s_image' % (self.workername, id(self))
228 if (not found) and (dockerfile is not None):
229 log.msg("Image '%s' not found, building it from scratch" %
230 image)
231 for line in docker_client.build(fileobj=BytesIO(dockerfile.encode('utf-8')),
232 tag=image):
233 for streamline in _handle_stream_line(line):
234 log.msg(streamline)
235
236 if (not self._image_exists(docker_client, image)):
237 log.msg("Image '%s' not found" % image)
238 raise LatentWorkerFailedToSubstantiate(
239 'Image "%s" not found on docker host.' % image
240 )
241
242 volumes, binds = self._thd_parse_volumes(volumes)
243 host_conf = self.hostconfig.copy()
244 host_conf['binds'] = binds
245 host_conf = docker_client.create_host_config(**host_conf)
246
247 instance = docker_client.create_container(
248 image,
249 self.command,
250 name=self.getContainerName(),
251 volumes=volumes,
252 environment=self.createEnvironment(),
253 host_config=host_conf
254 )
255
256 if instance.get('Id') is None:
257 log.msg('Failed to create the container')
258 raise LatentWorkerFailedToSubstantiate(
259 'Failed to start container'
260 )
261 shortid = instance['Id'][:6]
262 log.msg('Container created, Id: %s...' % (shortid,))
263 instance['image'] = image
264 self.instance = instance
265 docker_client.start(instance)
266 log.msg('Container started')
267 if self.followStartupLogs:
268 logs = docker_client.attach(
269 container=instance, stdout=True, stderr=True, stream=True)
270 for line in logs:
271 log.msg("docker VM %s: %s" % (shortid, line.strip()))
272 if self.conn:
273 break
274 del logs
275 return [instance['Id'], image]
276
277 def stop_instance(self, fast=False):
278 if self.instance is None:
279 # be gentle. Something may just be trying to alert us that an
280 # instance never attached, and it's because, somehow, we never
281 # started.
282 return defer.succeed(None)
283 instance = self.instance
284 self.instance = None
285 return threads.deferToThread(self._thd_stop_instance, instance, fast)
286
287 def _thd_stop_instance(self, instance, fast):
288 docker_client = self._getDockerClient()
289 log.msg('Stopping container %s...' % instance['Id'][:6])
290 docker_client.stop(instance['Id'])
291 if not fast:
292 docker_client.wait(instance['Id'])
293 docker_client.remove_container(instance['Id'], v=True, force=True)
294 if self.image is None:
295 try:
296 docker_client.remove_image(image=instance['image'])
297 except docker.errors.APIError as e:
298 log.msg('Error while removing the image: %s', e)
299
[end of master/buildbot/worker/docker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/master/buildbot/worker/docker.py b/master/buildbot/worker/docker.py
--- a/master/buildbot/worker/docker.py
+++ b/master/buildbot/worker/docker.py
@@ -117,7 +117,7 @@
def checkConfig(self, name, password, docker_host, image=None, command=None,
volumes=None, dockerfile=None, version=None, tls=None, followStartupLogs=False,
- masterFQDN=None, hostconfig=None, **kwargs):
+ masterFQDN=None, hostconfig=None, autopull=False, **kwargs):
DockerBaseWorker.checkConfig(self, name, password, image, masterFQDN, **kwargs)
@@ -145,7 +145,7 @@
@defer.inlineCallbacks
def reconfigService(self, name, password, docker_host, image=None, command=None,
volumes=None, dockerfile=None, version=None, tls=None, followStartupLogs=False,
- masterFQDN=None, hostconfig=None, **kwargs):
+ masterFQDN=None, hostconfig=None, autopull=False, **kwargs):
yield DockerBaseWorker.reconfigService(self, name, password, image, masterFQDN, **kwargs)
self.volumes = volumes or []
@@ -154,6 +154,7 @@
self.command = command or []
self.dockerfile = dockerfile
self.hostconfig = hostconfig or {}
+ self.autopull = autopull
# Prepare the parameters for the Docker Client object.
self.client_args = {'base_url': docker_host}
if version is not None:
@@ -233,6 +234,11 @@
for streamline in _handle_stream_line(line):
log.msg(streamline)
+ if ((not self._image_exists(docker_client, image))) and self.autopull:
+ log.msg("Image '%s' not found, pulling from registry" %
+ image)
+ docker_client.pull(image)
+
if (not self._image_exists(docker_client, image)):
log.msg("Image '%s' not found" % image)
raise LatentWorkerFailedToSubstantiate(
|
{"golden_diff": "diff --git a/master/buildbot/worker/docker.py b/master/buildbot/worker/docker.py\n--- a/master/buildbot/worker/docker.py\n+++ b/master/buildbot/worker/docker.py\n@@ -117,7 +117,7 @@\n \n def checkConfig(self, name, password, docker_host, image=None, command=None,\n volumes=None, dockerfile=None, version=None, tls=None, followStartupLogs=False,\n- masterFQDN=None, hostconfig=None, **kwargs):\n+ masterFQDN=None, hostconfig=None, autopull=False, **kwargs):\n \n DockerBaseWorker.checkConfig(self, name, password, image, masterFQDN, **kwargs)\n \n@@ -145,7 +145,7 @@\n @defer.inlineCallbacks\n def reconfigService(self, name, password, docker_host, image=None, command=None,\n volumes=None, dockerfile=None, version=None, tls=None, followStartupLogs=False,\n- masterFQDN=None, hostconfig=None, **kwargs):\n+ masterFQDN=None, hostconfig=None, autopull=False, **kwargs):\n \n yield DockerBaseWorker.reconfigService(self, name, password, image, masterFQDN, **kwargs)\n self.volumes = volumes or []\n@@ -154,6 +154,7 @@\n self.command = command or []\n self.dockerfile = dockerfile\n self.hostconfig = hostconfig or {}\n+ self.autopull = autopull\n # Prepare the parameters for the Docker Client object.\n self.client_args = {'base_url': docker_host}\n if version is not None:\n@@ -233,6 +234,11 @@\n for streamline in _handle_stream_line(line):\n log.msg(streamline)\n \n+ if ((not self._image_exists(docker_client, image))) and self.autopull:\n+ log.msg(\"Image '%s' not found, pulling from registry\" %\n+ image)\n+ docker_client.pull(image)\n+\n if (not self._image_exists(docker_client, image)):\n log.msg(\"Image '%s' not found\" % image)\n raise LatentWorkerFailedToSubstantiate(\n", "issue": "DockerLatentWorker: pull specified image if it is not yet on the system\nThis is merely an enhancement, but it would be nice if the specified image is pulled from dockerhub if it is not on yet on the host system. \r\nThis should be pretty easy to accomplish using docker-py, but I don't know if this a desired feature.\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport hashlib\nimport json\nimport socket\nfrom io import BytesIO\n\nfrom twisted.internet import defer\nfrom twisted.internet import threads\nfrom twisted.python import log\n\nfrom buildbot import config\nfrom buildbot.interfaces import LatentWorkerFailedToSubstantiate\nfrom buildbot.util import unicode2bytes\nfrom buildbot.worker import AbstractLatentWorker\n\ntry:\n import docker\n from docker import client\n from docker.errors import NotFound\n _hush_pyflakes = [docker, client]\nexcept ImportError:\n docker = None\n client = None\n\n\ndef _handle_stream_line(line):\n \"\"\"\\\n Input is the json representation of: {'stream': \"Content\\ncontent\"}\n Output is a generator yield \"Content\", and then \"content\"\n \"\"\"\n # XXX This necessary processing is probably a bug from docker-py,\n # hence, might break if the bug is fixed, i.e. we should get decoded JSON\n # directly from the API.\n line = json.loads(line)\n if 'error' in line:\n content = \"ERROR: \" + line['error']\n else:\n content = line.get('stream', '')\n for streamline in content.split('\\n'):\n if streamline:\n yield streamline\n\n\nclass DockerBaseWorker(AbstractLatentWorker):\n\n def checkConfig(self, name, password=None, image=None,\n masterFQDN=None, **kwargs):\n\n # Set build_wait_timeout to 0 if not explicitly set: Starting a\n # container is almost immediate, we can afford doing so for each build.\n if 'build_wait_timeout' not in kwargs:\n kwargs['build_wait_timeout'] = 0\n if image is not None and not isinstance(image, str):\n if not hasattr(image, 'getRenderingFor'):\n config.error(\"image must be a string\")\n\n AbstractLatentWorker.checkConfig(self, name, password, **kwargs)\n\n def reconfigService(self, name, password=None, image=None,\n masterFQDN=None, **kwargs):\n # Set build_wait_timeout to 0 if not explicitly set: Starting a\n # container is almost immediate, we can afford doing so for each build.\n if 'build_wait_timeout' not in kwargs:\n kwargs['build_wait_timeout'] = 0\n if password is None:\n password = self.getRandomPass()\n if masterFQDN is None:\n masterFQDN = socket.getfqdn()\n self.masterFQDN = masterFQDN\n self.image = image\n masterName = unicode2bytes(self.master.name)\n self.masterhash = hashlib.sha1(masterName).hexdigest()[:6]\n return AbstractLatentWorker.reconfigService(self, name, password, **kwargs)\n\n def getContainerName(self):\n return ('%s-%s' % ('buildbot' + self.masterhash, self.workername)).replace(\"_\", \"-\")\n\n @property\n def shortid(self):\n if self.instance is None:\n return None\n return self.instance['Id'][:6]\n\n def createEnvironment(self):\n result = {\n \"BUILDMASTER\": self.masterFQDN,\n \"WORKERNAME\": self.name,\n \"WORKERPASS\": self.password\n }\n if self.registration is not None:\n result[\"BUILDMASTER_PORT\"] = str(self.registration.getPBPort())\n if \":\" in self.masterFQDN:\n result[\"BUILDMASTER\"], result[\"BUILDMASTER_PORT\"] = self.masterFQDN.split(\":\")\n return result\n\n\nclass DockerLatentWorker(DockerBaseWorker):\n instance = None\n\n def checkConfig(self, name, password, docker_host, image=None, command=None,\n volumes=None, 
dockerfile=None, version=None, tls=None, followStartupLogs=False,\n masterFQDN=None, hostconfig=None, **kwargs):\n\n DockerBaseWorker.checkConfig(self, name, password, image, masterFQDN, **kwargs)\n\n if not client:\n config.error(\"The python module 'docker-py>=1.4' is needed to use a\"\n \" DockerLatentWorker\")\n if not image and not dockerfile:\n config.error(\"DockerLatentWorker: You need to specify at least\"\n \" an image name, or a dockerfile\")\n\n # Following block is only for checking config errors,\n # actual parsing happens in self.parse_volumes()\n # Renderables can be direct volumes definition or list member\n if isinstance(volumes, list):\n for volume_string in (volumes or []):\n if not isinstance(volume_string, str):\n continue\n try:\n bind, volume = volume_string.split(\":\", 1)\n except ValueError:\n config.error(\"Invalid volume definition for docker \"\n \"%s. Skipping...\" % volume_string)\n continue\n\n @defer.inlineCallbacks\n def reconfigService(self, name, password, docker_host, image=None, command=None,\n volumes=None, dockerfile=None, version=None, tls=None, followStartupLogs=False,\n masterFQDN=None, hostconfig=None, **kwargs):\n\n yield DockerBaseWorker.reconfigService(self, name, password, image, masterFQDN, **kwargs)\n self.volumes = volumes or []\n self.followStartupLogs = followStartupLogs\n\n self.command = command or []\n self.dockerfile = dockerfile\n self.hostconfig = hostconfig or {}\n # Prepare the parameters for the Docker Client object.\n self.client_args = {'base_url': docker_host}\n if version is not None:\n self.client_args['version'] = version\n if tls is not None:\n self.client_args['tls'] = tls\n\n def _thd_parse_volumes(self, volumes):\n volume_list = []\n binds = {}\n for volume_string in (volumes or []):\n try:\n bind, volume = volume_string.split(\":\", 1)\n except ValueError:\n config.error(\"Invalid volume definition for docker \"\n \"%s. 
Skipping...\" % volume_string)\n continue\n\n ro = False\n if volume.endswith(':ro') or volume.endswith(':rw'):\n ro = volume[-2:] == 'ro'\n volume = volume[:-3]\n\n volume_list.append(volume)\n binds[bind] = {'bind': volume, 'ro': ro}\n return volume_list, binds\n\n def _getDockerClient(self):\n if docker.version[0] == '1':\n docker_client = client.Client(**self.client_args)\n else:\n docker_client = client.APIClient(**self.client_args)\n return docker_client\n\n @defer.inlineCallbacks\n def start_instance(self, build):\n if self.instance is not None:\n raise ValueError('instance active')\n image = yield build.render(self.image)\n dockerfile = yield build.render(self.dockerfile)\n volumes = yield build.render(self.volumes)\n res = yield threads.deferToThread(self._thd_start_instance, image, dockerfile, volumes)\n defer.returnValue(res)\n\n def _image_exists(self, client, name):\n # Make sure the image exists\n for image in client.images():\n for tag in image['RepoTags'] or []:\n if ':' in name and tag == name:\n return True\n if tag.startswith(name + ':'):\n return True\n return False\n\n def _thd_start_instance(self, image, dockerfile, volumes):\n docker_client = self._getDockerClient()\n # cleanup the old instances\n instances = docker_client.containers(\n all=1,\n filters=dict(name=self.getContainerName()))\n for instance in instances:\n try:\n docker_client.remove_container(instance['Id'], v=True, force=True)\n except NotFound:\n pass # that's a race condition\n\n found = False\n if image is not None:\n found = self._image_exists(docker_client, image)\n else:\n image = '%s_%s_image' % (self.workername, id(self))\n if (not found) and (dockerfile is not None):\n log.msg(\"Image '%s' not found, building it from scratch\" %\n image)\n for line in docker_client.build(fileobj=BytesIO(dockerfile.encode('utf-8')),\n tag=image):\n for streamline in _handle_stream_line(line):\n log.msg(streamline)\n\n if (not self._image_exists(docker_client, image)):\n log.msg(\"Image '%s' not found\" % image)\n raise LatentWorkerFailedToSubstantiate(\n 'Image \"%s\" not found on docker host.' % image\n )\n\n volumes, binds = self._thd_parse_volumes(volumes)\n host_conf = self.hostconfig.copy()\n host_conf['binds'] = binds\n host_conf = docker_client.create_host_config(**host_conf)\n\n instance = docker_client.create_container(\n image,\n self.command,\n name=self.getContainerName(),\n volumes=volumes,\n environment=self.createEnvironment(),\n host_config=host_conf\n )\n\n if instance.get('Id') is None:\n log.msg('Failed to create the container')\n raise LatentWorkerFailedToSubstantiate(\n 'Failed to start container'\n )\n shortid = instance['Id'][:6]\n log.msg('Container created, Id: %s...' % (shortid,))\n instance['image'] = image\n self.instance = instance\n docker_client.start(instance)\n log.msg('Container started')\n if self.followStartupLogs:\n logs = docker_client.attach(\n container=instance, stdout=True, stderr=True, stream=True)\n for line in logs:\n log.msg(\"docker VM %s: %s\" % (shortid, line.strip()))\n if self.conn:\n break\n del logs\n return [instance['Id'], image]\n\n def stop_instance(self, fast=False):\n if self.instance is None:\n # be gentle. 
Something may just be trying to alert us that an\n # instance never attached, and it's because, somehow, we never\n # started.\n return defer.succeed(None)\n instance = self.instance\n self.instance = None\n return threads.deferToThread(self._thd_stop_instance, instance, fast)\n\n def _thd_stop_instance(self, instance, fast):\n docker_client = self._getDockerClient()\n log.msg('Stopping container %s...' % instance['Id'][:6])\n docker_client.stop(instance['Id'])\n if not fast:\n docker_client.wait(instance['Id'])\n docker_client.remove_container(instance['Id'], v=True, force=True)\n if self.image is None:\n try:\n docker_client.remove_image(image=instance['image'])\n except docker.errors.APIError as e:\n log.msg('Error while removing the image: %s', e)\n", "path": "master/buildbot/worker/docker.py"}]}
| 3,897 | 475 |
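The merged change reduces to one decision: if the image is still absent after the optional Dockerfile build and `autopull` was enabled, ask the daemon to pull it before giving up. A standalone sketch of that decision, reusing the worker's `RepoTags` check and assuming a docker-py client whose `images()` and `pull()` behave as the worker code and the patch already use them:

```python
import logging

log = logging.getLogger(__name__)


def image_exists(docker_client, name):
    # Same RepoTags check as DockerLatentWorker._image_exists().
    for image in docker_client.images():
        for tag in image.get('RepoTags') or []:
            if ':' in name and tag == name:
                return True
            if tag.startswith(name + ':'):
                return True
    return False


def ensure_image(docker_client, image, autopull=False):
    """Pull `image` only when it is absent and pulling was opted into."""
    if image_exists(docker_client, image):
        return True
    if autopull:
        log.info("Image '%s' not found, pulling from registry", image)
        docker_client.pull(image)  # the call the golden diff adds
        return image_exists(docker_client, image)
    return False
```

Because the worker re-runs `_image_exists` after the pull, a failed or misspelled pull still ends in `LatentWorkerFailedToSubstantiate` rather than an obscure container-creation error; with the patch applied, a master.cfg would presumably opt in via `DockerLatentWorker(..., autopull=True)`.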
gh_patches_debug_24389
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-4189
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'."
**Version**
3.16, but probably all versions
**Describe the bug**
- Reclaim space for repository fails with the following error.
~~~
Task paused with error: "("Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})"
~~~
**To Reproduce**
See BZ
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2169322
</issue>
<code>
[start of pulpcore/app/tasks/reclaim_space.py]
1 from pulpcore.app.models import (
2 Artifact,
3 Content,
4 ContentArtifact,
5 ProgressReport,
6 PublishedMetadata,
7 Repository,
8 RepositoryVersion,
9 )
10 from pulpcore.app.util import get_domain
11
12
13 def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
14 """
15 This task frees-up disk space by removing Artifact files from the filesystem for Content
16 exclusive to the list of provided repos.
17
18 Note: content marked as `proctected` will be excluded from the reclaim disk space.
19
20 Kwargs:
21 repo_pks (list): A list of repo pks the disk reclaim space is performed on.
22 keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim
23 disk space.
24 force (bool): If True, uploaded content will be taken into account.
25
26 """
27 reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)
28 for repo in reclaimed_repos:
29 repo.invalidate_cache(everything=True)
30
31 domain = get_domain()
32 rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks)
33 c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)
34 c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)
35 c_reclaim_qs = c_reclaim_qs.exclude(
36 pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()
37 )
38
39 if keeplist_rv_pks:
40 rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)
41 rv_content = Content.objects.none()
42 for rv in rv_qs.iterator():
43 rv_content |= rv.content
44 c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)
45
46 content_distinct = c_reclaim_qs.distinct("pulp_type")
47 unprotected = []
48 for content in content_distinct:
49 if not content.cast().PROTECTED_FROM_RECLAIM:
50 unprotected.append(content.pulp_type)
51
52 ca_qs = ContentArtifact.objects.select_related("content", "artifact").filter(
53 content__in=c_reclaim_qs.values("pk"), artifact__isnull=False
54 )
55 if not force:
56 ca_qs = ca_qs.filter(remoteartifact__isnull=False)
57 artifact_pks = set()
58 ca_to_update = []
59 for ca in ca_qs.iterator():
60 if ca.content.pulp_type in unprotected:
61 artifact_pks.add(ca.artifact.pk)
62 ca.artifact = None
63 ca_to_update.append(ca)
64
65 ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=["artifact"], batch_size=1000)
66 artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)
67 progress_bar = ProgressReport(
68 message="Reclaim disk space",
69 total=artifacts_to_delete.count(),
70 code="reclaim-space.artifact",
71 done=0,
72 state="running",
73 )
74 progress_bar.save()
75
76 counter = 0
77 interval = 100
78 for artifact in artifacts_to_delete.iterator():
79 # we need to manually call delete() because it cleans up the file on the filesystem
80 artifact.delete()
81 progress_bar.done += 1
82 counter += 1
83
84 if counter >= interval:
85 progress_bar.save()
86 counter = 0
87
88 progress_bar.state = "completed"
89 progress_bar.save()
90
[end of pulpcore/app/tasks/reclaim_space.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py
--- a/pulpcore/app/tasks/reclaim_space.py
+++ b/pulpcore/app/tasks/reclaim_space.py
@@ -1,3 +1,7 @@
+from logging import getLogger
+
+from django.db.models.deletion import ProtectedError
+
from pulpcore.app.models import (
Artifact,
Content,
@@ -9,6 +13,8 @@
)
from pulpcore.app.util import get_domain
+log = getLogger(__name__)
+
def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
"""
@@ -76,10 +82,16 @@
counter = 0
interval = 100
for artifact in artifacts_to_delete.iterator():
- # we need to manually call delete() because it cleans up the file on the filesystem
- artifact.delete()
- progress_bar.done += 1
- counter += 1
+ try:
+ # we need to manually call delete() because it cleans up the file on the filesystem
+ artifact.delete()
+ except ProtectedError as e:
+ # Rarely artifact could be shared between to different content units.
+ # Just log and skip the artifact deletion in this case
+ log.info(e)
+ else:
+ progress_bar.done += 1
+ counter += 1
if counter >= interval:
progress_bar.save()
|
{"golden_diff": "diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py\n--- a/pulpcore/app/tasks/reclaim_space.py\n+++ b/pulpcore/app/tasks/reclaim_space.py\n@@ -1,3 +1,7 @@\n+from logging import getLogger\n+\n+from django.db.models.deletion import ProtectedError\n+\n from pulpcore.app.models import (\n Artifact,\n Content,\n@@ -9,6 +13,8 @@\n )\n from pulpcore.app.util import get_domain\n \n+log = getLogger(__name__)\n+\n \n def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n@@ -76,10 +82,16 @@\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n- # we need to manually call delete() because it cleans up the file on the filesystem\n- artifact.delete()\n- progress_bar.done += 1\n- counter += 1\n+ try:\n+ # we need to manually call delete() because it cleans up the file on the filesystem\n+ artifact.delete()\n+ except ProtectedError as e:\n+ # Rarely artifact could be shared between to different content units.\n+ # Just log and skip the artifact deletion in this case\n+ log.info(e)\n+ else:\n+ progress_bar.done += 1\n+ counter += 1\n \n if counter >= interval:\n progress_bar.save()\n", "issue": "Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.\"\n**Version**\r\n3.16, but probably all versions\r\n\r\n**Describe the bug**\r\n\r\n- Reclaim space for repository fails with the following error.\r\n\r\n ~~~\r\n Task paused with error: \"(\"Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.\", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})\"\r\n ~~~\r\n\r\n**To Reproduce**\r\nSee BZ\r\n\r\n**Additional context**\r\nhttps://bugzilla.redhat.com/show_bug.cgi?id=2169322\r\n\n", "before_files": [{"content": "from pulpcore.app.models import (\n Artifact,\n Content,\n ContentArtifact,\n ProgressReport,\n PublishedMetadata,\n Repository,\n RepositoryVersion,\n)\nfrom pulpcore.app.util import get_domain\n\n\ndef reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n This task frees-up disk space by removing Artifact files from the filesystem for Content\n exclusive to the list of provided repos.\n\n Note: content marked as `proctected` will be excluded from the reclaim disk space.\n\n Kwargs:\n repo_pks (list): A list of repo pks the disk reclaim space is performed on.\n keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim\n disk space.\n force (bool): If True, uploaded content will be taken into account.\n\n \"\"\"\n reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)\n for repo in reclaimed_repos:\n repo.invalidate_cache(everything=True)\n\n domain = get_domain()\n rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks)\n c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)\n c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)\n c_reclaim_qs = c_reclaim_qs.exclude(\n pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()\n )\n\n if keeplist_rv_pks:\n rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)\n rv_content = Content.objects.none()\n for rv in rv_qs.iterator():\n rv_content |= rv.content\n c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)\n\n content_distinct = c_reclaim_qs.distinct(\"pulp_type\")\n unprotected = []\n for content in 
content_distinct:\n if not content.cast().PROTECTED_FROM_RECLAIM:\n unprotected.append(content.pulp_type)\n\n ca_qs = ContentArtifact.objects.select_related(\"content\", \"artifact\").filter(\n content__in=c_reclaim_qs.values(\"pk\"), artifact__isnull=False\n )\n if not force:\n ca_qs = ca_qs.filter(remoteartifact__isnull=False)\n artifact_pks = set()\n ca_to_update = []\n for ca in ca_qs.iterator():\n if ca.content.pulp_type in unprotected:\n artifact_pks.add(ca.artifact.pk)\n ca.artifact = None\n ca_to_update.append(ca)\n\n ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=[\"artifact\"], batch_size=1000)\n artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)\n progress_bar = ProgressReport(\n message=\"Reclaim disk space\",\n total=artifacts_to_delete.count(),\n code=\"reclaim-space.artifact\",\n done=0,\n state=\"running\",\n )\n progress_bar.save()\n\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n # we need to manually call delete() because it cleans up the file on the filesystem\n artifact.delete()\n progress_bar.done += 1\n counter += 1\n\n if counter >= interval:\n progress_bar.save()\n counter = 0\n\n progress_bar.state = \"completed\"\n progress_bar.save()\n", "path": "pulpcore/app/tasks/reclaim_space.py"}]}
| 1,627 | 322 |
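The fix narrows error handling to the single statement that can raise: `artifact.delete()` is wrapped so that an artifact still referenced by another content unit is logged and skipped instead of pausing the whole task. The pattern in isolation, with a hypothetical queryset and progress object standing in for Pulp's models:

```python
import logging

from django.db.models.deletion import ProtectedError

log = logging.getLogger(__name__)


def delete_unreferenced(artifacts_to_delete, progress_bar):
    """Delete artifacts one by one, tolerating the rare still-protected row."""
    for artifact in artifacts_to_delete.iterator():
        try:
            # delete() is called per instance because it also removes the
            # file from storage, not just the database row.
            artifact.delete()
        except ProtectedError as exc:
            # Another ContentArtifact still points at this artifact; skip it
            # rather than failing the whole reclaim task.
            log.info(exc)
        else:
            progress_bar.done += 1
    progress_bar.save()
```

Only successful deletions advance the progress report, which matches the `else:` branch introduced in the golden diff.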
gh_patches_debug_42390
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-433
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Aggregate reward points redemptions in export per user
When exporting the redemptions of an event, all of them are listed in the Excel export. The redemptions of one user should be aggregated into one line.
Also the number of redemptions on the staff page should be replaced by the number of users who redeemed points for an event.
So when a user redeems 1 point and then 2 points for the same event, the export should only state that 3 points have been redeemed in total.
</issue>
<code>
[start of evap/rewards/exporters.py]
1 from evap.rewards.models import RewardPointRedemption, RewardPointRedemptionEvent
2
3 from django.utils.translation import ugettext as _
4
5 from operator import attrgetter
6
7 from collections import OrderedDict
8 from collections import defaultdict
9 import datetime
10 import xlwt
11
12 from operator import itemgetter
13
14 from evap.results.exporters import writen, writec
15
16
17 class ExcelExporter(object):
18
19 def __init__(self, reward_point_redemptions):
20 self.reward_point_redemptions = reward_point_redemptions
21
22 styles = {
23 'default': xlwt.Style.default_style,
24 'bold': xlwt.easyxf('font: bold on'),
25 }
26
27 def export(self, response):
28 redemptions = self.reward_point_redemptions
29 redemptions = sorted(redemptions, key=attrgetter('user_profile.user.last_name', 'user_profile.user.first_name'))
30
31 self.workbook = xlwt.Workbook()
32 self.sheet = self.workbook.add_sheet(_(u"Redemptions"))
33 self.row = 0
34 self.col = 0
35
36 writec(self, _("Last name"), "bold")
37 writec(self, _("First name"), "bold")
38 writec(self, _("Email address"), "bold")
39 writec(self, _("Number of points"), "bold")
40
41 for redemption in redemptions:
42 user = redemption.user_profile.user
43 writen(self, user.last_name, "default")
44 writec(self, user.first_name, "default")
45 writec(self, user.email, "default")
46 writec(self, redemption.value, "default")
47
48 self.workbook.save(response)
49
[end of evap/rewards/exporters.py]
[start of evap/rewards/views.py]
1 from django.contrib import messages
2 from django.db import transaction
3 from django.shortcuts import get_object_or_404, redirect, render_to_response
4 from django.template import RequestContext
5 from django.utils.translation import ugettext as _
6 from django.utils.translation import get_language
7 from django.http import HttpResponse
8 from datetime import datetime
9 from operator import attrgetter
10
11 from evap.evaluation.auth import reward_user_required, fsr_required
12 from evap.evaluation.models import Semester, Course
13
14 from evap.fsr.views import semester_view
15
16 from evap.rewards.models import RewardPointGranting, RewardPointRedemption, RewardPointRedemptionEvent, SemesterActivation
17 from evap.rewards.tools import save_redemptions, reward_points_of_user, can_user_use_reward_points
18 from evap.rewards.forms import RewardPointRedemptionEventForm
19 from evap.rewards.exporters import ExcelExporter
20
21 @reward_user_required
22 def index(request):
23 if request.method == 'POST':
24 redemptions = {}
25 for key, value in request.POST.iteritems():
26 if(key.startswith('points-')):
27 event_id = int(key.rpartition('-')[2])
28 redemptions[event_id] = int(value)
29
30 if save_redemptions(request, redemptions):
31 messages.success(request, _("You successfully redeemed your points."))
32 else:
33 messages.warning(request, _("You don't have enough reward points."))
34
35 total_points_available = reward_points_of_user(request.user.userprofile)
36 reward_point_grantings = RewardPointGranting.objects.filter(user_profile=request.user.userprofile)
37 reward_point_redemptions = RewardPointRedemption.objects.filter(user_profile=request.user.userprofile)
38 events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now())
39 events = sorted(events, key=lambda event: event.date)
40
41 reward_point_actions=[]
42 for granting in reward_point_grantings:
43 reward_point_actions.append((granting.granting_time, _('Reward for') + ' ' + granting.semester.name, granting.value, ''))
44 for redemption in reward_point_redemptions:
45 reward_point_actions.append((redemption.redemption_time, redemption.event.name, '', redemption.value))
46
47 reward_point_actions.sort(key=lambda action: action[0], reverse=True)
48
49 return render_to_response(
50 "rewards_index.html",
51 dict(
52 reward_point_actions=reward_point_actions,
53 total_points_available=total_points_available,
54 events=events,
55 point_selection=[x for x in range(0,total_points_available+1)]
56 ),
57 context_instance=RequestContext(request))
58
59
60 @fsr_required
61 def semester_reward_points(request, semester_id):
62 semester = get_object_or_404(Semester, id=semester_id)
63 courses = Course.objects.filter(semester=semester)
64 participants = set()
65 for course in courses:
66 for participant in course.participants.all():
67 if can_user_use_reward_points(participant.userprofile):
68 participants.add(participant)
69 participants = sorted(participants, key=attrgetter('last_name', 'first_name'))
70
71 data = []
72 for participant in participants:
73 number_of_courses = Course.objects.filter(semester=semester, participants=participant).count()
74 number_of_courses_voted_for = Course.objects.filter(semester=semester, voters=participant).count()
75 earned_reward_points = RewardPointGranting.objects.filter(semester=semester, user_profile=participant.userprofile).exists()
76 data.append((participant, number_of_courses_voted_for, number_of_courses, earned_reward_points))
77
78 return render_to_response("rewards_semester_reward_points_view.html", dict(semester=semester, data=data, disable_breadcrumb_semester=False), context_instance=RequestContext(request))
79
80
81 @fsr_required
82 def reward_point_redemption_events(request):
83 upcoming_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by('date')
84 past_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__lt=datetime.now()).order_by('-date')
85 return render_to_response("rewards_reward_point_redemption_events.html", dict(upcoming_events=upcoming_events, past_events=past_events), context_instance=RequestContext(request))
86
87
88 @fsr_required
89 def reward_point_redemption_event_create(request):
90 event = RewardPointRedemptionEvent()
91 form = RewardPointRedemptionEventForm(request.POST or None, instance=event)
92
93 if form.is_valid():
94 form.save()
95 messages.success(request, _("Successfully created event."))
96 return redirect('evap.rewards.views.reward_point_redemption_events')
97 else:
98 return render_to_response("rewards_reward_point_redemption_event_form.html", dict(form=form), context_instance=RequestContext(request))
99
100
101 @fsr_required
102 def reward_point_redemption_event_edit(request, event_id):
103 event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)
104 form = RewardPointRedemptionEventForm(request.POST or None, instance=event)
105
106 if form.is_valid():
107 event = form.save()
108
109 messages.success(request, _("Successfully updated event."))
110 return redirect('evap.rewards.views.reward_point_redemption_events')
111 else:
112 return render_to_response("rewards_reward_point_redemption_event_form.html", dict(event=event, form=form), context_instance=RequestContext(request))
113
114
115 @fsr_required
116 def reward_point_redemption_event_delete(request, event_id):
117 event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)
118
119 if event.can_delete:
120 if request.method == 'POST':
121 event.delete()
122 return redirect('evap.rewards.views.reward_point_redemption_events')
123 else:
124 return render_to_response("rewards_reward_point_redemption_event_delete.html", dict(event=event), context_instance=RequestContext(request))
125 else:
126 messages.warning(request, _("This event cannot be deleted because some users already redeemed points for it."))
127 return redirect('evap.rewards.views.reward_point_redemption_events')
128
129
130 @fsr_required
131 def reward_point_redemption_event_export(request, event_id):
132 event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)
133
134 filename = _("RewardPoints")+"-%s-%s-%s.xls" % (event.date, event.name, get_language())
135
136 response = HttpResponse(content_type="application/vnd.ms-excel")
137 response["Content-Disposition"] = "attachment; filename=\"%s\"" % filename
138
139 ExcelExporter(event.reward_point_redemptions.all()).export(response)
140
141 return response
142
143
144 @fsr_required
145 def semester_activation(request, semester_id, active):
146 if active == 'on':
147 active = True
148 else:
149 active = False
150
151 try:
152 activation = SemesterActivation.objects.filter(semester=Semester.objects.get(id=semester_id)).get()
153 activation.is_active = active
154 except SemesterActivation.DoesNotExist:
155 activation = SemesterActivation(semester=Semester.objects.get(id=semester_id), is_active=active)
156 activation.save()
157
158 return semester_view(request=request, semester_id=semester_id)
159
[end of evap/rewards/views.py]
[start of evap/rewards/models.py]
1 from django.utils.translation import ugettext_lazy as _
2 from django.db import models
3
4 class RewardPointRedemptionEvent(models.Model):
5 name = models.CharField(max_length=1024, verbose_name=_(u"event name"))
6 date = models.DateField(verbose_name=_(u"event date"))
7 redeem_end_date = models.DateField(verbose_name=_(u"redemption end date"))
8
9 @property
10 def can_delete(self):
11 if RewardPointRedemption.objects.filter(event=self).exists():
12 return False
13 return True
14
15 class RewardPointGranting(models.Model):
16 user_profile = models.ForeignKey('evaluation.UserProfile', related_name="reward_point_grantings")
17 semester = models.ForeignKey('evaluation.Semester', related_name="reward_point_grantings", blank=True, null=True)
18 granting_time = models.DateTimeField(verbose_name=_(u"granting time"), auto_now_add=True)
19 value = models.IntegerField(verbose_name=_(u"value"), default=0)
20
21 class RewardPointRedemption(models.Model):
22 user_profile = models.ForeignKey('evaluation.UserProfile', related_name="reward_point_redemptions")
23 redemption_time = models.DateTimeField(verbose_name=_(u"redemption time"), auto_now_add=True)
24 value = models.IntegerField(verbose_name=_(u"value"), default=0)
25 event = models.ForeignKey(RewardPointRedemptionEvent, related_name="reward_point_redemptions")
26
27 class SemesterActivation(models.Model):
28 semester = models.ForeignKey('evaluation.Semester', related_name='rewards_active', unique=True)
29 is_active = models.BooleanField(default=False)
30
[end of evap/rewards/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/evap/rewards/exporters.py b/evap/rewards/exporters.py
--- a/evap/rewards/exporters.py
+++ b/evap/rewards/exporters.py
@@ -2,8 +2,6 @@
from django.utils.translation import ugettext as _
-from operator import attrgetter
-
from collections import OrderedDict
from collections import defaultdict
import datetime
@@ -16,8 +14,8 @@
class ExcelExporter(object):
- def __init__(self, reward_point_redemptions):
- self.reward_point_redemptions = reward_point_redemptions
+ def __init__(self, redemptions_by_user):
+ self.redemptions_by_user = redemptions_by_user
styles = {
'default': xlwt.Style.default_style,
@@ -25,9 +23,8 @@
}
def export(self, response):
- redemptions = self.reward_point_redemptions
- redemptions = sorted(redemptions, key=attrgetter('user_profile.user.last_name', 'user_profile.user.first_name'))
-
+ redemptions_dict = self.redemptions_by_user
+
self.workbook = xlwt.Workbook()
self.sheet = self.workbook.add_sheet(_(u"Redemptions"))
self.row = 0
@@ -38,11 +35,10 @@
writec(self, _("Email address"), "bold")
writec(self, _("Number of points"), "bold")
- for redemption in redemptions:
- user = redemption.user_profile.user
- writen(self, user.last_name, "default")
- writec(self, user.first_name, "default")
- writec(self, user.email, "default")
- writec(self, redemption.value, "default")
+ for user_profile, value in redemptions_dict.items():
+ writen(self, user_profile.user.last_name, "default")
+ writec(self, user_profile.user.first_name, "default")
+ writec(self, user_profile.user.email, "default")
+ writec(self, value, "default")
self.workbook.save(response)
diff --git a/evap/rewards/models.py b/evap/rewards/models.py
--- a/evap/rewards/models.py
+++ b/evap/rewards/models.py
@@ -1,6 +1,10 @@
from django.utils.translation import ugettext_lazy as _
from django.db import models
+from collections import OrderedDict
+
+from operator import attrgetter
+
class RewardPointRedemptionEvent(models.Model):
name = models.CharField(max_length=1024, verbose_name=_(u"event name"))
date = models.DateField(verbose_name=_(u"event date"))
@@ -11,6 +15,15 @@
if RewardPointRedemption.objects.filter(event=self).exists():
return False
return True
+
+ def redemptions_by_user(self):
+ redemptions = self.reward_point_redemptions.order_by('user_profile__user__last_name', 'user_profile__user__first_name')
+ redemptions_dict = OrderedDict()
+ for redemption in redemptions:
+ if not redemption.user_profile in redemptions_dict:
+ redemptions_dict[redemption.user_profile] = 0
+ redemptions_dict[redemption.user_profile] += redemption.value
+ return redemptions_dict
class RewardPointGranting(models.Model):
user_profile = models.ForeignKey('evaluation.UserProfile', related_name="reward_point_grantings")
diff --git a/evap/rewards/views.py b/evap/rewards/views.py
--- a/evap/rewards/views.py
+++ b/evap/rewards/views.py
@@ -136,7 +136,7 @@
response = HttpResponse(content_type="application/vnd.ms-excel")
response["Content-Disposition"] = "attachment; filename=\"%s\"" % filename
- ExcelExporter(event.reward_point_redemptions.all()).export(response)
+ ExcelExporter(event.redemptions_by_user()).export(response)
return response
|
{"golden_diff": "diff --git a/evap/rewards/exporters.py b/evap/rewards/exporters.py\n--- a/evap/rewards/exporters.py\n+++ b/evap/rewards/exporters.py\n@@ -2,8 +2,6 @@\n \n from django.utils.translation import ugettext as _\n \n-from operator import attrgetter\n-\n from collections import OrderedDict\n from collections import defaultdict\n import datetime\n@@ -16,8 +14,8 @@\n \n class ExcelExporter(object):\n \n- def __init__(self, reward_point_redemptions):\n- self.reward_point_redemptions = reward_point_redemptions\n+ def __init__(self, redemptions_by_user):\n+ self.redemptions_by_user = redemptions_by_user\n \n styles = {\n 'default': xlwt.Style.default_style,\n@@ -25,9 +23,8 @@\n }\n \n def export(self, response):\n- redemptions = self.reward_point_redemptions\n- redemptions = sorted(redemptions, key=attrgetter('user_profile.user.last_name', 'user_profile.user.first_name'))\n-\n+ redemptions_dict = self.redemptions_by_user\n+ \n self.workbook = xlwt.Workbook()\n self.sheet = self.workbook.add_sheet(_(u\"Redemptions\"))\n self.row = 0\n@@ -38,11 +35,10 @@\n writec(self, _(\"Email address\"), \"bold\")\n writec(self, _(\"Number of points\"), \"bold\")\n \n- for redemption in redemptions:\n- user = redemption.user_profile.user\n- writen(self, user.last_name, \"default\")\n- writec(self, user.first_name, \"default\")\n- writec(self, user.email, \"default\")\n- writec(self, redemption.value, \"default\")\n+ for user_profile, value in redemptions_dict.items():\n+ writen(self, user_profile.user.last_name, \"default\")\n+ writec(self, user_profile.user.first_name, \"default\")\n+ writec(self, user_profile.user.email, \"default\")\n+ writec(self, value, \"default\")\n \n self.workbook.save(response)\ndiff --git a/evap/rewards/models.py b/evap/rewards/models.py\n--- a/evap/rewards/models.py\n+++ b/evap/rewards/models.py\n@@ -1,6 +1,10 @@\n from django.utils.translation import ugettext_lazy as _\n from django.db import models\n \n+from collections import OrderedDict\n+\n+from operator import attrgetter\n+\n class RewardPointRedemptionEvent(models.Model):\n name = models.CharField(max_length=1024, verbose_name=_(u\"event name\"))\n date = models.DateField(verbose_name=_(u\"event date\"))\n@@ -11,6 +15,15 @@\n if RewardPointRedemption.objects.filter(event=self).exists():\n return False\n return True\n+ \n+ def redemptions_by_user(self):\n+ redemptions = self.reward_point_redemptions.order_by('user_profile__user__last_name', 'user_profile__user__first_name')\n+ redemptions_dict = OrderedDict()\n+ for redemption in redemptions:\n+ if not redemption.user_profile in redemptions_dict:\n+ redemptions_dict[redemption.user_profile] = 0\n+ redemptions_dict[redemption.user_profile] += redemption.value\n+ return redemptions_dict\n \n class RewardPointGranting(models.Model):\n user_profile = models.ForeignKey('evaluation.UserProfile', related_name=\"reward_point_grantings\")\ndiff --git a/evap/rewards/views.py b/evap/rewards/views.py\n--- a/evap/rewards/views.py\n+++ b/evap/rewards/views.py\n@@ -136,7 +136,7 @@\n response = HttpResponse(content_type=\"application/vnd.ms-excel\")\n response[\"Content-Disposition\"] = \"attachment; filename=\\\"%s\\\"\" % filename\n \n- ExcelExporter(event.reward_point_redemptions.all()).export(response)\n+ ExcelExporter(event.redemptions_by_user()).export(response)\n \n return response\n", "issue": "Aggregate reward points redemptions in export per user\nWhen exporting the redemptions of an event, all of them are listed in the Excel export. 
The redemptions of one user should be aggregated into one line.\nAlso the number of redemptions on the staff page should be replaced by the number of users who redeemed points for an event.\n\nSo when a user redeems 1 point and then 2 points for the same event, the export should only state that 3 points have been redeemed in total.\n\n", "before_files": [{"content": "from evap.rewards.models import RewardPointRedemption, RewardPointRedemptionEvent\n\nfrom django.utils.translation import ugettext as _\n\nfrom operator import attrgetter\n\nfrom collections import OrderedDict\nfrom collections import defaultdict\nimport datetime\nimport xlwt\n\nfrom operator import itemgetter\n\nfrom evap.results.exporters import writen, writec\n\n\nclass ExcelExporter(object):\n\n def __init__(self, reward_point_redemptions):\n self.reward_point_redemptions = reward_point_redemptions\n\n styles = {\n 'default': xlwt.Style.default_style,\n 'bold': xlwt.easyxf('font: bold on'),\n }\n\n def export(self, response):\n redemptions = self.reward_point_redemptions\n redemptions = sorted(redemptions, key=attrgetter('user_profile.user.last_name', 'user_profile.user.first_name'))\n\n self.workbook = xlwt.Workbook()\n self.sheet = self.workbook.add_sheet(_(u\"Redemptions\"))\n self.row = 0\n self.col = 0\n\n writec(self, _(\"Last name\"), \"bold\")\n writec(self, _(\"First name\"), \"bold\")\n writec(self, _(\"Email address\"), \"bold\")\n writec(self, _(\"Number of points\"), \"bold\")\n\n for redemption in redemptions:\n user = redemption.user_profile.user\n writen(self, user.last_name, \"default\")\n writec(self, user.first_name, \"default\")\n writec(self, user.email, \"default\")\n writec(self, redemption.value, \"default\")\n\n self.workbook.save(response)\n", "path": "evap/rewards/exporters.py"}, {"content": "from django.contrib import messages\nfrom django.db import transaction\nfrom django.shortcuts import get_object_or_404, redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.translation import ugettext as _\nfrom django.utils.translation import get_language\nfrom django.http import HttpResponse\nfrom datetime import datetime\nfrom operator import attrgetter\n\nfrom evap.evaluation.auth import reward_user_required, fsr_required\nfrom evap.evaluation.models import Semester, Course\n\nfrom evap.fsr.views import semester_view\n\nfrom evap.rewards.models import RewardPointGranting, RewardPointRedemption, RewardPointRedemptionEvent, SemesterActivation\nfrom evap.rewards.tools import save_redemptions, reward_points_of_user, can_user_use_reward_points\nfrom evap.rewards.forms import RewardPointRedemptionEventForm\nfrom evap.rewards.exporters import ExcelExporter\n\n@reward_user_required\ndef index(request):\n if request.method == 'POST':\n redemptions = {}\n for key, value in request.POST.iteritems():\n if(key.startswith('points-')):\n event_id = int(key.rpartition('-')[2])\n redemptions[event_id] = int(value)\n \n if save_redemptions(request, redemptions):\n messages.success(request, _(\"You successfully redeemed your points.\"))\n else:\n messages.warning(request, _(\"You don't have enough reward points.\")) \n\n total_points_available = reward_points_of_user(request.user.userprofile)\n reward_point_grantings = RewardPointGranting.objects.filter(user_profile=request.user.userprofile)\n reward_point_redemptions = RewardPointRedemption.objects.filter(user_profile=request.user.userprofile)\n events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now())\n 
events = sorted(events, key=lambda event: event.date)\n\n reward_point_actions=[]\n for granting in reward_point_grantings:\n reward_point_actions.append((granting.granting_time, _('Reward for') + ' ' + granting.semester.name, granting.value, ''))\n for redemption in reward_point_redemptions:\n reward_point_actions.append((redemption.redemption_time, redemption.event.name, '', redemption.value))\n\n reward_point_actions.sort(key=lambda action: action[0], reverse=True)\n\n return render_to_response(\n \"rewards_index.html\",\n dict(\n reward_point_actions=reward_point_actions,\n total_points_available=total_points_available,\n events=events,\n point_selection=[x for x in range(0,total_points_available+1)]\n ),\n context_instance=RequestContext(request))\n\n\n@fsr_required\ndef semester_reward_points(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n courses = Course.objects.filter(semester=semester)\n participants = set()\n for course in courses:\n for participant in course.participants.all():\n if can_user_use_reward_points(participant.userprofile):\n participants.add(participant)\n participants = sorted(participants, key=attrgetter('last_name', 'first_name'))\n\n data = []\n for participant in participants:\n number_of_courses = Course.objects.filter(semester=semester, participants=participant).count()\n number_of_courses_voted_for = Course.objects.filter(semester=semester, voters=participant).count()\n earned_reward_points = RewardPointGranting.objects.filter(semester=semester, user_profile=participant.userprofile).exists()\n data.append((participant, number_of_courses_voted_for, number_of_courses, earned_reward_points))\n\n return render_to_response(\"rewards_semester_reward_points_view.html\", dict(semester=semester, data=data, disable_breadcrumb_semester=False), context_instance=RequestContext(request))\n\n\n@fsr_required\ndef reward_point_redemption_events(request):\n upcoming_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by('date')\n past_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__lt=datetime.now()).order_by('-date')\n return render_to_response(\"rewards_reward_point_redemption_events.html\", dict(upcoming_events=upcoming_events, past_events=past_events), context_instance=RequestContext(request))\n\n\n@fsr_required\ndef reward_point_redemption_event_create(request):\n event = RewardPointRedemptionEvent()\n form = RewardPointRedemptionEventForm(request.POST or None, instance=event)\n\n if form.is_valid():\n form.save()\n messages.success(request, _(\"Successfully created event.\"))\n return redirect('evap.rewards.views.reward_point_redemption_events')\n else:\n return render_to_response(\"rewards_reward_point_redemption_event_form.html\", dict(form=form), context_instance=RequestContext(request))\n\n\n@fsr_required\ndef reward_point_redemption_event_edit(request, event_id):\n event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)\n form = RewardPointRedemptionEventForm(request.POST or None, instance=event)\n\n if form.is_valid():\n event = form.save()\n\n messages.success(request, _(\"Successfully updated event.\"))\n return redirect('evap.rewards.views.reward_point_redemption_events')\n else:\n return render_to_response(\"rewards_reward_point_redemption_event_form.html\", dict(event=event, form=form), context_instance=RequestContext(request))\n\n\n@fsr_required\ndef reward_point_redemption_event_delete(request, event_id):\n event = 
get_object_or_404(RewardPointRedemptionEvent, id=event_id)\n\n if event.can_delete:\n if request.method == 'POST':\n event.delete()\n return redirect('evap.rewards.views.reward_point_redemption_events')\n else:\n return render_to_response(\"rewards_reward_point_redemption_event_delete.html\", dict(event=event), context_instance=RequestContext(request))\n else:\n messages.warning(request, _(\"This event cannot be deleted because some users already redeemed points for it.\"))\n return redirect('evap.rewards.views.reward_point_redemption_events')\n\n\n@fsr_required\ndef reward_point_redemption_event_export(request, event_id):\n event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)\n\n filename = _(\"RewardPoints\")+\"-%s-%s-%s.xls\" % (event.date, event.name, get_language())\n\n response = HttpResponse(content_type=\"application/vnd.ms-excel\")\n response[\"Content-Disposition\"] = \"attachment; filename=\\\"%s\\\"\" % filename\n\n ExcelExporter(event.reward_point_redemptions.all()).export(response)\n\n return response\n\n\n@fsr_required\ndef semester_activation(request, semester_id, active):\n if active == 'on':\n active = True\n else:\n active = False\n\n try:\n activation = SemesterActivation.objects.filter(semester=Semester.objects.get(id=semester_id)).get()\n activation.is_active = active\n except SemesterActivation.DoesNotExist:\n activation = SemesterActivation(semester=Semester.objects.get(id=semester_id), is_active=active)\n activation.save()\n\n return semester_view(request=request, semester_id=semester_id)\n", "path": "evap/rewards/views.py"}, {"content": "from django.utils.translation import ugettext_lazy as _\nfrom django.db import models\n\nclass RewardPointRedemptionEvent(models.Model):\n name = models.CharField(max_length=1024, verbose_name=_(u\"event name\"))\n date = models.DateField(verbose_name=_(u\"event date\"))\n redeem_end_date = models.DateField(verbose_name=_(u\"redemption end date\"))\n\n @property\n def can_delete(self):\n if RewardPointRedemption.objects.filter(event=self).exists():\n return False\n return True\n\nclass RewardPointGranting(models.Model):\n user_profile = models.ForeignKey('evaluation.UserProfile', related_name=\"reward_point_grantings\")\n semester = models.ForeignKey('evaluation.Semester', related_name=\"reward_point_grantings\", blank=True, null=True)\n granting_time = models.DateTimeField(verbose_name=_(u\"granting time\"), auto_now_add=True)\n value = models.IntegerField(verbose_name=_(u\"value\"), default=0)\n\nclass RewardPointRedemption(models.Model):\n user_profile = models.ForeignKey('evaluation.UserProfile', related_name=\"reward_point_redemptions\")\n redemption_time = models.DateTimeField(verbose_name=_(u\"redemption time\"), auto_now_add=True)\n value = models.IntegerField(verbose_name=_(u\"value\"), default=0)\n event = models.ForeignKey(RewardPointRedemptionEvent, related_name=\"reward_point_redemptions\")\n\nclass SemesterActivation(models.Model):\n semester = models.ForeignKey('evaluation.Semester', related_name='rewards_active', unique=True)\n is_active = models.BooleanField(default=False)\n", "path": "evap/rewards/models.py"}]}
| 3,374 | 899 |
gh_patches_debug_37435
|
rasdani/github-patches
|
git_diff
|
google__turbinia-1315
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
turbinia-client status summary formatting warning
Running turbinia-client status summary might yield the following warning for some requests:
```WARNING turbinia - Error formatting the Markdown report: unsupported format string passed to NoneType.__format__```
</issue>
<code>
[start of turbinia/api/cli/turbinia_client/helpers/formatter.py]
1 #!/usr/bin/env python
2 #
3 # Copyright 2017 Google Inc.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 """Methods for formatting text."""
17
18 from __future__ import annotations
19
20 from abc import ABC, abstractmethod
21 from click import echo as click_echo
22
23 import logging
24 import json
25
26 log = logging.getLogger('turbinia')
27
28
29 def echo_json(json_data: dict) -> None:
30 """Pretty print JSON data."""
31 if isinstance(json_data, dict):
32 click_echo(json.dumps(json_data, indent=2))
33
34
35 class MarkdownReportComponent(ABC):
36 """Components for generating Turbinia request/task
37 markdown reports.
38 """
39
40 def __init__(self):
41 """Instantiates a MarkdownReportComponent object."""
42 self._components: list(MarkdownReportComponent) = []
43 self._parent: MarkdownReportComponent = None
44 self._report: str = None
45
46 @property
47 def components(self):
48 """Returns the components list."""
49 return self._components
50
51 @property
52 def report(self):
53 """Returns the markdown report text."""
54 return self._report
55
56 @report.setter
57 def report(self, report):
58 self._report = report
59
60 @property
61 def parent(self) -> MarkdownReportComponent:
62 """Returns the parent object."""
63 return self._parent
64
65 @parent.setter
66 def parent(self, parent: MarkdownReportComponent):
67 self._parent = parent
68
69 def bold(self, text):
70 """Formats text as bold in Markdown format.
71
72 Args:
73 text(string): Text to format
74
75 Return:
76 string: Formatted text.
77 """
78 return f'**{text.strip():s}**'
79
80 def heading1(self, text):
81 """Formats text as heading 1 in Markdown format.
82
83 Args:
84 text(string): Text to format
85
86 Return:
87 string: Formatted text.
88 """
89 return f'# {text.strip():s}'
90
91 def heading2(self, text):
92 """Formats text as heading 2 in Markdown format.
93
94 Args:
95 text(string): Text to format
96
97 Return:
98 string: Formatted text.
99 """
100 return f'## {text.strip():s}'
101
102 def heading3(self, text):
103 """Formats text as heading 3 in Markdown format.
104
105 Args:
106 text(string): Text to format
107
108 Return:
109 string: Formatted text.
110 """
111 return f'### {text.strip():s}'
112
113 def heading4(self, text):
114 """Formats text as heading 4 in Markdown format.
115
116 Args:
117 text(string): Text to format
118
119 Return:
120 string: Formatted text.
121 """
122 return f'#### {text.strip():s}'
123
124 def heading5(self, text):
125 """Formats text as heading 5 in Markdown format.
126 Args:
127 text(string): Text to format
128 Return:
129 string: Formatted text.
130 """
131 return f'##### {text.strip():s}'
132
133 def bullet(self, text, level=1):
134 """Formats text as a bullet in Markdown format.
135
136 Args:
137 text(string): Text to format
138 level(int): Indentation level.
139 Return:
140 string: Formatted text.
141 """
142 return f"{' ' * (level - 1):s}* {text.strip():s}"
143
144 def code(self, text):
145 """Formats text as code in Markdown format.
146
147 Args:
148 text(string): Text to format
149
150 Return:
151 string: Formatted text.
152 """
153 return f'`{text.strip():s}`'
154
155 def add(self, component: MarkdownReportComponent) -> None:
156 """Adds a MarkdownReportComponent object to the components list.
157
158 This method should additionally set the parent object.
159 """
160 pass
161
162 def add_components(self, components: list[MarkdownReportComponent]) -> None:
163 """Adds multiple MarkdownReportComponent objects to the components list."""
164 pass
165
166 def remove(self, component: MarkdownReportComponent) -> None:
167 """Removes a MarkdownReportComponent object from the components list.
168
169 This method should set the component's object to None.
170 """
171 pass
172
173 @abstractmethod
174 def generate_markdown(self) -> str:
175 pass
176
177
178 class TaskMarkdownReport(MarkdownReportComponent):
179 """Turbinia Task markdown report."""
180
181 def __init__(self, request_data: dict = None):
182 """Initialize TaskMarkdownReport"""
183 super().__init__()
184 self._request_data: dict = request_data
185
186 def generate_markdown(self) -> str:
187 """Generate a markdown report."""
188 report: list[str] = []
189 task: dict = self._request_data
190 if not task:
191 return ''
192
193 try:
194 report.append(self.heading2(task.get('name')))
195 line = f"{self.bold('Evidence:'):s} {task.get('evidence_name')!s}"
196 report.append(self.bullet(line))
197 line = f"{self.bold('Status:'):s} {task.get('status'):s}"
198 report.append(self.bullet(line))
199 report.append(self.bullet(f"Task Id: {task.get('id')!s}"))
200 report.append(
201 self.bullet(f"Executed on worker {task.get('worker_name')!s}"))
202 if task.get('report_data'):
203 report.append('')
204 report.append(self.heading3('Task Reported Data'))
205 report.extend(task.get('report_data').splitlines())
206 report.append('')
207 report.append(self.heading3('Saved Task Files:'))
208
209 saved_paths = task.get('saved_paths')
210 if saved_paths:
211 for path in saved_paths:
212 report.append(self.bullet(self.code(path)))
213 report.append('')
214 except TypeError as exception:
215 log.warning(f'Error formatting the Markdown report: {exception!s}')
216
217 self.report = '\n'.join(report)
218 return self.report
219
220
221 class RequestMarkdownReport(MarkdownReportComponent):
222 """Turbinia Request Markdown report."""
223
224 def __init__(self, request_data: dict):
225 """Initializes a RequestMarkdownReport object."""
226 super().__init__()
227 self._request_data: dict = request_data
228
229 tasks = [TaskMarkdownReport(task) for task in request_data.get('tasks')]
230 self.add_components(tasks)
231
232 def add(self, component: MarkdownReportComponent) -> None:
233 if component:
234 self.components.append(component)
235 component.parent = self
236
237 def remove(self, component: MarkdownReportComponent) -> None:
238 self.components.remove(component)
239 component.parent = None
240
241 def add_components(self, components: list[MarkdownReportComponent]) -> None:
242 if components:
243 for component in components:
244 self.components.append(component)
245 component.parent = self
246
247 def generate_markdown(self) -> str:
248 """Generates a Markdown version of Requests results."""
249 report: list[str] = []
250 request_dict: dict = self._request_data
251 if not request_dict:
252 return ''
253
254 try:
255 report.append(
256 self.heading2(f"Request ID: {request_dict.get('request_id')!s}"))
257 report.append(
258 self.bullet(
259 f"Last Update: {request_dict.get('last_task_update_time')!s}"))
260 report.append(
261 self.bullet(f"Requester: {request_dict.get('requester')!s}"))
262 report.append(self.bullet(f"Reason: {request_dict.get('reason')!s}"))
263 report.append(self.bullet(f"Status: {request_dict.get('status')!s}"))
264 report.append(
265 self.bullet(f"Failed tasks: {request_dict.get('failed_tasks'):d}"))
266 report.append(
267 self.bullet(f"Running tasks: {request_dict.get('running_tasks'):d}"))
268 report.append(
269 self.bullet(
270 f"Successful tasks: {request_dict.get('successful_tasks'):d}"))
271 report.append(
272 self.bullet(f"Task Count: {request_dict.get('task_count'):d}"))
273 report.append(
274 self.bullet(f"Queued tasks: {request_dict.get('queued_tasks'):d}"))
275 report.append(
276 self.bullet(f"Evidence Name: {request_dict.get('evidence_name'):d}"))
277 report.append('')
278 except TypeError as exception:
279 log.warning(f'Error formatting the Markdown report: {exception!s}')
280
281 for task in self.components:
282 report.append(task.generate_markdown())
283
284 self.report = '\n'.join(report)
285 return self.report
286
287
288 class SummaryMarkdownReport(MarkdownReportComponent):
289 """A markdown report summary of all Turbinia Requests."""
290
291 def __init__(self, requests_summary: list[dict]):
292 """Initialize SummaryMarkdownReport."""
293 super().__init__()
294 self._requests_summary = requests_summary
295
296 def generate_markdown(self) -> str:
297 """Generate a Markdown version of Requests summary results."""
298 report: list[str] = []
299 requests_status_list = None
300 if self._requests_summary:
301 requests_status_list = self._requests_summary.get('requests_status')
302
303 if not requests_status_list:
304 return '## No requests found.'
305
306 for request_dict in requests_status_list:
307 request_report = RequestMarkdownReport(request_dict).generate_markdown()
308 report.append(request_report)
309
310 self.report = '\n'.join(report)
311 return self.report
312
313
314 class WorkersMarkdownReport(MarkdownReportComponent):
315 """A markdown report of all tasks for a specific worker."""
316
317 def __init__(self, request_data: dict):
318 super().__init__()
319 self._request_data: dict = request_data
320
321 def generate_markdown(self) -> str:
322 """Generates a Markdown version of tasks per worker."""
323 raise NotImplementedError
324
[end of turbinia/api/cli/turbinia_client/helpers/formatter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/turbinia/api/cli/turbinia_client/helpers/formatter.py b/turbinia/api/cli/turbinia_client/helpers/formatter.py
--- a/turbinia/api/cli/turbinia_client/helpers/formatter.py
+++ b/turbinia/api/cli/turbinia_client/helpers/formatter.py
@@ -194,7 +194,7 @@
report.append(self.heading2(task.get('name')))
line = f"{self.bold('Evidence:'):s} {task.get('evidence_name')!s}"
report.append(self.bullet(line))
- line = f"{self.bold('Status:'):s} {task.get('status'):s}"
+ line = f"{self.bold('Status:'):s} {task.get('status')!s}"
report.append(self.bullet(line))
report.append(self.bullet(f"Task Id: {task.get('id')!s}"))
report.append(
@@ -253,27 +253,26 @@
try:
report.append(
- self.heading2(f"Request ID: {request_dict.get('request_id')!s}"))
+ self.heading2(f"Request ID: {request_dict.get('request_id')}"))
report.append(
self.bullet(
- f"Last Update: {request_dict.get('last_task_update_time')!s}"))
+ f"Last Update: {request_dict.get('last_task_update_time')}"))
+ report.append(self.bullet(f"Requester: {request_dict.get('requester')}"))
+ report.append(self.bullet(f"Reason: {request_dict.get('reason')}"))
+ report.append(self.bullet(f"Status: {request_dict.get('status')}"))
report.append(
- self.bullet(f"Requester: {request_dict.get('requester')!s}"))
- report.append(self.bullet(f"Reason: {request_dict.get('reason')!s}"))
- report.append(self.bullet(f"Status: {request_dict.get('status')!s}"))
+ self.bullet(f"Failed tasks: {request_dict.get('failed_tasks')}"))
report.append(
- self.bullet(f"Failed tasks: {request_dict.get('failed_tasks'):d}"))
- report.append(
- self.bullet(f"Running tasks: {request_dict.get('running_tasks'):d}"))
+ self.bullet(f"Running tasks: {request_dict.get('running_tasks')}"))
report.append(
self.bullet(
- f"Successful tasks: {request_dict.get('successful_tasks'):d}"))
+ f"Successful tasks: {request_dict.get('successful_tasks')}"))
report.append(
- self.bullet(f"Task Count: {request_dict.get('task_count'):d}"))
+ self.bullet(f"Task Count: {request_dict.get('task_count')}"))
report.append(
- self.bullet(f"Queued tasks: {request_dict.get('queued_tasks'):d}"))
+ self.bullet(f"Queued tasks: {request_dict.get('queued_tasks')}"))
report.append(
- self.bullet(f"Evidence Name: {request_dict.get('evidence_name'):d}"))
+ self.bullet(f"Evidence Name: {request_dict.get('evidence_name')}"))
report.append('')
except TypeError as exception:
log.warning(f'Error formatting the Markdown report: {exception!s}')
|
{"golden_diff": "diff --git a/turbinia/api/cli/turbinia_client/helpers/formatter.py b/turbinia/api/cli/turbinia_client/helpers/formatter.py\n--- a/turbinia/api/cli/turbinia_client/helpers/formatter.py\n+++ b/turbinia/api/cli/turbinia_client/helpers/formatter.py\n@@ -194,7 +194,7 @@\n report.append(self.heading2(task.get('name')))\n line = f\"{self.bold('Evidence:'):s} {task.get('evidence_name')!s}\"\n report.append(self.bullet(line))\n- line = f\"{self.bold('Status:'):s} {task.get('status'):s}\"\n+ line = f\"{self.bold('Status:'):s} {task.get('status')!s}\"\n report.append(self.bullet(line))\n report.append(self.bullet(f\"Task Id: {task.get('id')!s}\"))\n report.append(\n@@ -253,27 +253,26 @@\n \n try:\n report.append(\n- self.heading2(f\"Request ID: {request_dict.get('request_id')!s}\"))\n+ self.heading2(f\"Request ID: {request_dict.get('request_id')}\"))\n report.append(\n self.bullet(\n- f\"Last Update: {request_dict.get('last_task_update_time')!s}\"))\n+ f\"Last Update: {request_dict.get('last_task_update_time')}\"))\n+ report.append(self.bullet(f\"Requester: {request_dict.get('requester')}\"))\n+ report.append(self.bullet(f\"Reason: {request_dict.get('reason')}\"))\n+ report.append(self.bullet(f\"Status: {request_dict.get('status')}\"))\n report.append(\n- self.bullet(f\"Requester: {request_dict.get('requester')!s}\"))\n- report.append(self.bullet(f\"Reason: {request_dict.get('reason')!s}\"))\n- report.append(self.bullet(f\"Status: {request_dict.get('status')!s}\"))\n+ self.bullet(f\"Failed tasks: {request_dict.get('failed_tasks')}\"))\n report.append(\n- self.bullet(f\"Failed tasks: {request_dict.get('failed_tasks'):d}\"))\n- report.append(\n- self.bullet(f\"Running tasks: {request_dict.get('running_tasks'):d}\"))\n+ self.bullet(f\"Running tasks: {request_dict.get('running_tasks')}\"))\n report.append(\n self.bullet(\n- f\"Successful tasks: {request_dict.get('successful_tasks'):d}\"))\n+ f\"Successful tasks: {request_dict.get('successful_tasks')}\"))\n report.append(\n- self.bullet(f\"Task Count: {request_dict.get('task_count'):d}\"))\n+ self.bullet(f\"Task Count: {request_dict.get('task_count')}\"))\n report.append(\n- self.bullet(f\"Queued tasks: {request_dict.get('queued_tasks'):d}\"))\n+ self.bullet(f\"Queued tasks: {request_dict.get('queued_tasks')}\"))\n report.append(\n- self.bullet(f\"Evidence Name: {request_dict.get('evidence_name'):d}\"))\n+ self.bullet(f\"Evidence Name: {request_dict.get('evidence_name')}\"))\n report.append('')\n except TypeError as exception:\n log.warning(f'Error formatting the Markdown report: {exception!s}')\n", "issue": "turbinia-client status summary formatting warning\nRunning turbini-client status summary might yield the following warning for some requests:\r\n\r\n```WARNING turbinia - Error formatting the Markdown report: unsupported format string passed to NoneType.__format__```\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Methods for formatting 
text.\"\"\"\n\nfrom __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom click import echo as click_echo\n\nimport logging\nimport json\n\nlog = logging.getLogger('turbinia')\n\n\ndef echo_json(json_data: dict) -> None:\n \"\"\"Pretty print JSON data.\"\"\"\n if isinstance(json_data, dict):\n click_echo(json.dumps(json_data, indent=2))\n\n\nclass MarkdownReportComponent(ABC):\n \"\"\"Components for generating Turbinia request/task\n markdown reports.\n \"\"\"\n\n def __init__(self):\n \"\"\"Instantiates a MarkdownReportComponent object.\"\"\"\n self._components: list(MarkdownReportComponent) = []\n self._parent: MarkdownReportComponent = None\n self._report: str = None\n\n @property\n def components(self):\n \"\"\"Returns the components list.\"\"\"\n return self._components\n\n @property\n def report(self):\n \"\"\"Returns the markdown report text.\"\"\"\n return self._report\n\n @report.setter\n def report(self, report):\n self._report = report\n\n @property\n def parent(self) -> MarkdownReportComponent:\n \"\"\"Returns the parent object.\"\"\"\n return self._parent\n\n @parent.setter\n def parent(self, parent: MarkdownReportComponent):\n self._parent = parent\n\n def bold(self, text):\n \"\"\"Formats text as bold in Markdown format.\n\n Args:\n text(string): Text to format\n\n Return:\n string: Formatted text.\n \"\"\"\n return f'**{text.strip():s}**'\n\n def heading1(self, text):\n \"\"\"Formats text as heading 1 in Markdown format.\n\n Args:\n text(string): Text to format\n\n Return:\n string: Formatted text.\n \"\"\"\n return f'# {text.strip():s}'\n\n def heading2(self, text):\n \"\"\"Formats text as heading 2 in Markdown format.\n\n Args:\n text(string): Text to format\n\n Return:\n string: Formatted text.\n \"\"\"\n return f'## {text.strip():s}'\n\n def heading3(self, text):\n \"\"\"Formats text as heading 3 in Markdown format.\n\n Args:\n text(string): Text to format\n\n Return:\n string: Formatted text.\n \"\"\"\n return f'### {text.strip():s}'\n\n def heading4(self, text):\n \"\"\"Formats text as heading 4 in Markdown format.\n\n Args:\n text(string): Text to format\n\n Return:\n string: Formatted text.\n \"\"\"\n return f'#### {text.strip():s}'\n\n def heading5(self, text):\n \"\"\"Formats text as heading 5 in Markdown format.\n Args:\n text(string): Text to format\n Return:\n string: Formatted text.\n \"\"\"\n return f'##### {text.strip():s}'\n\n def bullet(self, text, level=1):\n \"\"\"Formats text as a bullet in Markdown format.\n\n Args:\n text(string): Text to format\n level(int): Indentation level.\n Return:\n string: Formatted text.\n \"\"\"\n return f\"{' ' * (level - 1):s}* {text.strip():s}\"\n\n def code(self, text):\n \"\"\"Formats text as code in Markdown format.\n\n Args:\n text(string): Text to format\n\n Return:\n string: Formatted text.\n \"\"\"\n return f'`{text.strip():s}`'\n\n def add(self, component: MarkdownReportComponent) -> None:\n \"\"\"Adds a MarkdownReportComponent object to the components list.\n\n This method should additionally set the parent object.\n \"\"\"\n pass\n\n def add_components(self, components: list[MarkdownReportComponent]) -> None:\n \"\"\"Adds multiple MarkdownReportComponent objects to the components list.\"\"\"\n pass\n\n def remove(self, component: MarkdownReportComponent) -> None:\n \"\"\"Removes a MarkdownReportComponent object from the components list.\n\n This method should set the component's object to None.\n \"\"\"\n pass\n\n @abstractmethod\n def generate_markdown(self) -> str:\n pass\n\n\nclass 
TaskMarkdownReport(MarkdownReportComponent):\n \"\"\"Turbinia Task markdown report.\"\"\"\n\n def __init__(self, request_data: dict = None):\n \"\"\"Initialize TaskMarkdownReport\"\"\"\n super().__init__()\n self._request_data: dict = request_data\n\n def generate_markdown(self) -> str:\n \"\"\"Generate a markdown report.\"\"\"\n report: list[str] = []\n task: dict = self._request_data\n if not task:\n return ''\n\n try:\n report.append(self.heading2(task.get('name')))\n line = f\"{self.bold('Evidence:'):s} {task.get('evidence_name')!s}\"\n report.append(self.bullet(line))\n line = f\"{self.bold('Status:'):s} {task.get('status'):s}\"\n report.append(self.bullet(line))\n report.append(self.bullet(f\"Task Id: {task.get('id')!s}\"))\n report.append(\n self.bullet(f\"Executed on worker {task.get('worker_name')!s}\"))\n if task.get('report_data'):\n report.append('')\n report.append(self.heading3('Task Reported Data'))\n report.extend(task.get('report_data').splitlines())\n report.append('')\n report.append(self.heading3('Saved Task Files:'))\n\n saved_paths = task.get('saved_paths')\n if saved_paths:\n for path in saved_paths:\n report.append(self.bullet(self.code(path)))\n report.append('')\n except TypeError as exception:\n log.warning(f'Error formatting the Markdown report: {exception!s}')\n\n self.report = '\\n'.join(report)\n return self.report\n\n\nclass RequestMarkdownReport(MarkdownReportComponent):\n \"\"\"Turbinia Request Markdown report.\"\"\"\n\n def __init__(self, request_data: dict):\n \"\"\"Initializes a RequestMarkdownReport object.\"\"\"\n super().__init__()\n self._request_data: dict = request_data\n\n tasks = [TaskMarkdownReport(task) for task in request_data.get('tasks')]\n self.add_components(tasks)\n\n def add(self, component: MarkdownReportComponent) -> None:\n if component:\n self.components.append(component)\n component.parent = self\n\n def remove(self, component: MarkdownReportComponent) -> None:\n self.components.remove(component)\n component.parent = None\n\n def add_components(self, components: list[MarkdownReportComponent]) -> None:\n if components:\n for component in components:\n self.components.append(component)\n component.parent = self\n\n def generate_markdown(self) -> str:\n \"\"\"Generates a Markdown version of Requests results.\"\"\"\n report: list[str] = []\n request_dict: dict = self._request_data\n if not request_dict:\n return ''\n\n try:\n report.append(\n self.heading2(f\"Request ID: {request_dict.get('request_id')!s}\"))\n report.append(\n self.bullet(\n f\"Last Update: {request_dict.get('last_task_update_time')!s}\"))\n report.append(\n self.bullet(f\"Requester: {request_dict.get('requester')!s}\"))\n report.append(self.bullet(f\"Reason: {request_dict.get('reason')!s}\"))\n report.append(self.bullet(f\"Status: {request_dict.get('status')!s}\"))\n report.append(\n self.bullet(f\"Failed tasks: {request_dict.get('failed_tasks'):d}\"))\n report.append(\n self.bullet(f\"Running tasks: {request_dict.get('running_tasks'):d}\"))\n report.append(\n self.bullet(\n f\"Successful tasks: {request_dict.get('successful_tasks'):d}\"))\n report.append(\n self.bullet(f\"Task Count: {request_dict.get('task_count'):d}\"))\n report.append(\n self.bullet(f\"Queued tasks: {request_dict.get('queued_tasks'):d}\"))\n report.append(\n self.bullet(f\"Evidence Name: {request_dict.get('evidence_name'):d}\"))\n report.append('')\n except TypeError as exception:\n log.warning(f'Error formatting the Markdown report: {exception!s}')\n\n for task in self.components:\n 
report.append(task.generate_markdown())\n\n self.report = '\\n'.join(report)\n return self.report\n\n\nclass SummaryMarkdownReport(MarkdownReportComponent):\n \"\"\"A markdown report summary of all Turbinia Requests.\"\"\"\n\n def __init__(self, requests_summary: list[dict]):\n \"\"\"Initialize SummaryMarkdownReport.\"\"\"\n super().__init__()\n self._requests_summary = requests_summary\n\n def generate_markdown(self) -> str:\n \"\"\"Generate a Markdown version of Requests summary results.\"\"\"\n report: list[str] = []\n requests_status_list = None\n if self._requests_summary:\n requests_status_list = self._requests_summary.get('requests_status')\n\n if not requests_status_list:\n return '## No requests found.'\n\n for request_dict in requests_status_list:\n request_report = RequestMarkdownReport(request_dict).generate_markdown()\n report.append(request_report)\n\n self.report = '\\n'.join(report)\n return self.report\n\n\nclass WorkersMarkdownReport(MarkdownReportComponent):\n \"\"\"A markdown report of all tasks for a specific worker.\"\"\"\n\n def __init__(self, request_data: dict):\n super().__init__()\n self._request_data: dict = request_data\n\n def generate_markdown(self) -> str:\n \"\"\"Generates a Markdown version of tasks per worker.\"\"\"\n raise NotImplementedError\n", "path": "turbinia/api/cli/turbinia_client/helpers/formatter.py"}]}
| 3,679 | 743 |
gh_patches_debug_49360
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-5407
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pull GCI docs into main Zulip repo.
Some of our assets, like git cheatsheets, currently live in the zulip-gci repo. We should move them to the main Zulip repo and link from appropriate places.
cc @synicalsyntax @lonerz @tommyip
</issue>
<code>
[start of tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py]
1 #!/usr/bin/env python
2 from __future__ import print_function
3
4 import logging
5 import re
6 import scrapy
7
8 from scrapy import Request
9 from scrapy.linkextractors import IGNORED_EXTENSIONS
10 from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
11 from scrapy.utils.url import url_has_any_extension
12
13 from typing import Any, Generator, List, Optional, Tuple
14
15 EXCLUDED_URLS = [
16 # Google calendar returns 404s on HEAD requests unconditionally
17 'https://calendar.google.com/calendar/[email protected]',
18 # Returns 409 errors to HEAD requests frequently
19 'https://medium.freecodecamp.com',
20 ]
21
22
23 class BaseDocumentationSpider(scrapy.Spider):
24 name = None # type: Optional[str]
25 # Exclude domain address.
26 deny_domains = [] # type: List[str]
27 start_urls = [] # type: List[str]
28 deny = [] # type: List[str]
29 file_extensions = ['.' + ext for ext in IGNORED_EXTENSIONS] # type: List[str]
30 tags = ('a', 'area', 'img')
31 attrs = ('href', 'src')
32
33 def __init__(self, *args, **kwargs):
34 # type: (*Any, **Any) -> None
35 super(BaseDocumentationSpider, self).__init__(*args, **kwargs)
36 self.has_error = False
37
38 def _set_error_state(self):
39 # type: () -> None
40 self.has_error = True
41
42 def _has_extension(self, url):
43 # type: (str) -> bool
44 return url_has_any_extension(url, self.file_extensions)
45
46 def _is_external_url(self, url):
47 # type: (str) -> bool
48 return url.startswith('http') or self._has_extension(url)
49
50 def check_existing(self, response):
51 # type: (Any) -> None
52 self.log(response)
53
54 def check_permalink(self, response):
55 # type: (Any) -> None
56 self.log(response)
57 xpath_template = "//*[@id='{permalink}' or @name='{permalink}']"
58 m = re.match(r".+\#(?P<permalink>.*)$", response.request.url) # Get anchor value.
59 if not m:
60 return
61 permalink = m.group('permalink')
62 # Check permalink existing on response page.
63 if not response.selector.xpath(xpath_template.format(permalink=permalink)):
64 self._set_error_state()
65 raise Exception(
66 "Permalink #{} is not found on page {}".format(permalink, response.request.url))
67
68 def parse(self, response):
69 # type: (Any) -> Generator[Request, None, None]
70 self.log(response)
71 for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'],
72 tags=self.tags, attrs=self.attrs, deny=self.deny,
73 canonicalize=False).extract_links(response):
74 callback = self.parse # type: Any
75 dont_filter = False
76 method = 'GET'
77 if self._is_external_url(link.url):
78 callback = self.check_existing
79 method = 'HEAD'
80 elif '#' in link.url:
81 dont_filter = True
82 callback = self.check_permalink
83 yield Request(link.url, method=method, callback=callback, dont_filter=dont_filter,
84 errback=self.error_callback)
85
86 def retry_request_with_get(self, request):
87 # type: (Request) -> Generator[Request, None, None]
88 request.method = 'GET'
89 request.dont_filter = True
90 yield request
91
92 def exclude_error(self, url):
93 # type: (str) -> bool
94 if url in EXCLUDED_URLS:
95 return True
96 return False
97
98 def error_callback(self, failure):
99 # type: (Any) -> Optional[Generator[Any, None, None]]
100 if hasattr(failure.value, 'response') and failure.value.response:
101 response = failure.value.response
102 if self.exclude_error(response.url):
103 return None
104 if response.status == 404:
105 self._set_error_state()
106 raise Exception('Page not found: {}'.format(response))
107 if response.status == 405 and response.request.method == 'HEAD':
108 # Method 'HEAD' not allowed, repeat request with 'GET'
109 return self.retry_request_with_get(response.request)
110 self.log("Error! Please check link: {}".format(response), logging.ERROR)
111 elif isinstance(failure.type, IOError):
112 self._set_error_state()
113 else:
114 raise Exception(failure.value)
115 return None
116
[end of tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py
--- a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py
+++ b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py
@@ -17,6 +17,8 @@
'https://calendar.google.com/calendar/[email protected]',
# Returns 409 errors to HEAD requests frequently
'https://medium.freecodecamp.com',
+ # Returns 404 to HEAD requests unconditionally
+ 'https://www.git-tower.com/blog/command-line-cheat-sheet/',
]
|
{"golden_diff": "diff --git a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py\n--- a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py\n+++ b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py\n@@ -17,6 +17,8 @@\n 'https://calendar.google.com/calendar/[email protected]',\n # Returns 409 errors to HEAD requests frequently\n 'https://medium.freecodecamp.com',\n+ # Returns 404 to HEAD requests unconditionally\n+ 'https://www.git-tower.com/blog/command-line-cheat-sheet/',\n ]\n", "issue": "Pull GCI docs into main Zulip repo.\nSome of our assets, like git cheatsheets, currently live in the zulip-gci repo. We should move them to the main Zulip repo and link from appropriate places.\r\n\r\ncc @synicalsyntax @lonerz @tommyip \n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport logging\nimport re\nimport scrapy\n\nfrom scrapy import Request\nfrom scrapy.linkextractors import IGNORED_EXTENSIONS\nfrom scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor\nfrom scrapy.utils.url import url_has_any_extension\n\nfrom typing import Any, Generator, List, Optional, Tuple\n\nEXCLUDED_URLS = [\n # Google calendar returns 404s on HEAD requests unconditionally\n 'https://calendar.google.com/calendar/[email protected]',\n # Returns 409 errors to HEAD requests frequently\n 'https://medium.freecodecamp.com',\n]\n\n\nclass BaseDocumentationSpider(scrapy.Spider):\n name = None # type: Optional[str]\n # Exclude domain address.\n deny_domains = [] # type: List[str]\n start_urls = [] # type: List[str]\n deny = [] # type: List[str]\n file_extensions = ['.' + ext for ext in IGNORED_EXTENSIONS] # type: List[str]\n tags = ('a', 'area', 'img')\n attrs = ('href', 'src')\n\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n super(BaseDocumentationSpider, self).__init__(*args, **kwargs)\n self.has_error = False\n\n def _set_error_state(self):\n # type: () -> None\n self.has_error = True\n\n def _has_extension(self, url):\n # type: (str) -> bool\n return url_has_any_extension(url, self.file_extensions)\n\n def _is_external_url(self, url):\n # type: (str) -> bool\n return url.startswith('http') or self._has_extension(url)\n\n def check_existing(self, response):\n # type: (Any) -> None\n self.log(response)\n\n def check_permalink(self, response):\n # type: (Any) -> None\n self.log(response)\n xpath_template = \"//*[@id='{permalink}' or @name='{permalink}']\"\n m = re.match(r\".+\\#(?P<permalink>.*)$\", response.request.url) # Get anchor value.\n if not m:\n return\n permalink = m.group('permalink')\n # Check permalink existing on response page.\n if not response.selector.xpath(xpath_template.format(permalink=permalink)):\n self._set_error_state()\n raise Exception(\n \"Permalink #{} is not found on page {}\".format(permalink, response.request.url))\n\n def parse(self, response):\n # type: (Any) -> Generator[Request, None, None]\n self.log(response)\n for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'],\n tags=self.tags, attrs=self.attrs, deny=self.deny,\n canonicalize=False).extract_links(response):\n callback = self.parse # type: Any\n dont_filter = False\n method = 'GET'\n if self._is_external_url(link.url):\n callback = self.check_existing\n method = 'HEAD'\n elif '#' in link.url:\n dont_filter = True\n callback = self.check_permalink\n yield Request(link.url, 
method=method, callback=callback, dont_filter=dont_filter,\n errback=self.error_callback)\n\n def retry_request_with_get(self, request):\n # type: (Request) -> Generator[Request, None, None]\n request.method = 'GET'\n request.dont_filter = True\n yield request\n\n def exclude_error(self, url):\n # type: (str) -> bool\n if url in EXCLUDED_URLS:\n return True\n return False\n\n def error_callback(self, failure):\n # type: (Any) -> Optional[Generator[Any, None, None]]\n if hasattr(failure.value, 'response') and failure.value.response:\n response = failure.value.response\n if self.exclude_error(response.url):\n return None\n if response.status == 404:\n self._set_error_state()\n raise Exception('Page not found: {}'.format(response))\n if response.status == 405 and response.request.method == 'HEAD':\n # Method 'HEAD' not allowed, repeat request with 'GET'\n return self.retry_request_with_get(response.request)\n self.log(\"Error! Please check link: {}\".format(response), logging.ERROR)\n elif isinstance(failure.type, IOError):\n self._set_error_state()\n else:\n raise Exception(failure.value)\n return None\n", "path": "tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py"}]}
| 1,852 | 168 |
gh_patches_debug_28640
|
rasdani/github-patches
|
git_diff
|
microsoft__torchgeo-316
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add "Launch in Hub" button to tutorials
We currently have a "Open in Colab" button for our tutorials. We should also have one for the Planetary Computer. See https://planetarycomputer.microsoft.com/dataset/landsat-8-c2-l2#Example-Notebook for an example of how this works.
</issue>
<code>
[start of docs/conf.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 # Configuration file for the Sphinx documentation builder.
5 #
6 # This file only contains a selection of the most common options. For a full
7 # list see the documentation:
8 # https://www.sphinx-doc.org/en/master/usage/configuration.html
9
10 # -- Path setup --------------------------------------------------------------
11
12 import os
13 import sys
14
15 import pytorch_sphinx_theme
16
17 # If extensions (or modules to document with autodoc) are in another directory,
18 # add these directories to sys.path here. If the directory is relative to the
19 # documentation root, use os.path.abspath to make it absolute, like shown here.
20 sys.path.insert(0, os.path.abspath(".."))
21
22 import torchgeo # noqa: E402
23
24 # -- Project information -----------------------------------------------------
25
26 project = "torchgeo"
27 copyright = "2021, Microsoft Corporation"
28 author = torchgeo.__author__
29 version = ".".join(torchgeo.__version__.split(".")[:2])
30 release = torchgeo.__version__
31
32
33 # -- General configuration ---------------------------------------------------
34
35 # Add any Sphinx extension module names here, as strings. They can be
36 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
37 # ones.
38 extensions = [
39 "sphinx.ext.autodoc",
40 "sphinx.ext.intersphinx",
41 "sphinx.ext.napoleon",
42 "sphinx.ext.todo",
43 "sphinx.ext.viewcode",
44 "nbsphinx",
45 ]
46
47 # List of patterns, relative to source directory, that match files and
48 # directories to ignore when looking for source files.
49 # This pattern also affects html_static_path and html_extra_path.
50 exclude_patterns = ["_build"]
51
52 # Sphinx 3.0+ required for:
53 # autodoc_typehints = "description"
54 needs_sphinx = "3.0"
55
56 nitpicky = True
57 nitpick_ignore = [
58 # https://github.com/sphinx-doc/sphinx/issues/8127
59 ("py:class", ".."),
60 # TODO: can't figure out why this isn't found
61 ("py:class", "LightningDataModule"),
62 # Undocumented class
63 ("py:class", "torchvision.models.resnet.ResNet"),
64 ]
65
66
67 # -- Options for HTML output -------------------------------------------------
68
69 # The theme to use for HTML and HTML Help pages. See the documentation for
70 # a list of builtin themes.
71 html_theme = "pytorch_sphinx_theme"
72 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
73
74 # Theme options are theme-specific and customize the look and feel of a theme
75 # further. For a list of options available for each theme, see the
76 # documentation.
77 html_theme_options = {
78 "collapse_navigation": False,
79 "display_version": True,
80 "logo_only": True,
81 "pytorch_project": "docs",
82 "navigation_with_keys": True,
83 "analytics_id": "UA-209075005-1",
84 }
85
86 html_favicon = os.path.join("..", "logo", "favicon.ico")
87
88 html_static_path = ["_static"]
89 html_css_files = ["workaround.css"]
90
91 # -- Extension configuration -------------------------------------------------
92
93 # sphinx.ext.autodoc
94 autodoc_default_options = {
95 "members": True,
96 "special-members": True,
97 "show-inheritance": True,
98 }
99 autodoc_member_order = "bysource"
100 autodoc_typehints = "description"
101
102 # sphinx.ext.intersphinx
103 intersphinx_mapping = {
104 "matplotlib": ("https://matplotlib.org/stable/", None),
105 "python": ("https://docs.python.org/3", None),
106 "pytorch-lightning": ("https://pytorch-lightning.readthedocs.io/en/latest/", None),
107 "rasterio": ("https://rasterio.readthedocs.io/en/latest/", None),
108 "rtree": ("https://rtree.readthedocs.io/en/latest/", None),
109 "torch": ("https://pytorch.org/docs/stable", None),
110 "torchvision": ("https://pytorch.org/vision/stable", None),
111 }
112
113 # nbsphinx
114 nbsphinx_execute = "never"
115 # TODO: branch/tag should change depending on which version of docs you look at
116 # TODO: width option of image directive is broken, see:
117 # https://github.com/pytorch/pytorch_sphinx_theme/issues/140
118 nbsphinx_prolog = """
119 {% set colab = "https://colab.research.google.com" %}
120 {% set repo = "microsoft/torchgeo" %}
121 {% if "dev" in env.config.release %}
122 {% set branch = "main" %}
123 {% else %}
124 {% set branch = "releases/v" ~ env.config.version %}
125 {% endif %}
126
127 .. image:: {{ colab }}/assets/colab-badge.svg
128 :class: colabbadge
129 :alt: Open in Colab
130 :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb
131 """
132
133 # Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme
134 # See more information here https://github.com/spatialaudio/nbsphinx/issues/599
135 # NOTE: This will likely break nbsphinx widgets
136 nbsphinx_requirejs_path = ""
137
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -116,18 +116,35 @@
# TODO: width option of image directive is broken, see:
# https://github.com/pytorch/pytorch_sphinx_theme/issues/140
nbsphinx_prolog = """
-{% set colab = "https://colab.research.google.com" %}
+{% set host = "https://colab.research.google.com" %}
{% set repo = "microsoft/torchgeo" %}
+{% set urlpath = "docs/" ~ env.docname ~ ".ipynb" %}
{% if "dev" in env.config.release %}
{% set branch = "main" %}
{% else %}
{% set branch = "releases/v" ~ env.config.version %}
{% endif %}
-.. image:: {{ colab }}/assets/colab-badge.svg
+.. image:: {{ host }}/assets/colab-badge.svg
:class: colabbadge
:alt: Open in Colab
- :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb
+ :target: {{ host }}/github/{{ repo }}/blob/{{ branch }}/{{ urlpath }}
+
+{% set host = "https://pccompute.westeurope.cloudapp.azure.com" %}
+{% set host = host ~ "/compute/hub/user-redirect/git-pull" %}
+{% set repo = "https%3A%2F%2Fgithub.com%2Fmicrosoft%2Ftorchgeo" %}
+{% set urlpath = "tree%2Ftorchgeo%2Fdocs%2F" %}
+{% set urlpath = urlpath ~ env.docname | replace("/", "%2F") ~ ".ipynb" %}
+{% if "dev" in env.config.release %}
+ {% set branch = "main" %}
+{% else %}
+ {% set branch = "releases%2Fv" ~ env.config.version %}
+{% endif %}
+
+.. image:: https://img.shields.io/badge/-Open%20on%20Planetary%20Computer-blue
+ :class: colabbadge
+ :alt: Open on Planetary Computer
+ :target: {{ host }}?repo={{ repo }}&urlpath={{ urlpath }}&branch={{ branch }}
"""
# Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -116,18 +116,35 @@\n # TODO: width option of image directive is broken, see:\n # https://github.com/pytorch/pytorch_sphinx_theme/issues/140\n nbsphinx_prolog = \"\"\"\n-{% set colab = \"https://colab.research.google.com\" %}\n+{% set host = \"https://colab.research.google.com\" %}\n {% set repo = \"microsoft/torchgeo\" %}\n+{% set urlpath = \"docs/\" ~ env.docname ~ \".ipynb\" %}\n {% if \"dev\" in env.config.release %}\n {% set branch = \"main\" %}\n {% else %}\n {% set branch = \"releases/v\" ~ env.config.version %}\n {% endif %}\n \n-.. image:: {{ colab }}/assets/colab-badge.svg\n+.. image:: {{ host }}/assets/colab-badge.svg\n :class: colabbadge\n :alt: Open in Colab\n- :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb\n+ :target: {{ host }}/github/{{ repo }}/blob/{{ branch }}/{{ urlpath }}\n+\n+{% set host = \"https://pccompute.westeurope.cloudapp.azure.com\" %}\n+{% set host = host ~ \"/compute/hub/user-redirect/git-pull\" %}\n+{% set repo = \"https%3A%2F%2Fgithub.com%2Fmicrosoft%2Ftorchgeo\" %}\n+{% set urlpath = \"tree%2Ftorchgeo%2Fdocs%2F\" %}\n+{% set urlpath = urlpath ~ env.docname | replace(\"/\", \"%2F\") ~ \".ipynb\" %}\n+{% if \"dev\" in env.config.release %}\n+ {% set branch = \"main\" %}\n+{% else %}\n+ {% set branch = \"releases%2Fv\" ~ env.config.version %}\n+{% endif %}\n+\n+.. image:: https://img.shields.io/badge/-Open%20on%20Planetary%20Computer-blue\n+ :class: colabbadge\n+ :alt: Open on Planetary Computer\n+ :target: {{ host }}?repo={{ repo }}&urlpath={{ urlpath }}&branch={{ branch }}\n \"\"\"\n \n # Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme\n", "issue": "Add \"Launch in Hub\" button to tutorials\nWe currently have a \"Open in Colab\" button for our tutorials. We should also have one for the Planetary Computer. See https://planetarycomputer.microsoft.com/dataset/landsat-8-c2-l2#Example-Notebook for an example of how this works.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\n\nimport pytorch_sphinx_theme\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nimport torchgeo # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"torchgeo\"\ncopyright = \"2021, Microsoft Corporation\"\nauthor = torchgeo.__author__\nversion = \".\".join(torchgeo.__version__.split(\".\")[:2])\nrelease = torchgeo.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"nbsphinx\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\"]\n\n# Sphinx 3.0+ required for:\n# autodoc_typehints = \"description\"\nneeds_sphinx = \"3.0\"\n\nnitpicky = True\nnitpick_ignore = [\n # https://github.com/sphinx-doc/sphinx/issues/8127\n (\"py:class\", \"..\"),\n # TODO: can't figure out why this isn't found\n (\"py:class\", \"LightningDataModule\"),\n # Undocumented class\n (\"py:class\", \"torchvision.models.resnet.ResNet\"),\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"pytorch_project\": \"docs\",\n \"navigation_with_keys\": True,\n \"analytics_id\": \"UA-209075005-1\",\n}\n\nhtml_favicon = os.path.join(\"..\", \"logo\", \"favicon.ico\")\n\nhtml_static_path = [\"_static\"]\nhtml_css_files = [\"workaround.css\"]\n\n# -- Extension configuration -------------------------------------------------\n\n# sphinx.ext.autodoc\nautodoc_default_options = {\n \"members\": True,\n \"special-members\": True,\n \"show-inheritance\": True,\n}\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\n\n# sphinx.ext.intersphinx\nintersphinx_mapping = {\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n \"python\": (\"https://docs.python.org/3\", None),\n \"pytorch-lightning\": (\"https://pytorch-lightning.readthedocs.io/en/latest/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/latest/\", None),\n \"rtree\": (\"https://rtree.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable\", None),\n \"torchvision\": (\"https://pytorch.org/vision/stable\", None),\n}\n\n# nbsphinx\nnbsphinx_execute = \"never\"\n# TODO: branch/tag should change depending on which version of docs you look at\n# TODO: width option of image directive is broken, see:\n# https://github.com/pytorch/pytorch_sphinx_theme/issues/140\nnbsphinx_prolog = \"\"\"\n{% set colab = \"https://colab.research.google.com\" %}\n{% set repo = \"microsoft/torchgeo\" %}\n{% if \"dev\" in env.config.release %}\n {% set branch = \"main\" %}\n{% else %}\n {% set branch = \"releases/v\" ~ env.config.version %}\n{% endif %}\n\n.. image:: {{ colab }}/assets/colab-badge.svg\n :class: colabbadge\n :alt: Open in Colab\n :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb\n\"\"\"\n\n# Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme\n# See more information here https://github.com/spatialaudio/nbsphinx/issues/599\n# NOTE: This will likely break nbsphinx widgets\nnbsphinx_requirejs_path = \"\"\n", "path": "docs/conf.py"}]}
| 2,041 | 542 |
gh_patches_debug_21843
|
rasdani/github-patches
|
git_diff
|
pymedusa__Medusa-824
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnicodeEncodeError: 'ascii' codec can't encode character u'\u014d' in position 31: ordinal not in range(128)
```
Traceback (most recent call last):
File "/home/orangepi/.sickrage/lib/subliminal/core.py", line 543, in refine
refiner_manager[refiner].plugin(video, **kwargs)
File "/home/orangepi/.sickrage/sickbeard/refiners/release.py", line 54, in refine
release_file = get_release_file(dirpath, fileroot, extension) or release_file
File "/home/orangepi/.sickrage/sickbeard/refiners/release.py", line 90, in get_release_file
if os.path.isfile(release_file):
File "/usr/lib/python2.7/genericpath.py", line 37, in isfile
st = os.stat(path)
UnicodeEncodeError: 'ascii' codec can't encode character u'\u014d' in position 31: ordinal not in range(128)
```
This happens because the subtitle contains a non-ascii char and my locale is set to None.
To fix this we would need to use ek() for isfile() or (better imo) use a similar solution to what we do in subtitles.py. What do you think @ratoaq2?
</issue>
<code>
[start of sickbeard/refiners/release.py]
1 # -*- coding: utf-8 -*-
2 """Release refiner."""
3 from __future__ import unicode_literals
4
5 import logging
6 import os
7
8 from guessit import guessit
9
10 logger = logging.getLogger(__name__)
11
12 MOVIE_ATTRIBUTES = {'title': 'title', 'year': 'year', 'format': 'format', 'release_group': 'release_group',
13 'resolution': 'screen_size', 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}
14 EPISODE_ATTRIBUTES = {'series': 'title', 'season': 'season', 'episode': 'episode', 'title': 'episode_title',
15 'year': 'year', 'format': 'format', 'release_group': 'release_group', 'resolution': 'screen_size',
16 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}
17
18
19 def refine(video, release_name=None, release_file=None, extension='release', **kwargs):
20 """Refine a video by using the original release name.
21
22 The refiner will first try:
23 - Read the file video_name.<extension> seeking for a release name
24 - If no release name, it will read the release_file seeking for a release name
25 - If no release name, it will use the release_name passed as an argument
26 - If no release name, then no change in the video object is made
27
28 When a release name is found, the video object will be enhanced using the guessit properties extracted from it.
29
30 Several :class:`~subliminal.video.Video` attributes can be found:
31
32 * :attr:`~subliminal.video.Video.title`
33 * :attr:`~subliminal.video.Video.series`
34 * :attr:`~subliminal.video.Video.season`
35 * :attr:`~subliminal.video.Video.episode`
36 * :attr:`~subliminal.video.Video.year`
37 * :attr:`~subliminal.video.Video.format`
38 * :attr:`~subliminal.video.Video.release_group`
39 * :attr:`~subliminal.video.Video.resolution`
40 * :attr:`~subliminal.video.Video.video_codec`
41 * :attr:`~subliminal.video.Video.audio_codec`
42
43 :param video: the video to refine.
44 :type video: subliminal.video.Video
45 :param str release_name: the release name to be used.
46 :param str release_file: the release file to be used
47 :param str extension: the release file extension.
48 """
49 logger.debug('Starting release refiner [extension={extension}, release_name={name}, release_file={file}]',
50 extension=extension, name=release_name, file=release_file)
51 dirpath, filename = os.path.split(video.name)
52 dirpath = dirpath or '.'
53 fileroot, fileext = os.path.splitext(filename)
54 release_file = get_release_file(dirpath, fileroot, extension) or release_file
55 release_name = get_release_name(release_file) or release_name
56
57 if not release_name:
58 logger.debug('No release name for {video}', video=video.name)
59 return
60
61 release_path = os.path.join(dirpath, release_name + fileext)
62 logger.debug('Guessing using {path}', path=release_path)
63
64 guess = guessit(release_path)
65 attributes = MOVIE_ATTRIBUTES if guess.get('type') == 'movie' else EPISODE_ATTRIBUTES
66 for key, value in attributes.items():
67 old_value = getattr(video, key)
68 new_value = guess.get(value)
69
70 if new_value and old_value != new_value:
71 setattr(video, key, new_value)
72 logger.debug('Attribute {key} changed from {old} to {new}', key=key, old=old_value, new=new_value)
73
74
75 def get_release_file(dirpath, filename, extension):
76 """Return the release file that should contain the release name for a given a `dirpath`, `filename` and `extension`.
77
78 :param dirpath: the file base folder
79 :type dirpath: str
80 :param filename: the file name without extension
81 :type filename: str
82 :param extension:
83 :type extension: the file extension
84 :return: the release file if the file exists
85 :rtype: str
86 """
87 release_file = os.path.join(dirpath, filename + '.' + extension)
88
89 # skip if info file doesn't exist
90 if os.path.isfile(release_file):
91 logger.debug('Found release file {file}', file=release_file)
92 return release_file
93
94
95 def get_release_name(release_file):
96 """Given a `release_file` it will return the release name.
97
98 :param release_file: the text file that contains the release name
99 :type release_file: str
100 :return: the release name
101 :rtype: str
102 """
103 if not release_file:
104 return
105
106 with open(release_file, 'r') as f:
107 release_name = f.read().strip()
108
109 # skip if no release name was found
110 if not release_name:
111 logger.warning('Release file {file} does not contain a release name', file=release_file)
112
113 return release_name
114
[end of sickbeard/refiners/release.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sickbeard/refiners/release.py b/sickbeard/refiners/release.py
--- a/sickbeard/refiners/release.py
+++ b/sickbeard/refiners/release.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
"""Release refiner."""
-from __future__ import unicode_literals
import logging
import os
@@ -10,7 +9,7 @@
logger = logging.getLogger(__name__)
MOVIE_ATTRIBUTES = {'title': 'title', 'year': 'year', 'format': 'format', 'release_group': 'release_group',
- 'resolution': 'screen_size', 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}
+ 'resolution': 'screen_size', 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}
EPISODE_ATTRIBUTES = {'series': 'title', 'season': 'season', 'episode': 'episode', 'title': 'episode_title',
'year': 'year', 'format': 'format', 'release_group': 'release_group', 'resolution': 'screen_size',
'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}
|
{"golden_diff": "diff --git a/sickbeard/refiners/release.py b/sickbeard/refiners/release.py\n--- a/sickbeard/refiners/release.py\n+++ b/sickbeard/refiners/release.py\n@@ -1,6 +1,5 @@\n # -*- coding: utf-8 -*-\n \"\"\"Release refiner.\"\"\"\n-from __future__ import unicode_literals\n \n import logging\n import os\n@@ -10,7 +9,7 @@\n logger = logging.getLogger(__name__)\n \n MOVIE_ATTRIBUTES = {'title': 'title', 'year': 'year', 'format': 'format', 'release_group': 'release_group',\n- 'resolution': 'screen_size', 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}\n+ 'resolution': 'screen_size', 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}\n EPISODE_ATTRIBUTES = {'series': 'title', 'season': 'season', 'episode': 'episode', 'title': 'episode_title',\n 'year': 'year', 'format': 'format', 'release_group': 'release_group', 'resolution': 'screen_size',\n 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}\n", "issue": "UnicodeEncodeError: 'ascii' codec can't encode character u'\\u014d' in position 31: ordinal not in range(128)\n```\nTraceback (most recent call last):\n File \"/home/orangepi/.sickrage/lib/subliminal/core.py\", line 543, in refine\n refiner_manager[refiner].plugin(video, **kwargs)\n File \"/home/orangepi/.sickrage/sickbeard/refiners/release.py\", line 54, in refine\n release_file = get_release_file(dirpath, fileroot, extension) or release_file\n File \"/home/orangepi/.sickrage/sickbeard/refiners/release.py\", line 90, in get_release_file\n if os.path.isfile(release_file):\n File \"/usr/lib/python2.7/genericpath.py\", line 37, in isfile\n st = os.stat(path)\nUnicodeEncodeError: 'ascii' codec can't encode character u'\\u014d' in position 31: ordinal not in range(128)\n```\n\nThis happens because the subtitle contains a non-ascii char and my locale is set to None.\nTo fix this we would need to use ek() for isfile() or (better imo) use a similar solution to what we du in subtitles.py. 
What do you think @ratoaq2?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Release refiner.\"\"\"\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\n\nfrom guessit import guessit\n\nlogger = logging.getLogger(__name__)\n\nMOVIE_ATTRIBUTES = {'title': 'title', 'year': 'year', 'format': 'format', 'release_group': 'release_group',\n 'resolution': 'screen_size', 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}\nEPISODE_ATTRIBUTES = {'series': 'title', 'season': 'season', 'episode': 'episode', 'title': 'episode_title',\n 'year': 'year', 'format': 'format', 'release_group': 'release_group', 'resolution': 'screen_size',\n 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}\n\n\ndef refine(video, release_name=None, release_file=None, extension='release', **kwargs):\n \"\"\"Refine a video by using the original release name.\n\n The refiner will first try:\n - Read the file video_name.<extension> seeking for a release name\n - If no release name, it will read the release_file seeking for a release name\n - If no release name, it will use the release_name passed as an argument\n - If no release name, then no change in the video object is made\n\n When a release name is found, the video object will be enhanced using the guessit properties extracted from it.\n\n Several :class:`~subliminal.video.Video` attributes can be found:\n\n * :attr:`~subliminal.video.Video.title`\n * :attr:`~subliminal.video.Video.series`\n * :attr:`~subliminal.video.Video.season`\n * :attr:`~subliminal.video.Video.episode`\n * :attr:`~subliminal.video.Video.year`\n * :attr:`~subliminal.video.Video.format`\n * :attr:`~subliminal.video.Video.release_group`\n * :attr:`~subliminal.video.Video.resolution`\n * :attr:`~subliminal.video.Video.video_codec`\n * :attr:`~subliminal.video.Video.audio_codec`\n\n :param video: the video to refine.\n :type video: subliminal.video.Video\n :param str release_name: the release name to be used.\n :param str release_file: the release file to be used\n :param str extension: the release file extension.\n \"\"\"\n logger.debug('Starting release refiner [extension={extension}, release_name={name}, release_file={file}]',\n extension=extension, name=release_name, file=release_file)\n dirpath, filename = os.path.split(video.name)\n dirpath = dirpath or '.'\n fileroot, fileext = os.path.splitext(filename)\n release_file = get_release_file(dirpath, fileroot, extension) or release_file\n release_name = get_release_name(release_file) or release_name\n\n if not release_name:\n logger.debug('No release name for {video}', video=video.name)\n return\n\n release_path = os.path.join(dirpath, release_name + fileext)\n logger.debug('Guessing using {path}', path=release_path)\n\n guess = guessit(release_path)\n attributes = MOVIE_ATTRIBUTES if guess.get('type') == 'movie' else EPISODE_ATTRIBUTES\n for key, value in attributes.items():\n old_value = getattr(video, key)\n new_value = guess.get(value)\n\n if new_value and old_value != new_value:\n setattr(video, key, new_value)\n logger.debug('Attribute {key} changed from {old} to {new}', key=key, old=old_value, new=new_value)\n\n\ndef get_release_file(dirpath, filename, extension):\n \"\"\"Return the release file that should contain the release name for a given a `dirpath`, `filename` and `extension`.\n\n :param dirpath: the file base folder\n :type dirpath: str\n :param filename: the file name without extension\n :type filename: str\n :param extension:\n :type extension: the file extension\n :return: the 
release file if the file exists\n :rtype: str\n \"\"\"\n release_file = os.path.join(dirpath, filename + '.' + extension)\n\n # skip if info file doesn't exist\n if os.path.isfile(release_file):\n logger.debug('Found release file {file}', file=release_file)\n return release_file\n\n\ndef get_release_name(release_file):\n \"\"\"Given a `release_file` it will return the release name.\n\n :param release_file: the text file that contains the release name\n :type release_file: str\n :return: the release name\n :rtype: str\n \"\"\"\n if not release_file:\n return\n\n with open(release_file, 'r') as f:\n release_name = f.read().strip()\n\n # skip if no release name was found\n if not release_name:\n logger.warning('Release file {file} does not contain a release name', file=release_file)\n\n return release_name\n", "path": "sickbeard/refiners/release.py"}]}
| 2,173 | 255 |
gh_patches_debug_4872
|
rasdani/github-patches
|
git_diff
|
matrix-org__synapse-8517
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
/displayname returns non-JSON body
### Description
An attempt to call `/profile/{}/displayname` with an empty body results (rightfully) in a 400 error, with plain text "Unable to parse name" in the response. The expected error payload is something like the following (the errcode could also be `M_NOT_JSON` instead of `M_BAD_JSON`):
```json
{
"errcode": "M_BAD_JSON",
"error": "Unable to parse name"
}
```
### Version information
<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->
<!-- Was this issue identified on matrix.org or another homeserver? -->
- **Homeserver**: matrix.org
</issue>
<code>
[start of synapse/rest/client/v1/profile.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2014-2016 OpenMarket Ltd
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """ This module contains REST servlets to do with profile: /profile/<paths> """
17
18 from synapse.api.errors import Codes, SynapseError
19 from synapse.http.servlet import RestServlet, parse_json_object_from_request
20 from synapse.rest.client.v2_alpha._base import client_patterns
21 from synapse.types import UserID
22
23
24 class ProfileDisplaynameRestServlet(RestServlet):
25 PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/displayname", v1=True)
26
27 def __init__(self, hs):
28 super().__init__()
29 self.hs = hs
30 self.profile_handler = hs.get_profile_handler()
31 self.auth = hs.get_auth()
32
33 async def on_GET(self, request, user_id):
34 requester_user = None
35
36 if self.hs.config.require_auth_for_profile_requests:
37 requester = await self.auth.get_user_by_req(request)
38 requester_user = requester.user
39
40 user = UserID.from_string(user_id)
41
42 await self.profile_handler.check_profile_query_allowed(user, requester_user)
43
44 displayname = await self.profile_handler.get_displayname(user)
45
46 ret = {}
47 if displayname is not None:
48 ret["displayname"] = displayname
49
50 return 200, ret
51
52 async def on_PUT(self, request, user_id):
53 requester = await self.auth.get_user_by_req(request, allow_guest=True)
54 user = UserID.from_string(user_id)
55 is_admin = await self.auth.is_server_admin(requester.user)
56
57 content = parse_json_object_from_request(request)
58
59 try:
60 new_name = content["displayname"]
61 except Exception:
62 return 400, "Unable to parse name"
63
64 await self.profile_handler.set_displayname(user, requester, new_name, is_admin)
65
66 return 200, {}
67
68 def on_OPTIONS(self, request, user_id):
69 return 200, {}
70
71
72 class ProfileAvatarURLRestServlet(RestServlet):
73 PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/avatar_url", v1=True)
74
75 def __init__(self, hs):
76 super().__init__()
77 self.hs = hs
78 self.profile_handler = hs.get_profile_handler()
79 self.auth = hs.get_auth()
80
81 async def on_GET(self, request, user_id):
82 requester_user = None
83
84 if self.hs.config.require_auth_for_profile_requests:
85 requester = await self.auth.get_user_by_req(request)
86 requester_user = requester.user
87
88 user = UserID.from_string(user_id)
89
90 await self.profile_handler.check_profile_query_allowed(user, requester_user)
91
92 avatar_url = await self.profile_handler.get_avatar_url(user)
93
94 ret = {}
95 if avatar_url is not None:
96 ret["avatar_url"] = avatar_url
97
98 return 200, ret
99
100 async def on_PUT(self, request, user_id):
101 requester = await self.auth.get_user_by_req(request)
102 user = UserID.from_string(user_id)
103 is_admin = await self.auth.is_server_admin(requester.user)
104
105 content = parse_json_object_from_request(request)
106 try:
107 new_avatar_url = content["avatar_url"]
108 except KeyError:
109 raise SynapseError(
110 400, "Missing key 'avatar_url'", errcode=Codes.MISSING_PARAM
111 )
112
113 await self.profile_handler.set_avatar_url(
114 user, requester, new_avatar_url, is_admin
115 )
116
117 return 200, {}
118
119 def on_OPTIONS(self, request, user_id):
120 return 200, {}
121
122
123 class ProfileRestServlet(RestServlet):
124 PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)", v1=True)
125
126 def __init__(self, hs):
127 super().__init__()
128 self.hs = hs
129 self.profile_handler = hs.get_profile_handler()
130 self.auth = hs.get_auth()
131
132 async def on_GET(self, request, user_id):
133 requester_user = None
134
135 if self.hs.config.require_auth_for_profile_requests:
136 requester = await self.auth.get_user_by_req(request)
137 requester_user = requester.user
138
139 user = UserID.from_string(user_id)
140
141 await self.profile_handler.check_profile_query_allowed(user, requester_user)
142
143 displayname = await self.profile_handler.get_displayname(user)
144 avatar_url = await self.profile_handler.get_avatar_url(user)
145
146 ret = {}
147 if displayname is not None:
148 ret["displayname"] = displayname
149 if avatar_url is not None:
150 ret["avatar_url"] = avatar_url
151
152 return 200, ret
153
154
155 def register_servlets(hs, http_server):
156 ProfileDisplaynameRestServlet(hs).register(http_server)
157 ProfileAvatarURLRestServlet(hs).register(http_server)
158 ProfileRestServlet(hs).register(http_server)
159
[end of synapse/rest/client/v1/profile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
--- a/synapse/rest/client/v1/profile.py
+++ b/synapse/rest/client/v1/profile.py
@@ -59,7 +59,9 @@
try:
new_name = content["displayname"]
except Exception:
- return 400, "Unable to parse name"
+ raise SynapseError(
+ code=400, msg="Unable to parse name", errcode=Codes.BAD_JSON,
+ )
await self.profile_handler.set_displayname(user, requester, new_name, is_admin)
|
{"golden_diff": "diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py\n--- a/synapse/rest/client/v1/profile.py\n+++ b/synapse/rest/client/v1/profile.py\n@@ -59,7 +59,9 @@\n try:\n new_name = content[\"displayname\"]\n except Exception:\n- return 400, \"Unable to parse name\"\n+ raise SynapseError(\n+ code=400, msg=\"Unable to parse name\", errcode=Codes.BAD_JSON,\n+ )\n \n await self.profile_handler.set_displayname(user, requester, new_name, is_admin)\n", "issue": "/displayname returns non-JSON body\n### Description\r\n\r\nAn attempt to call `/profile/{}/displayname` with an empty body results (rightfully) in a 400 error, with plain text \"Unable to parse name\" in the response. The expected error payload is something about (could also be `M_NOT_JSON` instead of `M_BAD_JSON`):\r\n```json\r\n{\r\n \"errcode\": \"M_BAD_JSON\",\r\n \"error\": \"Unable to parse name\" \r\n}\r\n```\r\n\r\n### Version information\r\n\r\n<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->\r\n\r\n<!-- Was this issue identified on matrix.org or another homeserver? -->\r\n- **Homeserver**: matrix.org\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2014-2016 OpenMarket Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\" This module contains REST servlets to do with profile: /profile/<paths> \"\"\"\n\nfrom synapse.api.errors import Codes, SynapseError\nfrom synapse.http.servlet import RestServlet, parse_json_object_from_request\nfrom synapse.rest.client.v2_alpha._base import client_patterns\nfrom synapse.types import UserID\n\n\nclass ProfileDisplaynameRestServlet(RestServlet):\n PATTERNS = client_patterns(\"/profile/(?P<user_id>[^/]*)/displayname\", v1=True)\n\n def __init__(self, hs):\n super().__init__()\n self.hs = hs\n self.profile_handler = hs.get_profile_handler()\n self.auth = hs.get_auth()\n\n async def on_GET(self, request, user_id):\n requester_user = None\n\n if self.hs.config.require_auth_for_profile_requests:\n requester = await self.auth.get_user_by_req(request)\n requester_user = requester.user\n\n user = UserID.from_string(user_id)\n\n await self.profile_handler.check_profile_query_allowed(user, requester_user)\n\n displayname = await self.profile_handler.get_displayname(user)\n\n ret = {}\n if displayname is not None:\n ret[\"displayname\"] = displayname\n\n return 200, ret\n\n async def on_PUT(self, request, user_id):\n requester = await self.auth.get_user_by_req(request, allow_guest=True)\n user = UserID.from_string(user_id)\n is_admin = await self.auth.is_server_admin(requester.user)\n\n content = parse_json_object_from_request(request)\n\n try:\n new_name = content[\"displayname\"]\n except Exception:\n return 400, \"Unable to parse name\"\n\n await self.profile_handler.set_displayname(user, requester, new_name, is_admin)\n\n return 200, {}\n\n def on_OPTIONS(self, request, user_id):\n return 200, {}\n\n\nclass ProfileAvatarURLRestServlet(RestServlet):\n PATTERNS = 
client_patterns(\"/profile/(?P<user_id>[^/]*)/avatar_url\", v1=True)\n\n def __init__(self, hs):\n super().__init__()\n self.hs = hs\n self.profile_handler = hs.get_profile_handler()\n self.auth = hs.get_auth()\n\n async def on_GET(self, request, user_id):\n requester_user = None\n\n if self.hs.config.require_auth_for_profile_requests:\n requester = await self.auth.get_user_by_req(request)\n requester_user = requester.user\n\n user = UserID.from_string(user_id)\n\n await self.profile_handler.check_profile_query_allowed(user, requester_user)\n\n avatar_url = await self.profile_handler.get_avatar_url(user)\n\n ret = {}\n if avatar_url is not None:\n ret[\"avatar_url\"] = avatar_url\n\n return 200, ret\n\n async def on_PUT(self, request, user_id):\n requester = await self.auth.get_user_by_req(request)\n user = UserID.from_string(user_id)\n is_admin = await self.auth.is_server_admin(requester.user)\n\n content = parse_json_object_from_request(request)\n try:\n new_avatar_url = content[\"avatar_url\"]\n except KeyError:\n raise SynapseError(\n 400, \"Missing key 'avatar_url'\", errcode=Codes.MISSING_PARAM\n )\n\n await self.profile_handler.set_avatar_url(\n user, requester, new_avatar_url, is_admin\n )\n\n return 200, {}\n\n def on_OPTIONS(self, request, user_id):\n return 200, {}\n\n\nclass ProfileRestServlet(RestServlet):\n PATTERNS = client_patterns(\"/profile/(?P<user_id>[^/]*)\", v1=True)\n\n def __init__(self, hs):\n super().__init__()\n self.hs = hs\n self.profile_handler = hs.get_profile_handler()\n self.auth = hs.get_auth()\n\n async def on_GET(self, request, user_id):\n requester_user = None\n\n if self.hs.config.require_auth_for_profile_requests:\n requester = await self.auth.get_user_by_req(request)\n requester_user = requester.user\n\n user = UserID.from_string(user_id)\n\n await self.profile_handler.check_profile_query_allowed(user, requester_user)\n\n displayname = await self.profile_handler.get_displayname(user)\n avatar_url = await self.profile_handler.get_avatar_url(user)\n\n ret = {}\n if displayname is not None:\n ret[\"displayname\"] = displayname\n if avatar_url is not None:\n ret[\"avatar_url\"] = avatar_url\n\n return 200, ret\n\n\ndef register_servlets(hs, http_server):\n ProfileDisplaynameRestServlet(hs).register(http_server)\n ProfileAvatarURLRestServlet(hs).register(http_server)\n ProfileRestServlet(hs).register(http_server)\n", "path": "synapse/rest/client/v1/profile.py"}]}
| 2,242 | 141 |
gh_patches_debug_23251
|
rasdani/github-patches
|
git_diff
|
cloud-custodian__cloud-custodian-4472
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add delete action for AWS Cloudtrail resource
I don't see a delete action on the aws.cloudtrail resource:
```
$ custodian schema aws.cloudtrail.actions
aws.cloudtrail:
actions:
- invoke-lambda
- notify
- post-finding
- put-metric
- set-logging
- update-trail
```
Looking at the code it doesn't look too bad to add a delete action; it should just be a matter of adding something like the code below to `c7n/resources/cloudtrail.py`:
```python
@CloudTrail.action_registry.register('delete-trail')
class UpdateTrail(Action):
"""Delete trail.
:Example:
.. code-block:: yaml
policies:
- name: cloudtrail-delete
resource: aws.cloudtrail
filters:
- type: value
key: Name
value: delete-me
op: eq
actions:
- type: delete
"""
schema = type_schema(
'delete-trail',
attributes={'type': 'object'},
required=('attributes',))
shape = 'DeleteTrailRequest'
permissions = ('cloudtrail:DeleteTrail',)
def validate(self):
# do the validate
def process(self, resources):
# do the delete
```
</issue>
<code>
[start of c7n/resources/cloudtrail.py]
1 # Copyright 2017-2019 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 import logging
17
18 from c7n.actions import Action
19 from c7n.exceptions import PolicyValidationError
20 from c7n.filters import ValueFilter, Filter
21 from c7n.manager import resources
22 from c7n.query import QueryResourceManager, TypeInfo
23 from c7n.utils import local_session, type_schema
24
25 from .aws import shape_validate, Arn
26
27 log = logging.getLogger('c7n.resources.cloudtrail')
28
29
30 @resources.register('cloudtrail')
31 class CloudTrail(QueryResourceManager):
32
33 class resource_type(TypeInfo):
34 service = 'cloudtrail'
35 enum_spec = ('describe_trails', 'trailList', None)
36 filter_name = 'trailNameList'
37 filter_type = 'list'
38 arn = id = 'TrailARN'
39 name = 'Name'
40 config_type = "AWS::CloudTrail::Trail"
41
42
43 @CloudTrail.filter_registry.register('is-shadow')
44 class IsShadow(Filter):
45 """Identify shadow trails (secondary copies), shadow trails
46 can't be modified directly, the origin trail needs to be modified.
47
48 Shadow trails are created for multi-region trails as well for
49 organizational trails.
50 """
51 schema = type_schema('is-shadow', state={'type': 'boolean'})
52 permissions = ('cloudtrail:DescribeTrails',)
53 embedded = False
54
55 def process(self, resources, event=None):
56 rcount = len(resources)
57 trails = [t for t in resources if (self.is_shadow(t) == self.data.get('state', True))]
58 if len(trails) != rcount and self.embedded:
59 self.log.info("implicitly filtering shadow trails %d -> %d",
60 rcount, len(trails))
61 return trails
62
63 def is_shadow(self, t):
64 if t.get('IsOrganizationTrail') and self.manager.config.account_id not in t['TrailARN']:
65 return True
66 if t.get('IsMultiRegionTrail') and t['HomeRegion'] != self.manager.config.region:
67 return True
68 return False
69
70
71 @CloudTrail.filter_registry.register('status')
72 class Status(ValueFilter):
73 """Filter a cloudtrail by its status.
74
75 :Example:
76
77 .. code-block:: yaml
78
79 policies:
80 - name: cloudtrail-check-status
81 resource: aws.cloudtrail
82 filters:
83 - type: status
84 key: IsLogging
85 value: False
86 """
87
88 schema = type_schema('status', rinherit=ValueFilter.schema)
89 schema_alias = False
90 permissions = ('cloudtrail:GetTrailStatus',)
91 annotation_key = 'c7n:TrailStatus'
92
93 def process(self, resources, event=None):
94 for r in resources:
95 region = self.manager.config.region
96 trail_arn = Arn.parse(r['TrailARN'])
97
98 if (r.get('IsOrganizationTrail') and
99 self.manager.config.account_id != trail_arn.account_id):
100 continue
101 if r.get('HomeRegion') and r['HomeRegion'] != region:
102 region = trail_arn.region
103 if self.annotation_key in r:
104 continue
105 client = local_session(self.manager.session_factory).client(
106 'cloudtrail', region_name=region)
107 status = client.get_trail_status(Name=r['Name'])
108 status.pop('ResponseMetadata')
109 r[self.annotation_key] = status
110
111 return super(Status, self).process(resources)
112
113 def __call__(self, r):
114 return self.match(r['c7n:TrailStatus'])
115
116
117 @CloudTrail.action_registry.register('update-trail')
118 class UpdateTrail(Action):
119 """Update trail attributes.
120
121 :Example:
122
123 .. code-block:: yaml
124
125 policies:
126 - name: cloudtrail-set-log
127 resource: aws.cloudtrail
128 filters:
129 - or:
130 - KmsKeyId: empty
131 - LogFileValidationEnabled: false
132 actions:
133 - type: update-trail
134 attributes:
135 KmsKeyId: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef
136 EnableLogFileValidation: true
137 """
138 schema = type_schema(
139 'update-trail',
140 attributes={'type': 'object'},
141 required=('attributes',))
142 shape = 'UpdateTrailRequest'
143 permissions = ('cloudtrail:UpdateTrail',)
144
145 def validate(self):
146 attrs = dict(self.data['attributes'])
147 if 'Name' in attrs:
148 raise PolicyValidationError(
149 "Can't include Name in update-trail action")
150 attrs['Name'] = 'PolicyValidation'
151 return shape_validate(
152 attrs,
153 self.shape,
154 self.manager.resource_type.service)
155
156 def process(self, resources):
157 client = local_session(self.manager.session_factory).client('cloudtrail')
158 shadow_check = IsShadow({'state': False}, self.manager)
159 shadow_check.embedded = True
160 resources = shadow_check.process(resources)
161
162 for r in resources:
163 client.update_trail(
164 Name=r['Name'],
165 **self.data['attributes'])
166
167
168 @CloudTrail.action_registry.register('set-logging')
169 class SetLogging(Action):
170 """Set the logging state of a trail
171
172 :Example:
173
174 .. code-block:: yaml
175
176 policies:
177 - name: cloudtrail-set-active
178 resource: aws.cloudtrail
179 filters:
180 - type: status
181 key: IsLogging
182 value: False
183 actions:
184 - type: set-logging
185 enabled: True
186 """
187 schema = type_schema(
188 'set-logging', enabled={'type': 'boolean'})
189
190 def get_permissions(self):
191 enable = self.data.get('enabled', True)
192 if enable is True:
193 return ('cloudtrail:StartLogging',)
194 else:
195 return ('cloudtrail:StopLogging',)
196
197 def process(self, resources):
198 client = local_session(self.manager.session_factory).client('cloudtrail')
199 shadow_check = IsShadow({'state': False}, self.manager)
200 shadow_check.embedded = True
201 resources = shadow_check.process(resources)
202 enable = self.data.get('enabled', True)
203
204 for r in resources:
205 if enable:
206 client.start_logging(Name=r['Name'])
207 else:
208 client.stop_logging(Name=r['Name'])
209
[end of c7n/resources/cloudtrail.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/c7n/resources/cloudtrail.py b/c7n/resources/cloudtrail.py
--- a/c7n/resources/cloudtrail.py
+++ b/c7n/resources/cloudtrail.py
@@ -15,7 +15,7 @@
import logging
-from c7n.actions import Action
+from c7n.actions import Action, BaseAction
from c7n.exceptions import PolicyValidationError
from c7n.filters import ValueFilter, Filter
from c7n.manager import resources
@@ -206,3 +206,35 @@
client.start_logging(Name=r['Name'])
else:
client.stop_logging(Name=r['Name'])
+
+
[email protected]_registry.register('delete')
+class DeleteTrail(BaseAction):
+ """ Delete a cloud trail
+
+ :example:
+
+ .. code-block:: yaml
+
+ policies:
+ - name: delete-cloudtrail
+ resource: aws.cloudtrail
+ filters:
+ - type: value
+ key: Name
+ value: delete-me
+ op: eq
+ actions:
+ - type: delete
+ """
+
+ schema = type_schema('delete')
+ permissions = ('cloudtrail:DeleteTrail',)
+
+ def process(self, resources):
+ client = local_session(self.manager.session_factory).client('cloudtrail')
+ for r in resources:
+ try:
+ client.delete_trail(Name=r['Name'])
+ except client.exceptions.TrailNotFoundException:
+ continue
|
{"golden_diff": "diff --git a/c7n/resources/cloudtrail.py b/c7n/resources/cloudtrail.py\n--- a/c7n/resources/cloudtrail.py\n+++ b/c7n/resources/cloudtrail.py\n@@ -15,7 +15,7 @@\n \n import logging\n \n-from c7n.actions import Action\n+from c7n.actions import Action, BaseAction\n from c7n.exceptions import PolicyValidationError\n from c7n.filters import ValueFilter, Filter\n from c7n.manager import resources\n@@ -206,3 +206,35 @@\n client.start_logging(Name=r['Name'])\n else:\n client.stop_logging(Name=r['Name'])\n+\n+\[email protected]_registry.register('delete')\n+class DeleteTrail(BaseAction):\n+ \"\"\" Delete a cloud trail\n+\n+ :example:\n+\n+ .. code-block:: yaml\n+\n+ policies:\n+ - name: delete-cloudtrail\n+ resource: aws.cloudtrail\n+ filters:\n+ - type: value\n+ key: Name\n+ value: delete-me\n+ op: eq\n+ actions:\n+ - type: delete\n+ \"\"\"\n+\n+ schema = type_schema('delete')\n+ permissions = ('cloudtrail:DeleteTrail',)\n+\n+ def process(self, resources):\n+ client = local_session(self.manager.session_factory).client('cloudtrail')\n+ for r in resources:\n+ try:\n+ client.delete_trail(Name=r['Name'])\n+ except client.exceptions.TrailNotFoundException:\n+ continue\n", "issue": "Add delete action for AWS Cloudtrail resource\nI dont see a delete action on aws.cloudtrail resource:\r\n\r\n```\r\n$ custodian schema aws.cloudtrail.actions\r\naws.cloudtrail:\r\n actions:\r\n - invoke-lambda\r\n - notify\r\n - post-finding\r\n - put-metric\r\n - set-logging\r\n - update-trail\r\n```\r\n\r\nLooking at the code it doesnt look too bad to add a delete action, should be just add something like below to `c7n/resources/cloudtrail.py`:\r\n\r\n```python\r\[email protected]_registry.register('delete-trail')\r\nclass UpdateTrail(Action):\r\n \"\"\"Delete trail.\r\n :Example:\r\n .. 
code-block:: yaml\r\n policies:\r\n - name: cloudtrail-delete\r\n resource: aws.cloudtrail\r\n filters:\r\n - type: value\r\n key: Name\r\n value: delete-me\r\n op: eq\r\n actions:\r\n - type: delete\r\n \"\"\"\r\n schema = type_schema(\r\n 'delete-trail',\r\n attributes={'type': 'object'},\r\n required=('attributes',))\r\n shape = 'DeleteTrailRequest'\r\n permissions = ('cloudtrail:DeleteTrail',)\r\n\r\n def validate(self):\r\n # do the validate\r\n\r\n def process(self, resources):\r\n # do the delete\r\n```\n", "before_files": [{"content": "# Copyright 2017-2019 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\n\nfrom c7n.actions import Action\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import ValueFilter, Filter\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager, TypeInfo\nfrom c7n.utils import local_session, type_schema\n\nfrom .aws import shape_validate, Arn\n\nlog = logging.getLogger('c7n.resources.cloudtrail')\n\n\[email protected]('cloudtrail')\nclass CloudTrail(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'cloudtrail'\n enum_spec = ('describe_trails', 'trailList', None)\n filter_name = 'trailNameList'\n filter_type = 'list'\n arn = id = 'TrailARN'\n name = 'Name'\n config_type = \"AWS::CloudTrail::Trail\"\n\n\[email protected]_registry.register('is-shadow')\nclass IsShadow(Filter):\n \"\"\"Identify shadow trails (secondary copies), shadow trails\n can't be modified directly, the origin trail needs to be modified.\n\n Shadow trails are created for multi-region trails as well for\n organizational trails.\n \"\"\"\n schema = type_schema('is-shadow', state={'type': 'boolean'})\n permissions = ('cloudtrail:DescribeTrails',)\n embedded = False\n\n def process(self, resources, event=None):\n rcount = len(resources)\n trails = [t for t in resources if (self.is_shadow(t) == self.data.get('state', True))]\n if len(trails) != rcount and self.embedded:\n self.log.info(\"implicitly filtering shadow trails %d -> %d\",\n rcount, len(trails))\n return trails\n\n def is_shadow(self, t):\n if t.get('IsOrganizationTrail') and self.manager.config.account_id not in t['TrailARN']:\n return True\n if t.get('IsMultiRegionTrail') and t['HomeRegion'] != self.manager.config.region:\n return True\n return False\n\n\[email protected]_registry.register('status')\nclass Status(ValueFilter):\n \"\"\"Filter a cloudtrail by its status.\n\n :Example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudtrail-check-status\n resource: aws.cloudtrail\n filters:\n - type: status\n key: IsLogging\n value: False\n \"\"\"\n\n schema = type_schema('status', rinherit=ValueFilter.schema)\n schema_alias = False\n permissions = ('cloudtrail:GetTrailStatus',)\n annotation_key = 'c7n:TrailStatus'\n\n def process(self, resources, event=None):\n for r in resources:\n region = self.manager.config.region\n trail_arn = Arn.parse(r['TrailARN'])\n\n if (r.get('IsOrganizationTrail') and\n self.manager.config.account_id != trail_arn.account_id):\n continue\n if r.get('HomeRegion') and r['HomeRegion'] != region:\n region = trail_arn.region\n if self.annotation_key in r:\n continue\n client = local_session(self.manager.session_factory).client(\n 'cloudtrail', region_name=region)\n status = client.get_trail_status(Name=r['Name'])\n status.pop('ResponseMetadata')\n r[self.annotation_key] = status\n\n return super(Status, self).process(resources)\n\n def __call__(self, r):\n return self.match(r['c7n:TrailStatus'])\n\n\[email protected]_registry.register('update-trail')\nclass UpdateTrail(Action):\n \"\"\"Update trail attributes.\n\n :Example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudtrail-set-log\n resource: aws.cloudtrail\n filters:\n - or:\n - KmsKeyId: empty\n - LogFileValidationEnabled: false\n actions:\n - type: update-trail\n attributes:\n KmsKeyId: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef\n EnableLogFileValidation: true\n \"\"\"\n schema = type_schema(\n 'update-trail',\n attributes={'type': 'object'},\n required=('attributes',))\n shape = 'UpdateTrailRequest'\n permissions = ('cloudtrail:UpdateTrail',)\n\n def validate(self):\n attrs = dict(self.data['attributes'])\n if 'Name' in attrs:\n raise PolicyValidationError(\n \"Can't include Name in update-trail action\")\n attrs['Name'] = 'PolicyValidation'\n return shape_validate(\n attrs,\n self.shape,\n self.manager.resource_type.service)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n\n for r in resources:\n client.update_trail(\n Name=r['Name'],\n **self.data['attributes'])\n\n\[email protected]_registry.register('set-logging')\nclass SetLogging(Action):\n \"\"\"Set the logging state of a trail\n\n :Example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudtrail-set-active\n resource: aws.cloudtrail\n filters:\n - type: status\n key: IsLogging\n value: False\n actions:\n - type: set-logging\n enabled: True\n \"\"\"\n schema = type_schema(\n 'set-logging', enabled={'type': 'boolean'})\n\n def get_permissions(self):\n enable = self.data.get('enabled', True)\n if enable is True:\n return ('cloudtrail:StartLogging',)\n else:\n return ('cloudtrail:StopLogging',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n enable = self.data.get('enabled', True)\n\n for r in resources:\n if enable:\n client.start_logging(Name=r['Name'])\n else:\n client.stop_logging(Name=r['Name'])\n", "path": "c7n/resources/cloudtrail.py"}]}
| 2,816 | 329 |
gh_patches_debug_9501
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-2522
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecation warnings should be printed for dev versions
### Description
`sopel.logger.get_logger` is [marked](https://github.com/sopel-irc/sopel/blob/eac71b07d60d3bea68fe3f1242e88654594a7df5/sopel/logger.py#L164) as [deprecated](https://sopel.chat/docs/api.html#sopel.logger.get_logger), but no deprecation warning is raised when it's used.
### Reproduction steps
1. Create a Sopel plugin with `from sopel.logger import get_logger; LOGGER=get_logger(__name__)`
2. Start Sopel with that plugin
3. No deprecation warnings are produced
### Expected behavior
A deprecation warning is produced
### Relevant logs
```shell
Tested by replacing the logger in admin.py.
[2022-06-11 13:00:50,089] sopel.bot INFO - Plugin loaded: ping
[2022-06-11 13:00:50,090] sopel.plugins.rules DEBUG - Command registered: <Command admin.join []>
[2022-06-11 13:00:50,090] sopel.plugins.rules DEBUG - Command registered: <Command admin.tmpjoin []>
[2022-06-11 13:00:50,091] sopel.plugins.rules DEBUG - Command registered: <Command admin.part []>
[2022-06-11 13:00:50,091] sopel.plugins.rules DEBUG - Command registered: <Command admin.tmppart []>
[2022-06-11 13:00:50,092] sopel.plugins.rules DEBUG - Command registered: <Command admin.chanlist [channels]>
[2022-06-11 13:00:50,092] sopel.plugins.rules DEBUG - Command registered: <Command admin.restart []>
[2022-06-11 13:00:50,093] sopel.plugins.rules DEBUG - Command registered: <Command admin.quit []>
[2022-06-11 13:00:50,093] sopel.plugins.rules DEBUG - Command registered: <Command admin.raw []>
[2022-06-11 13:00:50,094] sopel.plugins.rules DEBUG - Command registered: <Command admin.say [msg]>
[2022-06-11 13:00:50,094] sopel.plugins.rules DEBUG - Command registered: <Command admin.me []>
[2022-06-11 13:00:50,094] sopel.plugins.rules DEBUG - Rule registered: <Rule admin.invite_join (1)>
[2022-06-11 13:00:50,094] sopel.plugins.rules DEBUG - Rule registered: <Rule admin.hold_ground (1)>
[2022-06-11 13:00:50,095] sopel.plugins.rules DEBUG - Command registered: <Command admin.mode []>
[2022-06-11 13:00:50,095] sopel.plugins.rules DEBUG - Command registered: <Command admin.set []>
[2022-06-11 13:00:50,096] sopel.plugins.rules DEBUG - Command registered: <Command admin.unset []>
[2022-06-11 13:00:50,096] sopel.plugins.rules DEBUG - Command registered: <Command admin.save []>
[2022-06-11 13:00:50,096] sopel.bot INFO - Plugin loaded: admin
```
### Notes
_No response_
### Sopel version
eac71b07d60d3bea68fe3f1242e88654594a7df5
### Installation method
`pip install`
### Python version
3.10.4
</issue>
<code>
[start of sopel/lifecycle.py]
1 """Deprecation module for Sopel developers and plugin authors.
2
3 .. versionadded:: 8.0
4
5 Previously in :mod:`sopel.tools`, the :func:`deprecated` function has been
6 moved to this newly created module, as it can be used in every part of the
7 Sopel codebase, including :mod:`sopel.tools` itself.
8
9 """
10 from __future__ import annotations
11
12 import functools
13 import inspect
14 import logging
15 import traceback
16 from typing import Callable, Optional
17
18 from packaging.version import parse as parse_version
19
20 from sopel import __version__
21
22
23 def deprecated(
24 reason: Optional[str] = None,
25 version: Optional[str] = None,
26 removed_in: Optional[str] = None,
27 warning_in: Optional[str] = None,
28 stack_frame: int = -1,
29 func: Optional[Callable] = None,
30 ):
31 """Decorator to mark deprecated functions in Sopel's API
32
33 :param reason: optional text added to the deprecation warning
34 :param version: optional version number when the decorated function
35 is deprecated
36 :param removed_in: optional version number when the deprecated function
37 will be removed
38 :param warning_in: optional version number when the decorated function
39 should start emitting a warning when called
40 :param stack_frame: optional stack frame to output; defaults to
41 ``-1``; should almost always be negative
42 :param func: deprecated function
43 :return: a callable that depends on how the decorator is called; either
44 the decorated function, or a decorator with the appropriate
45 parameters
46
47 Any time the decorated ``func`` is called, a deprecation warning will be
48 logged, with the last frame of the traceback. The optional ``warning_in``
49 argument suppresses the warning on Sopel versions older than that, allowing
50 for multi-stage deprecation timelines.
51
52 The decorator can be used with or without arguments::
53
54 from sopel.lifecycle import deprecated
55
56 @deprecated
57 def func1():
58 print('func 1')
59
60 @deprecated()
61 def func2():
62 print('func 2')
63
64 @deprecated(reason='obsolete', version='7.0', removed_in='8.0')
65 def func3():
66 print('func 3')
67
68 which will output the following in a console::
69
70 >>> func1()
71 Deprecated: func1
72 File "<stdin>", line 1, in <module>
73 func 1
74 >>> func2()
75 Deprecated: func2
76 File "<stdin>", line 1, in <module>
77 func 2
78 >>> func3()
79 Deprecated since 7.0, will be removed in 8.0: obsolete
80 File "<stdin>", line 1, in <module>
81 func 3
82
83 The ``stack_frame`` argument can be used to choose which stack frame is
84 logged along with the message text. By default, this decorator logs the
85 most recent stack frame (the last entry in the list, ``-1``), corresponding
86 to where the decorated function itself was called. However, in certain
87 cases such as deprecating conditional behavior within an object
88 constructor, it can be useful to show a less recent stack frame instead.
89
90 .. note::
91
92 This decorator can be also used on callables that are not functions,
93 such as classes and callable objects.
94
95 .. versionadded:: 7.0
96 Parameters ``reason``, ``version``, and ``removed_in``.
97
98 .. versionadded:: 7.1
99 The ``warning_in`` and ``stack_frame`` parameters.
100
101 .. versionchanged:: 8.0
102 Moved out of :mod:`sopel.tools` to resolve circular dependency issues.
103
104 """
105 if not any([reason, version, removed_in, warning_in, func]):
106 # common usage: @deprecated()
107 return deprecated
108
109 if callable(reason):
110 # common usage: @deprecated
111 return deprecated(func=reason)
112
113 if func is None:
114 # common usage: @deprecated(message, version, removed_in)
115 def decorator(func):
116 return deprecated(
117 reason, version, removed_in, warning_in, stack_frame, func)
118 return decorator
119
120 # now, we have everything we need to have:
121 # - message is not a callable (could be None)
122 # - func is not None
123 # - version and removed_in can be None but that's OK
124 # so now we can return the actual decorated function
125
126 message = reason or getattr(func, '__name__', '<anonymous-function>')
127
128 template = 'Deprecated: {message}'
129 if version and removed_in:
130 template = (
131 'Deprecated since {version}, '
132 'will be removed in {removed_in}: '
133 '{message}')
134 elif version:
135 template = 'Deprecated since {version}: {message}'
136 elif removed_in:
137 template = 'Deprecated, will be removed in {removed_in}: {message}'
138
139 text = template.format(
140 message=message, version=version, removed_in=removed_in)
141
142 @functools.wraps(func)
143 def deprecated_func(*args, **kwargs):
144 if not (warning_in and
145 parse_version(warning_in) >= parse_version(__version__)):
146 original_frame = inspect.stack()[-stack_frame]
147 mod = inspect.getmodule(original_frame[0])
148 module_name = None
149 if mod:
150 module_name = mod.__name__
151 if module_name:
152 if module_name.startswith('sopel.'):
153 # core, or core plugin
154 logger = logging.getLogger(module_name)
155 else:
156 # probably a plugin; try to handle most cases sanely
157 if module_name.startswith('sopel_modules.'):
158 # namespace package plugins have a prefix, obviously
159 # they will use Sopel's namespace; other won't
160 module_name = module_name.replace(
161 'sopel_modules.',
162 'sopel.externals.',
163 1,
164 )
165 logger = logging.getLogger(module_name)
166 else:
167 # don't know the module/plugin name, but we want to make sure
168 # the log line is still output, so just get *something*
169 logger = logging.getLogger(__name__)
170
171 # Format only the desired stack frame
172 trace = traceback.extract_stack()
173 trace_frame = traceback.format_list(trace[:-1])[stack_frame][:-1]
174
175 # Warn the user
176 logger.warning(text + "\n" + trace_frame)
177
178 return func(*args, **kwargs)
179
180 return deprecated_func
181
[end of sopel/lifecycle.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sopel/lifecycle.py b/sopel/lifecycle.py
--- a/sopel/lifecycle.py
+++ b/sopel/lifecycle.py
@@ -141,8 +141,10 @@
@functools.wraps(func)
def deprecated_func(*args, **kwargs):
- if not (warning_in and
- parse_version(warning_in) >= parse_version(__version__)):
+ warn_ver = warning_in and parse_version(warning_in).release
+ this_ver = parse_version(__version__).release
+
+ if not (warn_ver and warn_ver >= this_ver):
original_frame = inspect.stack()[-stack_frame]
mod = inspect.getmodule(original_frame[0])
module_name = None
|
{"golden_diff": "diff --git a/sopel/lifecycle.py b/sopel/lifecycle.py\n--- a/sopel/lifecycle.py\n+++ b/sopel/lifecycle.py\n@@ -141,8 +141,10 @@\n \n @functools.wraps(func)\n def deprecated_func(*args, **kwargs):\n- if not (warning_in and\n- parse_version(warning_in) >= parse_version(__version__)):\n+ warn_ver = warning_in and parse_version(warning_in).release\n+ this_ver = parse_version(__version__).release\n+\n+ if not (warn_ver and warn_ver >= this_ver):\n original_frame = inspect.stack()[-stack_frame]\n mod = inspect.getmodule(original_frame[0])\n module_name = None\n", "issue": "Deprecation warnings should be printed for dev versions\n### Description\r\n\r\n`sopel.logger.get_logger` is [marked](https://github.com/sopel-irc/sopel/blob/eac71b07d60d3bea68fe3f1242e88654594a7df5/sopel/logger.py#L164) as [deprecated](https://sopel.chat/docs/api.html#sopel.logger.get_logger), but no deprecation warning is raised when it's used.\r\n\r\n### Reproduction steps\r\n\r\n1. Create a Sopel plugin with `from sopel.logger import get_logger; LOGGER=get_logger(__name__)`\r\n2. Start Sopel with that plugin\r\n3. No deprecation warnings are produced\r\n\r\n### Expected behavior\r\n\r\nA deprecation warning is produced\r\n\r\n### Relevant logs\r\n\r\n```shell\r\nTested by replacing the logger in admin.py.\r\n\r\n[2022-06-11 13:00:50,089] sopel.bot INFO - Plugin loaded: ping\r\n[2022-06-11 13:00:50,090] sopel.plugins.rules DEBUG - Command registered: <Command admin.join []>\r\n[2022-06-11 13:00:50,090] sopel.plugins.rules DEBUG - Command registered: <Command admin.tmpjoin []>\r\n[2022-06-11 13:00:50,091] sopel.plugins.rules DEBUG - Command registered: <Command admin.part []>\r\n[2022-06-11 13:00:50,091] sopel.plugins.rules DEBUG - Command registered: <Command admin.tmppart []>\r\n[2022-06-11 13:00:50,092] sopel.plugins.rules DEBUG - Command registered: <Command admin.chanlist [channels]>\r\n[2022-06-11 13:00:50,092] sopel.plugins.rules DEBUG - Command registered: <Command admin.restart []>\r\n[2022-06-11 13:00:50,093] sopel.plugins.rules DEBUG - Command registered: <Command admin.quit []>\r\n[2022-06-11 13:00:50,093] sopel.plugins.rules DEBUG - Command registered: <Command admin.raw []>\r\n[2022-06-11 13:00:50,094] sopel.plugins.rules DEBUG - Command registered: <Command admin.say [msg]>\r\n[2022-06-11 13:00:50,094] sopel.plugins.rules DEBUG - Command registered: <Command admin.me []>\r\n[2022-06-11 13:00:50,094] sopel.plugins.rules DEBUG - Rule registered: <Rule admin.invite_join (1)>\r\n[2022-06-11 13:00:50,094] sopel.plugins.rules DEBUG - Rule registered: <Rule admin.hold_ground (1)>\r\n[2022-06-11 13:00:50,095] sopel.plugins.rules DEBUG - Command registered: <Command admin.mode []>\r\n[2022-06-11 13:00:50,095] sopel.plugins.rules DEBUG - Command registered: <Command admin.set []>\r\n[2022-06-11 13:00:50,096] sopel.plugins.rules DEBUG - Command registered: <Command admin.unset []>\r\n[2022-06-11 13:00:50,096] sopel.plugins.rules DEBUG - Command registered: <Command admin.save []>\r\n[2022-06-11 13:00:50,096] sopel.bot INFO - Plugin loaded: admin\r\n```\r\n\r\n\r\n### Notes\r\n\r\n_No response_\r\n\r\n### Sopel version\r\n\r\neac71b07d60d3bea68fe3f1242e88654594a7df5\r\n\r\n### Installation method\r\n\r\n`pip install`\r\n\r\n### Python version\r\n\r\n3.10.4\n", "before_files": [{"content": "\"\"\"Deprecation module for Sopel developers and plugin authors.\n\n.. 
versionadded:: 8.0\n\n Previously in :mod:`sopel.tools`, the :func:`deprecated` function has been\n moved to this newly created module, as it can be used in every part of the\n Sopel codebase, including :mod:`sopel.tools` itself.\n\n\"\"\"\nfrom __future__ import annotations\n\nimport functools\nimport inspect\nimport logging\nimport traceback\nfrom typing import Callable, Optional\n\nfrom packaging.version import parse as parse_version\n\nfrom sopel import __version__\n\n\ndef deprecated(\n reason: Optional[str] = None,\n version: Optional[str] = None,\n removed_in: Optional[str] = None,\n warning_in: Optional[str] = None,\n stack_frame: int = -1,\n func: Optional[Callable] = None,\n):\n \"\"\"Decorator to mark deprecated functions in Sopel's API\n\n :param reason: optional text added to the deprecation warning\n :param version: optional version number when the decorated function\n is deprecated\n :param removed_in: optional version number when the deprecated function\n will be removed\n :param warning_in: optional version number when the decorated function\n should start emitting a warning when called\n :param stack_frame: optional stack frame to output; defaults to\n ``-1``; should almost always be negative\n :param func: deprecated function\n :return: a callable that depends on how the decorator is called; either\n the decorated function, or a decorator with the appropriate\n parameters\n\n Any time the decorated ``func`` is called, a deprecation warning will be\n logged, with the last frame of the traceback. The optional ``warning_in``\n argument suppresses the warning on Sopel versions older than that, allowing\n for multi-stage deprecation timelines.\n\n The decorator can be used with or without arguments::\n\n from sopel.lifecycle import deprecated\n\n @deprecated\n def func1():\n print('func 1')\n\n @deprecated()\n def func2():\n print('func 2')\n\n @deprecated(reason='obsolete', version='7.0', removed_in='8.0')\n def func3():\n print('func 3')\n\n which will output the following in a console::\n\n >>> func1()\n Deprecated: func1\n File \"<stdin>\", line 1, in <module>\n func 1\n >>> func2()\n Deprecated: func2\n File \"<stdin>\", line 1, in <module>\n func 2\n >>> func3()\n Deprecated since 7.0, will be removed in 8.0: obsolete\n File \"<stdin>\", line 1, in <module>\n func 3\n\n The ``stack_frame`` argument can be used to choose which stack frame is\n logged along with the message text. By default, this decorator logs the\n most recent stack frame (the last entry in the list, ``-1``), corresponding\n to where the decorated function itself was called. However, in certain\n cases such as deprecating conditional behavior within an object\n constructor, it can be useful to show a less recent stack frame instead.\n\n .. note::\n\n This decorator can be also used on callables that are not functions,\n such as classes and callable objects.\n\n .. versionadded:: 7.0\n Parameters ``reason``, ``version``, and ``removed_in``.\n\n .. versionadded:: 7.1\n The ``warning_in`` and ``stack_frame`` parameters.\n\n .. 
versionchanged:: 8.0\n Moved out of :mod:`sopel.tools` to resolve circular dependency issues.\n\n \"\"\"\n if not any([reason, version, removed_in, warning_in, func]):\n # common usage: @deprecated()\n return deprecated\n\n if callable(reason):\n # common usage: @deprecated\n return deprecated(func=reason)\n\n if func is None:\n # common usage: @deprecated(message, version, removed_in)\n def decorator(func):\n return deprecated(\n reason, version, removed_in, warning_in, stack_frame, func)\n return decorator\n\n # now, we have everything we need to have:\n # - message is not a callable (could be None)\n # - func is not None\n # - version and removed_in can be None but that's OK\n # so now we can return the actual decorated function\n\n message = reason or getattr(func, '__name__', '<anonymous-function>')\n\n template = 'Deprecated: {message}'\n if version and removed_in:\n template = (\n 'Deprecated since {version}, '\n 'will be removed in {removed_in}: '\n '{message}')\n elif version:\n template = 'Deprecated since {version}: {message}'\n elif removed_in:\n template = 'Deprecated, will be removed in {removed_in}: {message}'\n\n text = template.format(\n message=message, version=version, removed_in=removed_in)\n\n @functools.wraps(func)\n def deprecated_func(*args, **kwargs):\n if not (warning_in and\n parse_version(warning_in) >= parse_version(__version__)):\n original_frame = inspect.stack()[-stack_frame]\n mod = inspect.getmodule(original_frame[0])\n module_name = None\n if mod:\n module_name = mod.__name__\n if module_name:\n if module_name.startswith('sopel.'):\n # core, or core plugin\n logger = logging.getLogger(module_name)\n else:\n # probably a plugin; try to handle most cases sanely\n if module_name.startswith('sopel_modules.'):\n # namespace package plugins have a prefix, obviously\n # they will use Sopel's namespace; other won't\n module_name = module_name.replace(\n 'sopel_modules.',\n 'sopel.externals.',\n 1,\n )\n logger = logging.getLogger(module_name)\n else:\n # don't know the module/plugin name, but we want to make sure\n # the log line is still output, so just get *something*\n logger = logging.getLogger(__name__)\n\n # Format only the desired stack frame\n trace = traceback.extract_stack()\n trace_frame = traceback.format_list(trace[:-1])[stack_frame][:-1]\n\n # Warn the user\n logger.warning(text + \"\\n\" + trace_frame)\n\n return func(*args, **kwargs)\n\n return deprecated_func\n", "path": "sopel/lifecycle.py"}]}
| 3,415 | 166 |
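A quick illustration of why the sopel fix above compares `packaging` release tuples rather than full version objects; the version strings below are hypothetical, chosen only to mirror the dev-build scenario described in the issue.

```python
from packaging.version import parse as parse_version

# Under PEP 440 a dev build sorts *before* its final release, so the old check
# `parse_version(warning_in) >= parse_version(__version__)` stayed True on dev
# builds and kept the deprecation warning suppressed.
threshold = parse_version("8.0")        # hypothetical warning_in value
running = parse_version("8.0.0.dev0")   # hypothetical dev build of Sopel

print(threshold >= running)                  # True  -> old check suppressed the warning
print(threshold.release >= running.release)  # (8, 0) >= (8, 0, 0) is False -> warning fires
```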
gh_patches_debug_22582
|
rasdani/github-patches
|
git_diff
|
PaddlePaddle__PaddleSeg-3404
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tool/model/analyze_model.py: error when analyzing model FLOPs
### Search before asking
- [X] I have searched the [open and closed issues](https://github.com/PaddlePaddle/PaddleSeg/issues) and found no similar bug report.
### Describe the Bug
Running tool/model/analyze_model.py produces the following error:
AttributeError: 'Config' object has no attribute 'model'
The source of tool/model/analyze_model.py never instantiates a concrete model from the model config, yet the `_dynamic_flops` function repeatedly calls `model.train()`, `model.eval()`, and other model methods.
### Environment
platform: Linux-5.10.16.3-microsoft-standard-WSL2-x86_64-with-glibc2.31
Python: 3.9.0 (default, Nov 15 2020, 14:28:56) [GCC 7.3.0]
Paddle compiled with cuda: False
GCC: gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0
PaddleSeg: 2.8.0
PaddlePaddle: 2.4.2
OpenCV: 4.5.5
### Bug description confirmation
- [X] I confirm that the bug replication steps, code change instructions, and environment information have been provided, and the problem can be reproduced.
### Are you willing to submit a PR?
- [ ] I'd like to help by submitting a PR!
</issue>
<code>
[start of tools/model/analyze_model.py]
1 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """
15 Analyze the FLOPs and Params of the model.
16 Usually, just call paddle.flops to output the model information.
17 We use this file for output flops clearly.
18 """
19
20 import argparse
21 import os
22 import sys
23
24 import paddle
25 import numpy as np
26
27 from paddleseg.cvlibs import Config
28 from paddleseg.utils import get_sys_env, logger, op_flops_funs
29 from paddle.hapi.dynamic_flops import (count_parameters, register_hooks,
30 count_io_info)
31 from paddle.hapi.static_flops import Table
32
33
34 def parse_args():
35 parser = argparse.ArgumentParser(description='Model training')
36 parser.add_argument("--config", help="The path of config file.", type=str)
37 parser.add_argument(
38 "--input_shape",
39 nargs='+',
40 type=int,
41 help="Set the input shape, such as --input_shape 1 3 1024 1024",
42 default=[1, 3, 1024, 1024])
43 return parser.parse_args()
44
45
46 # For output flops clearly, modify the dynamic_flops function in paddle/hapi/dynamic_flops.py
47 def _dynamic_flops(model, inputs, custom_ops=None, print_detail=False):
48 handler_collection = []
49 types_collection = set()
50 if custom_ops is None:
51 custom_ops = {}
52
53 def add_hooks(m):
54 if len(list(m.children())) > 0:
55 return
56 m.register_buffer('total_ops', paddle.zeros([1], dtype='int64'))
57 m.register_buffer('total_params', paddle.zeros([1], dtype='int64'))
58 m_type = type(m)
59
60 flops_fn = None
61 if m_type in custom_ops:
62 flops_fn = custom_ops[m_type]
63 if m_type not in types_collection:
64 print("Customize Function has been applied to {}".format(
65 m_type))
66 elif m_type in register_hooks:
67 flops_fn = register_hooks[m_type]
68 if m_type not in types_collection:
69 print("{}'s flops has been counted".format(m_type))
70 else:
71 if m_type not in types_collection:
72 print(
73 "Cannot find suitable count function for {}. Treat it as zero FLOPs."
74 .format(m_type))
75
76 if flops_fn is not None:
77 flops_handler = m.register_forward_post_hook(flops_fn)
78 handler_collection.append(flops_handler)
79 params_handler = m.register_forward_post_hook(count_parameters)
80 io_handler = m.register_forward_post_hook(count_io_info)
81 handler_collection.append(params_handler)
82 handler_collection.append(io_handler)
83 types_collection.add(m_type)
84
85 training = model.training
86
87 model.eval()
88 model.apply(add_hooks)
89
90 with paddle.framework.no_grad():
91 model(inputs)
92
93 total_ops = 0
94 total_params = 0
95 for m in model.sublayers():
96 if len(list(m.children())) > 0:
97 continue
98 if set(['total_ops', 'total_params', 'input_shape',
99 'output_shape']).issubset(set(list(m._buffers.keys()))):
100 total_ops += m.total_ops
101 total_params += m.total_params
102
103 if training:
104 model.train()
105 for handler in handler_collection:
106 handler.remove()
107
108 table = Table(
109 ["Layer Name", "Input Shape", "Output Shape", "Params(M)", "Flops(G)"])
110
111 for n, m in model.named_sublayers():
112 if len(list(m.children())) > 0:
113 continue
114 if set(['total_ops', 'total_params', 'input_shape',
115 'output_shape']).issubset(set(list(m._buffers.keys()))):
116 table.add_row([
117 m.full_name(), list(m.input_shape.numpy()),
118 list(m.output_shape.numpy()),
119 round(float(m.total_params / 1e6), 3),
120 round(float(m.total_ops / 1e9), 3)
121 ])
122 m._buffers.pop("total_ops")
123 m._buffers.pop("total_params")
124 m._buffers.pop('input_shape')
125 m._buffers.pop('output_shape')
126 if print_detail:
127 table.print_table()
128 print('Total Flops: {}G Total Params: {}M'.format(
129 round(float(total_ops / 1e9), 3), round(float(total_params / 1e6), 3)))
130 return int(total_ops)
131
132
133 def analyze(args):
134 env_info = get_sys_env()
135 info = ['{}: {}'.format(k, v) for k, v in env_info.items()]
136 info = '\n'.join(['', format('Environment Information', '-^48s')] + info +
137 ['-' * 48])
138 logger.info(info)
139
140 paddle.set_device('cpu')
141
142 cfg = Config(args.config)
143
144 custom_ops = {paddle.nn.SyncBatchNorm: op_flops_funs.count_syncbn}
145 inputs = paddle.randn(args.input_shape)
146 _dynamic_flops(cfg.model, inputs, custom_ops=custom_ops, print_detail=True)
147
148
149 if __name__ == '__main__':
150 args = parse_args()
151 if not args.config:
152 raise RuntimeError('No configuration file specified.')
153
154 logger.info("config:" + args.config)
155 logger.info("input_shape:")
156 logger.info(args.input_shape)
157 analyze(args)
158
[end of tools/model/analyze_model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tools/model/analyze_model.py b/tools/model/analyze_model.py
--- a/tools/model/analyze_model.py
+++ b/tools/model/analyze_model.py
@@ -21,14 +21,14 @@
import os
import sys
-import paddle
import numpy as np
+import paddle
+from paddle.hapi.dynamic_flops import (count_io_info, count_parameters,
+ register_hooks)
+from paddle.hapi.static_flops import Table
-from paddleseg.cvlibs import Config
+from paddleseg.cvlibs import Config, SegBuilder
from paddleseg.utils import get_sys_env, logger, op_flops_funs
-from paddle.hapi.dynamic_flops import (count_parameters, register_hooks,
- count_io_info)
-from paddle.hapi.static_flops import Table
def parse_args():
@@ -140,10 +140,11 @@
paddle.set_device('cpu')
cfg = Config(args.config)
+ builder = SegBuilder(cfg)
custom_ops = {paddle.nn.SyncBatchNorm: op_flops_funs.count_syncbn}
inputs = paddle.randn(args.input_shape)
- _dynamic_flops(cfg.model, inputs, custom_ops=custom_ops, print_detail=True)
+ _dynamic_flops(builder.model, inputs, custom_ops=custom_ops, print_detail=True)
if __name__ == '__main__':
|
{"golden_diff": "diff --git a/tools/model/analyze_model.py b/tools/model/analyze_model.py\n--- a/tools/model/analyze_model.py\n+++ b/tools/model/analyze_model.py\n@@ -21,14 +21,14 @@\n import os\n import sys\n \n-import paddle\n import numpy as np\n+import paddle\n+from paddle.hapi.dynamic_flops import (count_io_info, count_parameters,\n+ register_hooks)\n+from paddle.hapi.static_flops import Table\n \n-from paddleseg.cvlibs import Config\n+from paddleseg.cvlibs import Config, SegBuilder\n from paddleseg.utils import get_sys_env, logger, op_flops_funs\n-from paddle.hapi.dynamic_flops import (count_parameters, register_hooks,\n- count_io_info)\n-from paddle.hapi.static_flops import Table\n \n \n def parse_args():\n@@ -140,10 +140,11 @@\n paddle.set_device('cpu')\n \n cfg = Config(args.config)\n+ builder = SegBuilder(cfg)\n \n custom_ops = {paddle.nn.SyncBatchNorm: op_flops_funs.count_syncbn}\n inputs = paddle.randn(args.input_shape)\n- _dynamic_flops(cfg.model, inputs, custom_ops=custom_ops, print_detail=True)\n+ _dynamic_flops(builder.model, inputs, custom_ops=custom_ops, print_detail=True)\n \n \n if __name__ == '__main__':\n", "issue": "tool/model/analyze_model.py \u5206\u6790flops\u6587\u4ef6\u95ee\u9898\n### \u95ee\u9898\u786e\u8ba4 Search before asking\n\n- [X] \u6211\u5df2\u7ecf\u67e5\u8be2[\u5386\u53f2issue](https://github.com/PaddlePaddle/PaddleSeg/issues)(\u5305\u62ecopen\u4e0eclosed)\uff0c\u6ca1\u6709\u53d1\u73b0\u76f8\u4f3c\u7684bug\u3002I have searched the [open and closed issues](https://github.com/PaddlePaddle/PaddleSeg/issues) and found no similar bug report.\n\n\n### Bug\u63cf\u8ff0 Describe the Bug\n\n\u5728\u8fd0\u884ctool/model/analyze_model.py\u51fa\u73b0\u9519\u8bef\uff1a\r\nAttributeError: 'Config' object has no attribute 'model' \r\n\r\ntool/model/analyze_model.py\u6e90\u7801\u4e2d\u5e76\u6ca1\u6709\u6839\u636emodel_cfg\u5b9e\u4f8b\u5316\u51fa\u4e00\u4e2a\u5177\u4f53\u7684model\uff0c\u800c\u5728'_dynamic_flops'\u51fd\u6570\u4e2d\uff0c\r\n\u4ee3\u7801\u4e0d\u65ad\u8c03\u7528`model.train()`,'model.eval()'\u7b49\u51fd\u6570\n\n### \u590d\u73b0\u73af\u5883 Environment\n\n\r\nplatform: Linux-5.10.16.3-microsoft-standard-WSL2-x86_64-with-glibc2.31\r\nPython: 3.9.0 (default, Nov 15 2020, 14:28:56) [GCC 7.3.0]\r\nPaddle compiled with cuda: False\r\nGCC: gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\r\nPaddleSeg: 2.8.0\r\nPaddlePaddle: 2.4.2\r\nOpenCV: 4.5.5\n\n### Bug\u63cf\u8ff0\u786e\u8ba4 Bug description confirmation\n\n- [X] \u6211\u786e\u8ba4\u5df2\u7ecf\u63d0\u4f9b\u4e86Bug\u590d\u73b0\u6b65\u9aa4\u3001\u4ee3\u7801\u6539\u52a8\u8bf4\u660e\u3001\u4ee5\u53ca\u73af\u5883\u4fe1\u606f\uff0c\u786e\u8ba4\u95ee\u9898\u662f\u53ef\u4ee5\u590d\u73b0\u7684\u3002I confirm that the bug replication steps, code change instructions, and environment information have been provided, and the problem can be reproduced.\n\n\n### \u662f\u5426\u613f\u610f\u63d0\u4ea4PR\uff1f Are you willing to submit a PR?\n\n- [ ] \u6211\u613f\u610f\u63d0\u4ea4PR\uff01I'd like to help by submitting a PR!\n", "before_files": [{"content": "# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nAnalyze the FLOPs and Params of the model.\nUsually, just call paddle.flops to output the model information.\nWe use this file for output flops clearly.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\n\nimport paddle\nimport numpy as np\n\nfrom paddleseg.cvlibs import Config\nfrom paddleseg.utils import get_sys_env, logger, op_flops_funs\nfrom paddle.hapi.dynamic_flops import (count_parameters, register_hooks,\n count_io_info)\nfrom paddle.hapi.static_flops import Table\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Model training')\n parser.add_argument(\"--config\", help=\"The path of config file.\", type=str)\n parser.add_argument(\n \"--input_shape\",\n nargs='+',\n type=int,\n help=\"Set the input shape, such as --input_shape 1 3 1024 1024\",\n default=[1, 3, 1024, 1024])\n return parser.parse_args()\n\n\n# For output flops clearly, modify the dynamic_flops function in paddle/hapi/dynamic_flops.py\ndef _dynamic_flops(model, inputs, custom_ops=None, print_detail=False):\n handler_collection = []\n types_collection = set()\n if custom_ops is None:\n custom_ops = {}\n\n def add_hooks(m):\n if len(list(m.children())) > 0:\n return\n m.register_buffer('total_ops', paddle.zeros([1], dtype='int64'))\n m.register_buffer('total_params', paddle.zeros([1], dtype='int64'))\n m_type = type(m)\n\n flops_fn = None\n if m_type in custom_ops:\n flops_fn = custom_ops[m_type]\n if m_type not in types_collection:\n print(\"Customize Function has been applied to {}\".format(\n m_type))\n elif m_type in register_hooks:\n flops_fn = register_hooks[m_type]\n if m_type not in types_collection:\n print(\"{}'s flops has been counted\".format(m_type))\n else:\n if m_type not in types_collection:\n print(\n \"Cannot find suitable count function for {}. 
Treat it as zero FLOPs.\"\n .format(m_type))\n\n if flops_fn is not None:\n flops_handler = m.register_forward_post_hook(flops_fn)\n handler_collection.append(flops_handler)\n params_handler = m.register_forward_post_hook(count_parameters)\n io_handler = m.register_forward_post_hook(count_io_info)\n handler_collection.append(params_handler)\n handler_collection.append(io_handler)\n types_collection.add(m_type)\n\n training = model.training\n\n model.eval()\n model.apply(add_hooks)\n\n with paddle.framework.no_grad():\n model(inputs)\n\n total_ops = 0\n total_params = 0\n for m in model.sublayers():\n if len(list(m.children())) > 0:\n continue\n if set(['total_ops', 'total_params', 'input_shape',\n 'output_shape']).issubset(set(list(m._buffers.keys()))):\n total_ops += m.total_ops\n total_params += m.total_params\n\n if training:\n model.train()\n for handler in handler_collection:\n handler.remove()\n\n table = Table(\n [\"Layer Name\", \"Input Shape\", \"Output Shape\", \"Params(M)\", \"Flops(G)\"])\n\n for n, m in model.named_sublayers():\n if len(list(m.children())) > 0:\n continue\n if set(['total_ops', 'total_params', 'input_shape',\n 'output_shape']).issubset(set(list(m._buffers.keys()))):\n table.add_row([\n m.full_name(), list(m.input_shape.numpy()),\n list(m.output_shape.numpy()),\n round(float(m.total_params / 1e6), 3),\n round(float(m.total_ops / 1e9), 3)\n ])\n m._buffers.pop(\"total_ops\")\n m._buffers.pop(\"total_params\")\n m._buffers.pop('input_shape')\n m._buffers.pop('output_shape')\n if print_detail:\n table.print_table()\n print('Total Flops: {}G Total Params: {}M'.format(\n round(float(total_ops / 1e9), 3), round(float(total_params / 1e6), 3)))\n return int(total_ops)\n\n\ndef analyze(args):\n env_info = get_sys_env()\n info = ['{}: {}'.format(k, v) for k, v in env_info.items()]\n info = '\\n'.join(['', format('Environment Information', '-^48s')] + info +\n ['-' * 48])\n logger.info(info)\n\n paddle.set_device('cpu')\n\n cfg = Config(args.config)\n\n custom_ops = {paddle.nn.SyncBatchNorm: op_flops_funs.count_syncbn}\n inputs = paddle.randn(args.input_shape)\n _dynamic_flops(cfg.model, inputs, custom_ops=custom_ops, print_detail=True)\n\n\nif __name__ == '__main__':\n args = parse_args()\n if not args.config:\n raise RuntimeError('No configuration file specified.')\n\n logger.info(\"config:\" + args.config)\n logger.info(\"input_shape:\")\n logger.info(args.input_shape)\n analyze(args)\n", "path": "tools/model/analyze_model.py"}]}
| 2,587 | 295 |
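For reference, the essence of the PaddleSeg fix above is that the model is built by a `SegBuilder` rather than read off the `Config` object; a minimal sketch of the corrected call, with a hypothetical config path:

```python
from paddleseg.cvlibs import Config, SegBuilder

cfg = Config("configs/pp_liteseg/example.yml")  # hypothetical config path
builder = SegBuilder(cfg)                       # builds components (incl. the model) from the config
model = builder.model                           # replaces the failing `cfg.model` access
```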
gh_patches_debug_29094
|
rasdani/github-patches
|
git_diff
|
wright-group__WrightTools-897
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
from_PyCMDS should check version and system to determine if d1, d2, and d3 variables need to be scaled
This helps to bypass legacy problems involving wright-group/PyCMDS#210.
</issue>
<code>
[start of WrightTools/data/_pycmds.py]
1 """PyCMDS."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import itertools
8 import os
9 import pathlib
10
11 import numpy as np
12
13 import tidy_headers
14
15 from ._data import Data
16 from .. import kit as wt_kit
17 from .. import units as wt_units
18
19
20 # --- define --------------------------------------------------------------------------------------
21
22
23 __all__ = ["from_PyCMDS"]
24
25
26 # --- from function -------------------------------------------------------------------------------
27
28
29 def from_PyCMDS(filepath, name=None, parent=None, verbose=True) -> Data:
30 """Create a data object from a single PyCMDS output file.
31
32 Parameters
33 ----------
34 filepath : path-like
35 Path to the .data file
36 Can be either a local or remote file (http/ftp).
37 Can be compressed with gz/bz2, decompression based on file name.
38 name : str or None (optional)
39 The name to be applied to the new data object. If None, name is read
40 from file.
41 parent : WrightTools.Collection (optional)
42 Collection to place new data object within. Default is None.
43 verbose : bool (optional)
44 Toggle talkback. Default is True.
45
46 Returns
47 -------
48 data
49 A Data instance.
50 """
51 filestr = os.fspath(filepath)
52 filepath = pathlib.Path(filepath)
53
54 # header
55 ds = np.DataSource(None)
56 file_ = ds.open(filestr, "rt")
57 headers = tidy_headers.read(file_)
58 file_.seek(0)
59 # name
60 if name is None: # name not given in method arguments
61 data_name = headers["data name"]
62 else:
63 data_name = name
64 if data_name == "": # name not given in PyCMDS
65 data_name = headers["data origin"]
66 # create data object
67 kwargs = {
68 "name": data_name,
69 "kind": "PyCMDS",
70 "source": filestr,
71 "created": headers["file created"],
72 }
73 if parent is not None:
74 data = parent.create_data(**kwargs)
75 else:
76 data = Data(**kwargs)
77 # array
78 arr = np.genfromtxt(file_).T
79 file_.close()
80 # get axes and scanned variables
81 axes = []
82 for name, identity, units in zip(
83 headers["axis names"], headers["axis identities"], headers["axis units"]
84 ):
85 # points and centers
86 points = np.array(headers[name + " points"])
87 if name + " centers" in headers.keys():
88 centers = headers[name + " centers"]
89 else:
90 centers = None
91 # create
92 axis = {
93 "points": points,
94 "units": units,
95 "name": name,
96 "identity": identity,
97 "centers": centers,
98 }
99 axes.append(axis)
100 shape = tuple([a["points"].size for a in axes])
101 for i, ax in enumerate(axes):
102 sh = [1] * len(shape)
103 sh[i] = len(ax["points"])
104 data.create_variable(
105 name=ax["name"] + "_points", values=np.array(ax["points"]).reshape(sh)
106 )
107 if ax["centers"] is not None:
108 sh = list(shape)
109 sh[i] = 1
110 data.create_variable(
111 name=ax["name"] + "_centers", values=np.array(ax["centers"]).reshape(sh)
112 )
113 # get assorted remaining things
114 # variables and channels
115 try:
116 signed = iter(headers["channel signed"])
117 except KeyError:
118 signed = itertools.repeat(False)
119 for index, kind, name in zip(range(len(arr)), headers["kind"], headers["name"]):
120 values = np.full(np.prod(shape), np.nan)
121 values[: len(arr[index])] = arr[index]
122 values.shape = shape
123 if name == "time":
124 data.create_variable(name="labtime", values=values)
125 if kind == "hardware":
126 # sadly, recorded tolerances are not reliable
127 # so a bit of hard-coded hacking is needed
128 # if this ends up being too fragile, we might have to use the points arrays
129 # ---Blaise 2018-01-09
130 units = headers["units"][index]
131 label = headers["label"][index]
132 if (
133 "w" in name
134 and name.startswith(tuple(data.variable_names))
135 and name not in headers["axis names"]
136 ):
137 inherited_shape = data[name.split("_")[0]].shape
138 for i, s in enumerate(inherited_shape):
139 if s == 1:
140 values = np.mean(values, axis=i)
141 values = np.expand_dims(values, i)
142 else:
143 tolerance = headers["tolerance"][index]
144 units = headers["units"][index]
145 for i in range(len(shape)):
146 if tolerance is None:
147 break
148 if "d" in name:
149 # This is a hack because delay is particularly
150 # unreliable in tolerance. And 3 fs vs 3 ps is a huge
151 # difference... KFS 2019-2-27
152 if units == "fs":
153 tolerance = 3.
154 else:
155 tolerance = 0.1
156 if "zero" in name:
157 tolerance = 1e-10
158 try:
159 assert i == headers["axis names"].index(name)
160 tolerance = 0
161 except (ValueError, AssertionError):
162 if (
163 name in headers["axis names"]
164 and "%s_centers" % name not in data.variable_names
165 ):
166 tolerance = np.inf
167 mean = np.nanmean(values, axis=i)
168 mean = np.expand_dims(mean, i)
169 values, meanexp = wt_kit.share_nans(values, mean)
170 if np.allclose(meanexp, values, atol=tolerance, equal_nan=True):
171 values = mean
172 if name in headers["axis names"]:
173 points = np.array(headers[name + " points"])
174 pointsshape = [1] * values.ndim
175 for i, ax in enumerate(axes):
176 if ax["name"] == name:
177 pointsshape[i] = len(points)
178 break
179 points.shape = pointsshape
180 points = wt_units.converter(points, headers["axis units"][i], units)
181 for i in range(points.ndim):
182 if points.shape[i] == 1:
183 points = np.repeat(points, values.shape[i], axis=i)
184 if points.size <= values.size:
185 values[np.isnan(values)] = points[np.isnan(values)]
186 data.create_variable(name, values=values, units=units, label=label)
187 if kind == "channel":
188 data.create_channel(name=name, values=values, shape=values.shape, signed=next(signed))
189 # axes
190 for a in axes:
191 expression = a["identity"]
192 if expression.startswith("D"):
193 expression = expression[1:]
194 expression.replace("=D", "=")
195 a["expression"] = expression
196 data.transform(*[a["expression"] for a in axes])
197 for a, u in zip(data.axes, headers["axis units"]):
198 if u is not None:
199 a.convert(u)
200 # return
201 if verbose:
202 print("data created at {0}".format(data.fullpath))
203 print(" axes: {0}".format(data.axis_names))
204 print(" shape: {0}".format(data.shape))
205 return data
206
[end of WrightTools/data/_pycmds.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/WrightTools/data/_pycmds.py b/WrightTools/data/_pycmds.py
--- a/WrightTools/data/_pycmds.py
+++ b/WrightTools/data/_pycmds.py
@@ -146,7 +146,7 @@
if tolerance is None:
break
if "d" in name:
- # This is a hack because delay is particularly
+ # This is a hack because delay is particularly
# unreliable in tolerance. And 3 fs vs 3 ps is a huge
# difference... KFS 2019-2-27
if units == "fs":
@@ -197,6 +197,23 @@
for a, u in zip(data.axes, headers["axis units"]):
if u is not None:
a.convert(u)
+ if (
+ headers["system name"] == "fs"
+ and int(headers["PyCMDS version"].split(".")[0]) == 0
+ and int(headers["PyCMDS version"].split(".")[1]) < 10
+ ):
+ # in versions of PyCMDS up to (and including) 0.9.0
+ # there was an incorrect hard-coded conversion factor between mm and fs
+ # this ONLY applied to Newport MFA stages
+ # we apply this correction knowing that Newport MFAs were only used on the "fs" system
+ # and knowing that the Newport MFAs were always assigned as "d1", "d2" and "d3"
+ # ---Blaise 2019-04-09
+ for delay in ("d1", "d2", "d3", "d1_points", "d2_points", "d3_points"):
+ if delay not in data.variable_names:
+ continue
+ data[delay][:] *= 6000.671281903963041 / 6671.281903963041
+ if verbose:
+ print(f"Correction factor applied to {delay}")
# return
if verbose:
print("data created at {0}".format(data.fullpath))
|
{"golden_diff": "diff --git a/WrightTools/data/_pycmds.py b/WrightTools/data/_pycmds.py\n--- a/WrightTools/data/_pycmds.py\n+++ b/WrightTools/data/_pycmds.py\n@@ -146,7 +146,7 @@\n if tolerance is None:\n break\n if \"d\" in name:\n- # This is a hack because delay is particularly \n+ # This is a hack because delay is particularly\n # unreliable in tolerance. And 3 fs vs 3 ps is a huge\n # difference... KFS 2019-2-27\n if units == \"fs\":\n@@ -197,6 +197,23 @@\n for a, u in zip(data.axes, headers[\"axis units\"]):\n if u is not None:\n a.convert(u)\n+ if (\n+ headers[\"system name\"] == \"fs\"\n+ and int(headers[\"PyCMDS version\"].split(\".\")[0]) == 0\n+ and int(headers[\"PyCMDS version\"].split(\".\")[1]) < 10\n+ ):\n+ # in versions of PyCMDS up to (and including) 0.9.0\n+ # there was an incorrect hard-coded conversion factor between mm and fs\n+ # this ONLY applied to Newport MFA stages\n+ # we apply this correction knowing that Newport MFAs were only used on the \"fs\" system\n+ # and knowing that the Newport MFAs were always assigned as \"d1\", \"d2\" and \"d3\"\n+ # ---Blaise 2019-04-09\n+ for delay in (\"d1\", \"d2\", \"d3\", \"d1_points\", \"d2_points\", \"d3_points\"):\n+ if delay not in data.variable_names:\n+ continue\n+ data[delay][:] *= 6000.671281903963041 / 6671.281903963041\n+ if verbose:\n+ print(f\"Correction factor applied to {delay}\")\n # return\n if verbose:\n print(\"data created at {0}\".format(data.fullpath))\n", "issue": "from_PyCMDS should check version and system to determine if d1, d2, and d3 variables need to be scaled \nHelps to bypass legacy problems involving wright-group/PyCMDS#210 .\n", "before_files": [{"content": "\"\"\"PyCMDS.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport itertools\nimport os\nimport pathlib\n\nimport numpy as np\n\nimport tidy_headers\n\nfrom ._data import Data\nfrom .. import kit as wt_kit\nfrom .. import units as wt_units\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"from_PyCMDS\"]\n\n\n# --- from function -------------------------------------------------------------------------------\n\n\ndef from_PyCMDS(filepath, name=None, parent=None, verbose=True) -> Data:\n \"\"\"Create a data object from a single PyCMDS output file.\n\n Parameters\n ----------\n filepath : path-like\n Path to the .data file\n Can be either a local or remote file (http/ftp).\n Can be compressed with gz/bz2, decompression based on file name.\n name : str or None (optional)\n The name to be applied to the new data object. If None, name is read\n from file.\n parent : WrightTools.Collection (optional)\n Collection to place new data object within. Default is None.\n verbose : bool (optional)\n Toggle talkback. 
Default is True.\n\n Returns\n -------\n data\n A Data instance.\n \"\"\"\n filestr = os.fspath(filepath)\n filepath = pathlib.Path(filepath)\n\n # header\n ds = np.DataSource(None)\n file_ = ds.open(filestr, \"rt\")\n headers = tidy_headers.read(file_)\n file_.seek(0)\n # name\n if name is None: # name not given in method arguments\n data_name = headers[\"data name\"]\n else:\n data_name = name\n if data_name == \"\": # name not given in PyCMDS\n data_name = headers[\"data origin\"]\n # create data object\n kwargs = {\n \"name\": data_name,\n \"kind\": \"PyCMDS\",\n \"source\": filestr,\n \"created\": headers[\"file created\"],\n }\n if parent is not None:\n data = parent.create_data(**kwargs)\n else:\n data = Data(**kwargs)\n # array\n arr = np.genfromtxt(file_).T\n file_.close()\n # get axes and scanned variables\n axes = []\n for name, identity, units in zip(\n headers[\"axis names\"], headers[\"axis identities\"], headers[\"axis units\"]\n ):\n # points and centers\n points = np.array(headers[name + \" points\"])\n if name + \" centers\" in headers.keys():\n centers = headers[name + \" centers\"]\n else:\n centers = None\n # create\n axis = {\n \"points\": points,\n \"units\": units,\n \"name\": name,\n \"identity\": identity,\n \"centers\": centers,\n }\n axes.append(axis)\n shape = tuple([a[\"points\"].size for a in axes])\n for i, ax in enumerate(axes):\n sh = [1] * len(shape)\n sh[i] = len(ax[\"points\"])\n data.create_variable(\n name=ax[\"name\"] + \"_points\", values=np.array(ax[\"points\"]).reshape(sh)\n )\n if ax[\"centers\"] is not None:\n sh = list(shape)\n sh[i] = 1\n data.create_variable(\n name=ax[\"name\"] + \"_centers\", values=np.array(ax[\"centers\"]).reshape(sh)\n )\n # get assorted remaining things\n # variables and channels\n try:\n signed = iter(headers[\"channel signed\"])\n except KeyError:\n signed = itertools.repeat(False)\n for index, kind, name in zip(range(len(arr)), headers[\"kind\"], headers[\"name\"]):\n values = np.full(np.prod(shape), np.nan)\n values[: len(arr[index])] = arr[index]\n values.shape = shape\n if name == \"time\":\n data.create_variable(name=\"labtime\", values=values)\n if kind == \"hardware\":\n # sadly, recorded tolerances are not reliable\n # so a bit of hard-coded hacking is needed\n # if this ends up being too fragile, we might have to use the points arrays\n # ---Blaise 2018-01-09\n units = headers[\"units\"][index]\n label = headers[\"label\"][index]\n if (\n \"w\" in name\n and name.startswith(tuple(data.variable_names))\n and name not in headers[\"axis names\"]\n ):\n inherited_shape = data[name.split(\"_\")[0]].shape\n for i, s in enumerate(inherited_shape):\n if s == 1:\n values = np.mean(values, axis=i)\n values = np.expand_dims(values, i)\n else:\n tolerance = headers[\"tolerance\"][index]\n units = headers[\"units\"][index]\n for i in range(len(shape)):\n if tolerance is None:\n break\n if \"d\" in name:\n # This is a hack because delay is particularly \n # unreliable in tolerance. And 3 fs vs 3 ps is a huge\n # difference... 
KFS 2019-2-27\n if units == \"fs\":\n tolerance = 3.\n else:\n tolerance = 0.1\n if \"zero\" in name:\n tolerance = 1e-10\n try:\n assert i == headers[\"axis names\"].index(name)\n tolerance = 0\n except (ValueError, AssertionError):\n if (\n name in headers[\"axis names\"]\n and \"%s_centers\" % name not in data.variable_names\n ):\n tolerance = np.inf\n mean = np.nanmean(values, axis=i)\n mean = np.expand_dims(mean, i)\n values, meanexp = wt_kit.share_nans(values, mean)\n if np.allclose(meanexp, values, atol=tolerance, equal_nan=True):\n values = mean\n if name in headers[\"axis names\"]:\n points = np.array(headers[name + \" points\"])\n pointsshape = [1] * values.ndim\n for i, ax in enumerate(axes):\n if ax[\"name\"] == name:\n pointsshape[i] = len(points)\n break\n points.shape = pointsshape\n points = wt_units.converter(points, headers[\"axis units\"][i], units)\n for i in range(points.ndim):\n if points.shape[i] == 1:\n points = np.repeat(points, values.shape[i], axis=i)\n if points.size <= values.size:\n values[np.isnan(values)] = points[np.isnan(values)]\n data.create_variable(name, values=values, units=units, label=label)\n if kind == \"channel\":\n data.create_channel(name=name, values=values, shape=values.shape, signed=next(signed))\n # axes\n for a in axes:\n expression = a[\"identity\"]\n if expression.startswith(\"D\"):\n expression = expression[1:]\n expression.replace(\"=D\", \"=\")\n a[\"expression\"] = expression\n data.transform(*[a[\"expression\"] for a in axes])\n for a, u in zip(data.axes, headers[\"axis units\"]):\n if u is not None:\n a.convert(u)\n # return\n if verbose:\n print(\"data created at {0}\".format(data.fullpath))\n print(\" axes: {0}\".format(data.axis_names))\n print(\" shape: {0}\".format(data.shape))\n return data\n", "path": "WrightTools/data/_pycmds.py"}]}
| 2,682 | 494 |
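The WrightTools patch above gates a hard-coded delay correction on the acquisition system and PyCMDS version recorded in the file headers; restated in isolation below (header keys taken from the diff, everything else illustrative):

```python
def needs_delay_correction(headers: dict) -> bool:
    # Apply the mm -> fs correction only for data taken on the "fs" system
    # with PyCMDS older than 0.10, mirroring the check added in the patch above.
    major, minor = (int(x) for x in headers["PyCMDS version"].split(".")[:2])
    return headers["system name"] == "fs" and major == 0 and minor < 10
```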
gh_patches_debug_28511
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-2272
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
A utility for hashing a checkpoint file
### 🚀 Feature
By default Ignite saves a checkpoint with the following structure `{filename_prefix}_{name}_{suffix}.{ext}`, which is great.
At first, I would like to add an argument for hashing a checkpoint to the `Checkpoint` class. But that could incur a read + write overhead if we turn on hashing, and hashing may not be used very often during development.
So instead, I would like to propose adding a utility in `ignite.utils` for hashing a saved checkpoint file so that we could use `torch.hub.load_state_dict_from_url` with `check_hash=True`.
Implementation (stripped down and untested version):
```py
def hash_checkpoint(checkpoint):
with open(checkpoint, 'rb') as f:
hash = hashlib.sha256(f.read()).hexdigest()
checkpoint_prefix = checkpoint.strip('.pt')
    filename = '-'.join([checkpoint_prefix, hash[:8]]) + '.pt'
shutil.move(checkpoint, filename)
return filename
```
Implementation in Code-Generator:
https://github.com/pytorch-ignite/code-generator/blob/a2ed5f771e7cf8a57c6f68b5a3797792f4676433/templates/single/utils.py#L134
</issue>
<code>
[start of ignite/utils.py]
1 import collections.abc as collections
2 import functools
3 import logging
4 import random
5 import warnings
6 from typing import Any, Callable, Dict, Optional, TextIO, Tuple, Type, TypeVar, Union, cast
7
8 import torch
9
10 __all__ = ["convert_tensor", "apply_to_tensor", "apply_to_type", "to_onehot", "setup_logger", "manual_seed"]
11
12
13 def convert_tensor(
14 x: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes],
15 device: Optional[Union[str, torch.device]] = None,
16 non_blocking: bool = False,
17 ) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:
18 """Move tensors to relevant device.
19
20 Args:
21 x: input tensor or mapping, or sequence of tensors.
22 device: device type to move ``x``.
23 non_blocking: convert a CPU Tensor with pinned memory to a CUDA Tensor
24 asynchronously with respect to the host if possible
25 """
26
27 def _func(tensor: torch.Tensor) -> torch.Tensor:
28 return tensor.to(device=device, non_blocking=non_blocking) if device is not None else tensor
29
30 return apply_to_tensor(x, _func)
31
32
33 def apply_to_tensor(
34 x: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes], func: Callable
35 ) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:
36 """Apply a function on a tensor or mapping, or sequence of tensors.
37
38 Args:
39 x: input tensor or mapping, or sequence of tensors.
40 func: the function to apply on ``x``.
41 """
42 return apply_to_type(x, torch.Tensor, func)
43
44
45 def apply_to_type(
46 x: Union[Any, collections.Sequence, collections.Mapping, str, bytes],
47 input_type: Union[Type, Tuple[Type[Any], Any]],
48 func: Callable,
49 ) -> Union[Any, collections.Sequence, collections.Mapping, str, bytes]:
50 """Apply a function on an object of `input_type` or mapping, or sequence of objects of `input_type`.
51
52 Args:
53 x: object or mapping or sequence.
54 input_type: data type of ``x``.
55 func: the function to apply on ``x``.
56 """
57 if isinstance(x, input_type):
58 return func(x)
59 if isinstance(x, (str, bytes)):
60 return x
61 if isinstance(x, collections.Mapping):
62 return cast(Callable, type(x))({k: apply_to_type(sample, input_type, func) for k, sample in x.items()})
63 if isinstance(x, tuple) and hasattr(x, "_fields"): # namedtuple
64 return cast(Callable, type(x))(*(apply_to_type(sample, input_type, func) for sample in x))
65 if isinstance(x, collections.Sequence):
66 return cast(Callable, type(x))([apply_to_type(sample, input_type, func) for sample in x])
67 raise TypeError((f"x must contain {input_type}, dicts or lists; found {type(x)}"))
68
69
70 def to_onehot(indices: torch.Tensor, num_classes: int) -> torch.Tensor:
71 """Convert a tensor of indices of any shape `(N, ...)` to a
72 tensor of one-hot indicators of shape `(N, num_classes, ...)` and of type uint8. Output's device is equal to the
73 input's device`.
74
75 Args:
76 indices: input tensor to convert.
77 num_classes: number of classes for one-hot tensor.
78
79 .. versionchanged:: 0.4.3
80 This functions is now torchscriptable.
81 """
82 new_shape = (indices.shape[0], num_classes) + indices.shape[1:]
83 onehot = torch.zeros(new_shape, dtype=torch.uint8, device=indices.device)
84 return onehot.scatter_(1, indices.unsqueeze(1), 1)
85
86
87 def setup_logger(
88 name: Optional[str] = "ignite",
89 level: int = logging.INFO,
90 stream: Optional[TextIO] = None,
91 format: str = "%(asctime)s %(name)s %(levelname)s: %(message)s",
92 filepath: Optional[str] = None,
93 distributed_rank: Optional[int] = None,
94 reset: bool = False,
95 ) -> logging.Logger:
96 """Setups logger: name, level, format etc.
97
98 Args:
99 name: new name for the logger. If None, the standard logger is used.
100 level: logging level, e.g. CRITICAL, ERROR, WARNING, INFO, DEBUG.
101 stream: logging stream. If None, the standard stream is used (sys.stderr).
102 format: logging format. By default, `%(asctime)s %(name)s %(levelname)s: %(message)s`.
103 filepath: Optional logging file path. If not None, logs are written to the file.
104 distributed_rank: Optional, rank in distributed configuration to avoid logger setup for workers.
105 If None, distributed_rank is initialized to the rank of process.
106 reset: if True, reset an existing logger rather than keep format, handlers, and level.
107
108 Returns:
109 logging.Logger
110
111 Examples:
112 Improve logs readability when training with a trainer and evaluator:
113
114 .. code-block:: python
115
116 from ignite.utils import setup_logger
117
118 trainer = ...
119 evaluator = ...
120
121 trainer.logger = setup_logger("trainer")
122 evaluator.logger = setup_logger("evaluator")
123
124 trainer.run(data, max_epochs=10)
125
126 # Logs will look like
127 # 2020-01-21 12:46:07,356 trainer INFO: Engine run starting with max_epochs=5.
128 # 2020-01-21 12:46:07,358 trainer INFO: Epoch[1] Complete. Time taken: 00:5:23
129 # 2020-01-21 12:46:07,358 evaluator INFO: Engine run starting with max_epochs=1.
130 # 2020-01-21 12:46:07,358 evaluator INFO: Epoch[1] Complete. Time taken: 00:01:02
131 # ...
132
133 Every existing logger can be reset if needed
134
135 .. code-block:: python
136
137 logger = setup_logger(name="my-logger", format="=== %(name)s %(message)s")
138 logger.info("first message")
139 setup_logger(name="my-logger", format="+++ %(name)s %(message)s", reset=True)
140 logger.info("second message")
141
142 # Logs will look like
143 # === my-logger first message
144 # +++ my-logger second message
145
146 Change the level of an existing internal logger
147
148 .. code-block:: python
149
150 setup_logger(
151 name="ignite.distributed.launcher.Parallel",
152 level=logging.WARNING
153 )
154
155 .. versionchanged:: 0.4.3
156 Added ``stream`` parameter.
157
158 .. versionchanged:: 0.4.5
159 Added ``reset`` parameter.
160 """
161 # check if the logger already exists
162 existing = name is None or name in logging.root.manager.loggerDict
163
164 # if existing, get the logger otherwise create a new one
165 logger = logging.getLogger(name)
166
167 if distributed_rank is None:
168 import ignite.distributed as idist
169
170 distributed_rank = idist.get_rank()
171
172 # Remove previous handlers
173 if distributed_rank > 0 or reset:
174
175 if logger.hasHandlers():
176 for h in list(logger.handlers):
177 logger.removeHandler(h)
178
179 if distributed_rank > 0:
180
181 # Add null handler to avoid multiple parallel messages
182 logger.addHandler(logging.NullHandler())
183
184 # Keep the existing configuration if not reset
185 if existing and not reset:
186 return logger
187
188 if distributed_rank == 0:
189 logger.setLevel(level)
190
191 formatter = logging.Formatter(format)
192
193 ch = logging.StreamHandler(stream=stream)
194 ch.setLevel(level)
195 ch.setFormatter(formatter)
196 logger.addHandler(ch)
197
198 if filepath is not None:
199 fh = logging.FileHandler(filepath)
200 fh.setLevel(level)
201 fh.setFormatter(formatter)
202 logger.addHandler(fh)
203
204 # don't propagate to ancestors
205 # the problem here is to attach handlers to loggers
206 # should we provide a default configuration less open ?
207 if name is not None:
208 logger.propagate = False
209
210 return logger
211
212
213 def manual_seed(seed: int) -> None:
214 """Setup random state from a seed for `torch`, `random` and optionally `numpy` (if can be imported).
215
216 Args:
217 seed: Random state seed
218
219 .. versionchanged:: 0.4.3
220 Added ``torch.cuda.manual_seed_all(seed)``.
221
222 .. versionchanged:: 0.4.5
223 Added ``torch_xla.core.xla_model.set_rng_state(seed)``.
224 """
225 random.seed(seed)
226 torch.manual_seed(seed)
227
228 if torch.cuda.is_available():
229 torch.cuda.manual_seed_all(seed)
230
231 try:
232 import torch_xla.core.xla_model as xm
233
234 xm.set_rng_state(seed)
235 except ImportError:
236 pass
237
238 try:
239 import numpy as np
240
241 np.random.seed(seed)
242 except ImportError:
243 pass
244
245
246 def deprecated(
247 deprecated_in: str, removed_in: str = "", reasons: Tuple[str, ...] = (), raise_exception: bool = False
248 ) -> Callable:
249
250 F = TypeVar("F", bound=Callable[..., Any])
251
252 def decorator(func: F) -> F:
253 func_doc = func.__doc__ if func.__doc__ else ""
254 deprecation_warning = (
255 f"This function has been deprecated since version {deprecated_in}"
256 + (f" and will be removed in version {removed_in}" if removed_in else "")
257 + ".\n Please refer to the documentation for more details."
258 )
259
260 @functools.wraps(func)
261 def wrapper(*args: Any, **kwargs: Dict[str, Any]) -> Callable:
262 if raise_exception:
263 raise DeprecationWarning(deprecation_warning)
264 warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
265 return func(*args, **kwargs)
266
267 appended_doc = f".. deprecated:: {deprecated_in}" + ("\n\n\t" if len(reasons) > 0 else "")
268
269 for reason in reasons:
270 appended_doc += "\n\t- " + reason
271 wrapper.__doc__ = f"**Deprecated function**.\n\n {func_doc}{appended_doc}"
272 return cast(F, wrapper)
273
274 return decorator
275
[end of ignite/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ignite/utils.py b/ignite/utils.py
--- a/ignite/utils.py
+++ b/ignite/utils.py
@@ -1,13 +1,24 @@
import collections.abc as collections
import functools
+import hashlib
import logging
import random
+import shutil
import warnings
+from pathlib import Path
from typing import Any, Callable, Dict, Optional, TextIO, Tuple, Type, TypeVar, Union, cast
import torch
-__all__ = ["convert_tensor", "apply_to_tensor", "apply_to_type", "to_onehot", "setup_logger", "manual_seed"]
+__all__ = [
+ "convert_tensor",
+ "apply_to_tensor",
+ "apply_to_type",
+ "to_onehot",
+ "setup_logger",
+ "manual_seed",
+ "hash_checkpoint",
+]
def convert_tensor(
@@ -272,3 +283,34 @@
return cast(F, wrapper)
return decorator
+
+
+def hash_checkpoint(checkpoint_path: Union[str, Path], output_dir: Union[str, Path],) -> Tuple[Path, str]:
+ """
+ Hash the checkpoint file in the format of ``<filename>-<hash>.<ext>``
+ to be used with ``check_hash`` of :func:`torch.hub.load_state_dict_from_url`.
+
+ Args:
+ checkpoint_path: Path to the checkpoint file.
+ output_dir: Output directory to store the hashed checkpoint file.
+
+ Returns:
+ Path to the hashed checkpoint file, The 8 digits of SHA256 hash.
+
+ .. versionadded:: 0.5.0
+ """
+
+ if isinstance(checkpoint_path, str):
+ checkpoint_path = Path(checkpoint_path)
+
+ if isinstance(output_dir, str):
+ output_dir = Path(output_dir)
+
+ sha_hash = hashlib.sha256(checkpoint_path.read_bytes()).hexdigest()
+ old_filename = checkpoint_path.stem
+ new_filename = "-".join((old_filename, sha_hash[:8])) + ".pt"
+
+ hash_checkpoint_path = output_dir / new_filename
+ shutil.move(str(checkpoint_path), hash_checkpoint_path)
+
+ return hash_checkpoint_path, sha_hash
|
{"golden_diff": "diff --git a/ignite/utils.py b/ignite/utils.py\n--- a/ignite/utils.py\n+++ b/ignite/utils.py\n@@ -1,13 +1,24 @@\n import collections.abc as collections\n import functools\n+import hashlib\n import logging\n import random\n+import shutil\n import warnings\n+from pathlib import Path\n from typing import Any, Callable, Dict, Optional, TextIO, Tuple, Type, TypeVar, Union, cast\n \n import torch\n \n-__all__ = [\"convert_tensor\", \"apply_to_tensor\", \"apply_to_type\", \"to_onehot\", \"setup_logger\", \"manual_seed\"]\n+__all__ = [\n+ \"convert_tensor\",\n+ \"apply_to_tensor\",\n+ \"apply_to_type\",\n+ \"to_onehot\",\n+ \"setup_logger\",\n+ \"manual_seed\",\n+ \"hash_checkpoint\",\n+]\n \n \n def convert_tensor(\n@@ -272,3 +283,34 @@\n return cast(F, wrapper)\n \n return decorator\n+\n+\n+def hash_checkpoint(checkpoint_path: Union[str, Path], output_dir: Union[str, Path],) -> Tuple[Path, str]:\n+ \"\"\"\n+ Hash the checkpoint file in the format of ``<filename>-<hash>.<ext>``\n+ to be used with ``check_hash`` of :func:`torch.hub.load_state_dict_from_url`.\n+\n+ Args:\n+ checkpoint_path: Path to the checkpoint file.\n+ output_dir: Output directory to store the hashed checkpoint file.\n+\n+ Returns:\n+ Path to the hashed checkpoint file, The 8 digits of SHA256 hash.\n+\n+ .. versionadded:: 0.5.0\n+ \"\"\"\n+\n+ if isinstance(checkpoint_path, str):\n+ checkpoint_path = Path(checkpoint_path)\n+\n+ if isinstance(output_dir, str):\n+ output_dir = Path(output_dir)\n+\n+ sha_hash = hashlib.sha256(checkpoint_path.read_bytes()).hexdigest()\n+ old_filename = checkpoint_path.stem\n+ new_filename = \"-\".join((old_filename, sha_hash[:8])) + \".pt\"\n+\n+ hash_checkpoint_path = output_dir / new_filename\n+ shutil.move(str(checkpoint_path), hash_checkpoint_path)\n+\n+ return hash_checkpoint_path, sha_hash\n", "issue": "A utility for hashing a checkpoint file\n### \ud83d\ude80 Feature\r\n\r\nBy default Ignite save a checkpoint with the following structure `{filename_prefix}_{name}_{suffix}.{ext}` which is great.\r\nAt first, I would like to add an argument for hashing a checkpoint in `Checkpoint` class. 
But that could occur a read + write overhead if we turn on hashing which may not be used very often during development.\r\n\r\nSo instead, I would like to propose to add a utility in `ignite.util` for hashing a saved checkpoint file so that we could use `torch.hub.load_state_dict_from_url` with `check_hash=True`.\r\n\r\nImplementation (stripped down and untested version):\r\n\r\n```py\r\ndef hash_checkpoint(checkpoint):\r\n with open(checkpoint, 'rb') as f:\r\n hash = hashlib.sha256(f.read()).hexdigest()\r\n\r\n checkpoint_prefix = checkpoint.strip('.pt')\r\n filname = '-'.join([checkpoint_prefix, hash[:8]]) + '.pt'\r\n shutil.move(checkpoint, filename)\r\n return filename\r\n```\r\n\r\nImplementation in Code-Generator:\r\n\r\nhttps://github.com/pytorch-ignite/code-generator/blob/a2ed5f771e7cf8a57c6f68b5a3797792f4676433/templates/single/utils.py#L134\n", "before_files": [{"content": "import collections.abc as collections\nimport functools\nimport logging\nimport random\nimport warnings\nfrom typing import Any, Callable, Dict, Optional, TextIO, Tuple, Type, TypeVar, Union, cast\n\nimport torch\n\n__all__ = [\"convert_tensor\", \"apply_to_tensor\", \"apply_to_type\", \"to_onehot\", \"setup_logger\", \"manual_seed\"]\n\n\ndef convert_tensor(\n x: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes],\n device: Optional[Union[str, torch.device]] = None,\n non_blocking: bool = False,\n) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Move tensors to relevant device.\n\n Args:\n x: input tensor or mapping, or sequence of tensors.\n device: device type to move ``x``.\n non_blocking: convert a CPU Tensor with pinned memory to a CUDA Tensor\n asynchronously with respect to the host if possible\n \"\"\"\n\n def _func(tensor: torch.Tensor) -> torch.Tensor:\n return tensor.to(device=device, non_blocking=non_blocking) if device is not None else tensor\n\n return apply_to_tensor(x, _func)\n\n\ndef apply_to_tensor(\n x: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes], func: Callable\n) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Apply a function on a tensor or mapping, or sequence of tensors.\n\n Args:\n x: input tensor or mapping, or sequence of tensors.\n func: the function to apply on ``x``.\n \"\"\"\n return apply_to_type(x, torch.Tensor, func)\n\n\ndef apply_to_type(\n x: Union[Any, collections.Sequence, collections.Mapping, str, bytes],\n input_type: Union[Type, Tuple[Type[Any], Any]],\n func: Callable,\n) -> Union[Any, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Apply a function on an object of `input_type` or mapping, or sequence of objects of `input_type`.\n\n Args:\n x: object or mapping or sequence.\n input_type: data type of ``x``.\n func: the function to apply on ``x``.\n \"\"\"\n if isinstance(x, input_type):\n return func(x)\n if isinstance(x, (str, bytes)):\n return x\n if isinstance(x, collections.Mapping):\n return cast(Callable, type(x))({k: apply_to_type(sample, input_type, func) for k, sample in x.items()})\n if isinstance(x, tuple) and hasattr(x, \"_fields\"): # namedtuple\n return cast(Callable, type(x))(*(apply_to_type(sample, input_type, func) for sample in x))\n if isinstance(x, collections.Sequence):\n return cast(Callable, type(x))([apply_to_type(sample, input_type, func) for sample in x])\n raise TypeError((f\"x must contain {input_type}, dicts or lists; found {type(x)}\"))\n\n\ndef to_onehot(indices: torch.Tensor, num_classes: 
int) -> torch.Tensor:\n \"\"\"Convert a tensor of indices of any shape `(N, ...)` to a\n tensor of one-hot indicators of shape `(N, num_classes, ...)` and of type uint8. Output's device is equal to the\n input's device`.\n\n Args:\n indices: input tensor to convert.\n num_classes: number of classes for one-hot tensor.\n\n .. versionchanged:: 0.4.3\n This functions is now torchscriptable.\n \"\"\"\n new_shape = (indices.shape[0], num_classes) + indices.shape[1:]\n onehot = torch.zeros(new_shape, dtype=torch.uint8, device=indices.device)\n return onehot.scatter_(1, indices.unsqueeze(1), 1)\n\n\ndef setup_logger(\n name: Optional[str] = \"ignite\",\n level: int = logging.INFO,\n stream: Optional[TextIO] = None,\n format: str = \"%(asctime)s %(name)s %(levelname)s: %(message)s\",\n filepath: Optional[str] = None,\n distributed_rank: Optional[int] = None,\n reset: bool = False,\n) -> logging.Logger:\n \"\"\"Setups logger: name, level, format etc.\n\n Args:\n name: new name for the logger. If None, the standard logger is used.\n level: logging level, e.g. CRITICAL, ERROR, WARNING, INFO, DEBUG.\n stream: logging stream. If None, the standard stream is used (sys.stderr).\n format: logging format. By default, `%(asctime)s %(name)s %(levelname)s: %(message)s`.\n filepath: Optional logging file path. If not None, logs are written to the file.\n distributed_rank: Optional, rank in distributed configuration to avoid logger setup for workers.\n If None, distributed_rank is initialized to the rank of process.\n reset: if True, reset an existing logger rather than keep format, handlers, and level.\n\n Returns:\n logging.Logger\n\n Examples:\n Improve logs readability when training with a trainer and evaluator:\n\n .. code-block:: python\n\n from ignite.utils import setup_logger\n\n trainer = ...\n evaluator = ...\n\n trainer.logger = setup_logger(\"trainer\")\n evaluator.logger = setup_logger(\"evaluator\")\n\n trainer.run(data, max_epochs=10)\n\n # Logs will look like\n # 2020-01-21 12:46:07,356 trainer INFO: Engine run starting with max_epochs=5.\n # 2020-01-21 12:46:07,358 trainer INFO: Epoch[1] Complete. Time taken: 00:5:23\n # 2020-01-21 12:46:07,358 evaluator INFO: Engine run starting with max_epochs=1.\n # 2020-01-21 12:46:07,358 evaluator INFO: Epoch[1] Complete. Time taken: 00:01:02\n # ...\n\n Every existing logger can be reset if needed\n\n .. code-block:: python\n\n logger = setup_logger(name=\"my-logger\", format=\"=== %(name)s %(message)s\")\n logger.info(\"first message\")\n setup_logger(name=\"my-logger\", format=\"+++ %(name)s %(message)s\", reset=True)\n logger.info(\"second message\")\n\n # Logs will look like\n # === my-logger first message\n # +++ my-logger second message\n\n Change the level of an existing internal logger\n\n .. code-block:: python\n\n setup_logger(\n name=\"ignite.distributed.launcher.Parallel\",\n level=logging.WARNING\n )\n\n .. versionchanged:: 0.4.3\n Added ``stream`` parameter.\n\n .. 
versionchanged:: 0.4.5\n Added ``reset`` parameter.\n \"\"\"\n # check if the logger already exists\n existing = name is None or name in logging.root.manager.loggerDict\n\n # if existing, get the logger otherwise create a new one\n logger = logging.getLogger(name)\n\n if distributed_rank is None:\n import ignite.distributed as idist\n\n distributed_rank = idist.get_rank()\n\n # Remove previous handlers\n if distributed_rank > 0 or reset:\n\n if logger.hasHandlers():\n for h in list(logger.handlers):\n logger.removeHandler(h)\n\n if distributed_rank > 0:\n\n # Add null handler to avoid multiple parallel messages\n logger.addHandler(logging.NullHandler())\n\n # Keep the existing configuration if not reset\n if existing and not reset:\n return logger\n\n if distributed_rank == 0:\n logger.setLevel(level)\n\n formatter = logging.Formatter(format)\n\n ch = logging.StreamHandler(stream=stream)\n ch.setLevel(level)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n if filepath is not None:\n fh = logging.FileHandler(filepath)\n fh.setLevel(level)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # don't propagate to ancestors\n # the problem here is to attach handlers to loggers\n # should we provide a default configuration less open ?\n if name is not None:\n logger.propagate = False\n\n return logger\n\n\ndef manual_seed(seed: int) -> None:\n \"\"\"Setup random state from a seed for `torch`, `random` and optionally `numpy` (if can be imported).\n\n Args:\n seed: Random state seed\n\n .. versionchanged:: 0.4.3\n Added ``torch.cuda.manual_seed_all(seed)``.\n\n .. versionchanged:: 0.4.5\n Added ``torch_xla.core.xla_model.set_rng_state(seed)``.\n \"\"\"\n random.seed(seed)\n torch.manual_seed(seed)\n\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed)\n\n try:\n import torch_xla.core.xla_model as xm\n\n xm.set_rng_state(seed)\n except ImportError:\n pass\n\n try:\n import numpy as np\n\n np.random.seed(seed)\n except ImportError:\n pass\n\n\ndef deprecated(\n deprecated_in: str, removed_in: str = \"\", reasons: Tuple[str, ...] = (), raise_exception: bool = False\n) -> Callable:\n\n F = TypeVar(\"F\", bound=Callable[..., Any])\n\n def decorator(func: F) -> F:\n func_doc = func.__doc__ if func.__doc__ else \"\"\n deprecation_warning = (\n f\"This function has been deprecated since version {deprecated_in}\"\n + (f\" and will be removed in version {removed_in}\" if removed_in else \"\")\n + \".\\n Please refer to the documentation for more details.\"\n )\n\n @functools.wraps(func)\n def wrapper(*args: Any, **kwargs: Dict[str, Any]) -> Callable:\n if raise_exception:\n raise DeprecationWarning(deprecation_warning)\n warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)\n return func(*args, **kwargs)\n\n appended_doc = f\".. deprecated:: {deprecated_in}\" + (\"\\n\\n\\t\" if len(reasons) > 0 else \"\")\n\n for reason in reasons:\n appended_doc += \"\\n\\t- \" + reason\n wrapper.__doc__ = f\"**Deprecated function**.\\n\\n {func_doc}{appended_doc}\"\n return cast(F, wrapper)\n\n return decorator\n", "path": "ignite/utils.py"}]}
| 3,817 | 494 |
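Note: the `hash_checkpoint` snippet quoted in the issue above is explicitly described as stripped down and untested; as written it uses `str.strip('.pt')`, which removes leading/trailing `.`, `p`, and `t` characters rather than the suffix, and it assigns `filname` but then moves to `filename`. A minimal self-contained sketch of the same idea (the function name, chunked reading, and `.pt` suffix handling here are assumptions for illustration, not ignite's actual API):

```python
import hashlib
import pathlib
import shutil


def hash_checkpoint(checkpoint_path, output_dir="."):
    """Rename a saved checkpoint to '<stem>-<sha256 prefix>.pt' so that
    torch.hub.load_state_dict_from_url(..., check_hash=True) can verify it.
    Sketch of the behaviour proposed in the issue, not ignite's API.
    """
    path = pathlib.Path(checkpoint_path)
    sha = hashlib.sha256()
    with path.open("rb") as f:
        # hash in chunks so large checkpoints need not fit in memory at once
        for chunk in iter(lambda: f.read(65536), b""):
            sha.update(chunk)
    hashed_name = f"{path.stem}-{sha.hexdigest()[:8]}.pt"
    new_path = pathlib.Path(output_dir) / hashed_name
    shutil.move(str(path), str(new_path))
    return str(new_path)
```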
gh_patches_debug_12430
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-2065
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
IOError, 'Not a gzipped file'
While trying to access a sitemap from robots.txt, Scrapy fails with an **IOError, 'Not a gzipped file'** error.
Not sure whether this issue is related to the following issue(s):
https://github.com/scrapy/scrapy/issues/193 -> closed issue
https://github.com/scrapy/scrapy/pull/660 -> merged pull request to address issue 193
https://github.com/scrapy/scrapy/issues/951 -> open issue
> line where code fails in gzip.py at line # 197
>
> ``` python
> def _read_gzip_header(self):
> magic = self.fileobj.read(2)
> if magic != '\037\213':
> raise IOError, 'Not a gzipped file'
> ```
# Response Header
```
Content-Encoding: gzip
Accept-Ranges: bytes
X-Amz-Request-Id: BFFF010DDE6268DA
Vary: Accept-Encoding
Server: AmazonS3
Last-Modified: Wed, 15 Jun 2016 19:02:20 GMT
Etag: "300bb71d6897cb2a22bba0bd07978c84"
Cache-Control: no-transform
Date: Sun, 19 Jun 2016 10:54:53 GMT
Content-Type: binary/octet-stream
```
Error Log:
``` log
Traceback (most recent call last):
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\utils\defer.py", line 102, in iter_errback
yield next(it)
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\spidermiddlewares\offsite.py", line 29, in process_spider_output
for x in result:
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\spidermiddlewares\referer.py", line 22, in <genexpr>
return (_set_referer(r) for r in result or ())
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\spidermiddlewares\urllength.py", line 37, in <genexpr>
return (r for r in result or () if _filter(r))
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\spidermiddlewares\depth.py", line 58, in <genexpr>
return (r for r in result or () if _filter(r))
File "D:\projects\sitemap_spider\sitemap_spider\spiders\mainspider.py", line 31, in _parse_sitemap
body = self._get_sitemap_body(response)
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\spiders\sitemap.py", line 67, in _get_sitemap_body
return gunzip(response.body)
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\utils\gz.py", line 37, in gunzip
chunk = read1(f, 8196)
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\utils\gz.py", line 21, in read1
return gzf.read(size)
File "c:\python27\Lib\gzip.py", line 268, in read
self._read(readsize)
File "c:\python27\Lib\gzip.py", line 303, in _read
self._read_gzip_header()
File "c:\python27\Lib\gzip.py", line 197, in _read_gzip_header
raise IOError, 'Not a gzipped file'
```
I downloaded the file manually and was able to extract its contents, so the file itself is not corrupted.
As an example sitemap URL, you can follow Amazon's robots.txt.
</issue>
<code>
[start of scrapy/utils/gz.py]
1 import struct
2
3 try:
4 from cStringIO import StringIO as BytesIO
5 except ImportError:
6 from io import BytesIO
7 from gzip import GzipFile
8
9 import six
10 import re
11
12 # - Python>=3.5 GzipFile's read() has issues returning leftover
13 # uncompressed data when input is corrupted
14 # (regression or bug-fix compared to Python 3.4)
15 # - read1(), which fetches data before raising EOFError on next call
16 # works here but is only available from Python>=3.3
17 # - scrapy does not support Python 3.2
18 # - Python 2.7 GzipFile works fine with standard read() + extrabuf
19 if six.PY2:
20 def read1(gzf, size=-1):
21 return gzf.read(size)
22 else:
23 def read1(gzf, size=-1):
24 return gzf.read1(size)
25
26
27 def gunzip(data):
28 """Gunzip the given data and return as much data as possible.
29
30 This is resilient to CRC checksum errors.
31 """
32 f = GzipFile(fileobj=BytesIO(data))
33 output = b''
34 chunk = b'.'
35 while chunk:
36 try:
37 chunk = read1(f, 8196)
38 output += chunk
39 except (IOError, EOFError, struct.error):
40 # complete only if there is some data, otherwise re-raise
41 # see issue 87 about catching struct.error
42 # some pages are quite small so output is '' and f.extrabuf
43 # contains the whole page content
44 if output or getattr(f, 'extrabuf', None):
45 try:
46 output += f.extrabuf
47 finally:
48 break
49 else:
50 raise
51 return output
52
53 _is_gzipped_re = re.compile(br'^application/(x-)?gzip\b', re.I)
54
55 def is_gzipped(response):
56 """Return True if the response is gzipped, or False otherwise"""
57 ctype = response.headers.get('Content-Type', b'')
58 return _is_gzipped_re.search(ctype) is not None
59
[end of scrapy/utils/gz.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scrapy/utils/gz.py b/scrapy/utils/gz.py
--- a/scrapy/utils/gz.py
+++ b/scrapy/utils/gz.py
@@ -50,9 +50,12 @@
raise
return output
-_is_gzipped_re = re.compile(br'^application/(x-)?gzip\b', re.I)
+_is_gzipped = re.compile(br'^application/(x-)?gzip\b', re.I).search
+_is_octetstream = re.compile(br'^(application|binary)/octet-stream\b', re.I).search
def is_gzipped(response):
"""Return True if the response is gzipped, or False otherwise"""
ctype = response.headers.get('Content-Type', b'')
- return _is_gzipped_re.search(ctype) is not None
+ cenc = response.headers.get('Content-Encoding', b'').lower()
+ return (_is_gzipped(ctype) or
+ (_is_octetstream(ctype) and cenc in (b'gzip', b'x-gzip')))
|
{"golden_diff": "diff --git a/scrapy/utils/gz.py b/scrapy/utils/gz.py\n--- a/scrapy/utils/gz.py\n+++ b/scrapy/utils/gz.py\n@@ -50,9 +50,12 @@\n raise\n return output\n \n-_is_gzipped_re = re.compile(br'^application/(x-)?gzip\\b', re.I)\n+_is_gzipped = re.compile(br'^application/(x-)?gzip\\b', re.I).search\n+_is_octetstream = re.compile(br'^(application|binary)/octet-stream\\b', re.I).search\n \n def is_gzipped(response):\n \"\"\"Return True if the response is gzipped, or False otherwise\"\"\"\n ctype = response.headers.get('Content-Type', b'')\n- return _is_gzipped_re.search(ctype) is not None\n+ cenc = response.headers.get('Content-Encoding', b'').lower()\n+ return (_is_gzipped(ctype) or\n+ (_is_octetstream(ctype) and cenc in (b'gzip', b'x-gzip')))\n", "issue": "IOError, 'Not a gzipped file'\nwhile trying to access sitemap from robots.txt , Scrapy fails with **IOError, 'Not a gzipped file'** error\n\nnot sure if this issue is related to following issue(s)\nhttps://github.com/scrapy/scrapy/issues/193 -> closed issue\nhttps://github.com/scrapy/scrapy/pull/660 -> merged pull request to address issue 193\nhttps://github.com/scrapy/scrapy/issues/951 -> open issue\n\n> line where code fails in gzip.py at line # 197\n> \n> ``` python\n> def _read_gzip_header(self):\n> magic = self.fileobj.read(2)\n> if magic != '\\037\\213':\n> raise IOError, 'Not a gzipped file'\n> ```\n# Response Header\n\n```\nContent-Encoding: gzip\nAccept-Ranges: bytes\nX-Amz-Request-Id: BFFF010DDE6268DA\nVary: Accept-Encoding\nServer: AmazonS3\nLast-Modified: Wed, 15 Jun 2016 19:02:20 GMT\nEtag: \"300bb71d6897cb2a22bba0bd07978c84\"\nCache-Control: no-transform\nDate: Sun, 19 Jun 2016 10:54:53 GMT\nContent-Type: binary/octet-stream\n```\n\nError Log:\n\n``` log\n Traceback (most recent call last):\n File \"c:\\venv\\scrapy1.0\\lib\\site-packages\\scrapy\\utils\\defer.py\", line 102, in iter_errback\n yield next(it)\n File \"c:\\venv\\scrapy1.0\\lib\\site-packages\\scrapy\\spidermiddlewares\\offsite.py\", line 29, in process_spider_output\n for x in result:\n File \"c:\\venv\\scrapy1.0\\lib\\site-packages\\scrapy\\spidermiddlewares\\referer.py\", line 22, in <genexpr>\n return (_set_referer(r) for r in result or ())\n File \"c:\\venv\\scrapy1.0\\lib\\site-packages\\scrapy\\spidermiddlewares\\urllength.py\", line 37, in <genexpr>\n return (r for r in result or () if _filter(r))\n File \"c:\\venv\\scrapy1.0\\lib\\site-packages\\scrapy\\spidermiddlewares\\depth.py\", line 58, in <genexpr>\n return (r for r in result or () if _filter(r))\n File \"D:\\projects\\sitemap_spider\\sitemap_spider\\spiders\\mainspider.py\", line 31, in _parse_sitemap\n body = self._get_sitemap_body(response)\n File \"c:\\venv\\scrapy1.0\\lib\\site-packages\\scrapy\\spiders\\sitemap.py\", line 67, in _get_sitemap_body\n return gunzip(response.body)\n File \"c:\\venv\\scrapy1.0\\lib\\site-packages\\scrapy\\utils\\gz.py\", line 37, in gunzip\n chunk = read1(f, 8196)\n File \"c:\\venv\\scrapy1.0\\lib\\site-packages\\scrapy\\utils\\gz.py\", line 21, in read1\n return gzf.read(size)\n File \"c:\\python27\\Lib\\gzip.py\", line 268, in read\n self._read(readsize)\n File \"c:\\python27\\Lib\\gzip.py\", line 303, in _read\n self._read_gzip_header()\n File \"c:\\python27\\Lib\\gzip.py\", line 197, in _read_gzip_header\n raise IOError, 'Not a gzipped file'\n```\n\ni did download file manually and was able to extract the content so it is not like file is corrupted \n\nas an example sitemap url : you can follow amazon robots.txt\n\n", "before_files": [{"content": 
"import struct\n\ntry:\n from cStringIO import StringIO as BytesIO\nexcept ImportError:\n from io import BytesIO\nfrom gzip import GzipFile\n\nimport six\nimport re\n\n# - Python>=3.5 GzipFile's read() has issues returning leftover\n# uncompressed data when input is corrupted\n# (regression or bug-fix compared to Python 3.4)\n# - read1(), which fetches data before raising EOFError on next call\n# works here but is only available from Python>=3.3\n# - scrapy does not support Python 3.2\n# - Python 2.7 GzipFile works fine with standard read() + extrabuf\nif six.PY2:\n def read1(gzf, size=-1):\n return gzf.read(size)\nelse:\n def read1(gzf, size=-1):\n return gzf.read1(size)\n\n\ndef gunzip(data):\n \"\"\"Gunzip the given data and return as much data as possible.\n\n This is resilient to CRC checksum errors.\n \"\"\"\n f = GzipFile(fileobj=BytesIO(data))\n output = b''\n chunk = b'.'\n while chunk:\n try:\n chunk = read1(f, 8196)\n output += chunk\n except (IOError, EOFError, struct.error):\n # complete only if there is some data, otherwise re-raise\n # see issue 87 about catching struct.error\n # some pages are quite small so output is '' and f.extrabuf\n # contains the whole page content\n if output or getattr(f, 'extrabuf', None):\n try:\n output += f.extrabuf\n finally:\n break\n else:\n raise\n return output\n\n_is_gzipped_re = re.compile(br'^application/(x-)?gzip\\b', re.I)\n\ndef is_gzipped(response):\n \"\"\"Return True if the response is gzipped, or False otherwise\"\"\"\n ctype = response.headers.get('Content-Type', b'')\n return _is_gzipped_re.search(ctype) is not None\n", "path": "scrapy/utils/gz.py"}]}
| 2,018 | 233 |
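Note: the golden diff above broadens `is_gzipped` so that `Content-Type: binary/octet-stream` (or `application/octet-stream`) counts as gzipped when the response also carries `Content-Encoding: gzip`, which matches the S3 headers quoted in the issue. A standalone illustration of that check, using a plain dict of bytes in place of Scrapy's `response.headers` (that substitution is an assumption made for brevity):

```python
import re

_is_gzipped = re.compile(rb'^application/(x-)?gzip\b', re.I).search
_is_octetstream = re.compile(rb'^(application|binary)/octet-stream\b', re.I).search


def is_gzipped(headers):
    """headers: dict mapping bytes -> bytes, standing in for response.headers."""
    ctype = headers.get(b'Content-Type', b'')
    cenc = headers.get(b'Content-Encoding', b'').lower()
    return bool(_is_gzipped(ctype) or
                (_is_octetstream(ctype) and cenc in (b'gzip', b'x-gzip')))


# The sitemap response from the issue: gzipped bytes served as binary/octet-stream.
assert is_gzipped({b'Content-Type': b'binary/octet-stream', b'Content-Encoding': b'gzip'})
assert not is_gzipped({b'Content-Type': b'text/xml'})
```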
gh_patches_debug_19955
|
rasdani/github-patches
|
git_diff
|
mkdocs__mkdocs-134
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong paths to extra media files
Incorrect paths are generated for files in `extra_css` and `extra_javascript`. For example, `test.js` will be referenced on the index page as `./test.js`, on the next page as `.././test.js`, on the next page as `../.././test.js`, etc.
</issue>
<code>
[start of mkdocs/build.py]
1 # coding: utf-8
2 from __future__ import print_function
3
4 from mkdocs import nav, toc, utils
5 from mkdocs.compat import urljoin, urlparse, urlunparse, PY2
6 import jinja2
7 import markdown
8 import os
9 import re
10
11
12 class PathToURL(object):
13 def __init__(self, nav=None):
14 self.nav = nav
15
16 def __call__(self, match):
17 url = match.groups()[0]
18 scheme, netloc, path, query, query, fragment = urlparse(url)
19
20 if (scheme or netloc or not utils.is_markdown_file(path)):
21 # Ignore URLs unless they are a relative link to a markdown file.
22 return 'a href="%s"' % url
23
24 if self.nav:
25 # If the site navigation has been provided, then validate
26 # the internal hyperlink, making sure the target actually exists.
27 target_file = self.nav.file_context.make_absolute(path)
28 if target_file not in self.nav.source_files:
29 source_file = self.nav.file_context.current_file
30 msg = (
31 'The page "%s" contained a hyperlink to "%s" which '
32 'is not listed in the "pages" configuration.'
33 )
34 assert False, msg % (source_file, target_file)
35 path = utils.get_url_path(target_file)
36 path = self.nav.url_context.make_relative(path)
37 else:
38 path = utils.get_url_path(path).lstrip('/')
39
40 # Convert the .md hyperlink to a relative hyperlink to the HTML page.
41 url = urlunparse((scheme, netloc, path, query, query, fragment))
42 return 'a href="%s"' % url
43
44
45 def convert_markdown(markdown_source, extensions=()):
46 """
47 Convert the Markdown source file to HTML content, and additionally
48 return the parsed table of contents, and a dictionary of any metadata
49 that was specified in the Markdown file.
50
51 `extensions` is an optional sequence of Python Markdown extensions to add
52 to the default set.
53 """
54
55 # Prepend a table of contents marker for the TOC extension
56 markdown_source = toc.pre_process(markdown_source)
57
58 # Generate the HTML from the markdown source
59 md = markdown.Markdown(
60 extensions=['meta', 'toc', 'tables', 'fenced_code'] + list(extensions)
61 )
62 html_content = md.convert(markdown_source)
63 meta = md.Meta
64
65 # Strip out the generated table of contents
66 (html_content, toc_html) = toc.post_process(html_content)
67
68 # Post process the generated table of contents into a data structure
69 table_of_contents = toc.TableOfContents(toc_html)
70
71 return (html_content, table_of_contents, meta)
72
73
74 def post_process_html(html_content, nav=None):
75 html_content = re.sub(r'a href="([^"]*)"', PathToURL(nav), html_content)
76 html_content = re.sub('<pre>', '<pre class="prettyprint well">', html_content)
77 return html_content
78
79
80 def get_context(page, content, nav, toc, meta, config):
81 site_name = config['site_name']
82
83 if page.is_homepage or page.title is None:
84 page_title = site_name
85 else:
86 page_title = page.title + ' - ' + site_name
87
88 if page.is_homepage:
89 page_description = config['site_description']
90 else:
91 page_description = None
92
93 if config['site_url']:
94 base = config['site_url']
95 if not base.endswith('/'):
96 base += '/'
97 canonical_url = urljoin(base, page.abs_url.lstrip('/'))
98 else:
99 canonical_url = None
100
101 if config['site_favicon']:
102 site_favicon = nav.url_context.make_relative('/' + config['site_favicon'])
103 else:
104 site_favicon = None
105
106 if config['extra_javascript']:
107 config['extra_javascript'] = utils.create_media_urls(nav=nav, url_list=config['extra_javascript'])
108
109 if config['extra_css']:
110 config['extra_css'] = utils.create_media_urls(nav=nav, url_list=config['extra_css'])
111
112 return {
113 'site_name': site_name,
114 'site_author': config['site_author'],
115 'favicon': site_favicon,
116
117 'page_title': page_title,
118 'page_description': page_description,
119
120 'content': content,
121 'toc': toc,
122 'nav': nav,
123 'meta': meta,
124
125 'base_url': nav.url_context.make_relative('/'),
126 'homepage_url': nav.homepage.url,
127 'canonical_url': canonical_url,
128
129 'current_page': page,
130 'previous_page': page.previous_page,
131 'next_page': page.next_page,
132
133 # Note that there's intentionally repetition here. Rather than simply
134 # provide the config dictionary we instead pass everything explicitly.
135 #
136 # This helps ensure that we can throughly document the context that
137 # gets passed to themes.
138 'repo_url': config['repo_url'],
139 'repo_name': config['repo_name'],
140
141 'extra_css': config['extra_css'],
142 'extra_javascript': config['extra_javascript'],
143
144 'include_nav': config['include_nav'],
145 'include_next_prev': config['include_next_prev'],
146 'include_search': config['include_search'],
147
148 'copyright': config['copyright'],
149 'google-analytics': config['google-analytics']
150 }
151
152
153 def build_pages(config):
154 """
155 Builds all the pages and writes them into the build directory.
156 """
157 site_navigation = nav.SiteNavigation(config['pages'])
158 loader = jinja2.FileSystemLoader(config['theme_dir'])
159 env = jinja2.Environment(loader=loader)
160
161 for page in site_navigation.walk_pages():
162 # Read the input file
163 input_path = os.path.join(config['docs_dir'], page.input_path)
164 input_content = open(input_path, 'r').read()
165 if PY2:
166 input_content = input_content.decode('utf-8')
167
168 # Process the markdown text
169 html_content, table_of_contents, meta = convert_markdown(
170 input_content, extensions=config['markdown_extensions']
171 )
172 html_content = post_process_html(html_content, site_navigation)
173
174 context = get_context(
175 page, html_content, site_navigation,
176 table_of_contents, meta, config
177 )
178
179 # Allow 'template:' override in md source files.
180 if 'template' in meta:
181 template = env.get_template(meta['template'][0])
182 else:
183 template = env.get_template('base.html')
184
185 # Render the template.
186 output_content = template.render(context)
187
188 # Write the output file.
189 output_path = os.path.join(config['site_dir'], page.output_path)
190 utils.write_file(output_content.encode('utf-8'), output_path)
191
192
193 def build(config, live_server=False):
194 """
195 Perform a full site build.
196 """
197 if not live_server:
198 print("Building documentation to directory: %s" % config['site_dir'])
199 utils.copy_media_files(config['theme_dir'], config['site_dir'])
200 utils.copy_media_files(config['docs_dir'], config['site_dir'])
201 build_pages(config)
202
[end of mkdocs/build.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mkdocs/build.py b/mkdocs/build.py
--- a/mkdocs/build.py
+++ b/mkdocs/build.py
@@ -103,11 +103,9 @@
else:
site_favicon = None
- if config['extra_javascript']:
- config['extra_javascript'] = utils.create_media_urls(nav=nav, url_list=config['extra_javascript'])
+ extra_javascript = utils.create_media_urls(nav=nav, url_list=config['extra_javascript'])
- if config['extra_css']:
- config['extra_css'] = utils.create_media_urls(nav=nav, url_list=config['extra_css'])
+ extra_css = utils.create_media_urls(nav=nav, url_list=config['extra_css'])
return {
'site_name': site_name,
@@ -138,8 +136,8 @@
'repo_url': config['repo_url'],
'repo_name': config['repo_name'],
- 'extra_css': config['extra_css'],
- 'extra_javascript': config['extra_javascript'],
+ 'extra_css': extra_css,
+ 'extra_javascript': extra_javascript,
'include_nav': config['include_nav'],
'include_next_prev': config['include_next_prev'],
|
{"golden_diff": "diff --git a/mkdocs/build.py b/mkdocs/build.py\n--- a/mkdocs/build.py\n+++ b/mkdocs/build.py\n@@ -103,11 +103,9 @@\n else:\n site_favicon = None\n \n- if config['extra_javascript']:\n- config['extra_javascript'] = utils.create_media_urls(nav=nav, url_list=config['extra_javascript'])\n+ extra_javascript = utils.create_media_urls(nav=nav, url_list=config['extra_javascript'])\n \n- if config['extra_css']:\n- config['extra_css'] = utils.create_media_urls(nav=nav, url_list=config['extra_css'])\n+ extra_css = utils.create_media_urls(nav=nav, url_list=config['extra_css'])\n \n return {\n 'site_name': site_name,\n@@ -138,8 +136,8 @@\n 'repo_url': config['repo_url'],\n 'repo_name': config['repo_name'],\n \n- 'extra_css': config['extra_css'],\n- 'extra_javascript': config['extra_javascript'],\n+ 'extra_css': extra_css,\n+ 'extra_javascript': extra_javascript,\n \n 'include_nav': config['include_nav'],\n 'include_next_prev': config['include_next_prev'],\n", "issue": "Wrong paths to extra media files\nIncorrect paths are generated for files in `extra_css` and `extra_javascript`. For example, `test.js` will be referenced on the index page as `./test.js`, on the next page as `.././test.js`, on the next page as `../.././test.js`, etc.\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import print_function\n\nfrom mkdocs import nav, toc, utils\nfrom mkdocs.compat import urljoin, urlparse, urlunparse, PY2\nimport jinja2\nimport markdown\nimport os\nimport re\n\n\nclass PathToURL(object):\n def __init__(self, nav=None):\n self.nav = nav\n\n def __call__(self, match):\n url = match.groups()[0]\n scheme, netloc, path, query, query, fragment = urlparse(url)\n\n if (scheme or netloc or not utils.is_markdown_file(path)):\n # Ignore URLs unless they are a relative link to a markdown file.\n return 'a href=\"%s\"' % url\n\n if self.nav:\n # If the site navigation has been provided, then validate\n # the internal hyperlink, making sure the target actually exists.\n target_file = self.nav.file_context.make_absolute(path)\n if target_file not in self.nav.source_files:\n source_file = self.nav.file_context.current_file\n msg = (\n 'The page \"%s\" contained a hyperlink to \"%s\" which '\n 'is not listed in the \"pages\" configuration.'\n )\n assert False, msg % (source_file, target_file)\n path = utils.get_url_path(target_file)\n path = self.nav.url_context.make_relative(path)\n else:\n path = utils.get_url_path(path).lstrip('/')\n\n # Convert the .md hyperlink to a relative hyperlink to the HTML page.\n url = urlunparse((scheme, netloc, path, query, query, fragment))\n return 'a href=\"%s\"' % url\n\n\ndef convert_markdown(markdown_source, extensions=()):\n \"\"\"\n Convert the Markdown source file to HTML content, and additionally\n return the parsed table of contents, and a dictionary of any metadata\n that was specified in the Markdown file.\n\n `extensions` is an optional sequence of Python Markdown extensions to add\n to the default set.\n \"\"\"\n\n # Prepend a table of contents marker for the TOC extension\n markdown_source = toc.pre_process(markdown_source)\n\n # Generate the HTML from the markdown source\n md = markdown.Markdown(\n extensions=['meta', 'toc', 'tables', 'fenced_code'] + list(extensions)\n )\n html_content = md.convert(markdown_source)\n meta = md.Meta\n\n # Strip out the generated table of contents\n (html_content, toc_html) = toc.post_process(html_content)\n\n # Post process the generated table of contents into a data structure\n table_of_contents = 
toc.TableOfContents(toc_html)\n\n return (html_content, table_of_contents, meta)\n\n\ndef post_process_html(html_content, nav=None):\n html_content = re.sub(r'a href=\"([^\"]*)\"', PathToURL(nav), html_content)\n html_content = re.sub('<pre>', '<pre class=\"prettyprint well\">', html_content)\n return html_content\n\n\ndef get_context(page, content, nav, toc, meta, config):\n site_name = config['site_name']\n\n if page.is_homepage or page.title is None:\n page_title = site_name\n else:\n page_title = page.title + ' - ' + site_name\n\n if page.is_homepage:\n page_description = config['site_description']\n else:\n page_description = None\n\n if config['site_url']:\n base = config['site_url']\n if not base.endswith('/'):\n base += '/'\n canonical_url = urljoin(base, page.abs_url.lstrip('/'))\n else:\n canonical_url = None\n\n if config['site_favicon']:\n site_favicon = nav.url_context.make_relative('/' + config['site_favicon'])\n else:\n site_favicon = None\n\n if config['extra_javascript']:\n config['extra_javascript'] = utils.create_media_urls(nav=nav, url_list=config['extra_javascript'])\n\n if config['extra_css']:\n config['extra_css'] = utils.create_media_urls(nav=nav, url_list=config['extra_css'])\n\n return {\n 'site_name': site_name,\n 'site_author': config['site_author'],\n 'favicon': site_favicon,\n\n 'page_title': page_title,\n 'page_description': page_description,\n\n 'content': content,\n 'toc': toc,\n 'nav': nav,\n 'meta': meta,\n\n 'base_url': nav.url_context.make_relative('/'),\n 'homepage_url': nav.homepage.url,\n 'canonical_url': canonical_url,\n\n 'current_page': page,\n 'previous_page': page.previous_page,\n 'next_page': page.next_page,\n\n # Note that there's intentionally repetition here. Rather than simply\n # provide the config dictionary we instead pass everything explicitly.\n #\n # This helps ensure that we can throughly document the context that\n # gets passed to themes.\n 'repo_url': config['repo_url'],\n 'repo_name': config['repo_name'],\n\n 'extra_css': config['extra_css'],\n 'extra_javascript': config['extra_javascript'],\n\n 'include_nav': config['include_nav'],\n 'include_next_prev': config['include_next_prev'],\n 'include_search': config['include_search'],\n\n 'copyright': config['copyright'],\n 'google-analytics': config['google-analytics']\n }\n\n\ndef build_pages(config):\n \"\"\"\n Builds all the pages and writes them into the build directory.\n \"\"\"\n site_navigation = nav.SiteNavigation(config['pages'])\n loader = jinja2.FileSystemLoader(config['theme_dir'])\n env = jinja2.Environment(loader=loader)\n\n for page in site_navigation.walk_pages():\n # Read the input file\n input_path = os.path.join(config['docs_dir'], page.input_path)\n input_content = open(input_path, 'r').read()\n if PY2:\n input_content = input_content.decode('utf-8')\n\n # Process the markdown text\n html_content, table_of_contents, meta = convert_markdown(\n input_content, extensions=config['markdown_extensions']\n )\n html_content = post_process_html(html_content, site_navigation)\n\n context = get_context(\n page, html_content, site_navigation,\n table_of_contents, meta, config\n )\n\n # Allow 'template:' override in md source files.\n if 'template' in meta:\n template = env.get_template(meta['template'][0])\n else:\n template = env.get_template('base.html')\n\n # Render the template.\n output_content = template.render(context)\n\n # Write the output file.\n output_path = os.path.join(config['site_dir'], page.output_path)\n utils.write_file(output_content.encode('utf-8'), 
output_path)\n\n\ndef build(config, live_server=False):\n \"\"\"\n Perform a full site build.\n \"\"\"\n if not live_server:\n print(\"Building documentation to directory: %s\" % config['site_dir'])\n utils.copy_media_files(config['theme_dir'], config['site_dir'])\n utils.copy_media_files(config['docs_dir'], config['site_dir'])\n build_pages(config)\n", "path": "mkdocs/build.py"}]}
| 2,634 | 279 |
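Note: the root cause in the record above is that `get_context` overwrote `config['extra_css']` and `config['extra_javascript']` in place, so every page build re-applied `utils.create_media_urls` to paths that had already been made relative, and the `../` prefixes accumulated. The golden diff computes per-page local values instead of mutating the shared config. The same pattern in isolation (the `create_media_urls` stand-in below takes an explicit per-page prefix, which is a simplification of mkdocs' nav-based URL handling):

```python
def create_media_urls(url_list, page_prefix):
    # simplified stand-in: make each media URL relative to the current page
    return [page_prefix + url.lstrip("/") for url in url_list]


config = {"extra_javascript": ["test.js"]}

# Buggy pattern: mutate the shared config on every page build, so the next page
# relativizes an already-relativized path.
for page_prefix in ["./", "../", "../"]:
    config["extra_javascript"] = create_media_urls(config["extra_javascript"], page_prefix)
print(config["extra_javascript"])  # ['../.././test.js'] -- prefixes pile up, as reported

config = {"extra_javascript": ["test.js"]}

# Fixed pattern (what the golden diff does): derive a per-page value, leave config alone.
for page_prefix in ["./", "../", "../"]:
    extra_javascript = create_media_urls(config["extra_javascript"], page_prefix)
print(config["extra_javascript"])  # still ['test.js']
```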
gh_patches_debug_27658
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-1058
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
.setlocation gives an error and fails: AttributeError: 'NoneType' object has no attribute 'get'
yoosi: `.setlocation london`
sopel: `AttributeError: 'NoneType' object has no attribute 'get' (file "/home/sopel/sopel/sopel/modules/weather.py", line 181, in update_woeid)`
</issue>
<code>
[start of sopel/modules/weather.py]
1 # coding=utf-8
2 # Copyright 2008, Sean B. Palmer, inamidst.com
3 # Copyright 2012, Elsie Powell, embolalia.com
4 # Licensed under the Eiffel Forum License 2.
5 from __future__ import unicode_literals, absolute_import, print_function, division
6
7 from sopel import web
8 from sopel.module import commands, example, NOLIMIT
9
10 import xmltodict
11
12
13 def woeid_search(query):
14 """
15 Find the first Where On Earth ID for the given query. Result is the etree
16 node for the result, so that location data can still be retrieved. Returns
17 None if there is no result, or the woeid field is empty.
18 """
19 query = 'q=select * from geo.places where text="%s"' % query
20 body = web.get('http://query.yahooapis.com/v1/public/yql?' + query,
21 dont_decode=True)
22 parsed = xmltodict.parse(body).get('query')
23 results = parsed.get('results')
24 if results is None or results.get('place') is None:
25 return None
26 if type(results.get('place')) is list:
27 return results.get('place')[0]
28 return results.get('place')
29
30
31 def get_cover(parsed):
32 try:
33 condition = parsed['channel']['item']['yweather:condition']
34 except KeyError:
35 return 'unknown'
36 text = condition['@text']
37 # code = int(condition['code'])
38 # TODO parse code to get those little icon thingies.
39 return text
40
41
42 def get_temp(parsed):
43 try:
44 condition = parsed['channel']['item']['yweather:condition']
45 temp = int(condition['@temp'])
46 except (KeyError, ValueError):
47 return 'unknown'
48 f = round((temp * 1.8) + 32, 2)
49 return (u'%d\u00B0C (%d\u00B0F)' % (temp, f))
50
51
52 def get_humidity(parsed):
53 try:
54 humidity = parsed['channel']['yweather:atmosphere']['@humidity']
55 except (KeyError, ValueError):
56 return 'unknown'
57 return "Humidity: %s%%" % humidity
58
59
60 def get_wind(parsed):
61 try:
62 wind_data = parsed['channel']['yweather:wind']
63 kph = float(wind_data['@speed'])
64 m_s = float(round(kph / 3.6, 1))
65 speed = int(round(kph / 1.852, 0))
66 degrees = int(wind_data['@direction'])
67 except (KeyError, ValueError):
68 return 'unknown'
69
70 if speed < 1:
71 description = 'Calm'
72 elif speed < 4:
73 description = 'Light air'
74 elif speed < 7:
75 description = 'Light breeze'
76 elif speed < 11:
77 description = 'Gentle breeze'
78 elif speed < 16:
79 description = 'Moderate breeze'
80 elif speed < 22:
81 description = 'Fresh breeze'
82 elif speed < 28:
83 description = 'Strong breeze'
84 elif speed < 34:
85 description = 'Near gale'
86 elif speed < 41:
87 description = 'Gale'
88 elif speed < 48:
89 description = 'Strong gale'
90 elif speed < 56:
91 description = 'Storm'
92 elif speed < 64:
93 description = 'Violent storm'
94 else:
95 description = 'Hurricane'
96
97 if (degrees <= 22.5) or (degrees > 337.5):
98 degrees = u'\u2193'
99 elif (degrees > 22.5) and (degrees <= 67.5):
100 degrees = u'\u2199'
101 elif (degrees > 67.5) and (degrees <= 112.5):
102 degrees = u'\u2190'
103 elif (degrees > 112.5) and (degrees <= 157.5):
104 degrees = u'\u2196'
105 elif (degrees > 157.5) and (degrees <= 202.5):
106 degrees = u'\u2191'
107 elif (degrees > 202.5) and (degrees <= 247.5):
108 degrees = u'\u2197'
109 elif (degrees > 247.5) and (degrees <= 292.5):
110 degrees = u'\u2192'
111 elif (degrees > 292.5) and (degrees <= 337.5):
112 degrees = u'\u2198'
113
114 return description + ' ' + str(m_s) + 'm/s (' + degrees + ')'
115
116
117 @commands('weather', 'wea')
118 @example('.weather London')
119 def weather(bot, trigger):
120 """.weather location - Show the weather at the given location."""
121
122 location = trigger.group(2)
123 woeid = ''
124 if not location:
125 woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
126 if not woeid:
127 return bot.msg(trigger.sender, "I don't know where you live. " +
128 'Give me a location, like .weather London, or tell me where you live by saying .setlocation London, for example.')
129 else:
130 location = location.strip()
131 woeid = bot.db.get_nick_value(location, 'woeid')
132 if woeid is None:
133 first_result = woeid_search(location)
134 if first_result is not None:
135 woeid = first_result.get('woeid')
136
137 if not woeid:
138 return bot.reply("I don't know where that is.")
139
140 query = web.urlencode({'w': woeid, 'u': 'c'})
141 raw = web.get('http://weather.yahooapis.com/forecastrss?' + query,
142 dont_decode=True)
143 parsed = xmltodict.parse(raw).get('rss')
144 location = parsed.get('channel').get('title')
145
146 cover = get_cover(parsed)
147 temp = get_temp(parsed)
148 humidity = get_humidity(parsed)
149 wind = get_wind(parsed)
150 bot.say(u'%s: %s, %s, %s, %s' % (location, cover, temp, humidity, wind))
151
152
153 @commands('setlocation', 'setwoeid')
154 @example('.setlocation Columbus, OH')
155 def update_woeid(bot, trigger):
156 """Set your default weather location."""
157 if not trigger.group(2):
158 bot.reply('Give me a location, like "Washington, DC" or "London".')
159 return NOLIMIT
160
161 first_result = woeid_search(trigger.group(2))
162 if first_result is None:
163 return bot.reply("I don't know where that is.")
164
165 woeid = first_result.get('woeid')
166
167 bot.db.set_nick_value(trigger.nick, 'woeid', woeid)
168
169 neighborhood = first_result.get('locality2') or ''
170 if neighborhood:
171 neighborhood = neighborhood.get('#text') + ', '
172 city = first_result.get('locality1') or ''
173 # This is to catch cases like 'Bawlf, Alberta' where the location is
174 # thought to be a "LocalAdmin" rather than a "Town"
175 if city:
176 city = city.get('#text')
177 else:
178 city = first_result.get('name')
179 state = first_result.get('admin1').get('#text') or ''
180 country = first_result.get('country').get('#text') or ''
181 uzip = first_result.get('postal').get('#text') or ''
182 bot.reply('I now have you at WOEID %s (%s%s, %s, %s %s)' %
183 (woeid, neighborhood, city, state, country, uzip))
184
[end of sopel/modules/weather.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sopel/modules/weather.py b/sopel/modules/weather.py
--- a/sopel/modules/weather.py
+++ b/sopel/modules/weather.py
@@ -137,16 +137,16 @@
if not woeid:
return bot.reply("I don't know where that is.")
- query = web.urlencode({'w': woeid, 'u': 'c'})
- raw = web.get('http://weather.yahooapis.com/forecastrss?' + query,
+ query = 'q=select * from weather.forecast where woeid="%s" and u=\'c\'' % woeid
+ body = web.get('http://query.yahooapis.com/v1/public/yql?' + query,
dont_decode=True)
- parsed = xmltodict.parse(raw).get('rss')
- location = parsed.get('channel').get('title')
-
- cover = get_cover(parsed)
- temp = get_temp(parsed)
- humidity = get_humidity(parsed)
- wind = get_wind(parsed)
+ parsed = xmltodict.parse(body).get('query')
+ results = parsed.get('results')
+ location = results.get('channel').get('title')
+ cover = get_cover(results)
+ temp = get_temp(results)
+ humidity = get_humidity(results)
+ wind = get_wind(results)
bot.say(u'%s: %s, %s, %s, %s' % (location, cover, temp, humidity, wind))
@@ -178,6 +178,5 @@
city = first_result.get('name')
state = first_result.get('admin1').get('#text') or ''
country = first_result.get('country').get('#text') or ''
- uzip = first_result.get('postal').get('#text') or ''
- bot.reply('I now have you at WOEID %s (%s%s, %s, %s %s)' %
- (woeid, neighborhood, city, state, country, uzip))
+ bot.reply('I now have you at WOEID %s (%s%s, %s, %s)' %
+ (woeid, neighborhood, city, state, country))
|
{"golden_diff": "diff --git a/sopel/modules/weather.py b/sopel/modules/weather.py\n--- a/sopel/modules/weather.py\n+++ b/sopel/modules/weather.py\n@@ -137,16 +137,16 @@\n if not woeid:\n return bot.reply(\"I don't know where that is.\")\n \n- query = web.urlencode({'w': woeid, 'u': 'c'})\n- raw = web.get('http://weather.yahooapis.com/forecastrss?' + query,\n+ query = 'q=select * from weather.forecast where woeid=\"%s\" and u=\\'c\\'' % woeid\n+ body = web.get('http://query.yahooapis.com/v1/public/yql?' + query,\n dont_decode=True)\n- parsed = xmltodict.parse(raw).get('rss')\n- location = parsed.get('channel').get('title')\n-\n- cover = get_cover(parsed)\n- temp = get_temp(parsed)\n- humidity = get_humidity(parsed)\n- wind = get_wind(parsed)\n+ parsed = xmltodict.parse(body).get('query')\n+ results = parsed.get('results')\n+ location = results.get('channel').get('title')\n+ cover = get_cover(results)\n+ temp = get_temp(results)\n+ humidity = get_humidity(results)\n+ wind = get_wind(results)\n bot.say(u'%s: %s, %s, %s, %s' % (location, cover, temp, humidity, wind))\n \n \n@@ -178,6 +178,5 @@\n city = first_result.get('name')\n state = first_result.get('admin1').get('#text') or ''\n country = first_result.get('country').get('#text') or ''\n- uzip = first_result.get('postal').get('#text') or ''\n- bot.reply('I now have you at WOEID %s (%s%s, %s, %s %s)' %\n- (woeid, neighborhood, city, state, country, uzip))\n+ bot.reply('I now have you at WOEID %s (%s%s, %s, %s)' %\n+ (woeid, neighborhood, city, state, country))\n", "issue": ".setlocation gives an error and fails: AttributeError: 'NoneType' object has no attribute 'get'\nyoosi: `.setlocation london`\n\nsopel: `AttributeError: 'NoneType' object has no attribute 'get' (file \"/home/sopel/sopel/sopel/modules/weather.py\", line 181, in update_woeid)`\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2008, Sean B. Palmer, inamidst.com\n# Copyright 2012, Elsie Powell, embolalia.com\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel import web\nfrom sopel.module import commands, example, NOLIMIT\n\nimport xmltodict\n\n\ndef woeid_search(query):\n \"\"\"\n Find the first Where On Earth ID for the given query. Result is the etree\n node for the result, so that location data can still be retrieved. Returns\n None if there is no result, or the woeid field is empty.\n \"\"\"\n query = 'q=select * from geo.places where text=\"%s\"' % query\n body = web.get('http://query.yahooapis.com/v1/public/yql?' 
+ query,\n dont_decode=True)\n parsed = xmltodict.parse(body).get('query')\n results = parsed.get('results')\n if results is None or results.get('place') is None:\n return None\n if type(results.get('place')) is list:\n return results.get('place')[0]\n return results.get('place')\n\n\ndef get_cover(parsed):\n try:\n condition = parsed['channel']['item']['yweather:condition']\n except KeyError:\n return 'unknown'\n text = condition['@text']\n # code = int(condition['code'])\n # TODO parse code to get those little icon thingies.\n return text\n\n\ndef get_temp(parsed):\n try:\n condition = parsed['channel']['item']['yweather:condition']\n temp = int(condition['@temp'])\n except (KeyError, ValueError):\n return 'unknown'\n f = round((temp * 1.8) + 32, 2)\n return (u'%d\\u00B0C (%d\\u00B0F)' % (temp, f))\n\n\ndef get_humidity(parsed):\n try:\n humidity = parsed['channel']['yweather:atmosphere']['@humidity']\n except (KeyError, ValueError):\n return 'unknown'\n return \"Humidity: %s%%\" % humidity\n\n\ndef get_wind(parsed):\n try:\n wind_data = parsed['channel']['yweather:wind']\n kph = float(wind_data['@speed'])\n m_s = float(round(kph / 3.6, 1))\n speed = int(round(kph / 1.852, 0))\n degrees = int(wind_data['@direction'])\n except (KeyError, ValueError):\n return 'unknown'\n\n if speed < 1:\n description = 'Calm'\n elif speed < 4:\n description = 'Light air'\n elif speed < 7:\n description = 'Light breeze'\n elif speed < 11:\n description = 'Gentle breeze'\n elif speed < 16:\n description = 'Moderate breeze'\n elif speed < 22:\n description = 'Fresh breeze'\n elif speed < 28:\n description = 'Strong breeze'\n elif speed < 34:\n description = 'Near gale'\n elif speed < 41:\n description = 'Gale'\n elif speed < 48:\n description = 'Strong gale'\n elif speed < 56:\n description = 'Storm'\n elif speed < 64:\n description = 'Violent storm'\n else:\n description = 'Hurricane'\n\n if (degrees <= 22.5) or (degrees > 337.5):\n degrees = u'\\u2193'\n elif (degrees > 22.5) and (degrees <= 67.5):\n degrees = u'\\u2199'\n elif (degrees > 67.5) and (degrees <= 112.5):\n degrees = u'\\u2190'\n elif (degrees > 112.5) and (degrees <= 157.5):\n degrees = u'\\u2196'\n elif (degrees > 157.5) and (degrees <= 202.5):\n degrees = u'\\u2191'\n elif (degrees > 202.5) and (degrees <= 247.5):\n degrees = u'\\u2197'\n elif (degrees > 247.5) and (degrees <= 292.5):\n degrees = u'\\u2192'\n elif (degrees > 292.5) and (degrees <= 337.5):\n degrees = u'\\u2198'\n\n return description + ' ' + str(m_s) + 'm/s (' + degrees + ')'\n\n\n@commands('weather', 'wea')\n@example('.weather London')\ndef weather(bot, trigger):\n \"\"\".weather location - Show the weather at the given location.\"\"\"\n\n location = trigger.group(2)\n woeid = ''\n if not location:\n woeid = bot.db.get_nick_value(trigger.nick, 'woeid')\n if not woeid:\n return bot.msg(trigger.sender, \"I don't know where you live. \" +\n 'Give me a location, like .weather London, or tell me where you live by saying .setlocation London, for example.')\n else:\n location = location.strip()\n woeid = bot.db.get_nick_value(location, 'woeid')\n if woeid is None:\n first_result = woeid_search(location)\n if first_result is not None:\n woeid = first_result.get('woeid')\n\n if not woeid:\n return bot.reply(\"I don't know where that is.\")\n\n query = web.urlencode({'w': woeid, 'u': 'c'})\n raw = web.get('http://weather.yahooapis.com/forecastrss?' 
+ query,\n dont_decode=True)\n parsed = xmltodict.parse(raw).get('rss')\n location = parsed.get('channel').get('title')\n\n cover = get_cover(parsed)\n temp = get_temp(parsed)\n humidity = get_humidity(parsed)\n wind = get_wind(parsed)\n bot.say(u'%s: %s, %s, %s, %s' % (location, cover, temp, humidity, wind))\n\n\n@commands('setlocation', 'setwoeid')\n@example('.setlocation Columbus, OH')\ndef update_woeid(bot, trigger):\n \"\"\"Set your default weather location.\"\"\"\n if not trigger.group(2):\n bot.reply('Give me a location, like \"Washington, DC\" or \"London\".')\n return NOLIMIT\n\n first_result = woeid_search(trigger.group(2))\n if first_result is None:\n return bot.reply(\"I don't know where that is.\")\n\n woeid = first_result.get('woeid')\n\n bot.db.set_nick_value(trigger.nick, 'woeid', woeid)\n\n neighborhood = first_result.get('locality2') or ''\n if neighborhood:\n neighborhood = neighborhood.get('#text') + ', '\n city = first_result.get('locality1') or ''\n # This is to catch cases like 'Bawlf, Alberta' where the location is\n # thought to be a \"LocalAdmin\" rather than a \"Town\"\n if city:\n city = city.get('#text')\n else:\n city = first_result.get('name')\n state = first_result.get('admin1').get('#text') or ''\n country = first_result.get('country').get('#text') or ''\n uzip = first_result.get('postal').get('#text') or ''\n bot.reply('I now have you at WOEID %s (%s%s, %s, %s %s)' %\n (woeid, neighborhood, city, state, country, uzip))\n", "path": "sopel/modules/weather.py"}]}
| 2,791 | 491 |
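Note: the `AttributeError` in the record above comes from chaining `.get()` calls on a key that may be absent: `first_result.get('postal')` returns `None` for places without a postal entry (as with the `london` lookup in the report), and the subsequent `.get('#text')` then fails. The golden diff drops the postal field (and moves the forecast lookup to a YQL query); a more general defensive pattern for this kind of nested xmltodict lookup, shown here purely as an illustration rather than as sopel's code:

```python
def nested_text(record, key, default=''):
    """Return record[key]['#text'] if present, else default.

    Safe when the key is missing, or when it maps to a plain string
    instead of a dict (both happen with xmltodict output).
    """
    value = record.get(key)
    if isinstance(value, dict):
        return value.get('#text', default)
    return value or default


first_result = {
    'woeid': '44418',
    'name': 'London',
    'admin1': {'#text': 'England'},
    'country': {'#text': 'United Kingdom'},
    # no 'postal' entry -- exactly the case that raised the AttributeError
}

state = nested_text(first_result, 'admin1')     # 'England'
country = nested_text(first_result, 'country')  # 'United Kingdom'
uzip = nested_text(first_result, 'postal')      # '' instead of raising
```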
gh_patches_debug_4126
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-762
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Value error using sjoin with pandas v0.23
I use the `sjoin` function to add the region name (polygons) to every point within the region. Some points are not in any region, so I filter those points and buffer them step by step; the layer of points without an intersection therefore becomes smaller and smaller. If there is only one row left, I get the following error with pandas v0.23 which I did not get before (pandas < v0.23), using geopandas v0.3.0.
My call:
```python
new = gpd.sjoin(rest_points, polygons, how='left', op='intersects')
```
Error message:
```python
ValueError: You are trying to merge on object and int64 columns.
If you wish to proceed you should use pd.concat
```
class: `GeoDataFrame`
method: `merge(self, *args, **kwargs)`
line: `result = DataFrame.merge(self, *args, **kwargs)`
I do not understand the error and why it happens only with the last point (last row) and only with the newest pandas version. I had a look at "What's New" but could not find anything.
Full message:
```python
File "virtualenv/lib/python3.5/site-packages/geopandas/tools/sjoin.py", line 140,
in sjoin suffixes=('_%s' % lsuffix, '_%s' % rsuffix))
File "virtualenv/lib/python3.5/site-packages/geopandas/geodataframe.py", line 418,
in merge result = DataFrame.merge(self, *args, **kwargs)
File "virtualenv/lib/python3.5/site-packages/pandas/core/frame.py", line 6379,
in merge copy=copy, indicator=indicator, validate=validate)
File "virtualenv/lib/python3.5/site-packages/pandas/core/reshape/merge.py", line 60,
in mergevalidate=validate)
File "virtualenv/lib/python3.5/site-packages/pandas/core/reshape/merge.py", line 554,
in __init__self._maybe_coerce_merge_keys()
File "virtualenv/lib/python3.5/site-packages/pandas/core/reshape/merge.py", line 980,
in _maybe_coerce_merge_keys
raise ValueError(msg)
ValueError: You are trying to merge on object and int64 columns.
If you wish to proceed you should use pd.concat
```
</issue>
<code>
[start of geopandas/tools/sjoin.py]
1 from warnings import warn
2
3 import numpy as np
4 import pandas as pd
5 from shapely import prepared
6
7
8 def sjoin(left_df, right_df, how='inner', op='intersects',
9 lsuffix='left', rsuffix='right'):
10 """Spatial join of two GeoDataFrames.
11
12 Parameters
13 ----------
14 left_df, right_df : GeoDataFrames
15 how : string, default 'inner'
16 The type of join:
17
18 * 'left': use keys from left_df; retain only left_df geometry column
19 * 'right': use keys from right_df; retain only right_df geometry column
20 * 'inner': use intersection of keys from both dfs; retain only
21 left_df geometry column
22 op : string, default 'intersection'
23 Binary predicate, one of {'intersects', 'contains', 'within'}.
24 See http://toblerity.org/shapely/manual.html#binary-predicates.
25 lsuffix : string, default 'left'
26 Suffix to apply to overlapping column names (left GeoDataFrame).
27 rsuffix : string, default 'right'
28 Suffix to apply to overlapping column names (right GeoDataFrame).
29
30 """
31 import rtree
32
33 allowed_hows = ['left', 'right', 'inner']
34 if how not in allowed_hows:
35 raise ValueError("`how` was \"%s\" but is expected to be in %s" %
36 (how, allowed_hows))
37
38 allowed_ops = ['contains', 'within', 'intersects']
39 if op not in allowed_ops:
40 raise ValueError("`op` was \"%s\" but is expected to be in %s" %
41 (op, allowed_ops))
42
43 if left_df.crs != right_df.crs:
44 warn('CRS of frames being joined does not match!')
45
46 index_left = 'index_%s' % lsuffix
47 index_right = 'index_%s' % rsuffix
48
49 # due to GH 352
50 if (any(left_df.columns.isin([index_left, index_right]))
51 or any(right_df.columns.isin([index_left, index_right]))):
52 raise ValueError("'{0}' and '{1}' cannot be names in the frames being"
53 " joined".format(index_left, index_right))
54
55 # the rtree spatial index only allows limited (numeric) index types, but an
56 # index in geopandas may be any arbitrary dtype. so reset both indices now
57 # and store references to the original indices, to be reaffixed later.
58 # GH 352
59 left_df = left_df.copy(deep=True)
60 left_df.index = left_df.index.rename(index_left)
61 left_df = left_df.reset_index()
62 right_df = right_df.copy(deep=True)
63 right_df.index = right_df.index.rename(index_right)
64 right_df = right_df.reset_index()
65
66 if op == "within":
67 # within implemented as the inverse of contains; swap names
68 left_df, right_df = right_df, left_df
69
70 # insert the bounds in the rtree spatial index
71 right_df_bounds = right_df.geometry.apply(lambda x: x.bounds)
72 stream = ((i, b, None) for i, b in enumerate(right_df_bounds))
73 tree_idx = rtree.index.Index(stream)
74
75 idxmatch = (left_df.geometry.apply(lambda x: x.bounds)
76 .apply(lambda x: list(tree_idx.intersection(x))))
77 idxmatch = idxmatch[idxmatch.apply(len) > 0]
78
79 if idxmatch.shape[0] > 0:
80 # if output from join has overlapping geometries
81 r_idx = np.concatenate(idxmatch.values)
82 l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
83
84 # Vectorize predicate operations
85 def find_intersects(a1, a2):
86 return a1.intersects(a2)
87
88 def find_contains(a1, a2):
89 return a1.contains(a2)
90
91 predicate_d = {'intersects': find_intersects,
92 'contains': find_contains,
93 'within': find_contains}
94
95 check_predicates = np.vectorize(predicate_d[op])
96
97 result = (
98 pd.DataFrame(
99 np.column_stack(
100 [l_idx,
101 r_idx,
102 check_predicates(
103 left_df.geometry
104 .apply(lambda x: prepared.prep(x))[l_idx],
105 right_df[right_df.geometry.name][r_idx])
106 ]))
107 )
108
109 result.columns = ['_key_left', '_key_right', 'match_bool']
110 result = (
111 pd.DataFrame(result[result['match_bool']==1])
112 .drop('match_bool', axis=1)
113 )
114
115 else:
116 # when output from the join has no overlapping geometries
117 result = pd.DataFrame(columns=['_key_left', '_key_right'])
118
119 if op == "within":
120 # within implemented as the inverse of contains; swap names
121 left_df, right_df = right_df, left_df
122 result = result.rename(columns={'_key_left': '_key_right',
123 '_key_right': '_key_left'})
124
125
126 if how == 'inner':
127 result = result.set_index('_key_left')
128 joined = (
129 left_df
130 .merge(result, left_index=True, right_index=True)
131 .merge(right_df.drop(right_df.geometry.name, axis=1),
132 left_on='_key_right', right_index=True,
133 suffixes=('_%s' % lsuffix, '_%s' % rsuffix))
134 )
135 joined = joined.set_index(index_left).drop(['_key_right'], axis=1)
136 joined.index.name = None
137 elif how == 'left':
138 result = result.set_index('_key_left')
139 joined = (
140 left_df
141 .merge(result, left_index=True, right_index=True, how='left')
142 .merge(right_df.drop(right_df.geometry.name, axis=1),
143 how='left', left_on='_key_right', right_index=True,
144 suffixes=('_%s' % lsuffix, '_%s' % rsuffix))
145 )
146 joined = joined.set_index(index_left).drop(['_key_right'], axis=1)
147 joined.index.name = None
148 else: # how == 'right':
149 joined = (
150 left_df
151 .drop(left_df.geometry.name, axis=1)
152 .merge(result.merge(right_df,
153 left_on='_key_right', right_index=True,
154 how='right'), left_index=True,
155 right_on='_key_left', how='right')
156 .set_index(index_right)
157 )
158 joined = joined.drop(['_key_left', '_key_right'], axis=1)
159
160 return joined
161
[end of geopandas/tools/sjoin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/geopandas/tools/sjoin.py b/geopandas/tools/sjoin.py
--- a/geopandas/tools/sjoin.py
+++ b/geopandas/tools/sjoin.py
@@ -114,7 +114,7 @@
else:
# when output from the join has no overlapping geometries
- result = pd.DataFrame(columns=['_key_left', '_key_right'])
+ result = pd.DataFrame(columns=['_key_left', '_key_right'], dtype=float)
if op == "within":
# within implemented as the inverse of contains; swap names
|
{"golden_diff": "diff --git a/geopandas/tools/sjoin.py b/geopandas/tools/sjoin.py\n--- a/geopandas/tools/sjoin.py\n+++ b/geopandas/tools/sjoin.py\n@@ -114,7 +114,7 @@\n \n else:\n # when output from the join has no overlapping geometries\n- result = pd.DataFrame(columns=['_key_left', '_key_right'])\n+ result = pd.DataFrame(columns=['_key_left', '_key_right'], dtype=float)\n \n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n", "issue": "Value error using sjoin with pandas v0.23\nI use the `sjoin` function to add the region name (polygons) to every point within the region. Some points are not in any region, therefore I filter these points and buffer them step by step. So the points layer without intersection becomes smaller and smaller. If there is only one row left I get the following error in pandas v0.23 which I did not get before (pandas < v0.23). Using geopandas v0.3.0.\r\n\r\nMy call:\r\n```python\r\nnew = gpd.sjoin(rest_points, polygons, how='left', op='intersects')\r\n```\r\n\r\nError message:\r\n```python\r\nValueError: You are trying to merge on object and int64 columns.\r\nIf you wish to proceed you should use pd.concat\r\n```\r\nclass: `GeoDataFrame`\r\nmethod: `merge(self, *args, **kwargs)`\r\nline: `result = DataFrame.merge(self, *args, **kwargs)`\r\n\r\nI do not understand the error and why it happens only with the last point (last row) and only with the newest pandas version. I had a look at \"What's New\" but could not find anything.\r\n\r\nFull message:\r\n```python\r\n File \"virtualenv/lib/python3.5/site-packages/geopandas/tools/sjoin.py\", line 140,\r\n in sjoin suffixes=('_%s' % lsuffix, '_%s' % rsuffix))\r\n File \"virtualenv/lib/python3.5/site-packages/geopandas/geodataframe.py\", line 418,\r\n in merge result = DataFrame.merge(self, *args, **kwargs)\r\n File \"virtualenv/lib/python3.5/site-packages/pandas/core/frame.py\", line 6379,\r\n in merge copy=copy, indicator=indicator, validate=validate)\r\n File \"virtualenv/lib/python3.5/site-packages/pandas/core/reshape/merge.py\", line 60,\r\n in mergevalidate=validate)\r\n File \"virtualenv/lib/python3.5/site-packages/pandas/core/reshape/merge.py\", line 554,\r\n in __init__self._maybe_coerce_merge_keys()\r\n File \"virtualenv/lib/python3.5/site-packages/pandas/core/reshape/merge.py\", line 980,\r\n in _maybe_coerce_merge_keys\r\n raise ValueError(msg)\r\nValueError: You are trying to merge on object and int64 columns.\r\nIf you wish to proceed you should use pd.concat\r\n```\n", "before_files": [{"content": "from warnings import warn\n\nimport numpy as np\nimport pandas as pd\nfrom shapely import prepared\n\n\ndef sjoin(left_df, right_df, how='inner', op='intersects',\n lsuffix='left', rsuffix='right'):\n \"\"\"Spatial join of two GeoDataFrames.\n\n Parameters\n ----------\n left_df, right_df : GeoDataFrames\n how : string, default 'inner'\n The type of join:\n\n * 'left': use keys from left_df; retain only left_df geometry column\n * 'right': use keys from right_df; retain only right_df geometry column\n * 'inner': use intersection of keys from both dfs; retain only\n left_df geometry column\n op : string, default 'intersection'\n Binary predicate, one of {'intersects', 'contains', 'within'}.\n See http://toblerity.org/shapely/manual.html#binary-predicates.\n lsuffix : string, default 'left'\n Suffix to apply to overlapping column names (left GeoDataFrame).\n rsuffix : string, default 'right'\n Suffix to apply to overlapping column names (right GeoDataFrame).\n\n \"\"\"\n import 
rtree\n\n allowed_hows = ['left', 'right', 'inner']\n if how not in allowed_hows:\n raise ValueError(\"`how` was \\\"%s\\\" but is expected to be in %s\" %\n (how, allowed_hows))\n\n allowed_ops = ['contains', 'within', 'intersects']\n if op not in allowed_ops:\n raise ValueError(\"`op` was \\\"%s\\\" but is expected to be in %s\" %\n (op, allowed_ops))\n\n if left_df.crs != right_df.crs:\n warn('CRS of frames being joined does not match!')\n\n index_left = 'index_%s' % lsuffix\n index_right = 'index_%s' % rsuffix\n\n # due to GH 352\n if (any(left_df.columns.isin([index_left, index_right]))\n or any(right_df.columns.isin([index_left, index_right]))):\n raise ValueError(\"'{0}' and '{1}' cannot be names in the frames being\"\n \" joined\".format(index_left, index_right))\n\n # the rtree spatial index only allows limited (numeric) index types, but an\n # index in geopandas may be any arbitrary dtype. so reset both indices now\n # and store references to the original indices, to be reaffixed later.\n # GH 352\n left_df = left_df.copy(deep=True)\n left_df.index = left_df.index.rename(index_left)\n left_df = left_df.reset_index()\n right_df = right_df.copy(deep=True)\n right_df.index = right_df.index.rename(index_right)\n right_df = right_df.reset_index()\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n\n # insert the bounds in the rtree spatial index\n right_df_bounds = right_df.geometry.apply(lambda x: x.bounds)\n stream = ((i, b, None) for i, b in enumerate(right_df_bounds))\n tree_idx = rtree.index.Index(stream)\n\n idxmatch = (left_df.geometry.apply(lambda x: x.bounds)\n .apply(lambda x: list(tree_idx.intersection(x))))\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n\n if idxmatch.shape[0] > 0:\n # if output from join has overlapping geometries\n r_idx = np.concatenate(idxmatch.values)\n l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n\n # Vectorize predicate operations\n def find_intersects(a1, a2):\n return a1.intersects(a2)\n\n def find_contains(a1, a2):\n return a1.contains(a2)\n\n predicate_d = {'intersects': find_intersects,\n 'contains': find_contains,\n 'within': find_contains}\n\n check_predicates = np.vectorize(predicate_d[op])\n\n result = (\n pd.DataFrame(\n np.column_stack(\n [l_idx,\n r_idx,\n check_predicates(\n left_df.geometry\n .apply(lambda x: prepared.prep(x))[l_idx],\n right_df[right_df.geometry.name][r_idx])\n ]))\n )\n\n result.columns = ['_key_left', '_key_right', 'match_bool']\n result = (\n pd.DataFrame(result[result['match_bool']==1])\n .drop('match_bool', axis=1)\n )\n\n else:\n # when output from the join has no overlapping geometries\n result = pd.DataFrame(columns=['_key_left', '_key_right'])\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n result = result.rename(columns={'_key_left': '_key_right',\n '_key_right': '_key_left'})\n\n\n if how == 'inner':\n result = result.set_index('_key_left')\n joined = (\n left_df\n .merge(result, left_index=True, right_index=True)\n .merge(right_df.drop(right_df.geometry.name, axis=1),\n left_on='_key_right', right_index=True,\n suffixes=('_%s' % lsuffix, '_%s' % rsuffix))\n )\n joined = joined.set_index(index_left).drop(['_key_right'], axis=1)\n joined.index.name = None\n elif how == 'left':\n result = result.set_index('_key_left')\n joined = (\n left_df\n .merge(result, left_index=True, right_index=True, how='left')\n 
.merge(right_df.drop(right_df.geometry.name, axis=1),\n how='left', left_on='_key_right', right_index=True,\n suffixes=('_%s' % lsuffix, '_%s' % rsuffix))\n )\n joined = joined.set_index(index_left).drop(['_key_right'], axis=1)\n joined.index.name = None\n else: # how == 'right':\n joined = (\n left_df\n .drop(left_df.geometry.name, axis=1)\n .merge(result.merge(right_df,\n left_on='_key_right', right_index=True,\n how='right'), left_index=True,\n right_on='_key_left', how='right')\n .set_index(index_right)\n )\n joined = joined.drop(['_key_left', '_key_right'], axis=1)\n\n return joined\n", "path": "geopandas/tools/sjoin.py"}]}
| 2,872 | 127 |
gh_patches_debug_32401
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-1961
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tests create figure hashes directory
when running `py.tests` on a single file, it creates unneeded tmp directories.
</issue>
<code>
[start of sunpy/conftest.py]
1 from __future__ import absolute_import, print_function
2 from functools import partial
3
4 import os
5 import socket
6 import tempfile
7 import json
8
9 from sunpy.extern.six.moves.urllib.request import urlopen
10 from sunpy.extern.six.moves.urllib.error import URLError
11
12 import pytest
13
14
15 # Force MPL to use non-gui backends for testing.
16 try:
17 import matplotlib
18 except ImportError:
19 pass
20 else:
21 matplotlib.use('Agg')
22
23 from astropy.tests import disable_internet
24
25 from sunpy.tests import hash
26
27 hash_library_original_len = len(hash.hash_library)
28
29 GOOGLE_URL = 'http://www.google.com'
30
31
32 def site_reachable(url):
33 try:
34 urlopen(url, timeout=1)
35 except (URLError, socket.timeout):
36 return False
37 else:
38 return True
39
40
41 is_online = partial(site_reachable, GOOGLE_URL)
42
43
44 def pytest_runtest_setup(item):
45 """
46 pytest hook to skip all tests that have the mark 'online' if the
47 client is online (simply detected by checking whether http://www.google.com
48 can be requested).
49 """
50 if isinstance(item, item.Function):
51 if 'online' in item.keywords and not is_online():
52 msg = 'skipping test {0} (reason: client seems to be offline)'
53 pytest.skip(msg.format(item.name))
54
55 if 'online' not in item.keywords:
56 disable_internet.turn_off_internet()
57
58
59 def pytest_runtest_teardown(item, nextitem):
60 disable_internet.turn_on_internet()
61
62
63 def pytest_unconfigure(config):
64 tempdir = tempfile.mkdtemp(suffix="_figures")
65 # the hash_library is indexed by the name of the test but we want to look
66 # things up with the hash value
67 inv_hash_library = {v: k for k, v in hash.hash_library.items()}
68
69 for h in hash.file_list:
70 test_name = inv_hash_library.get(h, '')
71 if test_name != '':
72 os.rename(hash.file_list[h], os.path.join(tempdir,
73 test_name + '.png'))
74 print('All test files for figure hashes can be found in {0}'.format(
75 tempdir))
76
77 # Check if additions have been made to the hash library
78 if len(hash.hash_library) > hash_library_original_len:
79 # Write the new hash library in JSON
80 tempdir = tempfile.mkdtemp()
81 hashfile = os.path.join(tempdir, hash.HASH_LIBRARY_NAME)
82 with open(hashfile, 'wb') as outfile:
83 json.dump(
84 hash.hash_library,
85 outfile,
86 sort_keys=True,
87 indent=4,
88 separators=(',', ': '))
89 print(
90 "The hash library has expanded and should be copied to sunpy/tests/")
91 print(" " + hashfile)
92
[end of sunpy/conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sunpy/conftest.py b/sunpy/conftest.py
--- a/sunpy/conftest.py
+++ b/sunpy/conftest.py
@@ -22,9 +22,8 @@
from astropy.tests import disable_internet
-from sunpy.tests import hash
-
-hash_library_original_len = len(hash.hash_library)
+from sunpy.tests.hash import HASH_LIBRARY_NAME
+from sunpy.tests.helpers import new_hash_library, figure_test_pngfiles
GOOGLE_URL = 'http://www.google.com'
@@ -61,31 +60,17 @@
def pytest_unconfigure(config):
- tempdir = tempfile.mkdtemp(suffix="_figures")
- # the hash_library is indexed by the name of the test but we want to look
- # things up with the hash value
- inv_hash_library = {v: k for k, v in hash.hash_library.items()}
-
- for h in hash.file_list:
- test_name = inv_hash_library.get(h, '')
- if test_name != '':
- os.rename(hash.file_list[h], os.path.join(tempdir,
- test_name + '.png'))
- print('All test files for figure hashes can be found in {0}'.format(
- tempdir))
-
- # Check if additions have been made to the hash library
- if len(hash.hash_library) > hash_library_original_len:
+ if len(figure_test_pngfiles) > 0:
+ tempdir = tempfile.mkdtemp(suffix="_figures")
+
+ # Rename each PNG with the name of the corresponding test
+ for test_name in figure_test_pngfiles:
+ os.rename(figure_test_pngfiles[test_name], os.path.join(tempdir, test_name + '.png'))
+
# Write the new hash library in JSON
- tempdir = tempfile.mkdtemp()
- hashfile = os.path.join(tempdir, hash.HASH_LIBRARY_NAME)
- with open(hashfile, 'wb') as outfile:
- json.dump(
- hash.hash_library,
- outfile,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))
- print(
- "The hash library has expanded and should be copied to sunpy/tests/")
- print(" " + hashfile)
+ hashfile = os.path.join(tempdir, HASH_LIBRARY_NAME)
+ with open(hashfile, 'w') as outfile:
+ json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': '))
+
+ print('All test files for figure hashes can be found in {0}'.format(tempdir))
+ print("The corresponding hash library is {0}".format(hashfile))
|
{"golden_diff": "diff --git a/sunpy/conftest.py b/sunpy/conftest.py\n--- a/sunpy/conftest.py\n+++ b/sunpy/conftest.py\n@@ -22,9 +22,8 @@\n \n from astropy.tests import disable_internet\n \n-from sunpy.tests import hash\n-\n-hash_library_original_len = len(hash.hash_library)\n+from sunpy.tests.hash import HASH_LIBRARY_NAME\n+from sunpy.tests.helpers import new_hash_library, figure_test_pngfiles\n \n GOOGLE_URL = 'http://www.google.com'\n \n@@ -61,31 +60,17 @@\n \n \n def pytest_unconfigure(config):\n- tempdir = tempfile.mkdtemp(suffix=\"_figures\")\n- # the hash_library is indexed by the name of the test but we want to look\n- # things up with the hash value\n- inv_hash_library = {v: k for k, v in hash.hash_library.items()}\n-\n- for h in hash.file_list:\n- test_name = inv_hash_library.get(h, '')\n- if test_name != '':\n- os.rename(hash.file_list[h], os.path.join(tempdir,\n- test_name + '.png'))\n- print('All test files for figure hashes can be found in {0}'.format(\n- tempdir))\n-\n- # Check if additions have been made to the hash library\n- if len(hash.hash_library) > hash_library_original_len:\n+ if len(figure_test_pngfiles) > 0:\n+ tempdir = tempfile.mkdtemp(suffix=\"_figures\")\n+\n+ # Rename each PNG with the name of the corresponding test\n+ for test_name in figure_test_pngfiles:\n+ os.rename(figure_test_pngfiles[test_name], os.path.join(tempdir, test_name + '.png'))\n+\n # Write the new hash library in JSON\n- tempdir = tempfile.mkdtemp()\n- hashfile = os.path.join(tempdir, hash.HASH_LIBRARY_NAME)\n- with open(hashfile, 'wb') as outfile:\n- json.dump(\n- hash.hash_library,\n- outfile,\n- sort_keys=True,\n- indent=4,\n- separators=(',', ': '))\n- print(\n- \"The hash library has expanded and should be copied to sunpy/tests/\")\n- print(\" \" + hashfile)\n+ hashfile = os.path.join(tempdir, HASH_LIBRARY_NAME)\n+ with open(hashfile, 'w') as outfile:\n+ json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': '))\n+\n+ print('All test files for figure hashes can be found in {0}'.format(tempdir))\n+ print(\"The corresponding hash library is {0}\".format(hashfile))\n", "issue": "tests create figure hashes directory\nwhen running `py.tests` on single file it creates unneeded tmp directories.\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function\nfrom functools import partial\n\nimport os\nimport socket\nimport tempfile\nimport json\n\nfrom sunpy.extern.six.moves.urllib.request import urlopen\nfrom sunpy.extern.six.moves.urllib.error import URLError\n\nimport pytest\n\n\n# Force MPL to use non-gui backends for testing.\ntry:\n import matplotlib\nexcept ImportError:\n pass\nelse:\n matplotlib.use('Agg')\n\nfrom astropy.tests import disable_internet\n\nfrom sunpy.tests import hash\n\nhash_library_original_len = len(hash.hash_library)\n\nGOOGLE_URL = 'http://www.google.com'\n\n\ndef site_reachable(url):\n try:\n urlopen(url, timeout=1)\n except (URLError, socket.timeout):\n return False\n else:\n return True\n\n\nis_online = partial(site_reachable, GOOGLE_URL)\n\n\ndef pytest_runtest_setup(item):\n \"\"\"\n pytest hook to skip all tests that have the mark 'online' if the\n client is online (simply detected by checking whether http://www.google.com\n can be requested).\n \"\"\"\n if isinstance(item, item.Function):\n if 'online' in item.keywords and not is_online():\n msg = 'skipping test {0} (reason: client seems to be offline)'\n pytest.skip(msg.format(item.name))\n\n if 'online' not in item.keywords:\n 
disable_internet.turn_off_internet()\n\n\ndef pytest_runtest_teardown(item, nextitem):\n disable_internet.turn_on_internet()\n\n\ndef pytest_unconfigure(config):\n tempdir = tempfile.mkdtemp(suffix=\"_figures\")\n # the hash_library is indexed by the name of the test but we want to look\n # things up with the hash value\n inv_hash_library = {v: k for k, v in hash.hash_library.items()}\n\n for h in hash.file_list:\n test_name = inv_hash_library.get(h, '')\n if test_name != '':\n os.rename(hash.file_list[h], os.path.join(tempdir,\n test_name + '.png'))\n print('All test files for figure hashes can be found in {0}'.format(\n tempdir))\n\n # Check if additions have been made to the hash library\n if len(hash.hash_library) > hash_library_original_len:\n # Write the new hash library in JSON\n tempdir = tempfile.mkdtemp()\n hashfile = os.path.join(tempdir, hash.HASH_LIBRARY_NAME)\n with open(hashfile, 'wb') as outfile:\n json.dump(\n hash.hash_library,\n outfile,\n sort_keys=True,\n indent=4,\n separators=(',', ': '))\n print(\n \"The hash library has expanded and should be copied to sunpy/tests/\")\n print(\" \" + hashfile)\n", "path": "sunpy/conftest.py"}]}
| 1,333 | 589 |
gh_patches_debug_8984
|
rasdani/github-patches
|
git_diff
|
gammapy__gammapy-2896
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Install fails when Cython is not already installed
**Gammapy version**
Every version I tested
**Bug description**
The setup.py hard imports Cython, so setuptools has no chance to install it.
**Expected behavior**
Install cython if needed before attempting to import it
**To Reproduce**
```
$ python -m venv test_gammapy
$ . test_gammapy/bin/activate
$ pip install gammapy
```
**Other information**
There is a very simple fix: provide a pyproject.toml that requires cython for the build system.
</issue>
<code>
[start of setup.py]
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 import sys
3 import setuptools
4 from distutils.version import LooseVersion
5
6 if LooseVersion(setuptools.__version__) < "30.3":
7 sys.stderr.write("ERROR: setuptools 30.3 or later is required by gammapy\n")
8 sys.exit(1)
9
10 # TODO: check if setuptools_scm, numpy, ... are OK
11 # Exit with good error message telling people to install those first if not
12
13
14 from Cython.Build import cythonize
15 from distutils.extension import Extension
16 import numpy as np
17
18
19 def make_cython_extension(filename):
20 return Extension(
21 filename.strip(".pyx").replace("/", "."),
22 [filename],
23 include_dirs=[np.get_include()],
24 )
25
26
27 cython_files = [
28 "gammapy/stats/fit_statistics_cython.pyx",
29 ]
30
31 ext_modules = cythonize([make_cython_extension(_) for _ in cython_files])
32
33 setuptools.setup(use_scm_version=True, ext_modules=ext_modules)
34
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,16 +1,5 @@
# Licensed under a 3-clause BSD style license - see LICENSE.rst
-import sys
import setuptools
-from distutils.version import LooseVersion
-
-if LooseVersion(setuptools.__version__) < "30.3":
- sys.stderr.write("ERROR: setuptools 30.3 or later is required by gammapy\n")
- sys.exit(1)
-
-# TODO: check if setuptools_scm, numpy, ... are OK
-# Exit with good error message telling people to install those first if not
-
-
from Cython.Build import cythonize
from distutils.extension import Extension
import numpy as np
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,16 +1,5 @@\n # Licensed under a 3-clause BSD style license - see LICENSE.rst\n-import sys\n import setuptools\n-from distutils.version import LooseVersion\n-\n-if LooseVersion(setuptools.__version__) < \"30.3\":\n- sys.stderr.write(\"ERROR: setuptools 30.3 or later is required by gammapy\\n\")\n- sys.exit(1)\n-\n-# TODO: check if setuptools_scm, numpy, ... are OK\n-# Exit with good error message telling people to install those first if not\n-\n-\n from Cython.Build import cythonize\n from distutils.extension import Extension\n import numpy as np\n", "issue": "Install fails when Cython is not already installed\n**Gammapy version**\r\n\r\nEvery I tested\r\n\r\n**Bug description**\r\n\r\nThe setup.py hard imports Cython, so setuptools has no chance to install it.\r\n\r\n**Expected behavior**\r\n\r\nInstall cython if needed before attempting to import it\r\n\r\n**To Reproduce**\r\n\r\n\r\n```\r\n$ python -m venv test_gammapy\r\n$ . test_gammapy/bin/activate\r\n$ pip install gammapy\r\n```\r\n\r\n\r\n**Other information**\r\nThere is a very simple fix: provide a pyproject.toml that requires cython for the build system.\r\n\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport sys\nimport setuptools\nfrom distutils.version import LooseVersion\n\nif LooseVersion(setuptools.__version__) < \"30.3\":\n sys.stderr.write(\"ERROR: setuptools 30.3 or later is required by gammapy\\n\")\n sys.exit(1)\n\n# TODO: check if setuptools_scm, numpy, ... are OK\n# Exit with good error message telling people to install those first if not\n\n\nfrom Cython.Build import cythonize\nfrom distutils.extension import Extension\nimport numpy as np\n\n\ndef make_cython_extension(filename):\n return Extension(\n filename.strip(\".pyx\").replace(\"/\", \".\"),\n [filename],\n include_dirs=[np.get_include()],\n )\n\n\ncython_files = [\n \"gammapy/stats/fit_statistics_cython.pyx\",\n]\n\next_modules = cythonize([make_cython_extension(_) for _ in cython_files])\n\nsetuptools.setup(use_scm_version=True, ext_modules=ext_modules)\n", "path": "setup.py"}]}
| 939 | 165 |
gh_patches_debug_7143
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-13499
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Circular import for warehouse.oidc
Surfaced via:
```shellsession
python -c "import warehouse.oidc"
```
```
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "workpath/warehouse/warehouse/oidc/__init__.py", line 13, in <module>
from warehouse.oidc.interfaces import IOIDCPublisherService
File "workpath/warehouse/warehouse/oidc/interfaces.py", line 19, in <module>
from warehouse.packaging.models import Project
File "workpath/warehouse/warehouse/packaging/__init__.py", line 17, in <module>
from warehouse.accounts.models import Email, User
File "workpath/warehouse/warehouse/accounts/__init__.py", line 22, in <module>
from warehouse.accounts.security_policy import (
File "workpath/warehouse/warehouse/accounts/security_policy.py", line 36, in <module>
from warehouse.utils.security_policy import AuthenticationMethod
File "workpath/warehouse/warehouse/utils/security_policy.py", line 21, in <module>
from warehouse.oidc.models import OIDCPublisher
File "workpath/warehouse/warehouse/oidc/models.py", line 24, in <module>
from warehouse.oidc.interfaces import SignedClaims
ImportError: cannot import name 'SignedClaims' from partially initialized module 'warehouse.oidc.interfaces' (most likely due to a circular import) (workpath/warehouse/warehouse/oidc/interfaces.py)
```
This isn't posing a problem in production (yet). I found it when I tried to filter out the warnings from `InsecureOIDCPublisherWarning` - but we should figure out how to not have a circular import.
</issue>
<code>
[start of warehouse/oidc/interfaces.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from __future__ import annotations
14
15 from typing import TYPE_CHECKING, Any, NewType
16
17 from zope.interface import Interface
18
19 from warehouse.packaging.models import Project
20 from warehouse.rate_limiting.interfaces import RateLimiterException
21
22 if TYPE_CHECKING:
23 from warehouse.oidc.models import PendingOIDCPublisher # pragma: no cover
24
25 SignedClaims = NewType("SignedClaims", dict[str, Any])
26
27
28 class IOIDCPublisherService(Interface):
29 def verify_jwt_signature(unverified_token: str):
30 """
31 Verify the given JWT's signature, returning its signed claims if
32 valid. If the signature is invalid, `None` is returned.
33
34 This method does **not** verify the claim set itself -- the API
35 consumer is responsible for evaluating the claim set.
36 """
37 pass
38
39 def find_publisher(signed_claims: SignedClaims, *, pending: bool = False):
40 """
41 Given a mapping of signed claims produced by `verify_jwt_signature`,
42 attempt to find and return either a `OIDCPublisher` or `PendingOIDCPublisher`
43 that matches them, depending on the value of `pending`.
44
45 If no publisher matches the claims, `None` is returned.
46 """
47 pass
48
49 def reify_pending_publisher(
50 pending_publisher: PendingOIDCPublisher, project: Project
51 ):
52 """
53 Reify the given pending `PendingOIDCPublisher` into an `OIDCPublisher`,
54 adding it to the given project (presumed newly created) in the process.
55
56 Returns the reified publisher.
57 """
58 pass
59
60
61 class TooManyOIDCRegistrations(RateLimiterException):
62 pass
63
[end of warehouse/oidc/interfaces.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/warehouse/oidc/interfaces.py b/warehouse/oidc/interfaces.py
--- a/warehouse/oidc/interfaces.py
+++ b/warehouse/oidc/interfaces.py
@@ -16,11 +16,11 @@
from zope.interface import Interface
-from warehouse.packaging.models import Project
from warehouse.rate_limiting.interfaces import RateLimiterException
if TYPE_CHECKING:
- from warehouse.oidc.models import PendingOIDCPublisher # pragma: no cover
+ from warehouse.oidc.models import PendingOIDCPublisher
+ from warehouse.packaging.models import Project
SignedClaims = NewType("SignedClaims", dict[str, Any])
|
{"golden_diff": "diff --git a/warehouse/oidc/interfaces.py b/warehouse/oidc/interfaces.py\n--- a/warehouse/oidc/interfaces.py\n+++ b/warehouse/oidc/interfaces.py\n@@ -16,11 +16,11 @@\n \n from zope.interface import Interface\n \n-from warehouse.packaging.models import Project\n from warehouse.rate_limiting.interfaces import RateLimiterException\n \n if TYPE_CHECKING:\n- from warehouse.oidc.models import PendingOIDCPublisher # pragma: no cover\n+ from warehouse.oidc.models import PendingOIDCPublisher\n+ from warehouse.packaging.models import Project\n \n SignedClaims = NewType(\"SignedClaims\", dict[str, Any])\n", "issue": "Circular import for warehouse.oidc\nSurfaced via:\r\n\r\n```shellsession\r\npython -c \"import warehouse.oidc\"\r\n```\r\n\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"workpath/warehouse/warehouse/oidc/__init__.py\", line 13, in <module>\r\n from warehouse.oidc.interfaces import IOIDCPublisherService\r\n File \"workpath/warehouse/warehouse/oidc/interfaces.py\", line 19, in <module>\r\n from warehouse.packaging.models import Project\r\n File \"workpath/warehouse/warehouse/packaging/__init__.py\", line 17, in <module>\r\n from warehouse.accounts.models import Email, User\r\n File \"workpath/warehouse/warehouse/accounts/__init__.py\", line 22, in <module>\r\n from warehouse.accounts.security_policy import (\r\n File \"workpath/warehouse/warehouse/accounts/security_policy.py\", line 36, in <module>\r\n from warehouse.utils.security_policy import AuthenticationMethod\r\n File \"workpath/warehouse/warehouse/utils/security_policy.py\", line 21, in <module>\r\n from warehouse.oidc.models import OIDCPublisher\r\n File \"workpath/warehouse/warehouse/oidc/models.py\", line 24, in <module>\r\n from warehouse.oidc.interfaces import SignedClaims\r\nImportError: cannot import name 'SignedClaims' from partially initialized module 'warehouse.oidc.interfaces' (most likely due to a circular import) (workpath/warehouse/warehouse/oidc/interfaces.py)\r\n```\r\n\r\nThis isn't posing a problem in production (yet). I found it when I tried to filter out the warnings from `InsecureOIDCPublisherWarning` - but we should figure out how to not have a circular import.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, NewType\n\nfrom zope.interface import Interface\n\nfrom warehouse.packaging.models import Project\nfrom warehouse.rate_limiting.interfaces import RateLimiterException\n\nif TYPE_CHECKING:\n from warehouse.oidc.models import PendingOIDCPublisher # pragma: no cover\n\nSignedClaims = NewType(\"SignedClaims\", dict[str, Any])\n\n\nclass IOIDCPublisherService(Interface):\n def verify_jwt_signature(unverified_token: str):\n \"\"\"\n Verify the given JWT's signature, returning its signed claims if\n valid. 
If the signature is invalid, `None` is returned.\n\n This method does **not** verify the claim set itself -- the API\n consumer is responsible for evaluating the claim set.\n \"\"\"\n pass\n\n def find_publisher(signed_claims: SignedClaims, *, pending: bool = False):\n \"\"\"\n Given a mapping of signed claims produced by `verify_jwt_signature`,\n attempt to find and return either a `OIDCPublisher` or `PendingOIDCPublisher`\n that matches them, depending on the value of `pending`.\n\n If no publisher matches the claims, `None` is returned.\n \"\"\"\n pass\n\n def reify_pending_publisher(\n pending_publisher: PendingOIDCPublisher, project: Project\n ):\n \"\"\"\n Reify the given pending `PendingOIDCPublisher` into an `OIDCPublisher`,\n adding it to the given project (presumed newly created) in the process.\n\n Returns the reified publisher.\n \"\"\"\n pass\n\n\nclass TooManyOIDCRegistrations(RateLimiterException):\n pass\n", "path": "warehouse/oidc/interfaces.py"}]}
| 1,514 | 145 |
gh_patches_debug_54008
|
rasdani/github-patches
|
git_diff
|
sublimelsp__LSP-555
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeError: expected string or buffer
https://github.com/tomv564/LSP/blob/e37c4e6d7d959890c465cada35dff7fef22feb6e/plugin/core/types.py#L50-L54
It happened only once so far, when `plugin_loaded` was called on startup. After restarting Sublime Text right away, it did not happen again.
```
Traceback (most recent call last):
File "F:\SublimeText\sublime_plugin.py", line 298, in on_api_ready
plc()
File "F:\SublimeText\Data\Packages\LSP\boot.py", line 30, in plugin_loaded
startup()
File "F:\SublimeText\Data\Packages\LSP\plugin\core\main.py", line 25, in startup
start_active_window()
File "F:\SublimeText\Data\Packages\LSP\plugin\core\main.py", line 44, in start_active_window
windows.lookup(window).start_active_views()
File "F:\SublimeText\Data\Packages\LSP\plugin\core\windows.py", line 336, in start_active_views
self._initialize_on_open(view)
File "F:\SublimeText\Data\Packages\LSP\plugin\core\windows.py", line 348, in _initialize_on_open
self._configs.syntax_configs(view))
File "F:\SublimeText\Data\Packages\LSP\plugin\core\configurations.py", line 120, in syntax_configs
return list(filter(lambda c: config_supports_syntax(c, syntax) and c.enabled, self.all))
File "F:\SublimeText\Data\Packages\LSP\plugin\core\configurations.py", line 120, in <lambda>
return list(filter(lambda c: config_supports_syntax(c, syntax) and c.enabled, self.all))
File "F:\SublimeText\Data\Packages\LSP\plugin\core\types.py", line 81, in config_supports_syntax
if re.search(r'|'.join(r'\b%s\b' % re.escape(s) for s in language.syntaxes), syntax, re.IGNORECASE):
File "./python3.3/re.py", line 161, in search
TypeError: expected string or buffer
```
</issue>
<code>
[start of plugin/completion.py]
1 import sublime
2 import sublime_plugin
3
4 try:
5 from typing import Any, List, Dict, Tuple, Callable, Optional
6 assert Any and List and Dict and Tuple and Callable and Optional
7 except ImportError:
8 pass
9
10 from .core.protocol import Request
11 from .core.events import global_events
12 from .core.settings import settings, client_configs
13 from .core.logging import debug
14 from .core.completion import parse_completion_response
15 from .core.registry import session_for_view, client_for_view
16 from .core.configurations import is_supported_syntax
17 from .core.documents import get_document_position
18 from .core.sessions import Session
19
20 NO_COMPLETION_SCOPES = 'comment, string'
21
22
23 class CompletionState(object):
24 IDLE = 0
25 REQUESTING = 1
26 APPLYING = 2
27 CANCELLING = 3
28
29
30 last_text_command = None
31
32
33 class CompletionHelper(sublime_plugin.EventListener):
34 def on_text_command(self, view, command_name, args):
35 global last_text_command
36 last_text_command = command_name
37
38
39 class CompletionHandler(sublime_plugin.ViewEventListener):
40 def __init__(self, view):
41 self.view = view
42 self.initialized = False
43 self.enabled = False
44 self.trigger_chars = [] # type: List[str]
45 self.state = CompletionState.IDLE
46 self.completions = [] # type: List[Any]
47 self.next_request = None # type: Optional[Tuple[str, List[int]]]
48 self.last_prefix = ""
49 self.last_location = 0
50
51 @classmethod
52 def is_applicable(cls, settings):
53 syntax = settings.get('syntax')
54 if syntax is not None:
55 return is_supported_syntax(syntax, client_configs.all)
56 else:
57 return False
58
59 def initialize(self):
60 self.initialized = True
61 session = session_for_view(self.view)
62 if session:
63 completionProvider = session.get_capability(
64 'completionProvider')
65 if completionProvider:
66 self.enabled = True
67 self.trigger_chars = completionProvider.get(
68 'triggerCharacters') or []
69 if self.trigger_chars:
70 self.register_trigger_chars(session)
71
72 def _view_language(self, config_name: str) -> 'Optional[str]':
73 languages = self.view.settings().get('lsp_language')
74 return languages.get(config_name) if languages else None
75
76 def register_trigger_chars(self, session: Session) -> None:
77 completion_triggers = self.view.settings().get('auto_complete_triggers', [])
78 view_language = self._view_language(session.config.name)
79 if view_language:
80 for language in session.config.languages:
81 if language.id == view_language:
82 for scope in language.scopes:
83 # debug("registering", self.trigger_chars, "for", scope)
84 scope_trigger = next(
85 (trigger for trigger in completion_triggers if trigger.get('selector', None) == scope),
86 None
87 )
88 if scope_trigger:
89 scope_trigger['characters'] = "".join(self.trigger_chars)
90 else:
91 completion_triggers.append({
92 'characters': "".join(self.trigger_chars),
93 'selector': scope
94 })
95
96 self.view.settings().set('auto_complete_triggers', completion_triggers)
97
98 def is_after_trigger_character(self, location):
99 if location > 0:
100 prev_char = self.view.substr(location - 1)
101 return prev_char in self.trigger_chars
102
103 def is_same_completion(self, prefix, locations):
104 # completion requests from the same location with the same prefix are cached.
105 current_start = locations[0] - len(prefix)
106 last_start = self.last_location - len(self.last_prefix)
107 return prefix.startswith(self.last_prefix) and current_start == last_start
108
109 def on_modified(self):
110 # hide completion when backspacing past last completion.
111 if self.view.sel()[0].begin() < self.last_location:
112 self.last_location = 0
113 self.view.run_command("hide_auto_complete")
114 # cancel current completion if the previous input is an space
115 prev_char = self.view.substr(self.view.sel()[0].begin() - 1)
116 if self.state == CompletionState.REQUESTING and prev_char.isspace():
117 self.state = CompletionState.CANCELLING
118
119 def on_query_completions(self, prefix, locations):
120 if prefix != "" and self.view.match_selector(locations[0], NO_COMPLETION_SCOPES):
121 # debug('discarding completion because no completion scope with prefix {}'.format(prefix))
122 return (
123 [],
124 sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS
125 )
126
127 if not self.initialized:
128 self.initialize()
129
130 if self.enabled:
131 reuse_completion = self.is_same_completion(prefix, locations)
132 if self.state == CompletionState.IDLE:
133 if not reuse_completion:
134 self.last_prefix = prefix
135 self.last_location = locations[0]
136 self.do_request(prefix, locations)
137 self.completions = []
138
139 elif self.state in (CompletionState.REQUESTING, CompletionState.CANCELLING):
140 self.next_request = (prefix, locations)
141 self.state = CompletionState.CANCELLING
142
143 elif self.state == CompletionState.APPLYING:
144 self.state = CompletionState.IDLE
145
146 return (
147 self.completions,
148 0 if not settings.only_show_lsp_completions
149 else sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS
150 )
151
152 def do_request(self, prefix: str, locations: 'List[int]'):
153 self.next_request = None
154 view = self.view
155
156 # don't store client so we can handle restarts
157 client = client_for_view(view)
158 if not client:
159 return
160
161 if settings.complete_all_chars or self.is_after_trigger_character(locations[0]):
162 global_events.publish("view.on_purge_changes", self.view)
163 document_position = get_document_position(view, locations[0])
164 if document_position:
165 client.send_request(
166 Request.complete(document_position),
167 self.handle_response,
168 self.handle_error)
169 self.state = CompletionState.REQUESTING
170
171 def handle_response(self, response: 'Optional[Dict]'):
172
173 if self.state == CompletionState.REQUESTING:
174 last_start = self.last_location - len(self.last_prefix)
175 last_row, last_col = self.view.rowcol(last_start)
176 self.completions = parse_completion_response(response, last_col, settings)
177
178 # if insert_best_completion was just ran, undo it before presenting new completions.
179 prev_char = self.view.substr(self.view.sel()[0].begin() - 1)
180 if prev_char.isspace():
181 if last_text_command == "insert_best_completion":
182 self.view.run_command("undo")
183
184 self.state = CompletionState.APPLYING
185 self.view.run_command("hide_auto_complete")
186 self.run_auto_complete()
187 elif self.state == CompletionState.CANCELLING:
188 self.state = CompletionState.IDLE
189 if self.next_request:
190 prefix, locations = self.next_request
191 self.do_request(prefix, locations)
192 else:
193 debug('Got unexpected response while in state {}'.format(self.state))
194
195 def handle_error(self, error: dict):
196 sublime.status_message('Completion error: ' + str(error.get('message')))
197 self.state = CompletionState.IDLE
198
199 def run_auto_complete(self):
200 self.view.run_command(
201 "auto_complete", {
202 'disable_auto_insert': True,
203 'api_completions_only': settings.only_show_lsp_completions,
204 'next_completion_if_showing': False
205 })
206
[end of plugin/completion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plugin/completion.py b/plugin/completion.py
--- a/plugin/completion.py
+++ b/plugin/completion.py
@@ -51,10 +51,7 @@
@classmethod
def is_applicable(cls, settings):
syntax = settings.get('syntax')
- if syntax is not None:
- return is_supported_syntax(syntax, client_configs.all)
- else:
- return False
+ return is_supported_syntax(syntax) if syntax else False
def initialize(self):
self.initialized = True
|
{"golden_diff": "diff --git a/plugin/completion.py b/plugin/completion.py\n--- a/plugin/completion.py\n+++ b/plugin/completion.py\n@@ -51,10 +51,7 @@\n @classmethod\n def is_applicable(cls, settings):\n syntax = settings.get('syntax')\n- if syntax is not None:\n- return is_supported_syntax(syntax, client_configs.all)\n- else:\n- return False\n+ return is_supported_syntax(syntax) if syntax else False\n \n def initialize(self):\n self.initialized = True\n", "issue": "TypeError: expected string or buffer\nhttps://github.com/tomv564/LSP/blob/e37c4e6d7d959890c465cada35dff7fef22feb6e/plugin/core/types.py#L50-L54\r\n\r\nIt happened only once so far, when `plugin_loaded` was called on start up. After restarting Sublime Text right away, it did not happened again.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"F:\\SublimeText\\sublime_plugin.py\", line 298, in on_api_ready\r\n plc()\r\n File \"F:\\SublimeText\\Data\\Packages\\LSP\\boot.py\", line 30, in plugin_loaded\r\n startup()\r\n File \"F:\\SublimeText\\Data\\Packages\\LSP\\plugin\\core\\main.py\", line 25, in startup\r\n start_active_window()\r\n File \"F:\\SublimeText\\Data\\Packages\\LSP\\plugin\\core\\main.py\", line 44, in start_active_window\r\n windows.lookup(window).start_active_views()\r\n File \"F:\\SublimeText\\Data\\Packages\\LSP\\plugin\\core\\windows.py\", line 336, in start_active_views\r\n self._initialize_on_open(view)\r\n File \"F:\\SublimeText\\Data\\Packages\\LSP\\plugin\\core\\windows.py\", line 348, in _initialize_on_open\r\n self._configs.syntax_configs(view))\r\n File \"F:\\SublimeText\\Data\\Packages\\LSP\\plugin\\core\\configurations.py\", line 120, in syntax_configs\r\n return list(filter(lambda c: config_supports_syntax(c, syntax) and c.enabled, self.all))\r\n File \"F:\\SublimeText\\Data\\Packages\\LSP\\plugin\\core\\configurations.py\", line 120, in <lambda>\r\n return list(filter(lambda c: config_supports_syntax(c, syntax) and c.enabled, self.all))\r\n File \"F:\\SublimeText\\Data\\Packages\\LSP\\plugin\\core\\types.py\", line 81, in config_supports_syntax\r\n if re.search(r'|'.join(r'\\b%s\\b' % re.escape(s) for s in language.syntaxes), syntax, re.IGNORECASE):\r\n File \"./python3.3/re.py\", line 161, in search\r\nTypeError: expected string or buffer\r\n```\n", "before_files": [{"content": "import sublime\nimport sublime_plugin\n\ntry:\n from typing import Any, List, Dict, Tuple, Callable, Optional\n assert Any and List and Dict and Tuple and Callable and Optional\nexcept ImportError:\n pass\n\nfrom .core.protocol import Request\nfrom .core.events import global_events\nfrom .core.settings import settings, client_configs\nfrom .core.logging import debug\nfrom .core.completion import parse_completion_response\nfrom .core.registry import session_for_view, client_for_view\nfrom .core.configurations import is_supported_syntax\nfrom .core.documents import get_document_position\nfrom .core.sessions import Session\n\nNO_COMPLETION_SCOPES = 'comment, string'\n\n\nclass CompletionState(object):\n IDLE = 0\n REQUESTING = 1\n APPLYING = 2\n CANCELLING = 3\n\n\nlast_text_command = None\n\n\nclass CompletionHelper(sublime_plugin.EventListener):\n def on_text_command(self, view, command_name, args):\n global last_text_command\n last_text_command = command_name\n\n\nclass CompletionHandler(sublime_plugin.ViewEventListener):\n def __init__(self, view):\n self.view = view\n self.initialized = False\n self.enabled = False\n self.trigger_chars = [] # type: List[str]\n self.state = CompletionState.IDLE\n self.completions 
= [] # type: List[Any]\n self.next_request = None # type: Optional[Tuple[str, List[int]]]\n self.last_prefix = \"\"\n self.last_location = 0\n\n @classmethod\n def is_applicable(cls, settings):\n syntax = settings.get('syntax')\n if syntax is not None:\n return is_supported_syntax(syntax, client_configs.all)\n else:\n return False\n\n def initialize(self):\n self.initialized = True\n session = session_for_view(self.view)\n if session:\n completionProvider = session.get_capability(\n 'completionProvider')\n if completionProvider:\n self.enabled = True\n self.trigger_chars = completionProvider.get(\n 'triggerCharacters') or []\n if self.trigger_chars:\n self.register_trigger_chars(session)\n\n def _view_language(self, config_name: str) -> 'Optional[str]':\n languages = self.view.settings().get('lsp_language')\n return languages.get(config_name) if languages else None\n\n def register_trigger_chars(self, session: Session) -> None:\n completion_triggers = self.view.settings().get('auto_complete_triggers', [])\n view_language = self._view_language(session.config.name)\n if view_language:\n for language in session.config.languages:\n if language.id == view_language:\n for scope in language.scopes:\n # debug(\"registering\", self.trigger_chars, \"for\", scope)\n scope_trigger = next(\n (trigger for trigger in completion_triggers if trigger.get('selector', None) == scope),\n None\n )\n if scope_trigger:\n scope_trigger['characters'] = \"\".join(self.trigger_chars)\n else:\n completion_triggers.append({\n 'characters': \"\".join(self.trigger_chars),\n 'selector': scope\n })\n\n self.view.settings().set('auto_complete_triggers', completion_triggers)\n\n def is_after_trigger_character(self, location):\n if location > 0:\n prev_char = self.view.substr(location - 1)\n return prev_char in self.trigger_chars\n\n def is_same_completion(self, prefix, locations):\n # completion requests from the same location with the same prefix are cached.\n current_start = locations[0] - len(prefix)\n last_start = self.last_location - len(self.last_prefix)\n return prefix.startswith(self.last_prefix) and current_start == last_start\n\n def on_modified(self):\n # hide completion when backspacing past last completion.\n if self.view.sel()[0].begin() < self.last_location:\n self.last_location = 0\n self.view.run_command(\"hide_auto_complete\")\n # cancel current completion if the previous input is an space\n prev_char = self.view.substr(self.view.sel()[0].begin() - 1)\n if self.state == CompletionState.REQUESTING and prev_char.isspace():\n self.state = CompletionState.CANCELLING\n\n def on_query_completions(self, prefix, locations):\n if prefix != \"\" and self.view.match_selector(locations[0], NO_COMPLETION_SCOPES):\n # debug('discarding completion because no completion scope with prefix {}'.format(prefix))\n return (\n [],\n sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS\n )\n\n if not self.initialized:\n self.initialize()\n\n if self.enabled:\n reuse_completion = self.is_same_completion(prefix, locations)\n if self.state == CompletionState.IDLE:\n if not reuse_completion:\n self.last_prefix = prefix\n self.last_location = locations[0]\n self.do_request(prefix, locations)\n self.completions = []\n\n elif self.state in (CompletionState.REQUESTING, CompletionState.CANCELLING):\n self.next_request = (prefix, locations)\n self.state = CompletionState.CANCELLING\n\n elif self.state == CompletionState.APPLYING:\n self.state = CompletionState.IDLE\n\n return (\n self.completions,\n 0 if not 
settings.only_show_lsp_completions\n else sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS\n )\n\n def do_request(self, prefix: str, locations: 'List[int]'):\n self.next_request = None\n view = self.view\n\n # don't store client so we can handle restarts\n client = client_for_view(view)\n if not client:\n return\n\n if settings.complete_all_chars or self.is_after_trigger_character(locations[0]):\n global_events.publish(\"view.on_purge_changes\", self.view)\n document_position = get_document_position(view, locations[0])\n if document_position:\n client.send_request(\n Request.complete(document_position),\n self.handle_response,\n self.handle_error)\n self.state = CompletionState.REQUESTING\n\n def handle_response(self, response: 'Optional[Dict]'):\n\n if self.state == CompletionState.REQUESTING:\n last_start = self.last_location - len(self.last_prefix)\n last_row, last_col = self.view.rowcol(last_start)\n self.completions = parse_completion_response(response, last_col, settings)\n\n # if insert_best_completion was just ran, undo it before presenting new completions.\n prev_char = self.view.substr(self.view.sel()[0].begin() - 1)\n if prev_char.isspace():\n if last_text_command == \"insert_best_completion\":\n self.view.run_command(\"undo\")\n\n self.state = CompletionState.APPLYING\n self.view.run_command(\"hide_auto_complete\")\n self.run_auto_complete()\n elif self.state == CompletionState.CANCELLING:\n self.state = CompletionState.IDLE\n if self.next_request:\n prefix, locations = self.next_request\n self.do_request(prefix, locations)\n else:\n debug('Got unexpected response while in state {}'.format(self.state))\n\n def handle_error(self, error: dict):\n sublime.status_message('Completion error: ' + str(error.get('message')))\n self.state = CompletionState.IDLE\n\n def run_auto_complete(self):\n self.view.run_command(\n \"auto_complete\", {\n 'disable_auto_insert': True,\n 'api_completions_only': settings.only_show_lsp_completions,\n 'next_completion_if_showing': False\n })\n", "path": "plugin/completion.py"}]}
| 3,183 | 118 |
gh_patches_debug_32870
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-2941
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Values for Selection Custom Fields via API
<!--
Before opening a new issue, please search through the existing issues to
see if your topic has already been addressed. Note that you may need to
remove the "is:open" filter from the search bar to include closed issues.
Check the appropriate type for your issue below by placing an x between the
brackets. For assistance with installation issues, or for any other issues
other than those listed below, please raise your topic for discussion on
our mailing list:
https://groups.google.com/forum/#!forum/netbox-discuss
Please note that issues which do not fall under any of the below categories
will be closed. Due to an excessive backlog of feature requests, we are
not currently accepting any proposals which extend NetBox's feature scope.
Do not prepend any sort of tag to your issue's title. An administrator will
review your issue and assign labels as appropriate.
--->
### Issue type
[X] Feature request <!-- An enhancement of existing functionality -->
[ ] Bug report <!-- Unexpected or erroneous behavior -->
[ ] Documentation <!-- A modification to the documentation -->
<!--
Please describe the environment in which you are running NetBox. (Be sure
to verify that you are running the latest stable release of NetBox before
submitting a bug report.) If you are submitting a bug report and have made
any changes to the code base, please first validate that your bug can be
recreated while running an official release.
-->
### Environment
* Python version: 3.6
* NetBox version: 2.2.8
<!--
BUG REPORTS must include:
* A list of the steps needed for someone else to reproduce the bug
* A description of the expected and observed behavior
* Any relevant error messages (screenshots may also help)
FEATURE REQUESTS must include:
* A detailed description of the proposed functionality
* A use case for the new feature
* A rough description of any necessary changes to the database schema
* Any relevant third-party libraries which would be needed
-->
### Description
As I [already mentioned on the mailing list](https://groups.google.com/forum/#!topic/netbox-discuss/4j7ODlKWNk0), I would like to fill in the values of _selection custom fields_ via the API.
I expected to provide the actual value of the field, i.e. the text I see in the dropdown of the GUI. But the API expects to get the ID of the option, rather than the value, and there seems to be no way of looking up the available (value,id)-pairs for a given _selection custom field_. Also I would like to avoid hardcoding any (value,id)-pairs in my application, as they are not consistent across our different Netbox instances (local, staging, production) and it would be very fragile.
I see two solutions:
* When I set the value of such a selection custom field, Netbox accepts the String value and converts it to the respective ID internally.
* There is a new endpoint to fetch the option pairs of selection custom fields, similar to the other __options_ endpoints.
IMO both options could even live in parallel to each other: whenever an integer is presented as the value for a selection custom field, we can assume it's the ID of the value; whenever it's a string, we can assume it's the text value that first has to be converted to the corresponding integer ID.
Allowing clients to submit text values via the API instead of the actual IDs might imply that we don't allow the same text value twice on one given selection custom field.
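For illustration, a rough client-side sketch of the second option, assuming a lookup endpoint that returns the (value, id) pairs per selection custom field. The instance URL, token, field name, and value below are made up for the example:

```python
import requests

API = "https://netbox.example.com/api"            # hypothetical instance
HEADERS = {"Authorization": "Token 0123456789"}   # hypothetical token

# Hypothetical lookup response: {field_name: {choice_value: choice_id}, ...}
choices = requests.get(API + "/extras/_custom_field_choices/", headers=HEADERS).json()

# Translate the human-readable value into the integer ID the write API expects.
environment_id = choices["environment"]["production"]
```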
</issue>
<code>
[start of netbox/extras/api/views.py]
1 from django.contrib.contenttypes.models import ContentType
2 from django.db.models import Count
3 from django.http import Http404, HttpResponse
4 from django.shortcuts import get_object_or_404
5 from rest_framework.decorators import action
6 from rest_framework.exceptions import PermissionDenied
7 from rest_framework.response import Response
8 from rest_framework.viewsets import ReadOnlyModelViewSet, ViewSet
9
10 from extras import filters
11 from extras.models import (
12 ConfigContext, CustomField, ExportTemplate, Graph, ImageAttachment, ObjectChange, ReportResult, TopologyMap,
13 Tag
14 )
15 from extras.reports import get_report, get_reports
16 from utilities.api import FieldChoicesViewSet, IsAuthenticatedOrLoginNotRequired, ModelViewSet
17 from . import serializers
18
19
20 #
21 # Field choices
22 #
23
24 class ExtrasFieldChoicesViewSet(FieldChoicesViewSet):
25 fields = (
26 (CustomField, ['type']),
27 (Graph, ['type']),
28 )
29
30
31 #
32 # Custom fields
33 #
34
35 class CustomFieldModelViewSet(ModelViewSet):
36 """
37 Include the applicable set of CustomFields in the ModelViewSet context.
38 """
39
40 def get_serializer_context(self):
41
42 # Gather all custom fields for the model
43 content_type = ContentType.objects.get_for_model(self.queryset.model)
44 custom_fields = content_type.custom_fields.prefetch_related('choices')
45
46 # Cache all relevant CustomFieldChoices. This saves us from having to do a lookup per select field per object.
47 custom_field_choices = {}
48 for field in custom_fields:
49 for cfc in field.choices.all():
50 custom_field_choices[cfc.id] = cfc.value
51 custom_field_choices = custom_field_choices
52
53 context = super().get_serializer_context()
54 context.update({
55 'custom_fields': custom_fields,
56 'custom_field_choices': custom_field_choices,
57 })
58 return context
59
60 def get_queryset(self):
61 # Prefetch custom field values
62 return super().get_queryset().prefetch_related('custom_field_values__field')
63
64
65 #
66 # Graphs
67 #
68
69 class GraphViewSet(ModelViewSet):
70 queryset = Graph.objects.all()
71 serializer_class = serializers.GraphSerializer
72 filterset_class = filters.GraphFilter
73
74
75 #
76 # Export templates
77 #
78
79 class ExportTemplateViewSet(ModelViewSet):
80 queryset = ExportTemplate.objects.all()
81 serializer_class = serializers.ExportTemplateSerializer
82 filterset_class = filters.ExportTemplateFilter
83
84
85 #
86 # Topology maps
87 #
88
89 class TopologyMapViewSet(ModelViewSet):
90 queryset = TopologyMap.objects.select_related('site')
91 serializer_class = serializers.TopologyMapSerializer
92 filterset_class = filters.TopologyMapFilter
93
94 @action(detail=True)
95 def render(self, request, pk):
96
97 tmap = get_object_or_404(TopologyMap, pk=pk)
98 img_format = 'png'
99
100 try:
101 data = tmap.render(img_format=img_format)
102 except Exception as e:
103 return HttpResponse(
104 "There was an error generating the requested graph: %s" % e
105 )
106
107 response = HttpResponse(data, content_type='image/{}'.format(img_format))
108 response['Content-Disposition'] = 'inline; filename="{}.{}"'.format(tmap.slug, img_format)
109
110 return response
111
112
113 #
114 # Tags
115 #
116
117 class TagViewSet(ModelViewSet):
118 queryset = Tag.objects.annotate(tagged_items=Count('extras_taggeditem_items'))
119 serializer_class = serializers.TagSerializer
120 filterset_class = filters.TagFilter
121
122
123 #
124 # Image attachments
125 #
126
127 class ImageAttachmentViewSet(ModelViewSet):
128 queryset = ImageAttachment.objects.all()
129 serializer_class = serializers.ImageAttachmentSerializer
130
131
132 #
133 # Config contexts
134 #
135
136 class ConfigContextViewSet(ModelViewSet):
137 queryset = ConfigContext.objects.prefetch_related(
138 'regions', 'sites', 'roles', 'platforms', 'tenant_groups', 'tenants',
139 )
140 serializer_class = serializers.ConfigContextSerializer
141 filterset_class = filters.ConfigContextFilter
142
143
144 #
145 # Reports
146 #
147
148 class ReportViewSet(ViewSet):
149 permission_classes = [IsAuthenticatedOrLoginNotRequired]
150 _ignore_model_permissions = True
151 exclude_from_schema = True
152 lookup_value_regex = '[^/]+' # Allow dots
153
154 def _retrieve_report(self, pk):
155
156 # Read the PK as "<module>.<report>"
157 if '.' not in pk:
158 raise Http404
159 module_name, report_name = pk.split('.', 1)
160
161 # Raise a 404 on an invalid Report module/name
162 report = get_report(module_name, report_name)
163 if report is None:
164 raise Http404
165
166 return report
167
168 def list(self, request):
169 """
170 Compile all reports and their related results (if any). Result data is deferred in the list view.
171 """
172 report_list = []
173
174 # Iterate through all available Reports.
175 for module_name, reports in get_reports():
176 for report in reports:
177
178 # Attach the relevant ReportResult (if any) to each Report.
179 report.result = ReportResult.objects.filter(report=report.full_name).defer('data').first()
180 report_list.append(report)
181
182 serializer = serializers.ReportSerializer(report_list, many=True, context={
183 'request': request,
184 })
185
186 return Response(serializer.data)
187
188 def retrieve(self, request, pk):
189 """
190 Retrieve a single Report identified as "<module>.<report>".
191 """
192
193 # Retrieve the Report and ReportResult, if any.
194 report = self._retrieve_report(pk)
195 report.result = ReportResult.objects.filter(report=report.full_name).first()
196
197 serializer = serializers.ReportDetailSerializer(report)
198
199 return Response(serializer.data)
200
201 @action(detail=True, methods=['post'])
202 def run(self, request, pk):
203 """
204 Run a Report and create a new ReportResult, overwriting any previous result for the Report.
205 """
206
207 # Check that the user has permission to run reports.
208 if not request.user.has_perm('extras.add_reportresult'):
209 raise PermissionDenied("This user does not have permission to run reports.")
210
211 # Retrieve and run the Report. This will create a new ReportResult.
212 report = self._retrieve_report(pk)
213 report.run()
214
215 serializer = serializers.ReportDetailSerializer(report)
216
217 return Response(serializer.data)
218
219
220 #
221 # Change logging
222 #
223
224 class ObjectChangeViewSet(ReadOnlyModelViewSet):
225 """
226 Retrieve a list of recent changes.
227 """
228 queryset = ObjectChange.objects.select_related('user')
229 serializer_class = serializers.ObjectChangeSerializer
230 filterset_class = filters.ObjectChangeFilter
231
[end of netbox/extras/api/views.py]
[start of netbox/extras/api/urls.py]
1 from rest_framework import routers
2
3 from . import views
4
5
6 class ExtrasRootView(routers.APIRootView):
7 """
8 Extras API root view
9 """
10 def get_view_name(self):
11 return 'Extras'
12
13
14 router = routers.DefaultRouter()
15 router.APIRootView = ExtrasRootView
16
17 # Field choices
18 router.register(r'_choices', views.ExtrasFieldChoicesViewSet, basename='field-choice')
19
20 # Graphs
21 router.register(r'graphs', views.GraphViewSet)
22
23 # Export templates
24 router.register(r'export-templates', views.ExportTemplateViewSet)
25
26 # Topology maps
27 router.register(r'topology-maps', views.TopologyMapViewSet)
28
29 # Tags
30 router.register(r'tags', views.TagViewSet)
31
32 # Image attachments
33 router.register(r'image-attachments', views.ImageAttachmentViewSet)
34
35 # Config contexts
36 router.register(r'config-contexts', views.ConfigContextViewSet)
37
38 # Reports
39 router.register(r'reports', views.ReportViewSet, basename='report')
40
41 # Change logging
42 router.register(r'object-changes', views.ObjectChangeViewSet)
43
44 app_name = 'extras-api'
45 urlpatterns = router.urls
46
[end of netbox/extras/api/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/netbox/extras/api/urls.py b/netbox/extras/api/urls.py
--- a/netbox/extras/api/urls.py
+++ b/netbox/extras/api/urls.py
@@ -17,6 +17,9 @@
# Field choices
router.register(r'_choices', views.ExtrasFieldChoicesViewSet, basename='field-choice')
+# Custom field choices
+router.register(r'_custom_field_choices', views.CustomFieldChoicesViewSet, base_name='custom-field-choice')
+
# Graphs
router.register(r'graphs', views.GraphViewSet)
diff --git a/netbox/extras/api/views.py b/netbox/extras/api/views.py
--- a/netbox/extras/api/views.py
+++ b/netbox/extras/api/views.py
@@ -1,3 +1,5 @@
+from collections import OrderedDict
+
from django.contrib.contenttypes.models import ContentType
from django.db.models import Count
from django.http import Http404, HttpResponse
@@ -9,8 +11,8 @@
from extras import filters
from extras.models import (
- ConfigContext, CustomField, ExportTemplate, Graph, ImageAttachment, ObjectChange, ReportResult, TopologyMap,
- Tag
+ ConfigContext, CustomField, CustomFieldChoice, ExportTemplate, Graph, ImageAttachment, ObjectChange, ReportResult, TopologyMap,
+ Tag,
)
from extras.reports import get_report, get_reports
from utilities.api import FieldChoicesViewSet, IsAuthenticatedOrLoginNotRequired, ModelViewSet
@@ -28,6 +30,36 @@
)
+#
+# Custom field choices
+#
+
+class CustomFieldChoicesViewSet(ViewSet):
+ """
+ """
+ permission_classes = [IsAuthenticatedOrLoginNotRequired]
+
+ def __init__(self, *args, **kwargs):
+ super(CustomFieldChoicesViewSet, self).__init__(*args, **kwargs)
+
+ self._fields = OrderedDict()
+
+ for cfc in CustomFieldChoice.objects.all():
+ self._fields.setdefault(cfc.field.name, {})
+ self._fields[cfc.field.name][cfc.value] = cfc.pk
+
+ def list(self, request):
+ return Response(self._fields)
+
+ def retrieve(self, request, pk):
+ if pk not in self._fields:
+ raise Http404
+ return Response(self._fields[pk])
+
+ def get_view_name(self):
+ return "Custom Field choices"
+
+
#
# Custom fields
#
|
{"golden_diff": "diff --git a/netbox/extras/api/urls.py b/netbox/extras/api/urls.py\n--- a/netbox/extras/api/urls.py\n+++ b/netbox/extras/api/urls.py\n@@ -17,6 +17,9 @@\n # Field choices\n router.register(r'_choices', views.ExtrasFieldChoicesViewSet, basename='field-choice')\n \n+# Custom field choices\n+router.register(r'_custom_field_choices', views.CustomFieldChoicesViewSet, base_name='custom-field-choice')\n+\n # Graphs\n router.register(r'graphs', views.GraphViewSet)\n \ndiff --git a/netbox/extras/api/views.py b/netbox/extras/api/views.py\n--- a/netbox/extras/api/views.py\n+++ b/netbox/extras/api/views.py\n@@ -1,3 +1,5 @@\n+from collections import OrderedDict\n+\n from django.contrib.contenttypes.models import ContentType\n from django.db.models import Count\n from django.http import Http404, HttpResponse\n@@ -9,8 +11,8 @@\n \n from extras import filters\n from extras.models import (\n- ConfigContext, CustomField, ExportTemplate, Graph, ImageAttachment, ObjectChange, ReportResult, TopologyMap,\n- Tag\n+ ConfigContext, CustomField, CustomFieldChoice, ExportTemplate, Graph, ImageAttachment, ObjectChange, ReportResult, TopologyMap,\n+ Tag,\n )\n from extras.reports import get_report, get_reports\n from utilities.api import FieldChoicesViewSet, IsAuthenticatedOrLoginNotRequired, ModelViewSet\n@@ -28,6 +30,36 @@\n )\n \n \n+#\n+# Custom field choices\n+#\n+\n+class CustomFieldChoicesViewSet(ViewSet):\n+ \"\"\"\n+ \"\"\"\n+ permission_classes = [IsAuthenticatedOrLoginNotRequired]\n+\n+ def __init__(self, *args, **kwargs):\n+ super(CustomFieldChoicesViewSet, self).__init__(*args, **kwargs)\n+\n+ self._fields = OrderedDict()\n+\n+ for cfc in CustomFieldChoice.objects.all():\n+ self._fields.setdefault(cfc.field.name, {})\n+ self._fields[cfc.field.name][cfc.value] = cfc.pk\n+\n+ def list(self, request):\n+ return Response(self._fields)\n+\n+ def retrieve(self, request, pk):\n+ if pk not in self._fields:\n+ raise Http404\n+ return Response(self._fields[pk])\n+\n+ def get_view_name(self):\n+ return \"Custom Field choices\"\n+\n+\n #\n # Custom fields\n #\n", "issue": "Values for Selection Custom Fields via API\n<!--\r\n Before opening a new issue, please search through the existing issues to\r\n see if your topic has already been addressed. Note that you may need to\r\n remove the \"is:open\" filter from the search bar to include closed issues.\r\n\r\n Check the appropriate type for your issue below by placing an x between the\r\n brackets. For assistance with installation issues, or for any other issues\r\n other than those listed below, please raise your topic for discussion on\r\n our mailing list:\r\n\r\n https://groups.google.com/forum/#!forum/netbox-discuss\r\n\r\n Please note that issues which do not fall under any of the below categories\r\n will be closed. Due to an excessive backlog of feature requests, we are\r\n not currently accepting any proposals which extend NetBox's feature scope.\r\n\r\n Do not prepend any sort of tag to your issue's title. An administrator will\r\n review your issue and assign labels as appropriate.\r\n--->\r\n### Issue type\r\n[X] Feature request <!-- An enhancement of existing functionality -->\r\n[ ] Bug report <!-- Unexpected or erroneous behavior -->\r\n[ ] Documentation <!-- A modification to the documentation -->\r\n\r\n<!--\r\n Please describe the environment in which you are running NetBox. (Be sure\r\n to verify that you are running the latest stable release of NetBox before\r\n submitting a bug report.) 
If you are submitting a bug report and have made\r\n any changes to the code base, please first validate that your bug can be\r\n recreated while running an official release.\r\n-->\r\n### Environment\r\n* Python version: 3.6\r\n* NetBox version: 2.2.8\r\n\r\n<!--\r\n BUG REPORTS must include:\r\n * A list of the steps needed for someone else to reproduce the bug\r\n * A description of the expected and observed behavior\r\n * Any relevant error messages (screenshots may also help)\r\n\r\n FEATURE REQUESTS must include:\r\n * A detailed description of the proposed functionality\r\n * A use case for the new feature\r\n * A rough description of any necessary changes to the database schema\r\n * Any relevant third-party libraries which would be needed\r\n-->\r\n### Description\r\n\r\nAs I [already told on the mailing list](https://groups.google.com/forum/#!topic/netbox-discuss/4j7ODlKWNk0), I would like to fill in the values of _selection custom fields_ via the API.\r\n\r\nI expected to provide the actual value of the field, i.e. the text I see in the dropdown of the GUI. But the API expects to get the ID of the option, rather than the value, and there seems to be no way of looking up the available (value,id)-pairs for a given _selection custom fields_. Also I would like to avoid hardcoding any (value,id)-pairs in my application, as it's not consistent across our different Netbox instances (local, staging, production) and it would be very fragile.\r\n\r\nI see two solutions:\r\n* When I set the value of such a selection custom field, Netbox accepts the String value and converts it to the respective ID internally.\r\n* There is a new endpoint to fetch the option pairs of selection custom fields, similar to the other __options_ endpoints.\r\n\r\nIMO both options could even live in parallel to each other: Whenever an integer is presented as value for a selection custom field, we can assume it's the ID of the value. Whenever it's a string, we can assume it's the text value and first has to be converted to the corresponding integer id.\r\n\r\nAllowing to submit text values via the API instead of the actual IDs might imply that we don't allow the same text value on one given selection custom field twice.\r\n \n", "before_files": [{"content": "from django.contrib.contenttypes.models import ContentType\nfrom django.db.models import Count\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ReadOnlyModelViewSet, ViewSet\n\nfrom extras import filters\nfrom extras.models import (\n ConfigContext, CustomField, ExportTemplate, Graph, ImageAttachment, ObjectChange, ReportResult, TopologyMap,\n Tag\n)\nfrom extras.reports import get_report, get_reports\nfrom utilities.api import FieldChoicesViewSet, IsAuthenticatedOrLoginNotRequired, ModelViewSet\nfrom . 
import serializers\n\n\n#\n# Field choices\n#\n\nclass ExtrasFieldChoicesViewSet(FieldChoicesViewSet):\n fields = (\n (CustomField, ['type']),\n (Graph, ['type']),\n )\n\n\n#\n# Custom fields\n#\n\nclass CustomFieldModelViewSet(ModelViewSet):\n \"\"\"\n Include the applicable set of CustomFields in the ModelViewSet context.\n \"\"\"\n\n def get_serializer_context(self):\n\n # Gather all custom fields for the model\n content_type = ContentType.objects.get_for_model(self.queryset.model)\n custom_fields = content_type.custom_fields.prefetch_related('choices')\n\n # Cache all relevant CustomFieldChoices. This saves us from having to do a lookup per select field per object.\n custom_field_choices = {}\n for field in custom_fields:\n for cfc in field.choices.all():\n custom_field_choices[cfc.id] = cfc.value\n custom_field_choices = custom_field_choices\n\n context = super().get_serializer_context()\n context.update({\n 'custom_fields': custom_fields,\n 'custom_field_choices': custom_field_choices,\n })\n return context\n\n def get_queryset(self):\n # Prefetch custom field values\n return super().get_queryset().prefetch_related('custom_field_values__field')\n\n\n#\n# Graphs\n#\n\nclass GraphViewSet(ModelViewSet):\n queryset = Graph.objects.all()\n serializer_class = serializers.GraphSerializer\n filterset_class = filters.GraphFilter\n\n\n#\n# Export templates\n#\n\nclass ExportTemplateViewSet(ModelViewSet):\n queryset = ExportTemplate.objects.all()\n serializer_class = serializers.ExportTemplateSerializer\n filterset_class = filters.ExportTemplateFilter\n\n\n#\n# Topology maps\n#\n\nclass TopologyMapViewSet(ModelViewSet):\n queryset = TopologyMap.objects.select_related('site')\n serializer_class = serializers.TopologyMapSerializer\n filterset_class = filters.TopologyMapFilter\n\n @action(detail=True)\n def render(self, request, pk):\n\n tmap = get_object_or_404(TopologyMap, pk=pk)\n img_format = 'png'\n\n try:\n data = tmap.render(img_format=img_format)\n except Exception as e:\n return HttpResponse(\n \"There was an error generating the requested graph: %s\" % e\n )\n\n response = HttpResponse(data, content_type='image/{}'.format(img_format))\n response['Content-Disposition'] = 'inline; filename=\"{}.{}\"'.format(tmap.slug, img_format)\n\n return response\n\n\n#\n# Tags\n#\n\nclass TagViewSet(ModelViewSet):\n queryset = Tag.objects.annotate(tagged_items=Count('extras_taggeditem_items'))\n serializer_class = serializers.TagSerializer\n filterset_class = filters.TagFilter\n\n\n#\n# Image attachments\n#\n\nclass ImageAttachmentViewSet(ModelViewSet):\n queryset = ImageAttachment.objects.all()\n serializer_class = serializers.ImageAttachmentSerializer\n\n\n#\n# Config contexts\n#\n\nclass ConfigContextViewSet(ModelViewSet):\n queryset = ConfigContext.objects.prefetch_related(\n 'regions', 'sites', 'roles', 'platforms', 'tenant_groups', 'tenants',\n )\n serializer_class = serializers.ConfigContextSerializer\n filterset_class = filters.ConfigContextFilter\n\n\n#\n# Reports\n#\n\nclass ReportViewSet(ViewSet):\n permission_classes = [IsAuthenticatedOrLoginNotRequired]\n _ignore_model_permissions = True\n exclude_from_schema = True\n lookup_value_regex = '[^/]+' # Allow dots\n\n def _retrieve_report(self, pk):\n\n # Read the PK as \"<module>.<report>\"\n if '.' 
not in pk:\n raise Http404\n module_name, report_name = pk.split('.', 1)\n\n # Raise a 404 on an invalid Report module/name\n report = get_report(module_name, report_name)\n if report is None:\n raise Http404\n\n return report\n\n def list(self, request):\n \"\"\"\n Compile all reports and their related results (if any). Result data is deferred in the list view.\n \"\"\"\n report_list = []\n\n # Iterate through all available Reports.\n for module_name, reports in get_reports():\n for report in reports:\n\n # Attach the relevant ReportResult (if any) to each Report.\n report.result = ReportResult.objects.filter(report=report.full_name).defer('data').first()\n report_list.append(report)\n\n serializer = serializers.ReportSerializer(report_list, many=True, context={\n 'request': request,\n })\n\n return Response(serializer.data)\n\n def retrieve(self, request, pk):\n \"\"\"\n Retrieve a single Report identified as \"<module>.<report>\".\n \"\"\"\n\n # Retrieve the Report and ReportResult, if any.\n report = self._retrieve_report(pk)\n report.result = ReportResult.objects.filter(report=report.full_name).first()\n\n serializer = serializers.ReportDetailSerializer(report)\n\n return Response(serializer.data)\n\n @action(detail=True, methods=['post'])\n def run(self, request, pk):\n \"\"\"\n Run a Report and create a new ReportResult, overwriting any previous result for the Report.\n \"\"\"\n\n # Check that the user has permission to run reports.\n if not request.user.has_perm('extras.add_reportresult'):\n raise PermissionDenied(\"This user does not have permission to run reports.\")\n\n # Retrieve and run the Report. This will create a new ReportResult.\n report = self._retrieve_report(pk)\n report.run()\n\n serializer = serializers.ReportDetailSerializer(report)\n\n return Response(serializer.data)\n\n\n#\n# Change logging\n#\n\nclass ObjectChangeViewSet(ReadOnlyModelViewSet):\n \"\"\"\n Retrieve a list of recent changes.\n \"\"\"\n queryset = ObjectChange.objects.select_related('user')\n serializer_class = serializers.ObjectChangeSerializer\n filterset_class = filters.ObjectChangeFilter\n", "path": "netbox/extras/api/views.py"}, {"content": "from rest_framework import routers\n\nfrom . import views\n\n\nclass ExtrasRootView(routers.APIRootView):\n \"\"\"\n Extras API root view\n \"\"\"\n def get_view_name(self):\n return 'Extras'\n\n\nrouter = routers.DefaultRouter()\nrouter.APIRootView = ExtrasRootView\n\n# Field choices\nrouter.register(r'_choices', views.ExtrasFieldChoicesViewSet, basename='field-choice')\n\n# Graphs\nrouter.register(r'graphs', views.GraphViewSet)\n\n# Export templates\nrouter.register(r'export-templates', views.ExportTemplateViewSet)\n\n# Topology maps\nrouter.register(r'topology-maps', views.TopologyMapViewSet)\n\n# Tags\nrouter.register(r'tags', views.TagViewSet)\n\n# Image attachments\nrouter.register(r'image-attachments', views.ImageAttachmentViewSet)\n\n# Config contexts\nrouter.register(r'config-contexts', views.ConfigContextViewSet)\n\n# Reports\nrouter.register(r'reports', views.ReportViewSet, basename='report')\n\n# Change logging\nrouter.register(r'object-changes', views.ObjectChangeViewSet)\n\napp_name = 'extras-api'\nurlpatterns = router.urls\n", "path": "netbox/extras/api/urls.py"}]}
| 3,620 | 543 |
gh_patches_debug_6745
|
rasdani/github-patches
|
git_diff
|
ipython__ipython-9228
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Output for semicolon-terminated lines with comments
cf https://github.com/JuliaLang/IJulia.jl/issues/109
The output of a semicolon-terminated line with a trailing comment is not suppressed in IPython.
```
b = 1
b; # foo
```
gives `Out[13]: 1`, whereas
```
b = 1
b;
```
does not.
Haven't found such an issue on our tracker.
@takluyver you are the best with the input machinery and coroutines; Thoughts ?
(Because of the comment by @stevengj in the above-cited issue, marking as prio-high, but won't mind if the priority is decreased later, or if this is not fixed)
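For reference, a minimal sketch of why the trailing comment defeats the suppression check: tokenizing such a cell leaves a newline token between the comment and the end marker, so a reverse scan that only skips `ENDMARKER` and `COMMENT` stops before it ever reaches the `;`.

```python
import io
import tokenize

cell = "b; # foo\n"
for tok in tokenize.generate_tokens(io.StringIO(cell).readline):
    print(tokenize.tok_name[tok[0]], repr(tok[1]))
# Prints roughly: NAME 'b', OP ';', COMMENT '# foo', NEWLINE '\n', ENDMARKER ''
# Walking the tokens in reverse, the NEWLINE/NL token comes before the ';' OP,
# so a check that skips only ENDMARKER and COMMENT concludes the cell is not quiet.
```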
</issue>
<code>
[start of IPython/core/displayhook.py]
1 # -*- coding: utf-8 -*-
2 """Displayhook for IPython.
3
4 This defines a callable class that IPython uses for `sys.displayhook`.
5 """
6
7 # Copyright (c) IPython Development Team.
8 # Distributed under the terms of the Modified BSD License.
9
10 from __future__ import print_function
11
12 import sys
13 import io as _io
14 import tokenize
15
16 from IPython.core.formatters import _safe_get_formatter_method
17 from traitlets.config.configurable import Configurable
18 from IPython.utils import io
19 from IPython.utils.py3compat import builtin_mod, cast_unicode_py2
20 from traitlets import Instance, Float
21 from IPython.utils.warn import warn
22
23 # TODO: Move the various attributes (cache_size, [others now moved]). Some
24 # of these are also attributes of InteractiveShell. They should be on ONE object
25 # only and the other objects should ask that one object for their values.
26
27 class DisplayHook(Configurable):
28 """The custom IPython displayhook to replace sys.displayhook.
29
30 This class does many things, but the basic idea is that it is a callable
31 that gets called anytime user code returns a value.
32 """
33
34 shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
35 allow_none=True)
36 exec_result = Instance('IPython.core.interactiveshell.ExecutionResult',
37 allow_none=True)
38 cull_fraction = Float(0.2)
39
40 def __init__(self, shell=None, cache_size=1000, **kwargs):
41 super(DisplayHook, self).__init__(shell=shell, **kwargs)
42 cache_size_min = 3
43 if cache_size <= 0:
44 self.do_full_cache = 0
45 cache_size = 0
46 elif cache_size < cache_size_min:
47 self.do_full_cache = 0
48 cache_size = 0
49 warn('caching was disabled (min value for cache size is %s).' %
50 cache_size_min,level=3)
51 else:
52 self.do_full_cache = 1
53
54 self.cache_size = cache_size
55
56 # we need a reference to the user-level namespace
57 self.shell = shell
58
59 self._,self.__,self.___ = '','',''
60
61 # these are deliberately global:
62 to_user_ns = {'_':self._,'__':self.__,'___':self.___}
63 self.shell.user_ns.update(to_user_ns)
64
65 @property
66 def prompt_count(self):
67 return self.shell.execution_count
68
69 #-------------------------------------------------------------------------
70 # Methods used in __call__. Override these methods to modify the behavior
71 # of the displayhook.
72 #-------------------------------------------------------------------------
73
74 def check_for_underscore(self):
75 """Check if the user has set the '_' variable by hand."""
76 # If something injected a '_' variable in __builtin__, delete
77 # ipython's automatic one so we don't clobber that. gettext() in
78 # particular uses _, so we need to stay away from it.
79 if '_' in builtin_mod.__dict__:
80 try:
81 del self.shell.user_ns['_']
82 except KeyError:
83 pass
84
85 def quiet(self):
86 """Should we silence the display hook because of ';'?"""
87 # do not print output if input ends in ';'
88
89 try:
90 cell = cast_unicode_py2(self.shell.history_manager.input_hist_parsed[-1])
91 except IndexError:
92 # some uses of ipshellembed may fail here
93 return False
94
95 sio = _io.StringIO(cell)
96 tokens = list(tokenize.generate_tokens(sio.readline))
97
98 for token in reversed(tokens):
99 if token[0] in (tokenize.ENDMARKER, tokenize.COMMENT):
100 continue
101 if (token[0] == tokenize.OP) and (token[1] == ';'):
102 return True
103 else:
104 return False
105
106 def start_displayhook(self):
107 """Start the displayhook, initializing resources."""
108 pass
109
110 def write_output_prompt(self):
111 """Write the output prompt.
112
113 The default implementation simply writes the prompt to
114 ``io.stdout``.
115 """
116 # Use write, not print which adds an extra space.
117 io.stdout.write(self.shell.separate_out)
118 outprompt = self.shell.prompt_manager.render('out')
119 if self.do_full_cache:
120 io.stdout.write(outprompt)
121
122 def compute_format_data(self, result):
123 """Compute format data of the object to be displayed.
124
125 The format data is a generalization of the :func:`repr` of an object.
126 In the default implementation the format data is a :class:`dict` of
127 key value pair where the keys are valid MIME types and the values
128 are JSON'able data structure containing the raw data for that MIME
129 type. It is up to frontends to determine pick a MIME to to use and
130 display that data in an appropriate manner.
131
132 This method only computes the format data for the object and should
133 NOT actually print or write that to a stream.
134
135 Parameters
136 ----------
137 result : object
138 The Python object passed to the display hook, whose format will be
139 computed.
140
141 Returns
142 -------
143 (format_dict, md_dict) : dict
144 format_dict is a :class:`dict` whose keys are valid MIME types and values are
145 JSON'able raw data for that MIME type. It is recommended that
146 all return values of this should always include the "text/plain"
147 MIME type representation of the object.
148 md_dict is a :class:`dict` with the same MIME type keys
149 of metadata associated with each output.
150
151 """
152 return self.shell.display_formatter.format(result)
153
154 def write_format_data(self, format_dict, md_dict=None):
155 """Write the format data dict to the frontend.
156
157 This default version of this method simply writes the plain text
158 representation of the object to ``io.stdout``. Subclasses should
159 override this method to send the entire `format_dict` to the
160 frontends.
161
162 Parameters
163 ----------
164 format_dict : dict
165 The format dict for the object passed to `sys.displayhook`.
166 md_dict : dict (optional)
167 The metadata dict to be associated with the display data.
168 """
169 if 'text/plain' not in format_dict:
170 # nothing to do
171 return
172 # We want to print because we want to always make sure we have a
173 # newline, even if all the prompt separators are ''. This is the
174 # standard IPython behavior.
175 result_repr = format_dict['text/plain']
176 if '\n' in result_repr:
177 # So that multi-line strings line up with the left column of
178 # the screen, instead of having the output prompt mess up
179 # their first line.
180 # We use the prompt template instead of the expanded prompt
181 # because the expansion may add ANSI escapes that will interfere
182 # with our ability to determine whether or not we should add
183 # a newline.
184 prompt_template = self.shell.prompt_manager.out_template
185 if prompt_template and not prompt_template.endswith('\n'):
186 # But avoid extraneous empty lines.
187 result_repr = '\n' + result_repr
188
189 print(result_repr, file=io.stdout)
190
191 def update_user_ns(self, result):
192 """Update user_ns with various things like _, __, _1, etc."""
193
194 # Avoid recursive reference when displaying _oh/Out
195 if result is not self.shell.user_ns['_oh']:
196 if len(self.shell.user_ns['_oh']) >= self.cache_size and self.do_full_cache:
197 self.cull_cache()
198 # Don't overwrite '_' and friends if '_' is in __builtin__ (otherwise
199 # we cause buggy behavior for things like gettext).
200
201 if '_' not in builtin_mod.__dict__:
202 self.___ = self.__
203 self.__ = self._
204 self._ = result
205 self.shell.push({'_':self._,
206 '__':self.__,
207 '___':self.___}, interactive=False)
208
209 # hackish access to top-level namespace to create _1,_2... dynamically
210 to_main = {}
211 if self.do_full_cache:
212 new_result = '_'+repr(self.prompt_count)
213 to_main[new_result] = result
214 self.shell.push(to_main, interactive=False)
215 self.shell.user_ns['_oh'][self.prompt_count] = result
216
217 def fill_exec_result(self, result):
218 if self.exec_result is not None:
219 self.exec_result.result = result
220
221 def log_output(self, format_dict):
222 """Log the output."""
223 if 'text/plain' not in format_dict:
224 # nothing to do
225 return
226 if self.shell.logger.log_output:
227 self.shell.logger.log_write(format_dict['text/plain'], 'output')
228 self.shell.history_manager.output_hist_reprs[self.prompt_count] = \
229 format_dict['text/plain']
230
231 def finish_displayhook(self):
232 """Finish up all displayhook activities."""
233 io.stdout.write(self.shell.separate_out2)
234 io.stdout.flush()
235
236 def __call__(self, result=None):
237 """Printing with history cache management.
238
239 This is invoked everytime the interpreter needs to print, and is
240 activated by setting the variable sys.displayhook to it.
241 """
242 self.check_for_underscore()
243 if result is not None and not self.quiet():
244 self.start_displayhook()
245 self.write_output_prompt()
246 format_dict, md_dict = self.compute_format_data(result)
247 self.update_user_ns(result)
248 self.fill_exec_result(result)
249 if format_dict:
250 self.write_format_data(format_dict, md_dict)
251 self.log_output(format_dict)
252 self.finish_displayhook()
253
254 def cull_cache(self):
255 """Output cache is full, cull the oldest entries"""
256 oh = self.shell.user_ns.get('_oh', {})
257 sz = len(oh)
258 cull_count = max(int(sz * self.cull_fraction), 2)
259 warn('Output cache limit (currently {sz} entries) hit.\n'
260 'Flushing oldest {cull_count} entries.'.format(sz=sz, cull_count=cull_count))
261
262 for i, n in enumerate(sorted(oh)):
263 if i >= cull_count:
264 break
265 self.shell.user_ns.pop('_%i' % n, None)
266 oh.pop(n, None)
267
268
269 def flush(self):
270 if not self.do_full_cache:
271 raise ValueError("You shouldn't have reached the cache flush "
272 "if full caching is not enabled!")
273 # delete auto-generated vars from global namespace
274
275 for n in range(1,self.prompt_count + 1):
276 key = '_'+repr(n)
277 try:
278 del self.shell.user_ns[key]
279 except: pass
280 # In some embedded circumstances, the user_ns doesn't have the
281 # '_oh' key set up.
282 oh = self.shell.user_ns.get('_oh', None)
283 if oh is not None:
284 oh.clear()
285
286 # Release our own references to objects:
287 self._, self.__, self.___ = '', '', ''
288
289 if '_' not in builtin_mod.__dict__:
290 self.shell.user_ns.update({'_':None,'__':None, '___':None})
291 import gc
292 # TODO: Is this really needed?
293 # IronPython blocks here forever
294 if sys.platform != "cli":
295 gc.collect()
296
297
[end of IPython/core/displayhook.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/IPython/core/displayhook.py b/IPython/core/displayhook.py
--- a/IPython/core/displayhook.py
+++ b/IPython/core/displayhook.py
@@ -96,7 +96,7 @@
tokens = list(tokenize.generate_tokens(sio.readline))
for token in reversed(tokens):
- if token[0] in (tokenize.ENDMARKER, tokenize.COMMENT):
+ if token[0] in (tokenize.ENDMARKER, tokenize.NL, tokenize.NEWLINE, tokenize.COMMENT):
continue
if (token[0] == tokenize.OP) and (token[1] == ';'):
return True
|
{"golden_diff": "diff --git a/IPython/core/displayhook.py b/IPython/core/displayhook.py\n--- a/IPython/core/displayhook.py\n+++ b/IPython/core/displayhook.py\n@@ -96,7 +96,7 @@\n tokens = list(tokenize.generate_tokens(sio.readline))\n \n for token in reversed(tokens):\n- if token[0] in (tokenize.ENDMARKER, tokenize.COMMENT):\n+ if token[0] in (tokenize.ENDMARKER, tokenize.NL, tokenize.NEWLINE, tokenize.COMMENT):\n continue\n if (token[0] == tokenize.OP) and (token[1] == ';'):\n return True\n", "issue": "Output for semicolon-terminated lines with comments\ncf https://github.com/JuliaLang/IJulia.jl/issues/109\n\nThe output of a semicolon-terminated line with comment is not suppressed in IPython.\n\n```\nb = 1\nb; # foo\n```\n\ngive `Out[13]: 1`\n\n```\nb = 1\nb;\n```\n\ndoes not.\n\nHavent found such and issue on our tracker.\n\n @takluyver you are the best with the input machinery and coroutines; Thoughts ?\n\n(Because of the comment @stevengj in sus-cited issue, marking as prio-high, but won't mind if priority decreased later, or not fixed)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Displayhook for IPython.\n\nThis defines a callable class that IPython uses for `sys.displayhook`.\n\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import print_function\n\nimport sys\nimport io as _io\nimport tokenize\n\nfrom IPython.core.formatters import _safe_get_formatter_method\nfrom traitlets.config.configurable import Configurable\nfrom IPython.utils import io\nfrom IPython.utils.py3compat import builtin_mod, cast_unicode_py2\nfrom traitlets import Instance, Float\nfrom IPython.utils.warn import warn\n\n# TODO: Move the various attributes (cache_size, [others now moved]). Some\n# of these are also attributes of InteractiveShell. They should be on ONE object\n# only and the other objects should ask that one object for their values.\n\nclass DisplayHook(Configurable):\n \"\"\"The custom IPython displayhook to replace sys.displayhook.\n\n This class does many things, but the basic idea is that it is a callable\n that gets called anytime user code returns a value.\n \"\"\"\n\n shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',\n allow_none=True)\n exec_result = Instance('IPython.core.interactiveshell.ExecutionResult',\n allow_none=True)\n cull_fraction = Float(0.2)\n\n def __init__(self, shell=None, cache_size=1000, **kwargs):\n super(DisplayHook, self).__init__(shell=shell, **kwargs)\n cache_size_min = 3\n if cache_size <= 0:\n self.do_full_cache = 0\n cache_size = 0\n elif cache_size < cache_size_min:\n self.do_full_cache = 0\n cache_size = 0\n warn('caching was disabled (min value for cache size is %s).' %\n cache_size_min,level=3)\n else:\n self.do_full_cache = 1\n\n self.cache_size = cache_size\n\n # we need a reference to the user-level namespace\n self.shell = shell\n \n self._,self.__,self.___ = '','',''\n\n # these are deliberately global:\n to_user_ns = {'_':self._,'__':self.__,'___':self.___}\n self.shell.user_ns.update(to_user_ns)\n\n @property\n def prompt_count(self):\n return self.shell.execution_count\n\n #-------------------------------------------------------------------------\n # Methods used in __call__. 
Override these methods to modify the behavior\n # of the displayhook.\n #-------------------------------------------------------------------------\n\n def check_for_underscore(self):\n \"\"\"Check if the user has set the '_' variable by hand.\"\"\"\n # If something injected a '_' variable in __builtin__, delete\n # ipython's automatic one so we don't clobber that. gettext() in\n # particular uses _, so we need to stay away from it.\n if '_' in builtin_mod.__dict__:\n try:\n del self.shell.user_ns['_']\n except KeyError:\n pass\n\n def quiet(self):\n \"\"\"Should we silence the display hook because of ';'?\"\"\"\n # do not print output if input ends in ';'\n \n try:\n cell = cast_unicode_py2(self.shell.history_manager.input_hist_parsed[-1])\n except IndexError:\n # some uses of ipshellembed may fail here\n return False\n \n sio = _io.StringIO(cell)\n tokens = list(tokenize.generate_tokens(sio.readline))\n\n for token in reversed(tokens):\n if token[0] in (tokenize.ENDMARKER, tokenize.COMMENT):\n continue\n if (token[0] == tokenize.OP) and (token[1] == ';'):\n return True\n else:\n return False\n\n def start_displayhook(self):\n \"\"\"Start the displayhook, initializing resources.\"\"\"\n pass\n\n def write_output_prompt(self):\n \"\"\"Write the output prompt.\n\n The default implementation simply writes the prompt to\n ``io.stdout``.\n \"\"\"\n # Use write, not print which adds an extra space.\n io.stdout.write(self.shell.separate_out)\n outprompt = self.shell.prompt_manager.render('out')\n if self.do_full_cache:\n io.stdout.write(outprompt)\n\n def compute_format_data(self, result):\n \"\"\"Compute format data of the object to be displayed.\n\n The format data is a generalization of the :func:`repr` of an object.\n In the default implementation the format data is a :class:`dict` of\n key value pair where the keys are valid MIME types and the values\n are JSON'able data structure containing the raw data for that MIME\n type. It is up to frontends to determine pick a MIME to to use and\n display that data in an appropriate manner.\n\n This method only computes the format data for the object and should\n NOT actually print or write that to a stream.\n\n Parameters\n ----------\n result : object\n The Python object passed to the display hook, whose format will be\n computed.\n\n Returns\n -------\n (format_dict, md_dict) : dict\n format_dict is a :class:`dict` whose keys are valid MIME types and values are\n JSON'able raw data for that MIME type. It is recommended that\n all return values of this should always include the \"text/plain\"\n MIME type representation of the object.\n md_dict is a :class:`dict` with the same MIME type keys\n of metadata associated with each output.\n \n \"\"\"\n return self.shell.display_formatter.format(result)\n\n def write_format_data(self, format_dict, md_dict=None):\n \"\"\"Write the format data dict to the frontend.\n\n This default version of this method simply writes the plain text\n representation of the object to ``io.stdout``. Subclasses should\n override this method to send the entire `format_dict` to the\n frontends.\n\n Parameters\n ----------\n format_dict : dict\n The format dict for the object passed to `sys.displayhook`.\n md_dict : dict (optional)\n The metadata dict to be associated with the display data.\n \"\"\"\n if 'text/plain' not in format_dict:\n # nothing to do\n return\n # We want to print because we want to always make sure we have a\n # newline, even if all the prompt separators are ''. 
This is the\n # standard IPython behavior.\n result_repr = format_dict['text/plain']\n if '\\n' in result_repr:\n # So that multi-line strings line up with the left column of\n # the screen, instead of having the output prompt mess up\n # their first line.\n # We use the prompt template instead of the expanded prompt\n # because the expansion may add ANSI escapes that will interfere\n # with our ability to determine whether or not we should add\n # a newline.\n prompt_template = self.shell.prompt_manager.out_template\n if prompt_template and not prompt_template.endswith('\\n'):\n # But avoid extraneous empty lines.\n result_repr = '\\n' + result_repr\n\n print(result_repr, file=io.stdout)\n\n def update_user_ns(self, result):\n \"\"\"Update user_ns with various things like _, __, _1, etc.\"\"\"\n\n # Avoid recursive reference when displaying _oh/Out\n if result is not self.shell.user_ns['_oh']:\n if len(self.shell.user_ns['_oh']) >= self.cache_size and self.do_full_cache:\n self.cull_cache()\n # Don't overwrite '_' and friends if '_' is in __builtin__ (otherwise\n # we cause buggy behavior for things like gettext).\n\n if '_' not in builtin_mod.__dict__:\n self.___ = self.__\n self.__ = self._\n self._ = result\n self.shell.push({'_':self._,\n '__':self.__,\n '___':self.___}, interactive=False)\n\n # hackish access to top-level namespace to create _1,_2... dynamically\n to_main = {}\n if self.do_full_cache:\n new_result = '_'+repr(self.prompt_count)\n to_main[new_result] = result\n self.shell.push(to_main, interactive=False)\n self.shell.user_ns['_oh'][self.prompt_count] = result\n\n def fill_exec_result(self, result):\n if self.exec_result is not None:\n self.exec_result.result = result\n\n def log_output(self, format_dict):\n \"\"\"Log the output.\"\"\"\n if 'text/plain' not in format_dict:\n # nothing to do\n return\n if self.shell.logger.log_output:\n self.shell.logger.log_write(format_dict['text/plain'], 'output')\n self.shell.history_manager.output_hist_reprs[self.prompt_count] = \\\n format_dict['text/plain']\n\n def finish_displayhook(self):\n \"\"\"Finish up all displayhook activities.\"\"\"\n io.stdout.write(self.shell.separate_out2)\n io.stdout.flush()\n\n def __call__(self, result=None):\n \"\"\"Printing with history cache management.\n\n This is invoked everytime the interpreter needs to print, and is\n activated by setting the variable sys.displayhook to it.\n \"\"\"\n self.check_for_underscore()\n if result is not None and not self.quiet():\n self.start_displayhook()\n self.write_output_prompt()\n format_dict, md_dict = self.compute_format_data(result)\n self.update_user_ns(result)\n self.fill_exec_result(result)\n if format_dict:\n self.write_format_data(format_dict, md_dict)\n self.log_output(format_dict)\n self.finish_displayhook()\n\n def cull_cache(self):\n \"\"\"Output cache is full, cull the oldest entries\"\"\"\n oh = self.shell.user_ns.get('_oh', {})\n sz = len(oh)\n cull_count = max(int(sz * self.cull_fraction), 2)\n warn('Output cache limit (currently {sz} entries) hit.\\n'\n 'Flushing oldest {cull_count} entries.'.format(sz=sz, cull_count=cull_count))\n \n for i, n in enumerate(sorted(oh)):\n if i >= cull_count:\n break\n self.shell.user_ns.pop('_%i' % n, None)\n oh.pop(n, None)\n \n\n def flush(self):\n if not self.do_full_cache:\n raise ValueError(\"You shouldn't have reached the cache flush \"\n \"if full caching is not enabled!\")\n # delete auto-generated vars from global namespace\n\n for n in range(1,self.prompt_count + 1):\n key = '_'+repr(n)\n try:\n del 
self.shell.user_ns[key]\n except: pass\n # In some embedded circumstances, the user_ns doesn't have the\n # '_oh' key set up.\n oh = self.shell.user_ns.get('_oh', None)\n if oh is not None:\n oh.clear()\n\n # Release our own references to objects:\n self._, self.__, self.___ = '', '', ''\n\n if '_' not in builtin_mod.__dict__:\n self.shell.user_ns.update({'_':None,'__':None, '___':None})\n import gc\n # TODO: Is this really needed?\n # IronPython blocks here forever\n if sys.platform != \"cli\":\n gc.collect()\n\n", "path": "IPython/core/displayhook.py"}]}
| 3,904 | 141 |
gh_patches_debug_1780
|
rasdani/github-patches
|
git_diff
|
localstack__localstack-4075
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support ruby2.7 runtime
AWS supports the following Ruby runtimes:
Name | Identifier | SDK for Ruby | Operating system
-- | -- | -- | --
Ruby 2.7 | ruby2.7 | 3.0.1 | Amazon Linux 2
Ruby 2.5 | ruby2.5 | 3.0.1 | Amazon Linux
Currently, `localstack/lambda` only contains the `ruby2.5` tag. Will the 2.7 runtime be supported in the (near) future?
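For what it's worth, the handler-file lookup in `lambda_utils.py` (shown below) already keys off the `ruby` prefix rather than the exact `ruby2.5` identifier, so supporting `ruby2.7` would presumably come down to the new runtime constant plus a matching `localstack/lambda` image tag. A quick check, assuming LocalStack is importable:

```python
from localstack.services.awslambda.lambda_utils import get_handler_file_from_name

# Both calls return 'lambda_function.rb', because the lookup only tests
# runtime.startswith('ruby'), not the exact runtime identifier.
print(get_handler_file_from_name("lambda_function.handler", runtime="ruby2.5"))
print(get_handler_file_from_name("lambda_function.handler", runtime="ruby2.7"))
```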
</issue>
<code>
[start of localstack/services/awslambda/lambda_utils.py]
1 import os
2 from collections import defaultdict
3 from localstack.utils.common import to_str
4
5 # Lambda runtime constants
6 LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
7 LAMBDA_RUNTIME_PYTHON36 = 'python3.6'
8 LAMBDA_RUNTIME_PYTHON37 = 'python3.7'
9 LAMBDA_RUNTIME_PYTHON38 = 'python3.8'
10 LAMBDA_RUNTIME_NODEJS = 'nodejs'
11 LAMBDA_RUNTIME_NODEJS43 = 'nodejs4.3'
12 LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
13 LAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'
14 LAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'
15 LAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'
16 LAMBDA_RUNTIME_NODEJS14X = 'nodejs14.x'
17 LAMBDA_RUNTIME_JAVA8 = 'java8'
18 LAMBDA_RUNTIME_JAVA11 = 'java11'
19 LAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'
20 LAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'
21 LAMBDA_RUNTIME_DOTNETCORE31 = 'dotnetcore3.1'
22 LAMBDA_RUNTIME_GOLANG = 'go1.x'
23 LAMBDA_RUNTIME_RUBY = 'ruby'
24 LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'
25 LAMBDA_RUNTIME_PROVIDED = 'provided'
26
27 # default handler and runtime
28 LAMBDA_DEFAULT_HANDLER = 'handler.handler'
29 LAMBDA_DEFAULT_RUNTIME = 'python3.8'
30 LAMBDA_DEFAULT_STARTING_POSITION = 'LATEST'
31
32 # List of Dotnet Lambda runtime names
33 DOTNET_LAMBDA_RUNTIMES = [LAMBDA_RUNTIME_DOTNETCORE2, LAMBDA_RUNTIME_DOTNETCORE21, LAMBDA_RUNTIME_DOTNETCORE31]
34
35
36 def multi_value_dict_for_list(elements):
37 temp_mv_dict = defaultdict(list)
38 for key in elements:
39 if isinstance(key, (list, tuple)):
40 key, value = key
41 else:
42 value = elements[key]
43 key = to_str(key)
44 temp_mv_dict[key].append(value)
45
46 return dict((k, tuple(v)) for k, v in temp_mv_dict.items())
47
48
49 def get_handler_file_from_name(handler_name, runtime=LAMBDA_DEFAULT_RUNTIME):
50 if runtime.startswith(LAMBDA_RUNTIME_PROVIDED):
51 return 'bootstrap'
52 delimiter = '.'
53 if runtime.startswith(LAMBDA_RUNTIME_NODEJS):
54 file_ext = '.js'
55 elif runtime.startswith(LAMBDA_RUNTIME_GOLANG):
56 file_ext = ''
57 elif runtime.startswith(tuple(DOTNET_LAMBDA_RUNTIMES)):
58 file_ext = '.dll'
59 delimiter = ':'
60 elif runtime.startswith(LAMBDA_RUNTIME_RUBY):
61 file_ext = '.rb'
62 else:
63 handler_name = handler_name.rpartition(delimiter)[0].replace(delimiter, os.path.sep)
64 file_ext = '.py'
65 return '%s%s' % (handler_name.split(delimiter)[0], file_ext)
66
[end of localstack/services/awslambda/lambda_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/localstack/services/awslambda/lambda_utils.py b/localstack/services/awslambda/lambda_utils.py
--- a/localstack/services/awslambda/lambda_utils.py
+++ b/localstack/services/awslambda/lambda_utils.py
@@ -22,6 +22,7 @@
LAMBDA_RUNTIME_GOLANG = 'go1.x'
LAMBDA_RUNTIME_RUBY = 'ruby'
LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'
+LAMBDA_RUNTIME_RUBY27 = 'ruby2.7'
LAMBDA_RUNTIME_PROVIDED = 'provided'
# default handler and runtime
|
{"golden_diff": "diff --git a/localstack/services/awslambda/lambda_utils.py b/localstack/services/awslambda/lambda_utils.py\n--- a/localstack/services/awslambda/lambda_utils.py\n+++ b/localstack/services/awslambda/lambda_utils.py\n@@ -22,6 +22,7 @@\n LAMBDA_RUNTIME_GOLANG = 'go1.x'\n LAMBDA_RUNTIME_RUBY = 'ruby'\n LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'\n+LAMBDA_RUNTIME_RUBY27 = 'ruby2.7'\n LAMBDA_RUNTIME_PROVIDED = 'provided'\n \n # default handler and runtime\n", "issue": "Support ruby2.7 runtime\nAWS supports the following Ruby runtimes:\r\n\r\n<!--StartFragment-->\r\nName | Identifier | SDK for Ruby | Operating system\r\n-- | -- | -- | --\r\nRuby 2.7 | ruby2.7 | 3.0.1 | Amazon Linux 2\r\nRuby 2.5 | ruby2.5 | 3.0.1 | Amazon Linux\r\n\r\n<!--EndFragment-->\r\n\r\nCurrently, `localstack/lambda` only contains the `ruby2.5` tag. Will the 2.7 runtime be supported in the (near) future?\n", "before_files": [{"content": "import os\nfrom collections import defaultdict\nfrom localstack.utils.common import to_str\n\n# Lambda runtime constants\nLAMBDA_RUNTIME_PYTHON27 = 'python2.7'\nLAMBDA_RUNTIME_PYTHON36 = 'python3.6'\nLAMBDA_RUNTIME_PYTHON37 = 'python3.7'\nLAMBDA_RUNTIME_PYTHON38 = 'python3.8'\nLAMBDA_RUNTIME_NODEJS = 'nodejs'\nLAMBDA_RUNTIME_NODEJS43 = 'nodejs4.3'\nLAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'\nLAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'\nLAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'\nLAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'\nLAMBDA_RUNTIME_NODEJS14X = 'nodejs14.x'\nLAMBDA_RUNTIME_JAVA8 = 'java8'\nLAMBDA_RUNTIME_JAVA11 = 'java11'\nLAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'\nLAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'\nLAMBDA_RUNTIME_DOTNETCORE31 = 'dotnetcore3.1'\nLAMBDA_RUNTIME_GOLANG = 'go1.x'\nLAMBDA_RUNTIME_RUBY = 'ruby'\nLAMBDA_RUNTIME_RUBY25 = 'ruby2.5'\nLAMBDA_RUNTIME_PROVIDED = 'provided'\n\n# default handler and runtime\nLAMBDA_DEFAULT_HANDLER = 'handler.handler'\nLAMBDA_DEFAULT_RUNTIME = 'python3.8'\nLAMBDA_DEFAULT_STARTING_POSITION = 'LATEST'\n\n# List of Dotnet Lambda runtime names\nDOTNET_LAMBDA_RUNTIMES = [LAMBDA_RUNTIME_DOTNETCORE2, LAMBDA_RUNTIME_DOTNETCORE21, LAMBDA_RUNTIME_DOTNETCORE31]\n\n\ndef multi_value_dict_for_list(elements):\n temp_mv_dict = defaultdict(list)\n for key in elements:\n if isinstance(key, (list, tuple)):\n key, value = key\n else:\n value = elements[key]\n key = to_str(key)\n temp_mv_dict[key].append(value)\n\n return dict((k, tuple(v)) for k, v in temp_mv_dict.items())\n\n\ndef get_handler_file_from_name(handler_name, runtime=LAMBDA_DEFAULT_RUNTIME):\n if runtime.startswith(LAMBDA_RUNTIME_PROVIDED):\n return 'bootstrap'\n delimiter = '.'\n if runtime.startswith(LAMBDA_RUNTIME_NODEJS):\n file_ext = '.js'\n elif runtime.startswith(LAMBDA_RUNTIME_GOLANG):\n file_ext = ''\n elif runtime.startswith(tuple(DOTNET_LAMBDA_RUNTIMES)):\n file_ext = '.dll'\n delimiter = ':'\n elif runtime.startswith(LAMBDA_RUNTIME_RUBY):\n file_ext = '.rb'\n else:\n handler_name = handler_name.rpartition(delimiter)[0].replace(delimiter, os.path.sep)\n file_ext = '.py'\n return '%s%s' % (handler_name.split(delimiter)[0], file_ext)\n", "path": "localstack/services/awslambda/lambda_utils.py"}]}
| 1,461 | 139 |
gh_patches_debug_39863
|
rasdani/github-patches
|
git_diff
|
kserve__kserve-3031
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Introduce Priority for serving runtimes
/kind feature
**Problem**
- Currently, in auto-selection mode, the KServe serving runtime selection does not have a stable way to pick a serving runtime if two or more serving runtimes support the same model format.
- Currently, Kserve [selects the first serving runtime](https://github.com/kserve/kserve/blob/f2f0717a9d6341b6ec9b939bdd324b2c8c507551/pkg/controller/v1beta1/inferenceservice/components/predictor.go#L152) from a list of serving runtimes [sorted by their respective creation timestamp descending and name ascending order](https://github.com/kserve/kserve/blob/f2f0717a9d6341b6ec9b939bdd324b2c8c507551/pkg/apis/serving/v1beta1/predictor_model.go#L162). So the users have little to no control over the runtime selection in auto select mode.
**Describe the solution you'd like**
[A clear and concise description of what you want to happen.]
- Introduce a new optional field called `Priority` in the model formats section. Through this field we can specify the priority of the serving runtimes for a specific model format (a rough selection sketch is included after the example YAML below).
- The higher the priority value, the higher the precedence
- The priority is valid only if auto select is enabled; otherwise the priority is not considered.
- If more than one serving runtime supports the model format and none of them provides a priority, then the existing selection behaviour is followed.
- A serving runtime that specifies a priority takes precedence over a serving runtime that does not.
- Serving runtimes that support the same model format and model version cannot share the same priority value.
**Example Yaml**
```yaml
apiVersion: serving.kserve.io/v1alpha1
kind: ClusterServingRuntime
metadata:
name: kserve-mlserver
spec:
annotations:
prometheus.kserve.io/path: /metrics
prometheus.kserve.io/port: '8080'
containers:
- env:
- name: MLSERVER_MODEL_IMPLEMENTATION
value: '{{.Labels.modelClass}}'
- name: MLSERVER_HTTP_PORT
value: '8080'
- name: MLSERVER_GRPC_PORT
value: '9000'
- name: MODELS_DIR
value: /mnt/models
image: docker.io/seldonio/mlserver:1.3.2
name: kserve-container
resources:
limits:
cpu: '1'
memory: 2Gi
requests:
cpu: '1'
memory: 2Gi
protocolVersions:
- v2
supportedModelFormats:
- autoSelect: true
name: sklearn
priority: 2
version: '0'
- autoSelect: true
name: xgboost
priority: 2
version: '1'
- autoSelect: true
name: lightgbm
priority: 2
version: '3'
- autoSelect: true
name: mlflow
priority: 1
version: '1'
```
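To make the intended selection rule concrete, here is a rough illustrative sketch in Python (the real logic lives in the Go controller; the `runtimes`/`supported_formats` shapes below are invented for the example):

```python
def select_runtime(runtimes, model_format, model_version):
    # Only runtimes that support the format and have autoSelect enabled are candidates.
    candidates = [
        (rt, fmt)
        for rt in runtimes
        for fmt in rt.supported_formats
        if fmt.name == model_format
        and fmt.auto_select
        and (fmt.version is None or fmt.version == model_version)
    ]
    if not candidates:
        return None
    # Priority is only honoured in auto-select mode; the highest value wins.
    prioritized = [(rt, fmt) for rt, fmt in candidates if fmt.priority is not None]
    if prioritized:
        return max(prioritized, key=lambda pair: pair[1].priority)[0]
    # No priorities given: fall back to the old ordering
    # (creation timestamp descending, name ascending).
    return candidates[0][0]
```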
**Anything else you would like to add:**
[Miscellaneous information that will assist in solving the issue.]
</issue>
<code>
[start of python/kserve/kserve/models/v1alpha1_supported_model_format.py]
1 # Copyright 2023 The KServe Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # coding: utf-8
16
17 """
18 KServe
19
20 Python SDK for KServe # noqa: E501
21
22 The version of the OpenAPI document: v0.1
23 Generated by: https://openapi-generator.tech
24 """
25
26
27 import pprint
28 import re # noqa: F401
29
30 import six
31
32 from kserve.configuration import Configuration
33
34
35 class V1alpha1SupportedModelFormat(object):
36 """NOTE: This class is auto generated by OpenAPI Generator.
37 Ref: https://openapi-generator.tech
38
39 Do not edit the class manually.
40 """
41
42 """
43 Attributes:
44 openapi_types (dict): The key is attribute name
45 and the value is attribute type.
46 attribute_map (dict): The key is attribute name
47 and the value is json key in definition.
48 """
49 openapi_types = {
50 'auto_select': 'bool',
51 'name': 'str',
52 'version': 'str'
53 }
54
55 attribute_map = {
56 'auto_select': 'autoSelect',
57 'name': 'name',
58 'version': 'version'
59 }
60
61 def __init__(self, auto_select=None, name='', version=None, local_vars_configuration=None): # noqa: E501
62 """V1alpha1SupportedModelFormat - a model defined in OpenAPI""" # noqa: E501
63 if local_vars_configuration is None:
64 local_vars_configuration = Configuration()
65 self.local_vars_configuration = local_vars_configuration
66
67 self._auto_select = None
68 self._name = None
69 self._version = None
70 self.discriminator = None
71
72 if auto_select is not None:
73 self.auto_select = auto_select
74 if name is not None:
75 self.name = name
76 if version is not None:
77 self.version = version
78
79 @property
80 def auto_select(self):
81 """Gets the auto_select of this V1alpha1SupportedModelFormat. # noqa: E501
82
83 Set to true to allow the ServingRuntime to be used for automatic model placement if this model format is specified with no explicit runtime. # noqa: E501
84
85 :return: The auto_select of this V1alpha1SupportedModelFormat. # noqa: E501
86 :rtype: bool
87 """
88 return self._auto_select
89
90 @auto_select.setter
91 def auto_select(self, auto_select):
92 """Sets the auto_select of this V1alpha1SupportedModelFormat.
93
94 Set to true to allow the ServingRuntime to be used for automatic model placement if this model format is specified with no explicit runtime. # noqa: E501
95
96 :param auto_select: The auto_select of this V1alpha1SupportedModelFormat. # noqa: E501
97 :type: bool
98 """
99
100 self._auto_select = auto_select
101
102 @property
103 def name(self):
104 """Gets the name of this V1alpha1SupportedModelFormat. # noqa: E501
105
106 Name of the model format. # noqa: E501
107
108 :return: The name of this V1alpha1SupportedModelFormat. # noqa: E501
109 :rtype: str
110 """
111 return self._name
112
113 @name.setter
114 def name(self, name):
115 """Sets the name of this V1alpha1SupportedModelFormat.
116
117 Name of the model format. # noqa: E501
118
119 :param name: The name of this V1alpha1SupportedModelFormat. # noqa: E501
120 :type: str
121 """
122
123 self._name = name
124
125 @property
126 def version(self):
127 """Gets the version of this V1alpha1SupportedModelFormat. # noqa: E501
128
129 Version of the model format. Used in validating that a predictor is supported by a runtime. Can be \"major\", \"major.minor\" or \"major.minor.patch\". # noqa: E501
130
131 :return: The version of this V1alpha1SupportedModelFormat. # noqa: E501
132 :rtype: str
133 """
134 return self._version
135
136 @version.setter
137 def version(self, version):
138 """Sets the version of this V1alpha1SupportedModelFormat.
139
140 Version of the model format. Used in validating that a predictor is supported by a runtime. Can be \"major\", \"major.minor\" or \"major.minor.patch\". # noqa: E501
141
142 :param version: The version of this V1alpha1SupportedModelFormat. # noqa: E501
143 :type: str
144 """
145
146 self._version = version
147
148 def to_dict(self):
149 """Returns the model properties as a dict"""
150 result = {}
151
152 for attr, _ in six.iteritems(self.openapi_types):
153 value = getattr(self, attr)
154 if isinstance(value, list):
155 result[attr] = list(map(
156 lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
157 value
158 ))
159 elif hasattr(value, "to_dict"):
160 result[attr] = value.to_dict()
161 elif isinstance(value, dict):
162 result[attr] = dict(map(
163 lambda item: (item[0], item[1].to_dict())
164 if hasattr(item[1], "to_dict") else item,
165 value.items()
166 ))
167 else:
168 result[attr] = value
169
170 return result
171
172 def to_str(self):
173 """Returns the string representation of the model"""
174 return pprint.pformat(self.to_dict())
175
176 def __repr__(self):
177 """For `print` and `pprint`"""
178 return self.to_str()
179
180 def __eq__(self, other):
181 """Returns true if both objects are equal"""
182 if not isinstance(other, V1alpha1SupportedModelFormat):
183 return False
184
185 return self.to_dict() == other.to_dict()
186
187 def __ne__(self, other):
188 """Returns true if both objects are not equal"""
189 if not isinstance(other, V1alpha1SupportedModelFormat):
190 return True
191
192 return self.to_dict() != other.to_dict()
193
[end of python/kserve/kserve/models/v1alpha1_supported_model_format.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/python/kserve/kserve/models/v1alpha1_supported_model_format.py b/python/kserve/kserve/models/v1alpha1_supported_model_format.py
--- a/python/kserve/kserve/models/v1alpha1_supported_model_format.py
+++ b/python/kserve/kserve/models/v1alpha1_supported_model_format.py
@@ -49,16 +49,18 @@
openapi_types = {
'auto_select': 'bool',
'name': 'str',
+ 'priority': 'int',
'version': 'str'
}
attribute_map = {
'auto_select': 'autoSelect',
'name': 'name',
+ 'priority': 'priority',
'version': 'version'
}
- def __init__(self, auto_select=None, name='', version=None, local_vars_configuration=None): # noqa: E501
+ def __init__(self, auto_select=None, name='', priority=None, version=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1SupportedModelFormat - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
@@ -66,6 +68,7 @@
self._auto_select = None
self._name = None
+ self._priority = None
self._version = None
self.discriminator = None
@@ -73,6 +76,8 @@
self.auto_select = auto_select
if name is not None:
self.name = name
+ if priority is not None:
+ self.priority = priority
if version is not None:
self.version = version
@@ -122,6 +127,29 @@
self._name = name
+ @property
+ def priority(self):
+ """Gets the priority of this V1alpha1SupportedModelFormat. # noqa: E501
+
+ Priority of this serving runtime for auto selection. This is used to select the serving runtime if more than one serving runtime supports the same model format. The value should be greater than zero. The higher the value, the higher the priority. Priority is not considered if AutoSelect is either false or not specified. Priority can be overridden by specifying the runtime in the InferenceService. # noqa: E501
+
+ :return: The priority of this V1alpha1SupportedModelFormat. # noqa: E501
+ :rtype: int
+ """
+ return self._priority
+
+ @priority.setter
+ def priority(self, priority):
+ """Sets the priority of this V1alpha1SupportedModelFormat.
+
+ Priority of this serving runtime for auto selection. This is used to select the serving runtime if more than one serving runtime supports the same model format. The value should be greater than zero. The higher the value, the higher the priority. Priority is not considered if AutoSelect is either false or not specified. Priority can be overridden by specifying the runtime in the InferenceService. # noqa: E501
+
+ :param priority: The priority of this V1alpha1SupportedModelFormat. # noqa: E501
+ :type: int
+ """
+
+ self._priority = priority
+
@property
def version(self):
"""Gets the version of this V1alpha1SupportedModelFormat. # noqa: E501
|
{"golden_diff": "diff --git a/python/kserve/kserve/models/v1alpha1_supported_model_format.py b/python/kserve/kserve/models/v1alpha1_supported_model_format.py\n--- a/python/kserve/kserve/models/v1alpha1_supported_model_format.py\n+++ b/python/kserve/kserve/models/v1alpha1_supported_model_format.py\n@@ -49,16 +49,18 @@\n openapi_types = {\n 'auto_select': 'bool',\n 'name': 'str',\n+ 'priority': 'int',\n 'version': 'str'\n }\n \n attribute_map = {\n 'auto_select': 'autoSelect',\n 'name': 'name',\n+ 'priority': 'priority',\n 'version': 'version'\n }\n \n- def __init__(self, auto_select=None, name='', version=None, local_vars_configuration=None): # noqa: E501\n+ def __init__(self, auto_select=None, name='', priority=None, version=None, local_vars_configuration=None): # noqa: E501\n \"\"\"V1alpha1SupportedModelFormat - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n@@ -66,6 +68,7 @@\n \n self._auto_select = None\n self._name = None\n+ self._priority = None\n self._version = None\n self.discriminator = None\n \n@@ -73,6 +76,8 @@\n self.auto_select = auto_select\n if name is not None:\n self.name = name\n+ if priority is not None:\n+ self.priority = priority\n if version is not None:\n self.version = version\n \n@@ -122,6 +127,29 @@\n \n self._name = name\n \n+ @property\n+ def priority(self):\n+ \"\"\"Gets the priority of this V1alpha1SupportedModelFormat. # noqa: E501\n+\n+ Priority of this serving runtime for auto selection. This is used to select the serving runtime if more than one serving runtime supports the same model format. The value should be greater than zero. The higher the value, the higher the priority. Priority is not considered if AutoSelect is either false or not specified. Priority can be overridden by specifying the runtime in the InferenceService. # noqa: E501\n+\n+ :return: The priority of this V1alpha1SupportedModelFormat. # noqa: E501\n+ :rtype: int\n+ \"\"\"\n+ return self._priority\n+\n+ @priority.setter\n+ def priority(self, priority):\n+ \"\"\"Sets the priority of this V1alpha1SupportedModelFormat.\n+\n+ Priority of this serving runtime for auto selection. This is used to select the serving runtime if more than one serving runtime supports the same model format. The value should be greater than zero. The higher the value, the higher the priority. Priority is not considered if AutoSelect is either false or not specified. Priority can be overridden by specifying the runtime in the InferenceService. # noqa: E501\n+\n+ :param priority: The priority of this V1alpha1SupportedModelFormat. # noqa: E501\n+ :type: int\n+ \"\"\"\n+\n+ self._priority = priority\n+\n @property\n def version(self):\n \"\"\"Gets the version of this V1alpha1SupportedModelFormat. # noqa: E501\n", "issue": "Introduce Priority for serving runtimes\n/kind feature\r\n\r\n**Problem**\r\n- Currently, In auto selection mode, kserve serving runtime does not have a stable way to select a serving runtime if two are more serving runtimes support the same model format. \r\n- Currently, Kserve [selects the first serving runtime](https://github.com/kserve/kserve/blob/f2f0717a9d6341b6ec9b939bdd324b2c8c507551/pkg/controller/v1beta1/inferenceservice/components/predictor.go#L152) from a list of serving runtimes [sorted by their respective creation timestamp descending and name ascending order](https://github.com/kserve/kserve/blob/f2f0717a9d6341b6ec9b939bdd324b2c8c507551/pkg/apis/serving/v1beta1/predictor_model.go#L162). 
So the users have little to no control over the runtime selection in auto select mode.\r\n\r\n\r\n**Describe the solution you'd like**\r\n[A clear and concise description of what you want to happen.]\r\n- Introduce a new optional field called `Priority` in the model formats section. Through this field we can specify the priority of the serving runtimes for a sepcifice model format.\r\n- The higher the priority value, the higher the precedence\r\n- The priority is valid only if auto select is enabled otherwise the priority is not considered.\r\n- If more than one serving runtime supports the model format and does not provided the priority, then the old way is followed for selecting the runtime.\r\n- The serving runtime with priority takes precedence over the serving runtime with priority not specified.\r\n- The same model formats with same model version cannot have the same priority.\r\n\r\n**Example Yaml**\r\n```yaml\r\napiVersion: serving.kserve.io/v1alpha1\r\nkind: ClusterServingRuntime\r\nmetadata:\r\n name: kserve-mlserver\r\nspec:\r\n annotations:\r\n prometheus.kserve.io/path: /metrics\r\n prometheus.kserve.io/port: '8080'\r\n containers:\r\n - env:\r\n - name: MLSERVER_MODEL_IMPLEMENTATION\r\n value: '{{.Labels.modelClass}}'\r\n - name: MLSERVER_HTTP_PORT\r\n value: '8080'\r\n - name: MLSERVER_GRPC_PORT\r\n value: '9000'\r\n - name: MODELS_DIR\r\n value: /mnt/models\r\n image: docker.io/seldonio/mlserver:1.3.2\r\n name: kserve-container\r\n resources:\r\n limits:\r\n cpu: '1'\r\n memory: 2Gi\r\n requests:\r\n cpu: '1'\r\n memory: 2Gi\r\n protocolVersions:\r\n - v2\r\n supportedModelFormats:\r\n - autoSelect: true\r\n name: sklearn\r\n priority: 2\r\n version: '0'\r\n - autoSelect: true\r\n name: xgboost\r\n priority: 2\r\n version: '1'\r\n - autoSelect: true\r\n name: lightgbm\r\n priority: 2\r\n version: '3'\r\n - autoSelect: true\r\n name: mlflow\r\n priority: 1\r\n version: '1'\r\n\r\n```\r\n\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\n", "before_files": [{"content": "# Copyright 2023 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\n\n\"\"\"\n KServe\n\n Python SDK for KServe # noqa: E501\n\n The version of the OpenAPI document: v0.1\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom kserve.configuration import Configuration\n\n\nclass V1alpha1SupportedModelFormat(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'auto_select': 'bool',\n 'name': 'str',\n 'version': 'str'\n }\n\n attribute_map = {\n 'auto_select': 'autoSelect',\n 'name': 'name',\n 'version': 
'version'\n }\n\n def __init__(self, auto_select=None, name='', version=None, local_vars_configuration=None): # noqa: E501\n \"\"\"V1alpha1SupportedModelFormat - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._auto_select = None\n self._name = None\n self._version = None\n self.discriminator = None\n\n if auto_select is not None:\n self.auto_select = auto_select\n if name is not None:\n self.name = name\n if version is not None:\n self.version = version\n\n @property\n def auto_select(self):\n \"\"\"Gets the auto_select of this V1alpha1SupportedModelFormat. # noqa: E501\n\n Set to true to allow the ServingRuntime to be used for automatic model placement if this model format is specified with no explicit runtime. # noqa: E501\n\n :return: The auto_select of this V1alpha1SupportedModelFormat. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._auto_select\n\n @auto_select.setter\n def auto_select(self, auto_select):\n \"\"\"Sets the auto_select of this V1alpha1SupportedModelFormat.\n\n Set to true to allow the ServingRuntime to be used for automatic model placement if this model format is specified with no explicit runtime. # noqa: E501\n\n :param auto_select: The auto_select of this V1alpha1SupportedModelFormat. # noqa: E501\n :type: bool\n \"\"\"\n\n self._auto_select = auto_select\n\n @property\n def name(self):\n \"\"\"Gets the name of this V1alpha1SupportedModelFormat. # noqa: E501\n\n Name of the model format. # noqa: E501\n\n :return: The name of this V1alpha1SupportedModelFormat. # noqa: E501\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this V1alpha1SupportedModelFormat.\n\n Name of the model format. # noqa: E501\n\n :param name: The name of this V1alpha1SupportedModelFormat. # noqa: E501\n :type: str\n \"\"\"\n\n self._name = name\n\n @property\n def version(self):\n \"\"\"Gets the version of this V1alpha1SupportedModelFormat. # noqa: E501\n\n Version of the model format. Used in validating that a predictor is supported by a runtime. Can be \\\"major\\\", \\\"major.minor\\\" or \\\"major.minor.patch\\\". # noqa: E501\n\n :return: The version of this V1alpha1SupportedModelFormat. # noqa: E501\n :rtype: str\n \"\"\"\n return self._version\n\n @version.setter\n def version(self, version):\n \"\"\"Sets the version of this V1alpha1SupportedModelFormat.\n\n Version of the model format. Used in validating that a predictor is supported by a runtime. Can be \\\"major\\\", \\\"major.minor\\\" or \\\"major.minor.patch\\\". # noqa: E501\n\n :param version: The version of this V1alpha1SupportedModelFormat. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._version = version\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, V1alpha1SupportedModelFormat):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, V1alpha1SupportedModelFormat):\n return True\n\n return self.to_dict() != other.to_dict()\n", "path": "python/kserve/kserve/models/v1alpha1_supported_model_format.py"}]}
| 3,256 | 769 |
gh_patches_debug_13003
|
rasdani/github-patches
|
git_diff
|
replicate__cog-362
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support importing model from subdirectories
This works:
```yaml
model: "predict.py:MyModel"
```
But this doesn't:
```yaml
model: "mymodel/predict.py:MyModel"
```
It should.
</issue>
<code>
[start of python/cog/predictor.py]
1 from abc import ABC, abstractmethod
2 import importlib
3 import os.path
4 from pathlib import Path
5 from typing import Dict, Any
6
7 import yaml
8
9 from .errors import ConfigDoesNotExist, PredictorNotSet
10
11
12 # TODO(andreas): handle directory input
13 # TODO(andreas): handle List[Dict[str, int]], etc.
14 # TODO(andreas): model-level documentation
15
16
17 class Predictor(ABC):
18 @abstractmethod
19 def setup(self):
20 pass
21
22 @abstractmethod
23 def predict(self, **kwargs):
24 pass
25
26 def get_type_signature(self):
27 """
28 Returns a dict describing the inputs of the model.
29 """
30 from .input import (
31 get_type_name,
32 UNSPECIFIED,
33 )
34
35 inputs = []
36 if hasattr(self.predict, "_inputs"):
37 input_specs = self.predict._inputs
38 for spec in input_specs:
39 arg: Dict[str, Any] = {
40 "name": spec.name,
41 "type": get_type_name(spec.type),
42 }
43 if spec.help:
44 arg["help"] = spec.help
45 if spec.default is not UNSPECIFIED:
46 arg["default"] = str(spec.default) # TODO: don't string this
47 if spec.min is not None:
48 arg["min"] = str(spec.min) # TODO: don't string this
49 if spec.max is not None:
50 arg["max"] = str(spec.max) # TODO: don't string this
51 if spec.options is not None:
52 arg["options"] = [str(o) for o in spec.options]
53 inputs.append(arg)
54 return {"inputs": inputs}
55
56
57 def run_prediction(predictor, inputs, cleanup_functions):
58 """
59 Run the predictor on the inputs, and append resulting paths
60 to cleanup functions for removal.
61 """
62 result = predictor.predict(**inputs)
63 if isinstance(result, Path):
64 cleanup_functions.append(result.unlink)
65 return result
66
67
68 def load_predictor():
69 # Assumes the working directory is /src
70 config_path = os.path.abspath("cog.yaml")
71 try:
72 with open(config_path) as fh:
73 config = yaml.safe_load(fh)
74 except FileNotFoundError:
75 raise ConfigDoesNotExist(
76 f"Could not find {config_path}",
77 )
78
79 if "predict" not in config:
80 raise PredictorNotSet(
81 "Can't run predictions: 'predict' option not found in cog.yaml"
82 )
83
84 # TODO: handle predict scripts in subdirectories
85 predict_string = config["predict"]
86 module_name, class_name = predict_string.split(".py:", 1)
87 module = importlib.import_module(module_name)
88 predictor_class = getattr(module, class_name)
89 return predictor_class()
90
[end of python/cog/predictor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/python/cog/predictor.py b/python/cog/predictor.py
--- a/python/cog/predictor.py
+++ b/python/cog/predictor.py
@@ -81,9 +81,11 @@
"Can't run predictions: 'predict' option not found in cog.yaml"
)
- # TODO: handle predict scripts in subdirectories
predict_string = config["predict"]
- module_name, class_name = predict_string.split(".py:", 1)
- module = importlib.import_module(module_name)
+ module_path, class_name = predict_string.split(":", 1)
+ module_name = os.path.basename(module_path).split(".py", 1)[0]
+ spec = importlib.util.spec_from_file_location(module_name, module_path)
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
predictor_class = getattr(module, class_name)
return predictor_class()
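The fix swaps `importlib.import_module` (which only understands dotted module names) for `importlib.util`, which can load a module from an arbitrary file path. A self-contained sketch of the same pattern, using a hypothetical `mymodel/predict.py` containing a `MyModel` class:

```python
import importlib.util
import os.path

module_path = "mymodel/predict.py"   # hypothetical path taken from cog.yaml
class_name = "MyModel"                # hypothetical predictor class name

module_name = os.path.basename(module_path).split(".py", 1)[0]
spec = importlib.util.spec_from_file_location(module_name, module_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)       # runs predict.py so MyModel gets defined
predictor_class = getattr(module, class_name)
predictor = predictor_class()
```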
|
{"golden_diff": "diff --git a/python/cog/predictor.py b/python/cog/predictor.py\n--- a/python/cog/predictor.py\n+++ b/python/cog/predictor.py\n@@ -81,9 +81,11 @@\n \"Can't run predictions: 'predict' option not found in cog.yaml\"\n )\n \n- # TODO: handle predict scripts in subdirectories\n predict_string = config[\"predict\"]\n- module_name, class_name = predict_string.split(\".py:\", 1)\n- module = importlib.import_module(module_name)\n+ module_path, class_name = predict_string.split(\":\", 1)\n+ module_name = os.path.basename(module_path).split(\".py\", 1)[0]\n+ spec = importlib.util.spec_from_file_location(module_name, module_path)\n+ module = importlib.util.module_from_spec(spec)\n+ spec.loader.exec_module(module)\n predictor_class = getattr(module, class_name)\n return predictor_class()\n", "issue": "Support importing model from subdirectories\nThis works:\r\n\r\n```yaml\r\nmodel: \"predict.py:MyModel\"\r\n```\r\n\r\nBut this doesn't:\r\n\r\n```yaml:\r\nmodel: \"mymodel/predict.py:MyModel\"\r\n```\r\n\r\nIt should.\n", "before_files": [{"content": "from abc import ABC, abstractmethod\nimport importlib\nimport os.path\nfrom pathlib import Path\nfrom typing import Dict, Any\n\nimport yaml\n\nfrom .errors import ConfigDoesNotExist, PredictorNotSet\n\n\n# TODO(andreas): handle directory input\n# TODO(andreas): handle List[Dict[str, int]], etc.\n# TODO(andreas): model-level documentation\n\n\nclass Predictor(ABC):\n @abstractmethod\n def setup(self):\n pass\n\n @abstractmethod\n def predict(self, **kwargs):\n pass\n\n def get_type_signature(self):\n \"\"\"\n Returns a dict describing the inputs of the model.\n \"\"\"\n from .input import (\n get_type_name,\n UNSPECIFIED,\n )\n\n inputs = []\n if hasattr(self.predict, \"_inputs\"):\n input_specs = self.predict._inputs\n for spec in input_specs:\n arg: Dict[str, Any] = {\n \"name\": spec.name,\n \"type\": get_type_name(spec.type),\n }\n if spec.help:\n arg[\"help\"] = spec.help\n if spec.default is not UNSPECIFIED:\n arg[\"default\"] = str(spec.default) # TODO: don't string this\n if spec.min is not None:\n arg[\"min\"] = str(spec.min) # TODO: don't string this\n if spec.max is not None:\n arg[\"max\"] = str(spec.max) # TODO: don't string this\n if spec.options is not None:\n arg[\"options\"] = [str(o) for o in spec.options]\n inputs.append(arg)\n return {\"inputs\": inputs}\n\n\ndef run_prediction(predictor, inputs, cleanup_functions):\n \"\"\"\n Run the predictor on the inputs, and append resulting paths\n to cleanup functions for removal.\n \"\"\"\n result = predictor.predict(**inputs)\n if isinstance(result, Path):\n cleanup_functions.append(result.unlink)\n return result\n\n\ndef load_predictor():\n # Assumes the working directory is /src\n config_path = os.path.abspath(\"cog.yaml\")\n try:\n with open(config_path) as fh:\n config = yaml.safe_load(fh)\n except FileNotFoundError:\n raise ConfigDoesNotExist(\n f\"Could not find {config_path}\",\n )\n\n if \"predict\" not in config:\n raise PredictorNotSet(\n \"Can't run predictions: 'predict' option not found in cog.yaml\"\n )\n\n # TODO: handle predict scripts in subdirectories\n predict_string = config[\"predict\"]\n module_name, class_name = predict_string.split(\".py:\", 1)\n module = importlib.import_module(module_name)\n predictor_class = getattr(module, class_name)\n return predictor_class()\n", "path": "python/cog/predictor.py"}]}
| 1,338 | 209 |
gh_patches_debug_35521
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-929
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Using a docker/config.json file causes "TypeError: string indices must be integers"
Using a ~/.docker/config.json file causes docker-compose to output a Python error. @dnephin in https://github.com/docker/compose/issues/2697#issuecomment-172936366 suggests that this is an issue to be raised with the docker-py project instead of the compose project.
So here's a simple config.json file, and a dummy docker-compose.yml file which demonstrate the issue:
```
$ cat ~/.docker/config.json
{
"detachKeys": "ctrl-q,ctrl-u,ctrl-i,ctrl-t"
}
$ cat docker-compose.yml
version: '2'
services:
s1:
image: ubuntu
$ docker-compose ps
Traceback (most recent call last):
File "<string>", line 3, in <module>
File "/compose/compose/cli/main.py", line 55, in main
File "/compose/compose/cli/docopt_command.py", line 23, in sys_dispatch
File "/compose/compose/cli/docopt_command.py", line 26, in dispatch
File "/compose/compose/cli/main.py", line 172, in perform_command
File "/compose/compose/cli/command.py", line 52, in project_from_options
File "/compose/compose/cli/command.py", line 85, in get_project
File "/compose/compose/cli/command.py", line 66, in get_client
File "/compose/compose/cli/docker_client.py", line 37, in docker_client
File "/compose/venv/lib/python2.7/site-packages/docker/client.py", line 56, in __init__
File "/compose/venv/lib/python2.7/site-packages/docker/auth/auth.py", line 182, in load_config
File "/compose/venv/lib/python2.7/site-packages/docker/auth/auth.py", line 118, in parse_auth
TypeError: string indices must be integers
docker-compose returned -1
```
And if we delete the config and try again:
```
$ rm ~/.docker/config.json
$ docker-compose ps
Name Command State Ports
------------------------------
```
I'm using Docker Toolbox 1.10.0 on OSX 10.11.3. The docker container is being run on a VirtualBox VM managed by docker-machine. Here are some more version numbers:
```
$ docker version
Client:
Version: 1.10.0
API version: 1.22
Go version: go1.5.3
Git commit: 590d5108
Built: Thu Feb 4 18:18:11 2016
OS/Arch: darwin/amd64
Server:
Version: 1.10.0
API version: 1.22
Go version: go1.5.3
Git commit: 590d5108
Built: Thu Feb 4 19:55:25 2016
OS/Arch: linux/amd64
$ docker-compose version
docker-compose version 1.6.0, build d99cad6
docker-py version: 1.7.0
CPython version: 2.7.9
OpenSSL version: OpenSSL 1.0.1j 15 Oct 2014
```
</issue>
<code>
[start of docker/auth/auth.py]
1 # Copyright 2013 dotCloud inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import base64
16 import json
17 import logging
18 import os
19
20 import six
21
22 from .. import errors
23
24 INDEX_NAME = 'docker.io'
25 INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)
26 DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
27 LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
28
29 log = logging.getLogger(__name__)
30
31
32 def resolve_repository_name(repo_name):
33 if '://' in repo_name:
34 raise errors.InvalidRepository(
35 'Repository name cannot contain a scheme ({0})'.format(repo_name)
36 )
37
38 index_name, remote_name = split_repo_name(repo_name)
39 if index_name[0] == '-' or index_name[-1] == '-':
40 raise errors.InvalidRepository(
41 'Invalid index name ({0}). Cannot begin or end with a'
42 ' hyphen.'.format(index_name)
43 )
44 return resolve_index_name(index_name), remote_name
45
46
47 def resolve_index_name(index_name):
48 index_name = convert_to_hostname(index_name)
49 if index_name == 'index.'+INDEX_NAME:
50 index_name = INDEX_NAME
51 return index_name
52
53
54 def split_repo_name(repo_name):
55 parts = repo_name.split('/', 1)
56 if len(parts) == 1 or (
57 '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
58 ):
59 # This is a docker index repo (ex: username/foobar or ubuntu)
60 return INDEX_NAME, repo_name
61 return tuple(parts)
62
63
64 def resolve_authconfig(authconfig, registry=None):
65 """
66 Returns the authentication data from the given auth configuration for a
67 specific registry. As with the Docker client, legacy entries in the config
68 with full URLs are stripped down to hostnames before checking for a match.
69 Returns None if no match was found.
70 """
71 # Default to the public index server
72 registry = resolve_index_name(registry) if registry else INDEX_NAME
73 log.debug("Looking for auth entry for {0}".format(repr(registry)))
74
75 if registry in authconfig:
76 log.debug("Found {0}".format(repr(registry)))
77 return authconfig[registry]
78
79 for key, config in six.iteritems(authconfig):
80 if resolve_index_name(key) == registry:
81 log.debug("Found {0}".format(repr(key)))
82 return config
83
84 log.debug("No entry found")
85 return None
86
87
88 def convert_to_hostname(url):
89 return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
90
91
92 def decode_auth(auth):
93 if isinstance(auth, six.string_types):
94 auth = auth.encode('ascii')
95 s = base64.b64decode(auth)
96 login, pwd = s.split(b':', 1)
97 return login.decode('utf8'), pwd.decode('utf8')
98
99
100 def encode_header(auth):
101 auth_json = json.dumps(auth).encode('ascii')
102 return base64.urlsafe_b64encode(auth_json)
103
104
105 def parse_auth(entries):
106 """
107 Parses authentication entries
108
109 Args:
110 entries: Dict of authentication entries.
111
112 Returns:
113 Authentication registry.
114 """
115
116 conf = {}
117 for registry, entry in six.iteritems(entries):
118 username, password = decode_auth(entry['auth'])
119 log.debug(
120 'Found entry (registry={0}, username={1})'
121 .format(repr(registry), repr(username))
122 )
123 conf[registry] = {
124 'username': username,
125 'password': password,
126 'email': entry.get('email'),
127 'serveraddress': registry,
128 }
129 return conf
130
131
132 def find_config_file(config_path=None):
133 environment_path = os.path.join(
134 os.environ.get('DOCKER_CONFIG'),
135 os.path.basename(DOCKER_CONFIG_FILENAME)
136 ) if os.environ.get('DOCKER_CONFIG') else None
137
138 paths = [
139 config_path, # 1
140 environment_path, # 2
141 os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3
142 os.path.join(
143 os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME
144 ) # 4
145 ]
146
147 for path in paths:
148 if path and os.path.exists(path):
149 return path
150 return None
151
152
153 def load_config(config_path=None):
154 """
155 Loads authentication data from a Docker configuration file in the given
156 root directory or if config_path is passed use given path.
157 Lookup priority:
158 explicit config_path parameter > DOCKER_CONFIG environment variable >
159 ~/.docker/config.json > ~/.dockercfg
160 """
161 config_file = find_config_file(config_path)
162
163 if not config_file:
164 log.debug("File doesn't exist")
165 return {}
166
167 try:
168 with open(config_file) as f:
169 data = json.load(f)
170 res = {}
171 if data.get('auths'):
172 log.debug("Found 'auths' section")
173 res.update(parse_auth(data['auths']))
174 if data.get('HttpHeaders'):
175 log.debug("Found 'HttpHeaders' section")
176 res.update({'HttpHeaders': data['HttpHeaders']})
177 if res:
178 return res
179 else:
180 log.debug("Couldn't find 'auths' or 'HttpHeaders' sections")
181 f.seek(0)
182 return parse_auth(json.load(f))
183 except (IOError, KeyError, ValueError) as e:
184 # Likely missing new Docker config file or it's in an
185 # unknown format, continue to attempt to read old location
186 # and format.
187 log.debug(e)
188
189 log.debug("Attempting to parse legacy auth file format")
190 try:
191 data = []
192 with open(config_file) as f:
193 for line in f.readlines():
194 data.append(line.strip().split(' = ')[1])
195 if len(data) < 2:
196 # Not enough data
197 raise errors.InvalidConfigFile(
198 'Invalid or empty configuration file!'
199 )
200
201 username, password = decode_auth(data[0])
202 return {
203 INDEX_NAME: {
204 'username': username,
205 'password': password,
206 'email': data[1],
207 'serveraddress': INDEX_URL,
208 }
209 }
210 except Exception as e:
211 log.debug(e)
212 pass
213
214 log.debug("All parsing attempts failed - returning empty config")
215 return {}
216
[end of docker/auth/auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docker/auth/auth.py b/docker/auth/auth.py
--- a/docker/auth/auth.py
+++ b/docker/auth/auth.py
@@ -46,7 +46,7 @@
def resolve_index_name(index_name):
index_name = convert_to_hostname(index_name)
- if index_name == 'index.'+INDEX_NAME:
+ if index_name == 'index.' + INDEX_NAME:
index_name = INDEX_NAME
return index_name
@@ -102,12 +102,14 @@
return base64.urlsafe_b64encode(auth_json)
-def parse_auth(entries):
+def parse_auth(entries, raise_on_error=False):
"""
Parses authentication entries
Args:
- entries: Dict of authentication entries.
+ entries: Dict of authentication entries.
+ raise_on_error: If set to true, an invalid format will raise
+ InvalidConfigFile
Returns:
Authentication registry.
@@ -115,6 +117,19 @@
conf = {}
for registry, entry in six.iteritems(entries):
+ if not (isinstance(entry, dict) and 'auth' in entry):
+ log.debug(
+ 'Config entry for key {0} is not auth config'.format(registry)
+ )
+ # We sometimes fall back to parsing the whole config as if it was
+ # the auth config by itself, for legacy purposes. In that case, we
+ # fail silently and return an empty conf if any of the keys is not
+ # formatted properly.
+ if raise_on_error:
+ raise errors.InvalidConfigFile(
+ 'Invalid configuration for registry {0}'.format(registry)
+ )
+ return {}
username, password = decode_auth(entry['auth'])
log.debug(
'Found entry (registry={0}, username={1})'
@@ -170,7 +185,7 @@
res = {}
if data.get('auths'):
log.debug("Found 'auths' section")
- res.update(parse_auth(data['auths']))
+ res.update(parse_auth(data['auths'], raise_on_error=True))
if data.get('HttpHeaders'):
log.debug("Found 'HttpHeaders' section")
res.update({'HttpHeaders': data['HttpHeaders']})
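For context, the crash from the issue occurs because a `config.json` that contains only client settings (for example `detachKeys`) ends up being fed to `parse_auth`, whose loop then indexes into a string value. A minimal illustration of the underlying failure:

```python
entries = {"detachKeys": "ctrl-q,ctrl-u,ctrl-i,ctrl-t"}  # contents of ~/.docker/config.json

for registry, entry in entries.items():
    # entry is the string "ctrl-q,...", so entry['auth'] raises
    # TypeError: string indices must be integers
    username_password = entry['auth']
```

The `isinstance(entry, dict) and 'auth' in entry` guard added in the diff catches exactly this case: it raises `InvalidConfigFile` when parsing the 'auths' section, and silently returns an empty config in the legacy fallback path.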
|
{"golden_diff": "diff --git a/docker/auth/auth.py b/docker/auth/auth.py\n--- a/docker/auth/auth.py\n+++ b/docker/auth/auth.py\n@@ -46,7 +46,7 @@\n \n def resolve_index_name(index_name):\n index_name = convert_to_hostname(index_name)\n- if index_name == 'index.'+INDEX_NAME:\n+ if index_name == 'index.' + INDEX_NAME:\n index_name = INDEX_NAME\n return index_name\n \n@@ -102,12 +102,14 @@\n return base64.urlsafe_b64encode(auth_json)\n \n \n-def parse_auth(entries):\n+def parse_auth(entries, raise_on_error=False):\n \"\"\"\n Parses authentication entries\n \n Args:\n- entries: Dict of authentication entries.\n+ entries: Dict of authentication entries.\n+ raise_on_error: If set to true, an invalid format will raise\n+ InvalidConfigFile\n \n Returns:\n Authentication registry.\n@@ -115,6 +117,19 @@\n \n conf = {}\n for registry, entry in six.iteritems(entries):\n+ if not (isinstance(entry, dict) and 'auth' in entry):\n+ log.debug(\n+ 'Config entry for key {0} is not auth config'.format(registry)\n+ )\n+ # We sometimes fall back to parsing the whole config as if it was\n+ # the auth config by itself, for legacy purposes. In that case, we\n+ # fail silently and return an empty conf if any of the keys is not\n+ # formatted properly.\n+ if raise_on_error:\n+ raise errors.InvalidConfigFile(\n+ 'Invalid configuration for registry {0}'.format(registry)\n+ )\n+ return {}\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n@@ -170,7 +185,7 @@\n res = {}\n if data.get('auths'):\n log.debug(\"Found 'auths' section\")\n- res.update(parse_auth(data['auths']))\n+ res.update(parse_auth(data['auths'], raise_on_error=True))\n if data.get('HttpHeaders'):\n log.debug(\"Found 'HttpHeaders' section\")\n res.update({'HttpHeaders': data['HttpHeaders']})\n", "issue": "Using a docker/config.json file causes \"TypeError: string indices must be integers\"\nUsing a ~/.docker/config.json file causes docker-compose to output a Python error. 
@dnephin in https://github.com/docker/compose/issues/2697#issuecomment-172936366 suggests that this is an issue to be raised with the docker-py project instead of the compose project.\n\nSo here's a simple config.json file, and a dummy docker-compose.yml file which demonstrate the issue:\n\n```\n$ cat ~/.docker/config.json \n{\n \"detachKeys\": \"ctrl-q,ctrl-u,ctrl-i,ctrl-t\"\n}\n$ cat docker-compose.yml \nversion: '2'\nservices:\n s1:\n image: ubuntu\n$ docker-compose ps\nTraceback (most recent call last):\n File \"<string>\", line 3, in <module>\n File \"/compose/compose/cli/main.py\", line 55, in main\n File \"/compose/compose/cli/docopt_command.py\", line 23, in sys_dispatch\n File \"/compose/compose/cli/docopt_command.py\", line 26, in dispatch\n File \"/compose/compose/cli/main.py\", line 172, in perform_command\n File \"/compose/compose/cli/command.py\", line 52, in project_from_options\n File \"/compose/compose/cli/command.py\", line 85, in get_project\n File \"/compose/compose/cli/command.py\", line 66, in get_client\n File \"/compose/compose/cli/docker_client.py\", line 37, in docker_client\n File \"/compose/venv/lib/python2.7/site-packages/docker/client.py\", line 56, in __init__\n File \"/compose/venv/lib/python2.7/site-packages/docker/auth/auth.py\", line 182, in load_config\n File \"/compose/venv/lib/python2.7/site-packages/docker/auth/auth.py\", line 118, in parse_auth\nTypeError: string indices must be integers\ndocker-compose returned -1\n```\n\nAnd if we delete the config and try again:\n\n```\n$ rm ~/.docker/config.json \n$ docker-compose ps\nName Command State Ports \n------------------------------\n```\n\nI'm using Docker Toolbox 1.10.0 on OSX 10.11.3. The docker container is being run on a VirtualBox VM managed by docker-machine. Here are some more version numbers:\n\n```\n$ docker version\nClient:\n Version: 1.10.0\n API version: 1.22\n Go version: go1.5.3\n Git commit: 590d5108\n Built: Thu Feb 4 18:18:11 2016\n OS/Arch: darwin/amd64\n\nServer:\n Version: 1.10.0\n API version: 1.22\n Go version: go1.5.3\n Git commit: 590d5108\n Built: Thu Feb 4 19:55:25 2016\n OS/Arch: linux/amd64\n$ docker-compose version\ndocker-compose version 1.6.0, build d99cad6\ndocker-py version: 1.7.0\nCPython version: 2.7.9\nOpenSSL version: OpenSSL 1.0.1j 15 Oct 2014\n```\n\n", "before_files": [{"content": "# Copyright 2013 dotCloud inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport json\nimport logging\nimport os\n\nimport six\n\nfrom .. 
import errors\n\nINDEX_NAME = 'docker.io'\nINDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)\nDOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')\nLEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'\n\nlog = logging.getLogger(__name__)\n\n\ndef resolve_repository_name(repo_name):\n if '://' in repo_name:\n raise errors.InvalidRepository(\n 'Repository name cannot contain a scheme ({0})'.format(repo_name)\n )\n\n index_name, remote_name = split_repo_name(repo_name)\n if index_name[0] == '-' or index_name[-1] == '-':\n raise errors.InvalidRepository(\n 'Invalid index name ({0}). Cannot begin or end with a'\n ' hyphen.'.format(index_name)\n )\n return resolve_index_name(index_name), remote_name\n\n\ndef resolve_index_name(index_name):\n index_name = convert_to_hostname(index_name)\n if index_name == 'index.'+INDEX_NAME:\n index_name = INDEX_NAME\n return index_name\n\n\ndef split_repo_name(repo_name):\n parts = repo_name.split('/', 1)\n if len(parts) == 1 or (\n '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'\n ):\n # This is a docker index repo (ex: username/foobar or ubuntu)\n return INDEX_NAME, repo_name\n return tuple(parts)\n\n\ndef resolve_authconfig(authconfig, registry=None):\n \"\"\"\n Returns the authentication data from the given auth configuration for a\n specific registry. As with the Docker client, legacy entries in the config\n with full URLs are stripped down to hostnames before checking for a match.\n Returns None if no match was found.\n \"\"\"\n # Default to the public index server\n registry = resolve_index_name(registry) if registry else INDEX_NAME\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n\n if registry in authconfig:\n log.debug(\"Found {0}\".format(repr(registry)))\n return authconfig[registry]\n\n for key, config in six.iteritems(authconfig):\n if resolve_index_name(key) == registry:\n log.debug(\"Found {0}\".format(repr(key)))\n return config\n\n log.debug(\"No entry found\")\n return None\n\n\ndef convert_to_hostname(url):\n return url.replace('http://', '').replace('https://', '').split('/', 1)[0]\n\n\ndef decode_auth(auth):\n if isinstance(auth, six.string_types):\n auth = auth.encode('ascii')\n s = base64.b64decode(auth)\n login, pwd = s.split(b':', 1)\n return login.decode('utf8'), pwd.decode('utf8')\n\n\ndef encode_header(auth):\n auth_json = json.dumps(auth).encode('ascii')\n return base64.urlsafe_b64encode(auth_json)\n\n\ndef parse_auth(entries):\n \"\"\"\n Parses authentication entries\n\n Args:\n entries: Dict of authentication entries.\n\n Returns:\n Authentication registry.\n \"\"\"\n\n conf = {}\n for registry, entry in six.iteritems(entries):\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n .format(repr(registry), repr(username))\n )\n conf[registry] = {\n 'username': username,\n 'password': password,\n 'email': entry.get('email'),\n 'serveraddress': registry,\n }\n return conf\n\n\ndef find_config_file(config_path=None):\n environment_path = os.path.join(\n os.environ.get('DOCKER_CONFIG'),\n os.path.basename(DOCKER_CONFIG_FILENAME)\n ) if os.environ.get('DOCKER_CONFIG') else None\n\n paths = [\n config_path, # 1\n environment_path, # 2\n os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3\n os.path.join(\n os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME\n ) # 4\n ]\n\n for path in paths:\n if path and os.path.exists(path):\n return path\n return None\n\n\ndef load_config(config_path=None):\n \"\"\"\n Loads 
authentication data from a Docker configuration file in the given\n root directory or if config_path is passed use given path.\n Lookup priority:\n explicit config_path parameter > DOCKER_CONFIG environment variable >\n ~/.docker/config.json > ~/.dockercfg\n \"\"\"\n config_file = find_config_file(config_path)\n\n if not config_file:\n log.debug(\"File doesn't exist\")\n return {}\n\n try:\n with open(config_file) as f:\n data = json.load(f)\n res = {}\n if data.get('auths'):\n log.debug(\"Found 'auths' section\")\n res.update(parse_auth(data['auths']))\n if data.get('HttpHeaders'):\n log.debug(\"Found 'HttpHeaders' section\")\n res.update({'HttpHeaders': data['HttpHeaders']})\n if res:\n return res\n else:\n log.debug(\"Couldn't find 'auths' or 'HttpHeaders' sections\")\n f.seek(0)\n return parse_auth(json.load(f))\n except (IOError, KeyError, ValueError) as e:\n # Likely missing new Docker config file or it's in an\n # unknown format, continue to attempt to read old location\n # and format.\n log.debug(e)\n\n log.debug(\"Attempting to parse legacy auth file format\")\n try:\n data = []\n with open(config_file) as f:\n for line in f.readlines():\n data.append(line.strip().split(' = ')[1])\n if len(data) < 2:\n # Not enough data\n raise errors.InvalidConfigFile(\n 'Invalid or empty configuration file!'\n )\n\n username, password = decode_auth(data[0])\n return {\n INDEX_NAME: {\n 'username': username,\n 'password': password,\n 'email': data[1],\n 'serveraddress': INDEX_URL,\n }\n }\n except Exception as e:\n log.debug(e)\n pass\n\n log.debug(\"All parsing attempts failed - returning empty config\")\n return {}\n", "path": "docker/auth/auth.py"}]}
| 3,338 | 498 |
gh_patches_debug_31576
|
rasdani/github-patches
|
git_diff
|
deepset-ai__haystack-7904
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`ContextRelevanceEvaluator`: statements extraction can be made more robust
Slightly different inputs produce very different outputs in the `ContextRelevanceEvaluator`.
**Example in the docs**:
```python
from haystack.components.evaluators import ContextRelevanceEvaluator
questions = ["Who created the Python language?"]
contexts = [
[
"Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects."
],
]
evaluator = ContextRelevanceEvaluator()
result = evaluator.run(questions=questions, contexts=contexts)
print(result["score"])
# 1.0
print(result["individual_scores"])
# [1.0]
print(result["results"])
# [{'statements': ['Python, created by Guido van Rossum in the late 1980s.'], 'statement_scores': [1], 'score': 1.0}]
```
**A different example**:
```python
from haystack.components.evaluators import ContextRelevanceEvaluator
questions = ["Who created the Python language?"]
contexts = [
[
"Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects.",
"Java is a high-level, class-based, object-oriented programming language. It allows you to write once, run anywhere, meaning that compiled Java code can run on all platforms that support Java without the need for recompilation.",
"Scala is a high-level, statically typed programming language.",
],
]
evaluator = ContextRelevanceEvaluator()
result = evaluator.run(questions=questions, contexts=contexts)
print(result)
# {'results': [{'statements': ['Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language.', 'Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects.'], 'statement_scores': [1, 0], 'score': 0.5}], 'score': 0.5, 'individual_scores': [0.5]}
```
In this case, the first context is broken into 2 statements, while the 2nd and 3rd contexts are not considered at all.
**Expected behavior**
The extraction of statements should be more robust/consistent.
**Additional context**
From @julian-risch:
> I checked that the prompt sent to the LLM correctly contains all three contexts. The problem is that the LLM extract statements only for the first context at least in this example. It seems that the prompt instructions aren't good enough. You could try extending the _DEFAULT_EXAMPLES with an example that has multiple contexts for one question.
**System:**
- Haystack version (commit or version number): 2.2.0
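Building on the suggestion quoted above, a hypothetical extra entry for `_DEFAULT_EXAMPLES` with several contexts for one question might look like the sketch below (the exact output shape expected for multiple contexts is an assumption; the field names follow the existing defaults shown in the code further down):

```python
{
    "inputs": {
        "questions": "Who created the Python language?",
        "contexts": [
            "Python was created by Guido van Rossum in the late 1980s.",
            "Java is a class-based, object-oriented programming language.",
            "Scala is a statically typed programming language.",
        ],
    },
    "outputs": {
        "statements": [
            "Python was created by Guido van Rossum in the late 1980s.",
            "Java is a class-based, object-oriented programming language.",
            "Scala is a statically typed programming language.",
        ],
        "statement_scores": [1, 0, 0],
    },
},
```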
</issue>
<code>
[start of haystack/components/evaluators/context_relevance.py]
1 # SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>
2 #
3 # SPDX-License-Identifier: Apache-2.0
4
5 from typing import Any, Dict, List, Optional
6
7 from numpy import mean as np_mean
8
9 from haystack import component, default_from_dict, default_to_dict
10 from haystack.components.evaluators.llm_evaluator import LLMEvaluator
11 from haystack.utils import Secret, deserialize_secrets_inplace
12
13 # Private global variable for default examples to include in the prompt if the user does not provide any examples
14 _DEFAULT_EXAMPLES = [
15 {
16 "inputs": {
17 "questions": "What is the capital of Germany?",
18 "contexts": ["Berlin is the capital of Germany and was founded in 1244."],
19 },
20 "outputs": {
21 "statements": ["Berlin is the capital of Germany.", "Berlin was founded in 1244."],
22 "statement_scores": [1, 0],
23 },
24 },
25 {
26 "inputs": {"questions": "What is the capital of France?", "contexts": ["Berlin is the capital of Germany."]},
27 "outputs": {"statements": ["Berlin is the capital of Germany."], "statement_scores": [0]},
28 },
29 {
30 "inputs": {"questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."]},
31 "outputs": {"statements": ["Rome is the capital of Italy."], "statement_scores": [1]},
32 },
33 ]
34
35
36 @component
37 class ContextRelevanceEvaluator(LLMEvaluator):
38 """
39 Evaluator that checks if a provided context is relevant to the question.
40
41 An LLM breaks up the context into multiple statements and checks whether each statement
42 is relevant for answering a question.
43 The final score for the context relevance is a number from 0.0 to 1.0. It represents the proportion of
44 statements that can be inferred from the provided contexts.
45
46 Usage example:
47 ```python
48 from haystack.components.evaluators import ContextRelevanceEvaluator
49
50 questions = ["Who created the Python language?"]
51 contexts = [
52 [(
53 "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming "
54 "language. Its design philosophy emphasizes code readability, and its language constructs aim to help "
55 "programmers write clear, logical code for both small and large-scale software projects."
56 )],
57 ]
58
59 evaluator = ContextRelevanceEvaluator()
60 result = evaluator.run(questions=questions, contexts=contexts)
61 print(result["score"])
62 # 1.0
63 print(result["individual_scores"])
64 # [1.0]
65 print(result["results"])
66 # [{
67 # 'statements': ['Python, created by Guido van Rossum in the late 1980s.'],
68 # 'statement_scores': [1],
69 # 'score': 1.0
70 # }]
71 ```
72 """
73
74 def __init__(
75 self,
76 examples: Optional[List[Dict[str, Any]]] = None,
77 progress_bar: bool = True,
78 api: str = "openai",
79 api_key: Secret = Secret.from_env_var("OPENAI_API_KEY"),
80 raise_on_failure: bool = True,
81 ):
82 """
83 Creates an instance of ContextRelevanceEvaluator.
84
85 :param examples:
86 Optional few-shot examples conforming to the expected input and output format of ContextRelevanceEvaluator.
87 Default examples will be used if none are provided.
88 Each example must be a dictionary with keys "inputs" and "outputs".
89 "inputs" must be a dictionary with keys "questions" and "contexts".
90 "outputs" must be a dictionary with "statements" and "statement_scores".
91 Expected format:
92 [{
93 "inputs": {
94 "questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."],
95 },
96 "outputs": {
97 "statements": ["Rome is the capital of Italy."],
98 "statement_scores": [1],
99 },
100 }]
101 :param progress_bar:
102 Whether to show a progress bar during the evaluation.
103 :param api:
104 The API to use for calling an LLM through a Generator.
105 Supported APIs: "openai".
106 :param api_key:
107 The API key.
108 :param raise_on_failure:
109 Whether to raise an exception if the API call fails.
110
111 """
112 self.instructions = (
113 "Your task is to judge how relevant the provided context is for answering a question. "
114 "First, please extract statements from the provided context. "
115 "Second, calculate a relevance score for each statement in the context. "
116 "The score is 1 if the statement is relevant to answer the question or 0 if it is not relevant."
117 )
118 self.inputs = [("questions", List[str]), ("contexts", List[List[str]])]
119 self.outputs = ["statements", "statement_scores"]
120 self.examples = examples or _DEFAULT_EXAMPLES
121 self.api = api
122 self.api_key = api_key
123
124 super(ContextRelevanceEvaluator, self).__init__(
125 instructions=self.instructions,
126 inputs=self.inputs,
127 outputs=self.outputs,
128 examples=self.examples,
129 api=self.api,
130 api_key=self.api_key,
131 raise_on_failure=raise_on_failure,
132 progress_bar=progress_bar,
133 )
134
135 @component.output_types(individual_scores=List[int], score=float, results=List[Dict[str, Any]])
136 def run(self, questions: List[str], contexts: List[List[str]]) -> Dict[str, Any]:
137 """
138 Run the LLM evaluator.
139
140 :param questions:
141 A list of questions.
142 :param contexts:
143 A list of lists of contexts. Each list of contexts corresponds to one question.
144 :returns:
145 A dictionary with the following outputs:
146 - `score`: Mean context relevance score over all the provided input questions.
147 - `individual_scores`: A list of context relevance scores for each input question.
148 - `results`: A list of dictionaries with `statements` and `statement_scores` for each input context.
149 """
150 result = super(ContextRelevanceEvaluator, self).run(questions=questions, contexts=contexts)
151
152 # calculate average statement relevance score per query
153 for idx, res in enumerate(result["results"]):
154 if res is None:
155 result["results"][idx] = {"statements": [], "statement_scores": [], "score": float("nan")}
156 continue
157 if not res["statements"]:
158 res["score"] = 0
159 else:
160 res["score"] = np_mean(res["statement_scores"])
161
162 # calculate average context relevance score over all queries
163 result["score"] = np_mean([res["score"] for res in result["results"]])
164 result["individual_scores"] = [res["score"] for res in result["results"]]
165
166 return result
167
168 def to_dict(self) -> Dict[str, Any]:
169 """
170 Serialize this component to a dictionary.
171
172 :returns:
173 A dictionary with serialized data.
174 """
175 return default_to_dict(
176 self,
177 api=self.api,
178 api_key=self.api_key.to_dict() if self.api_key else None,
179 examples=self.examples,
180 progress_bar=self.progress_bar,
181 raise_on_failure=self.raise_on_failure,
182 )
183
184 @classmethod
185 def from_dict(cls, data: Dict[str, Any]) -> "ContextRelevanceEvaluator":
186 """
187 Deserialize this component from a dictionary.
188
189 :param data:
190 The dictionary representation of this component.
191 :returns:
192 The deserialized component instance.
193 """
194 deserialize_secrets_inplace(data["init_parameters"], keys=["api_key"])
195 return default_from_dict(cls, data)
196
[end of haystack/components/evaluators/context_relevance.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/haystack/components/evaluators/context_relevance.py b/haystack/components/evaluators/context_relevance.py
--- a/haystack/components/evaluators/context_relevance.py
+++ b/haystack/components/evaluators/context_relevance.py
@@ -23,8 +23,23 @@
},
},
{
- "inputs": {"questions": "What is the capital of France?", "contexts": ["Berlin is the capital of Germany."]},
- "outputs": {"statements": ["Berlin is the capital of Germany."], "statement_scores": [0]},
+ "inputs": {
+ "questions": "What is the capital of France?",
+ "contexts": [
+ "Berlin is the capital of Germany and was founded in 1244.",
+ "Europe is a continent with 44 countries.",
+ "Madrid is the capital of Spain.",
+ ],
+ },
+ "outputs": {
+ "statements": [
+ "Berlin is the capital of Germany.",
+ "Berlin was founded in 1244.",
+ "Europe is a continent with 44 countries.",
+ "Madrid is the capital of Spain.",
+ ],
+ "statement_scores": [0, 0, 0, 0],
+ },
},
{
"inputs": {"questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."]},
@@ -113,7 +128,8 @@
"Your task is to judge how relevant the provided context is for answering a question. "
"First, please extract statements from the provided context. "
"Second, calculate a relevance score for each statement in the context. "
- "The score is 1 if the statement is relevant to answer the question or 0 if it is not relevant."
+ "The score is 1 if the statement is relevant to answer the question or 0 if it is not relevant. "
+ "Each statement should be scored individually."
)
self.inputs = [("questions", List[str]), ("contexts", List[List[str]])]
self.outputs = ["statements", "statement_scores"]
|
{"golden_diff": "diff --git a/haystack/components/evaluators/context_relevance.py b/haystack/components/evaluators/context_relevance.py\n--- a/haystack/components/evaluators/context_relevance.py\n+++ b/haystack/components/evaluators/context_relevance.py\n@@ -23,8 +23,23 @@\n },\n },\n {\n- \"inputs\": {\"questions\": \"What is the capital of France?\", \"contexts\": [\"Berlin is the capital of Germany.\"]},\n- \"outputs\": {\"statements\": [\"Berlin is the capital of Germany.\"], \"statement_scores\": [0]},\n+ \"inputs\": {\n+ \"questions\": \"What is the capital of France?\",\n+ \"contexts\": [\n+ \"Berlin is the capital of Germany and was founded in 1244.\",\n+ \"Europe is a continent with 44 countries.\",\n+ \"Madrid is the capital of Spain.\",\n+ ],\n+ },\n+ \"outputs\": {\n+ \"statements\": [\n+ \"Berlin is the capital of Germany.\",\n+ \"Berlin was founded in 1244.\",\n+ \"Europe is a continent with 44 countries.\",\n+ \"Madrid is the capital of Spain.\",\n+ ],\n+ \"statement_scores\": [0, 0, 0, 0],\n+ },\n },\n {\n \"inputs\": {\"questions\": \"What is the capital of Italy?\", \"contexts\": [\"Rome is the capital of Italy.\"]},\n@@ -113,7 +128,8 @@\n \"Your task is to judge how relevant the provided context is for answering a question. \"\n \"First, please extract statements from the provided context. \"\n \"Second, calculate a relevance score for each statement in the context. \"\n- \"The score is 1 if the statement is relevant to answer the question or 0 if it is not relevant.\"\n+ \"The score is 1 if the statement is relevant to answer the question or 0 if it is not relevant. \"\n+ \"Each statement should be scored individually.\"\n )\n self.inputs = [(\"questions\", List[str]), (\"contexts\", List[List[str]])]\n self.outputs = [\"statements\", \"statement_scores\"]\n", "issue": "`ContextRelevanceEvaluator`: statements extraction can be made more robust\nSlightly different inputs produce very different outputs in the `ContextRelevanceEvaluator`.\r\n\r\n**Example in the docs**:\r\n```python\r\nfrom haystack.components.evaluators import ContextRelevanceEvaluator\r\n\r\nquestions = [\"Who created the Python language?\"]\r\ncontexts = [\r\n [\r\n \"Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects.\"\r\n ],\r\n]\r\n\r\nevaluator = ContextRelevanceEvaluator()\r\nresult = evaluator.run(questions=questions, contexts=contexts)\r\nprint(result[\"score\"])\r\n# 1.0\r\nprint(result[\"individual_scores\"])\r\n# [1.0]\r\nprint(result[\"results\"])\r\n# [{'statements': ['Python, created by Guido van Rossum in the late 1980s.'], 'statement_scores': [1], 'score': 1.0}]\r\n```\r\n\r\n**A different example**:\r\n```python\r\nfrom haystack.components.evaluators import ContextRelevanceEvaluator\r\nquestions = [\"Who created the Python language?\"]\r\ncontexts = [\r\n [\r\n \"Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects.\",\r\n \"Java is a high-level, class-based, object-oriented programming language. 
It allows you to write once, run anywhere, meaning that compiled Java code can run on all platforms that support Java without the need for recompilation.\",\r\n \"Scala is a high-level, statically typed programming language.\",\r\n ],\r\n]\r\nevaluator = ContextRelevanceEvaluator()\r\nresult = evaluator.run(questions=questions, contexts=contexts)\r\n\r\nprint(result)\r\n# {'results': [{'statements': ['Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language.', 'Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects.'], 'statement_scores': [1, 0], 'score': 0.5}], 'score': 0.5, 'individual_scores': [0.5]}\r\n```\r\n\r\nIn this case, the first context is broken into 2 statements, while the 2nd and 3rd statements are not considered.\r\n\r\n**Expected behavior**\r\nThe extraction of statements should be more robust/consistent.\r\n\r\n**Additional context**\r\nFrom @julian-risch:\r\n> I checked that the prompt sent to the LLM correctly contains all three contexts. The problem is that the LLM extract statements only for the first context at least in this example. It seems that the prompt instructions aren't good enough. You could try extending the _DEFAULT_EXAMPLES with an example that has multiple contexts for one question.\r\n\r\n\r\n**System:**\r\n - Haystack version (commit or version number): 2.2.0\n", "before_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom typing import Any, Dict, List, Optional\n\nfrom numpy import mean as np_mean\n\nfrom haystack import component, default_from_dict, default_to_dict\nfrom haystack.components.evaluators.llm_evaluator import LLMEvaluator\nfrom haystack.utils import Secret, deserialize_secrets_inplace\n\n# Private global variable for default examples to include in the prompt if the user does not provide any examples\n_DEFAULT_EXAMPLES = [\n {\n \"inputs\": {\n \"questions\": \"What is the capital of Germany?\",\n \"contexts\": [\"Berlin is the capital of Germany and was founded in 1244.\"],\n },\n \"outputs\": {\n \"statements\": [\"Berlin is the capital of Germany.\", \"Berlin was founded in 1244.\"],\n \"statement_scores\": [1, 0],\n },\n },\n {\n \"inputs\": {\"questions\": \"What is the capital of France?\", \"contexts\": [\"Berlin is the capital of Germany.\"]},\n \"outputs\": {\"statements\": [\"Berlin is the capital of Germany.\"], \"statement_scores\": [0]},\n },\n {\n \"inputs\": {\"questions\": \"What is the capital of Italy?\", \"contexts\": [\"Rome is the capital of Italy.\"]},\n \"outputs\": {\"statements\": [\"Rome is the capital of Italy.\"], \"statement_scores\": [1]},\n },\n]\n\n\n@component\nclass ContextRelevanceEvaluator(LLMEvaluator):\n \"\"\"\n Evaluator that checks if a provided context is relevant to the question.\n\n An LLM breaks up the context into multiple statements and checks whether each statement\n is relevant for answering a question.\n The final score for the context relevance is a number from 0.0 to 1.0. 
It represents the proportion of\n statements that can be inferred from the provided contexts.\n\n Usage example:\n ```python\n from haystack.components.evaluators import ContextRelevanceEvaluator\n\n questions = [\"Who created the Python language?\"]\n contexts = [\n [(\n \"Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming \"\n \"language. Its design philosophy emphasizes code readability, and its language constructs aim to help \"\n \"programmers write clear, logical code for both small and large-scale software projects.\"\n )],\n ]\n\n evaluator = ContextRelevanceEvaluator()\n result = evaluator.run(questions=questions, contexts=contexts)\n print(result[\"score\"])\n # 1.0\n print(result[\"individual_scores\"])\n # [1.0]\n print(result[\"results\"])\n # [{\n # 'statements': ['Python, created by Guido van Rossum in the late 1980s.'],\n # 'statement_scores': [1],\n # 'score': 1.0\n # }]\n ```\n \"\"\"\n\n def __init__(\n self,\n examples: Optional[List[Dict[str, Any]]] = None,\n progress_bar: bool = True,\n api: str = \"openai\",\n api_key: Secret = Secret.from_env_var(\"OPENAI_API_KEY\"),\n raise_on_failure: bool = True,\n ):\n \"\"\"\n Creates an instance of ContextRelevanceEvaluator.\n\n :param examples:\n Optional few-shot examples conforming to the expected input and output format of ContextRelevanceEvaluator.\n Default examples will be used if none are provided.\n Each example must be a dictionary with keys \"inputs\" and \"outputs\".\n \"inputs\" must be a dictionary with keys \"questions\" and \"contexts\".\n \"outputs\" must be a dictionary with \"statements\" and \"statement_scores\".\n Expected format:\n [{\n \"inputs\": {\n \"questions\": \"What is the capital of Italy?\", \"contexts\": [\"Rome is the capital of Italy.\"],\n },\n \"outputs\": {\n \"statements\": [\"Rome is the capital of Italy.\"],\n \"statement_scores\": [1],\n },\n }]\n :param progress_bar:\n Whether to show a progress bar during the evaluation.\n :param api:\n The API to use for calling an LLM through a Generator.\n Supported APIs: \"openai\".\n :param api_key:\n The API key.\n :param raise_on_failure:\n Whether to raise an exception if the API call fails.\n\n \"\"\"\n self.instructions = (\n \"Your task is to judge how relevant the provided context is for answering a question. \"\n \"First, please extract statements from the provided context. \"\n \"Second, calculate a relevance score for each statement in the context. \"\n \"The score is 1 if the statement is relevant to answer the question or 0 if it is not relevant.\"\n )\n self.inputs = [(\"questions\", List[str]), (\"contexts\", List[List[str]])]\n self.outputs = [\"statements\", \"statement_scores\"]\n self.examples = examples or _DEFAULT_EXAMPLES\n self.api = api\n self.api_key = api_key\n\n super(ContextRelevanceEvaluator, self).__init__(\n instructions=self.instructions,\n inputs=self.inputs,\n outputs=self.outputs,\n examples=self.examples,\n api=self.api,\n api_key=self.api_key,\n raise_on_failure=raise_on_failure,\n progress_bar=progress_bar,\n )\n\n @component.output_types(individual_scores=List[int], score=float, results=List[Dict[str, Any]])\n def run(self, questions: List[str], contexts: List[List[str]]) -> Dict[str, Any]:\n \"\"\"\n Run the LLM evaluator.\n\n :param questions:\n A list of questions.\n :param contexts:\n A list of lists of contexts. 
Each list of contexts corresponds to one question.\n :returns:\n A dictionary with the following outputs:\n - `score`: Mean context relevance score over all the provided input questions.\n - `individual_scores`: A list of context relevance scores for each input question.\n - `results`: A list of dictionaries with `statements` and `statement_scores` for each input context.\n \"\"\"\n result = super(ContextRelevanceEvaluator, self).run(questions=questions, contexts=contexts)\n\n # calculate average statement relevance score per query\n for idx, res in enumerate(result[\"results\"]):\n if res is None:\n result[\"results\"][idx] = {\"statements\": [], \"statement_scores\": [], \"score\": float(\"nan\")}\n continue\n if not res[\"statements\"]:\n res[\"score\"] = 0\n else:\n res[\"score\"] = np_mean(res[\"statement_scores\"])\n\n # calculate average context relevance score over all queries\n result[\"score\"] = np_mean([res[\"score\"] for res in result[\"results\"]])\n result[\"individual_scores\"] = [res[\"score\"] for res in result[\"results\"]]\n\n return result\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n A dictionary with serialized data.\n \"\"\"\n return default_to_dict(\n self,\n api=self.api,\n api_key=self.api_key.to_dict() if self.api_key else None,\n examples=self.examples,\n progress_bar=self.progress_bar,\n raise_on_failure=self.raise_on_failure,\n )\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"ContextRelevanceEvaluator\":\n \"\"\"\n Deserialize this component from a dictionary.\n\n :param data:\n The dictionary representation of this component.\n :returns:\n The deserialized component instance.\n \"\"\"\n deserialize_secrets_inplace(data[\"init_parameters\"], keys=[\"api_key\"])\n return default_from_dict(cls, data)\n", "path": "haystack/components/evaluators/context_relevance.py"}]}
| 3,339 | 476 |
gh_patches_debug_25546
|
rasdani/github-patches
|
git_diff
|
OCHA-DAP__hdx-ckan-553
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
About menu: glitch on home page functionality
On **homepage only** and only in Chrome.
Click on about to open the submenu.
Click again to close the menu.
Move the cursor around the `About` text.
The menu text 'About' disappears and reappears unreliably as the cursor moves around it. This is on chrome. Does not occur in firefox.
</issue>
<code>
[start of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
1 hdx_version='v0.2.3'
[end of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
[start of ckanext-hdx_theme/ckanext/hdx_theme/splash_page.py]
1 import sys, re
2 import ckan.lib.base as base
3 from ckan.lib.base import request
4 from ckan.lib.base import c,g,h
5 from ckan.lib.base import model
6 from ckan.lib.base import render
7 from ckan.lib.base import _
8 import ckan.logic as logic
9 import ckan.plugins.toolkit as tk
10
11 from ckan.controllers.group import GroupController as gc
12 from ckan.controllers.home import HomeController
13
14 import ckanext.hdx_theme.caching as caching
15
16 NotAuthorized = logic.NotAuthorized
17 check_access = logic.check_access
18 get_action = logic.get_action
19
20
21 class SplashPageController(HomeController):
22
23 group_type = 'group'
24
25 def index(self):
26 group_type = None
27 context = {'model': model, 'session': model.Session,
28 'user': c.user or c.author, 'for_view': True,
29 'with_private': False}
30
31 q = c.q = request.params.get('q', '')
32 data_dict = {'all_fields': True, 'q': q}
33 sort_by = c.sort_by_selected = request.params.get('sort')
34 if sort_by:
35 data_dict['sort'] = sort_by
36 try:
37 self._check_access('site_read', context)
38 except NotAuthorized:
39 abort(401, _('Not authorized to see this page'))
40 if c.userobj:
41 context['user_id'] = c.userobj.id
42 context['user_is_admin'] = c.userobj.sysadmin
43
44
45
46 c.group_package_stuff = caching.cached_get_group_package_stuff()
47
48 ##Removing groups without geojson for the map
49 c.group_map = []
50 for gp in c.group_package_stuff:
51 for e in gp['extras']:
52 if e['key'] == 'geojson' and e['value']:
53 c.group_map.append(gp)
54
55 #print c.group_package_stuff
56
57 if c.userobj is not None:
58 msg = None
59 url = h.url_for(controller='user', action='edit')
60 is_google_id = \
61 c.userobj.name.startswith('https://www.google.com/accounts/o8/id')
62 if not c.userobj.email and (is_google_id and not c.userobj.fullname):
63 msg = _(u'Please <a href="{link}">update your profile</a>'
64 u' and add your email address and your full name. '
65 u'{site} uses your email address'
66 u' if you need to reset your password.'.format(
67 link=url, site=g.site_title))
68 elif not c.userobj.email:
69 msg = _('Please <a href="%s">update your profile</a>'
70 ' and add your email address. ') % url + \
71 _('%s uses your email address'
72 ' if you need to reset your password.') \
73 % g.site_title
74 elif is_google_id and not c.userobj.fullname:
75 msg = _('Please <a href="%s">update your profile</a>'
76 ' and add your full name.') % (url)
77 if msg:
78 h.flash_notice(msg, allow_html=True)
79
80 return base.render('home/index.html', cache_force=True)
81
82 def _check_access(self, action_name, *args, **kw):
83 ''' select the correct group/org check_access '''
84 return check_access(self._replace_group_org(action_name), *args, **kw)
85
86 def _replace_group_org(self, string):
87 ''' substitute organization for group if this is an org'''
88 if self.group_type == 'organization':
89 string = re.sub('^group', 'organization', string)
90 return string
91
92 def _action(self, action_name):
93 ''' select the correct group/org action '''
94 return get_action(self._replace_group_org(action_name))
95
96
[end of ckanext-hdx_theme/ckanext/hdx_theme/splash_page.py]
[start of ckanext-hdx_theme/ckanext/hdx_theme/plugin.py]
1 import ckanext.hdx_theme.licenses as hdx_licenses
2
3 import ckan.plugins as plugins
4 import ckan.plugins.toolkit as toolkit
5 import ckan.model.package as package
6 import ckan.model.license as license
7 import version
8
9 import ckanext.hdx_theme.caching as caching
10 import ckanext.hdx_theme.auth as auth
11
12
13 def run_on_startup():
14 _generate_license_list()
15
16 caching.cached_get_group_package_stuff()
17
18
19 def _generate_license_list():
20 package.Package._license_register = license.LicenseRegister()
21 package.Package._license_register.licenses = [
22 license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),
23 license.License(license.LicenseCreativeCommonsAttribution()),
24 license.License(license.LicenseCreativeCommonsAttributionShareAlike()),
25 license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),
26 license.License(hdx_licenses.LicenseHdxMultiple()),
27 license.License(hdx_licenses.LicenseHdxOther())
28 ]
29
30 class HDXThemePlugin(plugins.SingletonPlugin):
31 plugins.implements(plugins.IConfigurer)
32 plugins.implements(plugins.IRoutes, inherit=True)
33 plugins.implements(plugins.ITemplateHelpers)
34 plugins.implements(plugins.IActions)
35 plugins.implements(plugins.IAuthFunctions)
36 plugins.implements(plugins.IGroupController, inherit=True)
37 plugins.implements(plugins.IMiddleware, inherit=True)
38
39 def update_config(self, config):
40 toolkit.add_template_directory(config, 'templates')
41 toolkit.add_public_directory(config, 'public')
42 toolkit.add_resource('fanstatic', 'hdx_theme')
43
44
45 def before_map(self, map):
46 map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')
47 map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')
48 map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')
49 map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')
50 map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')
51 map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')
52
53 map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')
54
55 map.connect('/organization/{id}/request_membership', controller='ckanext.hdx_theme.org_controller:HDXOrgController', action='request_membership')
56 return map
57
58 def create(self, entity):
59 caching.invalidate_group_caches()
60
61 def edit(self, entity):
62 caching.invalidate_group_caches()
63
64 def get_helpers(self):
65 from ckanext.hdx_theme import helpers as hdx_helpers
66 return {
67 'is_downloadable': hdx_helpers.is_downloadable,
68 'get_facet_items_dict':hdx_helpers.get_facet_items_dict,
69 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,
70 'get_filtered_params_list':hdx_helpers.get_filtered_params_list,
71 'get_last_revision_package':hdx_helpers.get_last_revision_package,
72 'get_last_modifier_user':hdx_helpers.get_last_modifier_user,
73 'get_last_revision_group':hdx_helpers.get_last_revision_group,
74 'get_group_followers':hdx_helpers.get_group_followers,
75 'get_group_members':hdx_helpers.get_group_members,
76 'markdown_extract_strip':hdx_helpers.markdown_extract_strip,
77 'render_date_from_concat_str':hdx_helpers.render_date_from_concat_str,
78 'hdx_version':hdx_helpers.hdx_version,
79 'hdx_build_nav_icon_with_message':hdx_helpers.hdx_build_nav_icon_with_message,
80 'hdx_num_of_new_related_items':hdx_helpers.hdx_num_of_new_related_items,
81 'hdx_get_extras_element':hdx_helpers.hdx_get_extras_element,
82 'hdx_get_user_info':hdx_helpers.hdx_get_user_info,
83 'hdx_linked_user':hdx_helpers.hdx_linked_user,
84 'hdx_show_singular_plural':hdx_helpers.hdx_show_singular_plural,
85 'hdx_member_roles_list':hdx_helpers.hdx_member_roles_list
86
87 }
88
89 def get_actions(self):
90 from ckanext.hdx_theme import actions as hdx_actions
91 return {
92 'organization_list_for_user':hdx_actions.organization_list_for_user,
93 'cached_group_list': hdx_actions.cached_group_list,
94 'hdx_basic_user_info': hdx_actions.hdx_basic_user_info,
95 'member_list': hdx_actions.member_list,
96 'hdx_get_sys_admins': hdx_actions.hdx_get_sys_admins
97
98 }
99 def get_auth_functions(self):
100 return {
101 'hdx_basic_user_info': auth.hdx_basic_user_info
102 }
103
104 def make_middleware(self, app, config):
105 run_on_startup()
106 return app
107
108
109
110
111
[end of ckanext-hdx_theme/ckanext/hdx_theme/plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
@@ -53,6 +53,8 @@
map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')
map.connect('/organization/{id}/request_membership', controller='ckanext.hdx_theme.org_controller:HDXOrgController', action='request_membership')
+
+ map.connect('/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about')
return map
def create(self, entity):
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/splash_page.py b/ckanext-hdx_theme/ckanext/hdx_theme/splash_page.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/splash_page.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/splash_page.py
@@ -92,4 +92,14 @@
def _action(self, action_name):
''' select the correct group/org action '''
return get_action(self._replace_group_org(action_name))
-
+
+ def about(self, page):
+ title = {'license': _('Data Licenses'),
+ 'terms': _('Terms of Service')}
+ html = {'license': 'home/snippets/hdx_licenses.html',
+ 'terms': 'home/snippets/hdx_terms_of_service.html'}
+
+ extraVars = {'title': title.get(page), 'html': html.get(page), 'page': page}
+ return base.render('home/about2.html', extra_vars = extraVars)
+
+
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
@@ -1 +1 @@
-hdx_version='v0.2.3'
\ No newline at end of file
+hdx_version='v0.2.4'
\ No newline at end of file
|
{"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n@@ -53,6 +53,8 @@\n map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')\n \n map.connect('/organization/{id}/request_membership', controller='ckanext.hdx_theme.org_controller:HDXOrgController', action='request_membership')\n+\n+ map.connect('/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about')\n return map\n \n def create(self, entity):\ndiff --git a/ckanext-hdx_theme/ckanext/hdx_theme/splash_page.py b/ckanext-hdx_theme/ckanext/hdx_theme/splash_page.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/splash_page.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/splash_page.py\n@@ -92,4 +92,14 @@\n \tdef _action(self, action_name):\n \t\t''' select the correct group/org action '''\n \t\treturn get_action(self._replace_group_org(action_name))\n-\t\n+\n+\tdef about(self, page):\n+\t\ttitle = {'license': _('Data Licenses'),\n+\t\t\t\t 'terms': _('Terms of Service')}\n+\t\thtml = {'license': 'home/snippets/hdx_licenses.html',\n+\t\t\t\t 'terms': 'home/snippets/hdx_terms_of_service.html'}\n+\n+\t\textraVars = {'title': title.get(page), 'html': html.get(page), 'page': page}\n+\t\treturn base.render('home/about2.html', extra_vars = extraVars)\n+\n+\ndiff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version='v0.2.3'\n\\ No newline at end of file\n+hdx_version='v0.2.4'\n\\ No newline at end of file\n", "issue": "About menu: glitch on home page functionality\nOn **homepage only** and only in Chrome.\nClick on about to open the submenu.\nClick again to close the menu.\nMove the cursor around the `About` text.\nThe menu text 'About' disappears and reappears unreliably as the cursor moves around it. This is on chrome. 
Does not occur in firefox.\n\n", "before_files": [{"content": "hdx_version='v0.2.3'", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}, {"content": "import sys, re\nimport ckan.lib.base as base\nfrom ckan.lib.base import request\nfrom ckan.lib.base import c,g,h\nfrom ckan.lib.base import model\nfrom ckan.lib.base import render\nfrom ckan.lib.base import _\nimport ckan.logic as logic\nimport ckan.plugins.toolkit as tk\n\nfrom ckan.controllers.group import GroupController as gc\nfrom ckan.controllers.home import HomeController\n\nimport ckanext.hdx_theme.caching as caching\n\nNotAuthorized = logic.NotAuthorized\ncheck_access = logic.check_access\nget_action = logic.get_action\n\n\nclass SplashPageController(HomeController):\n\n\tgroup_type = 'group'\n\n\tdef index(self):\n\t\tgroup_type = None\n\t\tcontext = {'model': model, 'session': model.Session,\n\t\t\t\t 'user': c.user or c.author, 'for_view': True,\n\t\t\t\t 'with_private': False}\n\n\t\tq = c.q = request.params.get('q', '')\n\t\tdata_dict = {'all_fields': True, 'q': q}\n\t\tsort_by = c.sort_by_selected = request.params.get('sort')\n\t\tif sort_by:\n\t\t\tdata_dict['sort'] = sort_by\n\t\ttry:\n\t\t\tself._check_access('site_read', context)\n\t\texcept NotAuthorized:\n\t\t\tabort(401, _('Not authorized to see this page'))\n\t\tif c.userobj:\n\t\t\tcontext['user_id'] = c.userobj.id\n\t\t\tcontext['user_is_admin'] = c.userobj.sysadmin\n\t\t\n\t\t\n\t\t\n\t\tc.group_package_stuff = caching.cached_get_group_package_stuff()\n\n\t\t##Removing groups without geojson for the map\n\t\tc.group_map = []\n\t\tfor gp in c.group_package_stuff:\n\t\t\tfor e in gp['extras']:\n\t\t\t\tif e['key'] == 'geojson' and e['value']:\n\t\t\t\t\tc.group_map.append(gp)\n\n\t\t#print c.group_package_stuff\n\n\t\tif c.userobj is not None:\n\t\t\tmsg = None\n\t\t\turl = h.url_for(controller='user', action='edit')\n\t\t\tis_google_id = \\\n\t\t\t\tc.userobj.name.startswith('https://www.google.com/accounts/o8/id')\n\t\t\tif not c.userobj.email and (is_google_id and not c.userobj.fullname):\n\t\t\t\tmsg = _(u'Please <a href=\"{link}\">update your profile</a>'\n\t\t\t\t\t\tu' and add your email address and your full name. '\n\t\t\t\t\t\tu'{site} uses your email address'\n\t\t\t\t\t\tu' if you need to reset your password.'.format(\n\t\t\t\t\t\t\tlink=url, site=g.site_title))\n\t\t\telif not c.userobj.email:\n\t\t\t\tmsg = _('Please <a href=\"%s\">update your profile</a>'\n\t\t\t\t\t\t' and add your email address. 
') % url + \\\n\t\t\t\t\t_('%s uses your email address'\n\t\t\t\t\t\t' if you need to reset your password.') \\\n\t\t\t\t\t% g.site_title\n\t\t\telif is_google_id and not c.userobj.fullname:\n\t\t\t\tmsg = _('Please <a href=\"%s\">update your profile</a>'\n\t\t\t\t\t\t' and add your full name.') % (url)\n\t\t\tif msg:\n\t\t\t\th.flash_notice(msg, allow_html=True)\n\n\t\treturn base.render('home/index.html', cache_force=True)\n\n\tdef _check_access(self, action_name, *args, **kw):\n\t\t''' select the correct group/org check_access '''\n\t\treturn check_access(self._replace_group_org(action_name), *args, **kw)\n\n\tdef _replace_group_org(self, string):\n\t\t''' substitute organization for group if this is an org'''\n\t\tif self.group_type == 'organization':\n\t\t\tstring = re.sub('^group', 'organization', string)\n\t\treturn string\n\n\tdef _action(self, action_name):\n\t\t''' select the correct group/org action '''\n\t\treturn get_action(self._replace_group_org(action_name))\n\t\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/splash_page.py"}, {"content": "import ckanext.hdx_theme.licenses as hdx_licenses\n\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as toolkit\nimport ckan.model.package as package\nimport ckan.model.license as license\nimport version\n\nimport ckanext.hdx_theme.caching as caching\nimport ckanext.hdx_theme.auth as auth\n\n\ndef run_on_startup():\n _generate_license_list()\n \n caching.cached_get_group_package_stuff()\n \n\ndef _generate_license_list():\n package.Package._license_register = license.LicenseRegister() \n package.Package._license_register.licenses = [\n license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),\n license.License(license.LicenseCreativeCommonsAttribution()),\n license.License(license.LicenseCreativeCommonsAttributionShareAlike()),\n license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),\n license.License(hdx_licenses.LicenseHdxMultiple()),\n license.License(hdx_licenses.LicenseHdxOther())\n ]\n\nclass HDXThemePlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.ITemplateHelpers)\n plugins.implements(plugins.IActions)\n plugins.implements(plugins.IAuthFunctions)\n plugins.implements(plugins.IGroupController, inherit=True)\n plugins.implements(plugins.IMiddleware, inherit=True)\n \n def update_config(self, config):\n toolkit.add_template_directory(config, 'templates')\n toolkit.add_public_directory(config, 'public')\n toolkit.add_resource('fanstatic', 'hdx_theme')\n \n\n def before_map(self, map):\n map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')\n map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')\n map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')\n map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')\n map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')\n map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')\n \n map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')\n \n map.connect('/organization/{id}/request_membership', controller='ckanext.hdx_theme.org_controller:HDXOrgController', action='request_membership')\n return map\n \n def 
create(self, entity):\n caching.invalidate_group_caches()\n\n def edit(self, entity):\n caching.invalidate_group_caches()\n\n def get_helpers(self):\n from ckanext.hdx_theme import helpers as hdx_helpers\n return {\n 'is_downloadable': hdx_helpers.is_downloadable,\n 'get_facet_items_dict':hdx_helpers.get_facet_items_dict,\n 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,\n 'get_filtered_params_list':hdx_helpers.get_filtered_params_list,\n 'get_last_revision_package':hdx_helpers.get_last_revision_package,\n 'get_last_modifier_user':hdx_helpers.get_last_modifier_user,\n 'get_last_revision_group':hdx_helpers.get_last_revision_group,\n 'get_group_followers':hdx_helpers.get_group_followers,\n 'get_group_members':hdx_helpers.get_group_members,\n 'markdown_extract_strip':hdx_helpers.markdown_extract_strip,\n 'render_date_from_concat_str':hdx_helpers.render_date_from_concat_str,\n 'hdx_version':hdx_helpers.hdx_version,\n 'hdx_build_nav_icon_with_message':hdx_helpers.hdx_build_nav_icon_with_message,\n 'hdx_num_of_new_related_items':hdx_helpers.hdx_num_of_new_related_items,\n 'hdx_get_extras_element':hdx_helpers.hdx_get_extras_element,\n 'hdx_get_user_info':hdx_helpers.hdx_get_user_info,\n 'hdx_linked_user':hdx_helpers.hdx_linked_user,\n 'hdx_show_singular_plural':hdx_helpers.hdx_show_singular_plural,\n 'hdx_member_roles_list':hdx_helpers.hdx_member_roles_list\n \n }\n \n def get_actions(self):\n from ckanext.hdx_theme import actions as hdx_actions\n return {\n 'organization_list_for_user':hdx_actions.organization_list_for_user, \n 'cached_group_list': hdx_actions.cached_group_list,\n 'hdx_basic_user_info': hdx_actions.hdx_basic_user_info,\n 'member_list': hdx_actions.member_list,\n 'hdx_get_sys_admins': hdx_actions.hdx_get_sys_admins\n \n }\n def get_auth_functions(self):\n return {\n 'hdx_basic_user_info': auth.hdx_basic_user_info\n }\n \n def make_middleware(self, app, config):\n run_on_startup()\n return app\n\n \n \n\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/plugin.py"}]}
| 3,041 | 545 |
gh_patches_debug_10862
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-964
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add HTTP user-agent to WSGI instrumentation
As discussed for ASGI in [#716 (comment)](https://github.com/open-telemetry/opentelemetry-python/pull/716/files#r428364828), the WSGI instrumentation should also set an attribute from the user-agent in a HTTP request.
</issue>
<code>
[start of instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """
15 This library provides a WSGI middleware that can be used on any WSGI framework
16 (such as Django / Flask) to track requests timing through OpenTelemetry.
17
18 Usage (Flask)
19 -------------
20
21 .. code-block:: python
22
23 from flask import Flask
24 from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware
25
26 app = Flask(__name__)
27 app.wsgi_app = OpenTelemetryMiddleware(app.wsgi_app)
28
29 @app.route("/")
30 def hello():
31 return "Hello!"
32
33 if __name__ == "__main__":
34 app.run(debug=True)
35
36
37 Usage (Django)
38 --------------
39
40 Modify the application's ``wsgi.py`` file as shown below.
41
42 .. code-block:: python
43
44 import os
45 from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware
46 from django.core.wsgi import get_wsgi_application
47
48 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings')
49
50 application = get_wsgi_application()
51 application = OpenTelemetryMiddleware(application)
52
53 API
54 ---
55 """
56
57 import functools
58 import typing
59 import wsgiref.util as wsgiref_util
60
61 from opentelemetry import context, propagators, trace
62 from opentelemetry.instrumentation.utils import http_status_to_canonical_code
63 from opentelemetry.instrumentation.wsgi.version import __version__
64 from opentelemetry.trace.status import Status, StatusCanonicalCode
65
66 _HTTP_VERSION_PREFIX = "HTTP/"
67
68
69 def get_header_from_environ(
70 environ: dict, header_name: str
71 ) -> typing.List[str]:
72 """Retrieve a HTTP header value from the PEP3333-conforming WSGI environ.
73
74 Returns:
75 A list with a single string with the header value if it exists, else an empty list.
76 """
77 environ_key = "HTTP_" + header_name.upper().replace("-", "_")
78 value = environ.get(environ_key)
79 if value is not None:
80 return [value]
81 return []
82
83
84 def setifnotnone(dic, key, value):
85 if value is not None:
86 dic[key] = value
87
88
89 def collect_request_attributes(environ):
90 """Collects HTTP request attributes from the PEP3333-conforming
91 WSGI environ and returns a dictionary to be used as span creation attributes."""
92
93 result = {
94 "component": "http",
95 "http.method": environ.get("REQUEST_METHOD"),
96 "http.server_name": environ.get("SERVER_NAME"),
97 "http.scheme": environ.get("wsgi.url_scheme"),
98 }
99
100 host_port = environ.get("SERVER_PORT")
101 if host_port is not None:
102 result.update({"host.port": int(host_port)})
103
104 setifnotnone(result, "http.host", environ.get("HTTP_HOST"))
105 target = environ.get("RAW_URI")
106 if target is None: # Note: `"" or None is None`
107 target = environ.get("REQUEST_URI")
108 if target is not None:
109 result["http.target"] = target
110 else:
111 result["http.url"] = wsgiref_util.request_uri(environ)
112
113 remote_addr = environ.get("REMOTE_ADDR")
114 if remote_addr:
115 result["net.peer.ip"] = remote_addr
116 remote_host = environ.get("REMOTE_HOST")
117 if remote_host and remote_host != remote_addr:
118 result["net.peer.name"] = remote_host
119
120 setifnotnone(result, "net.peer.port", environ.get("REMOTE_PORT"))
121 flavor = environ.get("SERVER_PROTOCOL", "")
122 if flavor.upper().startswith(_HTTP_VERSION_PREFIX):
123 flavor = flavor[len(_HTTP_VERSION_PREFIX) :]
124 if flavor:
125 result["http.flavor"] = flavor
126
127 return result
128
129
130 def add_response_attributes(
131 span, start_response_status, response_headers
132 ): # pylint: disable=unused-argument
133 """Adds HTTP response attributes to span using the arguments
134 passed to a PEP3333-conforming start_response callable."""
135
136 status_code, status_text = start_response_status.split(" ", 1)
137 span.set_attribute("http.status_text", status_text)
138
139 try:
140 status_code = int(status_code)
141 except ValueError:
142 span.set_status(
143 Status(
144 StatusCanonicalCode.UNKNOWN,
145 "Non-integer HTTP status: " + repr(status_code),
146 )
147 )
148 else:
149 span.set_attribute("http.status_code", status_code)
150 span.set_status(Status(http_status_to_canonical_code(status_code)))
151
152
153 def get_default_span_name(environ):
154 """Default implementation for name_callback, returns HTTP {METHOD_NAME}."""
155 return "HTTP {}".format(environ.get("REQUEST_METHOD", "")).strip()
156
157
158 class OpenTelemetryMiddleware:
159 """The WSGI application middleware.
160
161 This class is a PEP 3333 conforming WSGI middleware that starts and
162 annotates spans for any requests it is invoked with.
163
164 Args:
165 wsgi: The WSGI application callable to forward requests to.
166 name_callback: Callback which calculates a generic span name for an
167 incoming HTTP request based on the PEP3333 WSGI environ.
168 Optional: Defaults to get_default_span_name.
169 """
170
171 def __init__(self, wsgi, name_callback=get_default_span_name):
172 self.wsgi = wsgi
173 self.tracer = trace.get_tracer(__name__, __version__)
174 self.name_callback = name_callback
175
176 @staticmethod
177 def _create_start_response(span, start_response):
178 @functools.wraps(start_response)
179 def _start_response(status, response_headers, *args, **kwargs):
180 add_response_attributes(span, status, response_headers)
181 return start_response(status, response_headers, *args, **kwargs)
182
183 return _start_response
184
185 def __call__(self, environ, start_response):
186 """The WSGI application
187
188 Args:
189 environ: A WSGI environment.
190 start_response: The WSGI start_response callable.
191 """
192
193 token = context.attach(
194 propagators.extract(get_header_from_environ, environ)
195 )
196 span_name = self.name_callback(environ)
197
198 span = self.tracer.start_span(
199 span_name,
200 kind=trace.SpanKind.SERVER,
201 attributes=collect_request_attributes(environ),
202 )
203
204 try:
205 with self.tracer.use_span(span):
206 start_response = self._create_start_response(
207 span, start_response
208 )
209 iterable = self.wsgi(environ, start_response)
210 return _end_span_after_iterating(
211 iterable, span, self.tracer, token
212 )
213 except Exception as ex:
214 span.set_status(Status(StatusCanonicalCode.INTERNAL, str(ex)))
215 span.end()
216 context.detach(token)
217 raise
218
219
220 # Put this in a subfunction to not delay the call to the wrapped
221 # WSGI application (instrumentation should change the application
222 # behavior as little as possible).
223 def _end_span_after_iterating(iterable, span, tracer, token):
224 try:
225 with tracer.use_span(span):
226 for yielded in iterable:
227 yield yielded
228 finally:
229 close = getattr(iterable, "close", None)
230 if close:
231 close()
232 span.end()
233 context.detach(token)
234
[end of instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py
@@ -117,6 +117,10 @@
if remote_host and remote_host != remote_addr:
result["net.peer.name"] = remote_host
+ user_agent = environ.get("HTTP_USER_AGENT")
+ if user_agent is not None and len(user_agent) > 0:
+ result["http.user_agent"] = user_agent
+
setifnotnone(result, "net.peer.port", environ.get("REMOTE_PORT"))
flavor = environ.get("SERVER_PROTOCOL", "")
if flavor.upper().startswith(_HTTP_VERSION_PREFIX):
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py\n@@ -117,6 +117,10 @@\n if remote_host and remote_host != remote_addr:\n result[\"net.peer.name\"] = remote_host\n \n+ user_agent = environ.get(\"HTTP_USER_AGENT\")\n+ if user_agent is not None and len(user_agent) > 0:\n+ result[\"http.user_agent\"] = user_agent\n+\n setifnotnone(result, \"net.peer.port\", environ.get(\"REMOTE_PORT\"))\n flavor = environ.get(\"SERVER_PROTOCOL\", \"\")\n if flavor.upper().startswith(_HTTP_VERSION_PREFIX):\n", "issue": "Add HTTP user-agent to WSGI instrumentation\nAs discussed for ASGI in [#716 (comment)](https://github.com/open-telemetry/opentelemetry-python/pull/716/files#r428364828), the WSGI instrumentation should also set an attribute from the user-agent in a HTTP request. \n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis library provides a WSGI middleware that can be used on any WSGI framework\n(such as Django / Flask) to track requests timing through OpenTelemetry.\n\nUsage (Flask)\n-------------\n\n.. code-block:: python\n\n from flask import Flask\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n\n app = Flask(__name__)\n app.wsgi_app = OpenTelemetryMiddleware(app.wsgi_app)\n\n @app.route(\"/\")\n def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\n\nUsage (Django)\n--------------\n\nModify the application's ``wsgi.py`` file as shown below.\n\n.. 
code-block:: python\n\n import os\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n from django.core.wsgi import get_wsgi_application\n\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings')\n\n application = get_wsgi_application()\n application = OpenTelemetryMiddleware(application)\n\nAPI\n---\n\"\"\"\n\nimport functools\nimport typing\nimport wsgiref.util as wsgiref_util\n\nfrom opentelemetry import context, propagators, trace\nfrom opentelemetry.instrumentation.utils import http_status_to_canonical_code\nfrom opentelemetry.instrumentation.wsgi.version import __version__\nfrom opentelemetry.trace.status import Status, StatusCanonicalCode\n\n_HTTP_VERSION_PREFIX = \"HTTP/\"\n\n\ndef get_header_from_environ(\n environ: dict, header_name: str\n) -> typing.List[str]:\n \"\"\"Retrieve a HTTP header value from the PEP3333-conforming WSGI environ.\n\n Returns:\n A list with a single string with the header value if it exists, else an empty list.\n \"\"\"\n environ_key = \"HTTP_\" + header_name.upper().replace(\"-\", \"_\")\n value = environ.get(environ_key)\n if value is not None:\n return [value]\n return []\n\n\ndef setifnotnone(dic, key, value):\n if value is not None:\n dic[key] = value\n\n\ndef collect_request_attributes(environ):\n \"\"\"Collects HTTP request attributes from the PEP3333-conforming\n WSGI environ and returns a dictionary to be used as span creation attributes.\"\"\"\n\n result = {\n \"component\": \"http\",\n \"http.method\": environ.get(\"REQUEST_METHOD\"),\n \"http.server_name\": environ.get(\"SERVER_NAME\"),\n \"http.scheme\": environ.get(\"wsgi.url_scheme\"),\n }\n\n host_port = environ.get(\"SERVER_PORT\")\n if host_port is not None:\n result.update({\"host.port\": int(host_port)})\n\n setifnotnone(result, \"http.host\", environ.get(\"HTTP_HOST\"))\n target = environ.get(\"RAW_URI\")\n if target is None: # Note: `\"\" or None is None`\n target = environ.get(\"REQUEST_URI\")\n if target is not None:\n result[\"http.target\"] = target\n else:\n result[\"http.url\"] = wsgiref_util.request_uri(environ)\n\n remote_addr = environ.get(\"REMOTE_ADDR\")\n if remote_addr:\n result[\"net.peer.ip\"] = remote_addr\n remote_host = environ.get(\"REMOTE_HOST\")\n if remote_host and remote_host != remote_addr:\n result[\"net.peer.name\"] = remote_host\n\n setifnotnone(result, \"net.peer.port\", environ.get(\"REMOTE_PORT\"))\n flavor = environ.get(\"SERVER_PROTOCOL\", \"\")\n if flavor.upper().startswith(_HTTP_VERSION_PREFIX):\n flavor = flavor[len(_HTTP_VERSION_PREFIX) :]\n if flavor:\n result[\"http.flavor\"] = flavor\n\n return result\n\n\ndef add_response_attributes(\n span, start_response_status, response_headers\n): # pylint: disable=unused-argument\n \"\"\"Adds HTTP response attributes to span using the arguments\n passed to a PEP3333-conforming start_response callable.\"\"\"\n\n status_code, status_text = start_response_status.split(\" \", 1)\n span.set_attribute(\"http.status_text\", status_text)\n\n try:\n status_code = int(status_code)\n except ValueError:\n span.set_status(\n Status(\n StatusCanonicalCode.UNKNOWN,\n \"Non-integer HTTP status: \" + repr(status_code),\n )\n )\n else:\n span.set_attribute(\"http.status_code\", status_code)\n span.set_status(Status(http_status_to_canonical_code(status_code)))\n\n\ndef get_default_span_name(environ):\n \"\"\"Default implementation for name_callback, returns HTTP {METHOD_NAME}.\"\"\"\n return \"HTTP {}\".format(environ.get(\"REQUEST_METHOD\", \"\")).strip()\n\n\nclass 
OpenTelemetryMiddleware:\n \"\"\"The WSGI application middleware.\n\n This class is a PEP 3333 conforming WSGI middleware that starts and\n annotates spans for any requests it is invoked with.\n\n Args:\n wsgi: The WSGI application callable to forward requests to.\n name_callback: Callback which calculates a generic span name for an\n incoming HTTP request based on the PEP3333 WSGI environ.\n Optional: Defaults to get_default_span_name.\n \"\"\"\n\n def __init__(self, wsgi, name_callback=get_default_span_name):\n self.wsgi = wsgi\n self.tracer = trace.get_tracer(__name__, __version__)\n self.name_callback = name_callback\n\n @staticmethod\n def _create_start_response(span, start_response):\n @functools.wraps(start_response)\n def _start_response(status, response_headers, *args, **kwargs):\n add_response_attributes(span, status, response_headers)\n return start_response(status, response_headers, *args, **kwargs)\n\n return _start_response\n\n def __call__(self, environ, start_response):\n \"\"\"The WSGI application\n\n Args:\n environ: A WSGI environment.\n start_response: The WSGI start_response callable.\n \"\"\"\n\n token = context.attach(\n propagators.extract(get_header_from_environ, environ)\n )\n span_name = self.name_callback(environ)\n\n span = self.tracer.start_span(\n span_name,\n kind=trace.SpanKind.SERVER,\n attributes=collect_request_attributes(environ),\n )\n\n try:\n with self.tracer.use_span(span):\n start_response = self._create_start_response(\n span, start_response\n )\n iterable = self.wsgi(environ, start_response)\n return _end_span_after_iterating(\n iterable, span, self.tracer, token\n )\n except Exception as ex:\n span.set_status(Status(StatusCanonicalCode.INTERNAL, str(ex)))\n span.end()\n context.detach(token)\n raise\n\n\n# Put this in a subfunction to not delay the call to the wrapped\n# WSGI application (instrumentation should change the application\n# behavior as little as possible).\ndef _end_span_after_iterating(iterable, span, tracer, token):\n try:\n with tracer.use_span(span):\n for yielded in iterable:\n yield yielded\n finally:\n close = getattr(iterable, \"close\", None)\n if close:\n close()\n span.end()\n context.detach(token)\n", "path": "instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py"}]}
| 2,914 | 226 |
gh_patches_debug_37344
|
rasdani/github-patches
|
git_diff
|
sbi-dev__sbi-421
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fix progress bar and simulation batch size in simulate_in_batches
Two problems related to `simulate_in_batches`:
1) The progress bar fills up by sending jobs to the workers, i.e., before these are actually done. E.g., when running 100 parameters with batch size 10 and 10 workers, the progress bar is done instantly, but simulations may continue for minutes or hours. We should find a way to fill up the pbar only when the jobs are done.
2) The simulation batch size defaults to 1. This can make things very slow when one uses a simulator that cannot simulate multiple parameters at once (vectorised) and one use multiple workers. In this scenario each worker deals with a single simulation -- creating a lot of overhead. It would be much better (for me it gave a 10x speed up) to write a simple "batch"-simulator with a for loop and set the simulation batch size to 10 or 100 so that each worker call will run at least 10 or 100 simulations before going back. Actually, it could make sense to just set
```
sim_batch_size = int(num_simulations / num_workers)
```
in order to minimise overhead.
The only downside would be that the pbar becomes less informative. So one could think about reducing the batch size a bit to obtain more "steps" in the pbar.
Any thoughts?
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
5 # under the Affero General Public License v3, see <https://www.gnu.org/licenses/>.
6 #
7 # Note: To use the 'upload' functionality of this file, you must:
8 # $ pipenv install twine --dev
9
10 import io
11 import os
12 import sys
13 from shutil import rmtree
14
15 from setuptools import find_packages, setup, Command
16
17 # Package meta-data.
18 NAME = "sbi"
19 DESCRIPTION = "Simulation-based inference."
20 KEYWORDS = "bayesian parameter inference system_identification simulator PyTorch"
21 URL = "https://github.com/mackelab/sbi"
22 EMAIL = "[email protected]"
23 AUTHOR = "Álvaro Tejero-Cantero, Jakob H. Macke, Jan-Matthis Lückmann, Conor M. Durkan, Michael Deistler, Jan Bölts"
24 REQUIRES_PYTHON = ">=3.6.0"
25
26 REQUIRED = [
27 "joblib",
28 "matplotlib",
29 "numpy",
30 "pillow",
31 "pyknos>=0.14.0",
32 "pyro-ppl>=1.3.1",
33 "scipy",
34 "tensorboard",
35 "torch>=1.6.0",
36 "tqdm",
37 ]
38
39 EXTRAS = {
40 "dev": [
41 "autoflake",
42 "black",
43 "deepdiff",
44 "flake8",
45 "isort",
46 "jupyter",
47 "mkdocs",
48 "mkdocs-material",
49 "markdown-include",
50 "mkdocs-redirects",
51 "mkdocstrings",
52 "nbconvert",
53 "pep517",
54 "pytest",
55 "pyyaml",
56 "scikit-learn",
57 "torchtestcase",
58 "twine",
59 ],
60 }
61
62 here = os.path.abspath(os.path.dirname(__file__))
63
64 # Import the README and use it as the long-description.
65 try:
66 with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
67 long_description = "\n" + f.read()
68 except FileNotFoundError:
69 long_description = DESCRIPTION
70
71 # Load the package's __version__.py module as a dictionary.
72 about = {}
73 project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
74 with open(os.path.join(here, project_slug, "__version__.py")) as f:
75 exec(f.read(), about)
76
77
78 class UploadCommand(Command):
79 """Support setup.py upload."""
80
81 description = "Build and publish the package."
82 user_options = []
83
84 @staticmethod
85 def status(s):
86 """Prints things in bold."""
87 print("\033[1m{0}\033[0m".format(s))
88
89 def initialize_options(self):
90 pass
91
92 def finalize_options(self):
93 pass
94
95 def run(self):
96 try:
97 self.status("Removing previous builds…")
98 rmtree(os.path.join(here, "dist"))
99 except OSError:
100 pass
101
102 self.status("Building Source and Wheel (universal) distribution…")
103 os.system("{0} setup.py sdist bdist_wheel --universal".format(sys.executable))
104
105 self.status("Uploading the package to PyPI via Twine…")
106 os.system("twine upload dist/*")
107
108 self.status("Pushing git tags…")
109 os.system("git tag v{0}".format(about["__version__"]))
110 os.system("git push --tags")
111
112 sys.exit()
113
114
115 setup(
116 name=NAME,
117 version=about["__version__"],
118 description=DESCRIPTION,
119 keywords=KEYWORDS,
120 long_description=long_description,
121 long_description_content_type="text/markdown",
122 author=AUTHOR,
123 author_email=EMAIL,
124 python_requires=REQUIRES_PYTHON,
125 url=URL,
126 packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
127 install_requires=REQUIRED,
128 extras_require=EXTRAS,
129 include_package_data=True,
130 license="AGPLv3",
131 classifiers=[
132 # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
133 "Development Status :: 3 - Alpha",
134 "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
135 "Intended Audience :: Developers",
136 "Intended Audience :: Education",
137 "Intended Audience :: Science/Research",
138 "Topic :: Adaptive Technologies",
139 "Topic :: Scientific/Engineering",
140 "Topic :: Scientific/Engineering :: Artificial Intelligence",
141 "Topic :: Scientific/Engineering :: Mathematics",
142 "Programming Language :: Python",
143 "Programming Language :: Python :: 3",
144 "Programming Language :: Python :: 3.6",
145 "Programming Language :: Python :: 3.7",
146 "Programming Language :: Python :: 3.8",
147 ],
148 # $ setup.py publish support.
149 cmdclass=dict(upload=UploadCommand),
150 )
151
[end of setup.py]
[start of sbi/simulators/simutils.py]
1 # This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
2 # under the Affero General Public License v3, see <https://www.gnu.org/licenses/>.
3
4
5 from typing import Callable
6
7 import torch
8 from torch import Tensor
9 from tqdm.auto import tqdm
10 from joblib import Parallel, delayed
11
12
13 def simulate_in_batches(
14 simulator: Callable,
15 theta: Tensor,
16 sim_batch_size: int = 1,
17 num_workers: int = 1,
18 show_progress_bars: bool = True,
19 ) -> Tensor:
20 r"""
21 Return simulations $x$ for parameters $\theta$ conducted batchwise.
22
23 Parameters are batched with size `sim_batch_size` (default whole theta at once).
24 Multiprocessing is used when `num_workers > 1`.
25
26 Args:
27 simulator: Simulator callable (a function or a class with `__call__`).
28 theta: All parameters $\theta$ sampled from prior or posterior.
29 sim_batch_size: Number of simulations per batch. Default is to simulate
30 the entire theta in a single batch.
31 num_workers: Number of workers for multiprocessing.
32 show_progress_bars: Whether to show a progress bar during simulation.
33
34 Returns:
35 Parameters theta and simulations $x$.
36 """
37
38 num_sims, *_ = theta.shape
39
40 if num_sims == 0:
41 x = torch.tensor([])
42 elif sim_batch_size is not None and sim_batch_size < num_sims:
43 # Dev note: pyright complains of torch.split lacking a type stub
44 # as of PyTorch 1.4.0, see https://github.com/microsoft/pyright/issues/291
45 batches = torch.split(theta, sim_batch_size, dim=0)
46
47 if num_workers > 1:
48 # Parallelize the sequence of batches across workers.
49 # TODO: This usage of tqdm tracks the dispatching of jobs instead of the
50 # moment when they are done, resulting in waiting time at 100% in case the
51 # last jobs takes long. A potential solution can be found here: https://
52 # stackoverflow.com/a/61689175
53 simulation_outputs = Parallel(n_jobs=num_workers)(
54 delayed(simulator)(batch)
55 for batch in tqdm(
56 batches,
57 disable=not show_progress_bars,
58 desc=f"Running {num_sims} simulations in {len(batches)} batches.",
59 total=len(batches),
60 )
61 )
62 else:
63 pbar = tqdm(
64 total=num_sims,
65 disable=not show_progress_bars,
66 desc=f"Running {num_sims} simulations.",
67 )
68
69 with pbar:
70 simulation_outputs = []
71 for batch in batches:
72 simulation_outputs.append(simulator(batch))
73 pbar.update(sim_batch_size)
74
75 x = torch.cat(simulation_outputs, dim=0)
76 else:
77 x = simulator(theta)
78
79 return x
80
[end of sbi/simulators/simutils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sbi/simulators/simutils.py b/sbi/simulators/simutils.py
--- a/sbi/simulators/simutils.py
+++ b/sbi/simulators/simutils.py
@@ -8,6 +8,8 @@
from torch import Tensor
from tqdm.auto import tqdm
from joblib import Parallel, delayed
+import contextlib
+import joblib
def simulate_in_batches(
@@ -46,19 +48,19 @@
if num_workers > 1:
# Parallelize the sequence of batches across workers.
- # TODO: This usage of tqdm tracks the dispatching of jobs instead of the
- # moment when they are done, resulting in waiting time at 100% in case the
- # last jobs takes long. A potential solution can be found here: https://
- # stackoverflow.com/a/61689175
- simulation_outputs = Parallel(n_jobs=num_workers)(
- delayed(simulator)(batch)
- for batch in tqdm(
+ # We use the solution proposed here: https://stackoverflow.com/a/61689175
+ # to update the pbar only after the workers finished a task.
+ with tqdm_joblib(
+ tqdm(
batches,
disable=not show_progress_bars,
desc=f"Running {num_sims} simulations in {len(batches)} batches.",
total=len(batches),
)
- )
+ ) as progress_bar:
+ simulation_outputs = Parallel(n_jobs=num_workers)(
+ delayed(simulator)(batch) for batch in batches
+ )
else:
pbar = tqdm(
total=num_sims,
@@ -77,3 +79,28 @@
x = simulator(theta)
return x
+
+
[email protected]
+def tqdm_joblib(tqdm_object):
+ """Context manager to patch joblib to report into tqdm progress bar given as
+ argument
+
+ This wrapped context manager obtains the number of finished tasks from the tqdm
+ print function and uses it to update the pbar, as suggested in
+ https://stackoverflow.com/a/61689175. See #419, #421
+ """
+
+ def tqdm_print_progress(self):
+ if self.n_completed_tasks > tqdm_object.n:
+ n_completed = self.n_completed_tasks - tqdm_object.n
+ tqdm_object.update(n=n_completed)
+
+ original_print_progress = joblib.parallel.Parallel.print_progress
+ joblib.parallel.Parallel.print_progress = tqdm_print_progress
+
+ try:
+ yield tqdm_object
+ finally:
+ joblib.parallel.Parallel.print_progress = original_print_progress
+ tqdm_object.close()
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@
REQUIRES_PYTHON = ">=3.6.0"
REQUIRED = [
- "joblib",
+ "joblib>=1.0.0",
"matplotlib",
"numpy",
"pillow",
|
{"golden_diff": "diff --git a/sbi/simulators/simutils.py b/sbi/simulators/simutils.py\n--- a/sbi/simulators/simutils.py\n+++ b/sbi/simulators/simutils.py\n@@ -8,6 +8,8 @@\n from torch import Tensor\n from tqdm.auto import tqdm\n from joblib import Parallel, delayed\n+import contextlib\n+import joblib\n \n \n def simulate_in_batches(\n@@ -46,19 +48,19 @@\n \n if num_workers > 1:\n # Parallelize the sequence of batches across workers.\n- # TODO: This usage of tqdm tracks the dispatching of jobs instead of the\n- # moment when they are done, resulting in waiting time at 100% in case the\n- # last jobs takes long. A potential solution can be found here: https://\n- # stackoverflow.com/a/61689175\n- simulation_outputs = Parallel(n_jobs=num_workers)(\n- delayed(simulator)(batch)\n- for batch in tqdm(\n+ # We use the solution proposed here: https://stackoverflow.com/a/61689175\n+ # to update the pbar only after the workers finished a task.\n+ with tqdm_joblib(\n+ tqdm(\n batches,\n disable=not show_progress_bars,\n desc=f\"Running {num_sims} simulations in {len(batches)} batches.\",\n total=len(batches),\n )\n- )\n+ ) as progress_bar:\n+ simulation_outputs = Parallel(n_jobs=num_workers)(\n+ delayed(simulator)(batch) for batch in batches\n+ )\n else:\n pbar = tqdm(\n total=num_sims,\n@@ -77,3 +79,28 @@\n x = simulator(theta)\n \n return x\n+\n+\[email protected]\n+def tqdm_joblib(tqdm_object):\n+ \"\"\"Context manager to patch joblib to report into tqdm progress bar given as\n+ argument\n+\n+ This wrapped context manager obtains the number of finished tasks from the tqdm\n+ print function and uses it to update the pbar, as suggested in\n+ https://stackoverflow.com/a/61689175. See #419, #421\n+ \"\"\"\n+\n+ def tqdm_print_progress(self):\n+ if self.n_completed_tasks > tqdm_object.n:\n+ n_completed = self.n_completed_tasks - tqdm_object.n\n+ tqdm_object.update(n=n_completed)\n+\n+ original_print_progress = joblib.parallel.Parallel.print_progress\n+ joblib.parallel.Parallel.print_progress = tqdm_print_progress\n+\n+ try:\n+ yield tqdm_object\n+ finally:\n+ joblib.parallel.Parallel.print_progress = original_print_progress\n+ tqdm_object.close()\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -24,7 +24,7 @@\n REQUIRES_PYTHON = \">=3.6.0\"\n \n REQUIRED = [\n- \"joblib\",\n+ \"joblib>=1.0.0\",\n \"matplotlib\",\n \"numpy\",\n \"pillow\",\n", "issue": "fix progress bar and simulation batch size in simulate_in_batches\nTwo problems related to `simulate_in_batches`: \r\n\r\n1) The progress bar fills up by sending jobs to the workers, i.e., before these are actually done. E.g., when running 100 parameters with batch size 10 and 10 workers, the progress bar is done instantly, but simulations may continue for minutes or hours. We should find a way to fill up the pbar only when the jobs are done. \r\n\r\n2) The simulation batch size defaults to 1. This can make things very slow when one uses a simulator that cannot simulate multiple parameters at once (vectorised) and one use multiple workers. In this scenario each worker deals with a single simulation -- creating a lot of overhead. It would be much better (for me it gave a 10x speed up) to write a simple \"batch\"-simulator with a for loop and set the simulation batch size to 10 or 100 so that each worker call will run at least 10 or 100 simulations before going back. Actually, it could make sense to just set \r\n```\r\nsim_batch_size = int(num_simulations / num_workers)\r\n``` \r\nin order to minimise overhead. 
\r\nThe only downside would be that the pbar becomes less informative. So one could think about reducing the batch size a bit to obtain more \"steps\" in the pbar. \r\n\r\nAny thoughts? \n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed\n# under the Affero General Public License v3, see <https://www.gnu.org/licenses/>.\n#\n# Note: To use the 'upload' functionality of this file, you must:\n# $ pipenv install twine --dev\n\nimport io\nimport os\nimport sys\nfrom shutil import rmtree\n\nfrom setuptools import find_packages, setup, Command\n\n# Package meta-data.\nNAME = \"sbi\"\nDESCRIPTION = \"Simulation-based inference.\"\nKEYWORDS = \"bayesian parameter inference system_identification simulator PyTorch\"\nURL = \"https://github.com/mackelab/sbi\"\nEMAIL = \"[email protected]\"\nAUTHOR = \"\u00c1lvaro Tejero-Cantero, Jakob H. Macke, Jan-Matthis L\u00fcckmann, Conor M. Durkan, Michael Deistler, Jan B\u00f6lts\"\nREQUIRES_PYTHON = \">=3.6.0\"\n\nREQUIRED = [\n \"joblib\",\n \"matplotlib\",\n \"numpy\",\n \"pillow\",\n \"pyknos>=0.14.0\",\n \"pyro-ppl>=1.3.1\",\n \"scipy\",\n \"tensorboard\",\n \"torch>=1.6.0\",\n \"tqdm\",\n]\n\nEXTRAS = {\n \"dev\": [\n \"autoflake\",\n \"black\",\n \"deepdiff\",\n \"flake8\",\n \"isort\",\n \"jupyter\",\n \"mkdocs\",\n \"mkdocs-material\",\n \"markdown-include\",\n \"mkdocs-redirects\",\n \"mkdocstrings\",\n \"nbconvert\",\n \"pep517\",\n \"pytest\",\n \"pyyaml\",\n \"scikit-learn\",\n \"torchtestcase\",\n \"twine\",\n ],\n}\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# Import the README and use it as the long-description.\ntry:\n with io.open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = \"\\n\" + f.read()\nexcept FileNotFoundError:\n long_description = DESCRIPTION\n\n# Load the package's __version__.py module as a dictionary.\nabout = {}\nproject_slug = NAME.lower().replace(\"-\", \"_\").replace(\" \", \"_\")\nwith open(os.path.join(here, project_slug, \"__version__.py\")) as f:\n exec(f.read(), about)\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n\n description = \"Build and publish the package.\"\n user_options = []\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print(\"\\033[1m{0}\\033[0m\".format(s))\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n self.status(\"Removing previous builds\u2026\")\n rmtree(os.path.join(here, \"dist\"))\n except OSError:\n pass\n\n self.status(\"Building Source and Wheel (universal) distribution\u2026\")\n os.system(\"{0} setup.py sdist bdist_wheel --universal\".format(sys.executable))\n\n self.status(\"Uploading the package to PyPI via Twine\u2026\")\n os.system(\"twine upload dist/*\")\n\n self.status(\"Pushing git tags\u2026\")\n os.system(\"git tag v{0}\".format(about[\"__version__\"]))\n os.system(\"git push --tags\")\n\n sys.exit()\n\n\nsetup(\n name=NAME,\n version=about[\"__version__\"],\n description=DESCRIPTION,\n keywords=KEYWORDS,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=AUTHOR,\n author_email=EMAIL,\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=[\"tests\", \"*.tests\", \"*.tests.*\", \"tests.*\"]),\n install_requires=REQUIRED,\n extras_require=EXTRAS,\n include_package_data=True,\n license=\"AGPLv3\",\n classifiers=[\n # Full list: 
https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"Development Status :: 3 - Alpha\",\n \"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Adaptive Technologies\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n # $ setup.py publish support.\n cmdclass=dict(upload=UploadCommand),\n)\n", "path": "setup.py"}, {"content": "# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed\n# under the Affero General Public License v3, see <https://www.gnu.org/licenses/>.\n\n\nfrom typing import Callable\n\nimport torch\nfrom torch import Tensor\nfrom tqdm.auto import tqdm\nfrom joblib import Parallel, delayed\n\n\ndef simulate_in_batches(\n simulator: Callable,\n theta: Tensor,\n sim_batch_size: int = 1,\n num_workers: int = 1,\n show_progress_bars: bool = True,\n) -> Tensor:\n r\"\"\"\n Return simulations $x$ for parameters $\\theta$ conducted batchwise.\n\n Parameters are batched with size `sim_batch_size` (default whole theta at once).\n Multiprocessing is used when `num_workers > 1`.\n\n Args:\n simulator: Simulator callable (a function or a class with `__call__`).\n theta: All parameters $\\theta$ sampled from prior or posterior.\n sim_batch_size: Number of simulations per batch. Default is to simulate\n the entire theta in a single batch.\n num_workers: Number of workers for multiprocessing.\n show_progress_bars: Whether to show a progress bar during simulation.\n\n Returns:\n Parameters theta and simulations $x$.\n \"\"\"\n\n num_sims, *_ = theta.shape\n\n if num_sims == 0:\n x = torch.tensor([])\n elif sim_batch_size is not None and sim_batch_size < num_sims:\n # Dev note: pyright complains of torch.split lacking a type stub\n # as of PyTorch 1.4.0, see https://github.com/microsoft/pyright/issues/291\n batches = torch.split(theta, sim_batch_size, dim=0)\n\n if num_workers > 1:\n # Parallelize the sequence of batches across workers.\n # TODO: This usage of tqdm tracks the dispatching of jobs instead of the\n # moment when they are done, resulting in waiting time at 100% in case the\n # last jobs takes long. A potential solution can be found here: https://\n # stackoverflow.com/a/61689175\n simulation_outputs = Parallel(n_jobs=num_workers)(\n delayed(simulator)(batch)\n for batch in tqdm(\n batches,\n disable=not show_progress_bars,\n desc=f\"Running {num_sims} simulations in {len(batches)} batches.\",\n total=len(batches),\n )\n )\n else:\n pbar = tqdm(\n total=num_sims,\n disable=not show_progress_bars,\n desc=f\"Running {num_sims} simulations.\",\n )\n\n with pbar:\n simulation_outputs = []\n for batch in batches:\n simulation_outputs.append(simulator(batch))\n pbar.update(sim_batch_size)\n\n x = torch.cat(simulation_outputs, dim=0)\n else:\n x = simulator(theta)\n\n return x\n", "path": "sbi/simulators/simutils.py"}]}
| 3,076 | 692 |
gh_patches_debug_12895
|
rasdani/github-patches
|
git_diff
|
great-expectations__great_expectations-4066
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
</issue>
<code>
[start of scripts/build_glossary_page.py]
1 import json
2
3
4 def build_glossary_tuples(source_json_path):
5 # Open the source_json_path
6 with open(source_json_path) as json_file:
7 # Read the contents as a json
8 data = json.load(json_file)
9 # Convert to list of tuples containing ("term", "definition", "url")
10 data_list = [(x['term'], x['definition']) for x in data.values()]
11 # Order list alphabetically by "term"
12 data_list.sort(key=lambda y: y[0])
13 # return the ordered list.
14 return data_list
15
16
17 def build_glossary_page(orderd_list_of_terms_tuples, glossary_file_path):
18 # Open the glossary page for editing
19 with open(glossary_file_path, "w") as glossary_file:
20 # Write the glossary page header
21 glossary_file.write("---\nid: glossary\ntitle: \"Glossary of Terms\"\n---\n")
22 # iterate the glossary list of tuples and write glossary entries.
23 for term, definition in orderd_list_of_terms_tuples:
24 glossary_file.write(f"**{term}:** {definition}\n\n")
25
26
27 def all_together_now(source_json_path, glossary_file_path):
28 list_of_terms_tuples = build_glossary_tuples(source_json_path)
29 build_glossary_page(list_of_terms_tuples, glossary_file_path)
30
31
32 if __name__ == '__main__':
33 all_together_now(
34 source_json_path="../docs/term_tags/terms.json",
35 glossary_file_path="../docs/glossary.md"
36 )
37
[end of scripts/build_glossary_page.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/build_glossary_page.py b/scripts/build_glossary_page.py
--- a/scripts/build_glossary_page.py
+++ b/scripts/build_glossary_page.py
@@ -18,7 +18,7 @@
# Open the glossary page for editing
with open(glossary_file_path, "w") as glossary_file:
# Write the glossary page header
- glossary_file.write("---\nid: glossary\ntitle: \"Glossary of Terms\"\n---\n")
+ glossary_file.write("---\nid: glossary\ntitle: \"Glossary of Terms\"\n---\n\n")
# iterate the glossary list of tuples and write glossary entries.
for term, definition in orderd_list_of_terms_tuples:
glossary_file.write(f"**{term}:** {definition}\n\n")
|
{"golden_diff": "diff --git a/scripts/build_glossary_page.py b/scripts/build_glossary_page.py\n--- a/scripts/build_glossary_page.py\n+++ b/scripts/build_glossary_page.py\n@@ -18,7 +18,7 @@\n # Open the glossary page for editing\n with open(glossary_file_path, \"w\") as glossary_file:\n # Write the glossary page header\n- glossary_file.write(\"---\\nid: glossary\\ntitle: \\\"Glossary of Terms\\\"\\n---\\n\")\n+ glossary_file.write(\"---\\nid: glossary\\ntitle: \\\"Glossary of Terms\\\"\\n---\\n\\n\")\n # iterate the glossary list of tuples and write glossary entries.\n for term, definition in orderd_list_of_terms_tuples:\n glossary_file.write(f\"**{term}:** {definition}\\n\\n\")\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import json\n\n\ndef build_glossary_tuples(source_json_path):\n # Open the source_json_path\n with open(source_json_path) as json_file:\n # Read the contents as a json\n data = json.load(json_file)\n # Convert to list of tuples containing (\"term\", \"definition\", \"url\")\n data_list = [(x['term'], x['definition']) for x in data.values()]\n # Order list alphabetically by \"term\"\n data_list.sort(key=lambda y: y[0])\n # return the ordered list.\n return data_list\n\n\ndef build_glossary_page(orderd_list_of_terms_tuples, glossary_file_path):\n # Open the glossary page for editing\n with open(glossary_file_path, \"w\") as glossary_file:\n # Write the glossary page header\n glossary_file.write(\"---\\nid: glossary\\ntitle: \\\"Glossary of Terms\\\"\\n---\\n\")\n # iterate the glossary list of tuples and write glossary entries.\n for term, definition in orderd_list_of_terms_tuples:\n glossary_file.write(f\"**{term}:** {definition}\\n\\n\")\n\n\ndef all_together_now(source_json_path, glossary_file_path):\n list_of_terms_tuples = build_glossary_tuples(source_json_path)\n build_glossary_page(list_of_terms_tuples, glossary_file_path)\n\n\nif __name__ == '__main__':\n all_together_now(\n source_json_path=\"../docs/term_tags/terms.json\",\n glossary_file_path=\"../docs/glossary.md\"\n )\n", "path": "scripts/build_glossary_page.py"}]}
| 974 | 191 |
gh_patches_debug_35189
|
rasdani/github-patches
|
git_diff
|
StackStorm__st2-2506
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
API returns 400 for some requests due to "bad" Content-type header
It seems that _some_ API endpoints will return a HTTP 400 when presented with a header such as:
```
Content-type: application/json; charset=utf-8
```
but works fine with:
```
Content-type: application/json
```
I ran into this with a POST to the URL `/webhooks/st2`. I'm currently running version `1.4dev-52`
</issue>
<code>
[start of st2common/st2common/util/http.py]
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import six
17
18 http_client = six.moves.http_client
19
20 HTTP_SUCCESS = [http_client.OK, http_client.CREATED, http_client.ACCEPTED,
21 http_client.NON_AUTHORITATIVE_INFORMATION, http_client.NO_CONTENT,
22 http_client.RESET_CONTENT, http_client.PARTIAL_CONTENT,
23 http_client.MULTI_STATUS, http_client.IM_USED,
24 ]
25
[end of st2common/st2common/util/http.py]
[start of st2api/st2api/controllers/v1/webhooks.py]
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 try:
17 import simplejson as json
18 except ImportError:
19 import json
20
21 import six
22 import pecan
23 import uuid
24 from pecan import abort
25 from pecan.rest import RestController
26 from six.moves.urllib import parse as urlparse
27 urljoin = urlparse.urljoin
28
29 from st2common import log as logging
30 from st2common.constants.triggers import WEBHOOK_TRIGGER_TYPES
31 from st2common.models.api.base import jsexpose
32 from st2common.models.api.trace import TraceContext
33 import st2common.services.triggers as trigger_service
34 from st2common.services.triggerwatcher import TriggerWatcher
35 from st2common.transport.reactor import TriggerDispatcher
36 from st2common.rbac.types import PermissionType
37 from st2common.rbac.decorators import request_user_has_webhook_permission
38
39 http_client = six.moves.http_client
40
41 LOG = logging.getLogger(__name__)
42
43 TRACE_TAG_HEADER = 'St2-Trace-Tag'
44
45
46 class WebhooksController(RestController):
47 def __init__(self, *args, **kwargs):
48 super(WebhooksController, self).__init__(*args, **kwargs)
49 self._hooks = {}
50 self._base_url = '/webhooks/'
51 self._trigger_types = WEBHOOK_TRIGGER_TYPES.keys()
52
53 self._trigger_dispatcher = TriggerDispatcher(LOG)
54 queue_suffix = self.__class__.__name__
55 self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
56 update_handler=self._handle_update_trigger,
57 delete_handler=self._handle_delete_trigger,
58 trigger_types=self._trigger_types,
59 queue_suffix=queue_suffix,
60 exclusive=True)
61 self._trigger_watcher.start()
62 self._register_webhook_trigger_types()
63
64 @jsexpose()
65 def get_all(self):
66 # Return only the hooks known by this controller.
67 return [trigger for trigger in six.itervalues(self._hooks)]
68
69 @jsexpose()
70 def get_one(self, name):
71 hook = self._hooks.get(name, None)
72
73 if not hook:
74 abort(http_client.NOT_FOUND)
75 return
76
77 return hook
78
79 @request_user_has_webhook_permission(permission_type=PermissionType.WEBHOOK_SEND)
80 @jsexpose(arg_types=[str], status_code=http_client.ACCEPTED)
81 def post(self, *args, **kwargs):
82 hook = '/'.join(args) # TODO: There must be a better way to do this.
83
84 # Note: For backward compatibility reasons we default to application/json if content
85 # type is not explicitly provided
86 content_type = pecan.request.headers.get('Content-Type', 'application/json')
87 body = pecan.request.body
88
89 try:
90 body = self._parse_request_body(content_type=content_type, body=body)
91 except Exception as e:
92 self._log_request('Failed to parse request body: %s.' % (str(e)), pecan.request)
93 msg = 'Failed to parse request body "%s": %s' % (body, str(e))
94 return pecan.abort(http_client.BAD_REQUEST, msg)
95
96 headers = self._get_headers_as_dict(pecan.request.headers)
97 # If webhook contains a trace-tag use that else create create a unique trace-tag.
98 trace_context = self._create_trace_context(trace_tag=headers.pop(TRACE_TAG_HEADER, None),
99 hook=hook)
100
101 if hook == 'st2' or hook == 'st2/':
102 return self._handle_st2_webhook(body, trace_context=trace_context)
103
104 if not self._is_valid_hook(hook):
105 self._log_request('Invalid hook.', pecan.request)
106 msg = 'Webhook %s not registered with st2' % hook
107 return pecan.abort(http_client.NOT_FOUND, msg)
108
109 trigger = self._get_trigger_for_hook(hook)
110 payload = {}
111
112 payload['headers'] = headers
113 payload['body'] = body
114 self._trigger_dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)
115
116 return body
117
118 def _parse_request_body(self, content_type, body):
119 if content_type == 'application/json':
120 self._log_request('Parsing request body as JSON', request=pecan.request)
121 body = json.loads(body)
122 elif content_type in ['application/x-www-form-urlencoded', 'multipart/form-data']:
123 self._log_request('Parsing request body as form encoded data', request=pecan.request)
124 body = urlparse.parse_qs(body)
125 else:
126 raise ValueError('Unsupported Content-Type: "%s"' % (content_type))
127
128 return body
129
130 def _handle_st2_webhook(self, body, trace_context):
131 trigger = body.get('trigger', None)
132 payload = body.get('payload', None)
133 if not trigger:
134 msg = 'Trigger not specified.'
135 return pecan.abort(http_client.BAD_REQUEST, msg)
136 self._trigger_dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)
137
138 return body
139
140 def _is_valid_hook(self, hook):
141 # TODO: Validate hook payload with payload_schema.
142 return hook in self._hooks
143
144 def _get_trigger_for_hook(self, hook):
145 return self._hooks[hook]
146
147 def _register_webhook_trigger_types(self):
148 for trigger_type in WEBHOOK_TRIGGER_TYPES.values():
149 trigger_service.create_trigger_type_db(trigger_type)
150
151 def _create_trace_context(self, trace_tag, hook):
152 # if no trace_tag then create a unique one
153 if not trace_tag:
154 trace_tag = 'webhook-%s-%s' % (hook, uuid.uuid4().hex)
155 return TraceContext(trace_tag=trace_tag)
156
157 def add_trigger(self, trigger):
158 # Note: Permission checking for creating and deleting a webhook is done during rule
159 # creation
160 url = trigger['parameters']['url']
161 LOG.info('Listening to endpoint: %s', urljoin(self._base_url, url))
162 self._hooks[url] = trigger
163
164 def update_trigger(self, trigger):
165 pass
166
167 def remove_trigger(self, trigger):
168 # Note: Permission checking for creating and deleting a webhook is done during rule
169 # creation
170 url = trigger['parameters']['url']
171
172 if url in self._hooks:
173 LOG.info('Stop listening to endpoint: %s', urljoin(self._base_url, url))
174 del self._hooks[url]
175
176 def _get_headers_as_dict(self, headers):
177 headers_dict = {}
178 for key, value in headers.items():
179 headers_dict[key] = value
180 return headers_dict
181
182 def _log_request(self, msg, request, log_method=LOG.debug):
183 headers = self._get_headers_as_dict(request.headers)
184 body = str(request.body)
185 log_method('%s\n\trequest.header: %s.\n\trequest.body: %s.', msg, headers, body)
186
187 ##############################################
188 # Event handler methods for the trigger events
189 ##############################################
190
191 def _handle_create_trigger(self, trigger):
192 LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
193 trigger = self._sanitize_trigger(trigger=trigger)
194 self.add_trigger(trigger=trigger)
195
196 def _handle_update_trigger(self, trigger):
197 LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
198 trigger = self._sanitize_trigger(trigger=trigger)
199 self.update_trigger(trigger=trigger)
200
201 def _handle_delete_trigger(self, trigger):
202 LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
203 trigger = self._sanitize_trigger(trigger=trigger)
204 self.remove_trigger(trigger=trigger)
205
206 def _sanitize_trigger(self, trigger):
207 sanitized = trigger._data
208 if 'id' in sanitized:
209 # Friendly objectid rather than the MongoEngine representation.
210 sanitized['id'] = str(sanitized['id'])
211 return sanitized
212
[end of st2api/st2api/controllers/v1/webhooks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/st2api/st2api/controllers/v1/webhooks.py b/st2api/st2api/controllers/v1/webhooks.py
--- a/st2api/st2api/controllers/v1/webhooks.py
+++ b/st2api/st2api/controllers/v1/webhooks.py
@@ -33,6 +33,7 @@
import st2common.services.triggers as trigger_service
from st2common.services.triggerwatcher import TriggerWatcher
from st2common.transport.reactor import TriggerDispatcher
+from st2common.util.http import parse_content_type_header
from st2common.rbac.types import PermissionType
from st2common.rbac.decorators import request_user_has_webhook_permission
@@ -84,6 +85,7 @@
# Note: For backward compatibility reasons we default to application/json if content
# type is not explicitly provided
content_type = pecan.request.headers.get('Content-Type', 'application/json')
+ content_type = parse_content_type_header(content_type=content_type)[0]
body = pecan.request.body
try:
diff --git a/st2common/st2common/util/http.py b/st2common/st2common/util/http.py
--- a/st2common/st2common/util/http.py
+++ b/st2common/st2common/util/http.py
@@ -17,8 +17,43 @@
http_client = six.moves.http_client
+__all__ = [
+ 'HTTP_SUCCESS',
+ 'parse_content_type_header'
+]
+
HTTP_SUCCESS = [http_client.OK, http_client.CREATED, http_client.ACCEPTED,
http_client.NON_AUTHORITATIVE_INFORMATION, http_client.NO_CONTENT,
http_client.RESET_CONTENT, http_client.PARTIAL_CONTENT,
http_client.MULTI_STATUS, http_client.IM_USED,
]
+
+
+def parse_content_type_header(content_type):
+ """
+ Parse and normalize request content type and return a tuple with the content type and the
+ options.
+
+ :rype: ``tuple``
+ """
+ if ';' in content_type:
+ split = content_type.split(';')
+ media = split[0]
+ options = {}
+
+ for pair in split[1:]:
+ split_pair = pair.split('=', 1)
+
+ if len(split_pair) != 2:
+ continue
+
+ key = split_pair[0].strip()
+ value = split_pair[1].strip()
+
+ options[key] = value
+ else:
+ media = content_type
+ options = {}
+
+ result = (media, options)
+ return result
|
{"golden_diff": "diff --git a/st2api/st2api/controllers/v1/webhooks.py b/st2api/st2api/controllers/v1/webhooks.py\n--- a/st2api/st2api/controllers/v1/webhooks.py\n+++ b/st2api/st2api/controllers/v1/webhooks.py\n@@ -33,6 +33,7 @@\n import st2common.services.triggers as trigger_service\n from st2common.services.triggerwatcher import TriggerWatcher\n from st2common.transport.reactor import TriggerDispatcher\n+from st2common.util.http import parse_content_type_header\n from st2common.rbac.types import PermissionType\n from st2common.rbac.decorators import request_user_has_webhook_permission\n \n@@ -84,6 +85,7 @@\n # Note: For backward compatibility reasons we default to application/json if content\n # type is not explicitly provided\n content_type = pecan.request.headers.get('Content-Type', 'application/json')\n+ content_type = parse_content_type_header(content_type=content_type)[0]\n body = pecan.request.body\n \n try:\ndiff --git a/st2common/st2common/util/http.py b/st2common/st2common/util/http.py\n--- a/st2common/st2common/util/http.py\n+++ b/st2common/st2common/util/http.py\n@@ -17,8 +17,43 @@\n \n http_client = six.moves.http_client\n \n+__all__ = [\n+ 'HTTP_SUCCESS',\n+ 'parse_content_type_header'\n+]\n+\n HTTP_SUCCESS = [http_client.OK, http_client.CREATED, http_client.ACCEPTED,\n http_client.NON_AUTHORITATIVE_INFORMATION, http_client.NO_CONTENT,\n http_client.RESET_CONTENT, http_client.PARTIAL_CONTENT,\n http_client.MULTI_STATUS, http_client.IM_USED,\n ]\n+\n+\n+def parse_content_type_header(content_type):\n+ \"\"\"\n+ Parse and normalize request content type and return a tuple with the content type and the\n+ options.\n+\n+ :rype: ``tuple``\n+ \"\"\"\n+ if ';' in content_type:\n+ split = content_type.split(';')\n+ media = split[0]\n+ options = {}\n+\n+ for pair in split[1:]:\n+ split_pair = pair.split('=', 1)\n+\n+ if len(split_pair) != 2:\n+ continue\n+\n+ key = split_pair[0].strip()\n+ value = split_pair[1].strip()\n+\n+ options[key] = value\n+ else:\n+ media = content_type\n+ options = {}\n+\n+ result = (media, options)\n+ return result\n", "issue": "API returns 400 for some requests due to \"bad\" Content-type header\nIt seems that _some_ API endpoints will return a HTTP 400 when presented with a header such as:\n\n```\nContent-type: application/json; charset=utf-8\n```\n\nbut works fine with:\n\n```\nContent-type: application/json\n```\n\nI ran into this with a POST to the URL `/webhooks/st2`. I'm currently running version `1.4dev-52`\n\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport six\n\nhttp_client = six.moves.http_client\n\nHTTP_SUCCESS = [http_client.OK, http_client.CREATED, http_client.ACCEPTED,\n http_client.NON_AUTHORITATIVE_INFORMATION, http_client.NO_CONTENT,\n http_client.RESET_CONTENT, http_client.PARTIAL_CONTENT,\n http_client.MULTI_STATUS, http_client.IM_USED,\n ]\n", "path": "st2common/st2common/util/http.py"}, {"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\nimport six\nimport pecan\nimport uuid\nfrom pecan import abort\nfrom pecan.rest import RestController\nfrom six.moves.urllib import parse as urlparse\nurljoin = urlparse.urljoin\n\nfrom st2common import log as logging\nfrom st2common.constants.triggers import WEBHOOK_TRIGGER_TYPES\nfrom st2common.models.api.base import jsexpose\nfrom st2common.models.api.trace import TraceContext\nimport st2common.services.triggers as trigger_service\nfrom st2common.services.triggerwatcher import TriggerWatcher\nfrom st2common.transport.reactor import TriggerDispatcher\nfrom st2common.rbac.types import PermissionType\nfrom st2common.rbac.decorators import request_user_has_webhook_permission\n\nhttp_client = six.moves.http_client\n\nLOG = logging.getLogger(__name__)\n\nTRACE_TAG_HEADER = 'St2-Trace-Tag'\n\n\nclass WebhooksController(RestController):\n def __init__(self, *args, **kwargs):\n super(WebhooksController, self).__init__(*args, **kwargs)\n self._hooks = {}\n self._base_url = '/webhooks/'\n self._trigger_types = WEBHOOK_TRIGGER_TYPES.keys()\n\n self._trigger_dispatcher = TriggerDispatcher(LOG)\n queue_suffix = self.__class__.__name__\n self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,\n update_handler=self._handle_update_trigger,\n delete_handler=self._handle_delete_trigger,\n trigger_types=self._trigger_types,\n queue_suffix=queue_suffix,\n exclusive=True)\n self._trigger_watcher.start()\n self._register_webhook_trigger_types()\n\n @jsexpose()\n def get_all(self):\n # Return only the hooks known by this controller.\n return [trigger for trigger in six.itervalues(self._hooks)]\n\n @jsexpose()\n def get_one(self, name):\n hook = self._hooks.get(name, None)\n\n if not hook:\n abort(http_client.NOT_FOUND)\n return\n\n return hook\n\n @request_user_has_webhook_permission(permission_type=PermissionType.WEBHOOK_SEND)\n @jsexpose(arg_types=[str], 
status_code=http_client.ACCEPTED)\n def post(self, *args, **kwargs):\n hook = '/'.join(args) # TODO: There must be a better way to do this.\n\n # Note: For backward compatibility reasons we default to application/json if content\n # type is not explicitly provided\n content_type = pecan.request.headers.get('Content-Type', 'application/json')\n body = pecan.request.body\n\n try:\n body = self._parse_request_body(content_type=content_type, body=body)\n except Exception as e:\n self._log_request('Failed to parse request body: %s.' % (str(e)), pecan.request)\n msg = 'Failed to parse request body \"%s\": %s' % (body, str(e))\n return pecan.abort(http_client.BAD_REQUEST, msg)\n\n headers = self._get_headers_as_dict(pecan.request.headers)\n # If webhook contains a trace-tag use that else create create a unique trace-tag.\n trace_context = self._create_trace_context(trace_tag=headers.pop(TRACE_TAG_HEADER, None),\n hook=hook)\n\n if hook == 'st2' or hook == 'st2/':\n return self._handle_st2_webhook(body, trace_context=trace_context)\n\n if not self._is_valid_hook(hook):\n self._log_request('Invalid hook.', pecan.request)\n msg = 'Webhook %s not registered with st2' % hook\n return pecan.abort(http_client.NOT_FOUND, msg)\n\n trigger = self._get_trigger_for_hook(hook)\n payload = {}\n\n payload['headers'] = headers\n payload['body'] = body\n self._trigger_dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)\n\n return body\n\n def _parse_request_body(self, content_type, body):\n if content_type == 'application/json':\n self._log_request('Parsing request body as JSON', request=pecan.request)\n body = json.loads(body)\n elif content_type in ['application/x-www-form-urlencoded', 'multipart/form-data']:\n self._log_request('Parsing request body as form encoded data', request=pecan.request)\n body = urlparse.parse_qs(body)\n else:\n raise ValueError('Unsupported Content-Type: \"%s\"' % (content_type))\n\n return body\n\n def _handle_st2_webhook(self, body, trace_context):\n trigger = body.get('trigger', None)\n payload = body.get('payload', None)\n if not trigger:\n msg = 'Trigger not specified.'\n return pecan.abort(http_client.BAD_REQUEST, msg)\n self._trigger_dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)\n\n return body\n\n def _is_valid_hook(self, hook):\n # TODO: Validate hook payload with payload_schema.\n return hook in self._hooks\n\n def _get_trigger_for_hook(self, hook):\n return self._hooks[hook]\n\n def _register_webhook_trigger_types(self):\n for trigger_type in WEBHOOK_TRIGGER_TYPES.values():\n trigger_service.create_trigger_type_db(trigger_type)\n\n def _create_trace_context(self, trace_tag, hook):\n # if no trace_tag then create a unique one\n if not trace_tag:\n trace_tag = 'webhook-%s-%s' % (hook, uuid.uuid4().hex)\n return TraceContext(trace_tag=trace_tag)\n\n def add_trigger(self, trigger):\n # Note: Permission checking for creating and deleting a webhook is done during rule\n # creation\n url = trigger['parameters']['url']\n LOG.info('Listening to endpoint: %s', urljoin(self._base_url, url))\n self._hooks[url] = trigger\n\n def update_trigger(self, trigger):\n pass\n\n def remove_trigger(self, trigger):\n # Note: Permission checking for creating and deleting a webhook is done during rule\n # creation\n url = trigger['parameters']['url']\n\n if url in self._hooks:\n LOG.info('Stop listening to endpoint: %s', urljoin(self._base_url, url))\n del self._hooks[url]\n\n def _get_headers_as_dict(self, headers):\n headers_dict = {}\n for key, 
value in headers.items():\n headers_dict[key] = value\n return headers_dict\n\n def _log_request(self, msg, request, log_method=LOG.debug):\n headers = self._get_headers_as_dict(request.headers)\n body = str(request.body)\n log_method('%s\\n\\trequest.header: %s.\\n\\trequest.body: %s.', msg, headers, body)\n\n ##############################################\n # Event handler methods for the trigger events\n ##############################################\n\n def _handle_create_trigger(self, trigger):\n LOG.debug('Calling \"add_trigger\" method (trigger.type=%s)' % (trigger.type))\n trigger = self._sanitize_trigger(trigger=trigger)\n self.add_trigger(trigger=trigger)\n\n def _handle_update_trigger(self, trigger):\n LOG.debug('Calling \"update_trigger\" method (trigger.type=%s)' % (trigger.type))\n trigger = self._sanitize_trigger(trigger=trigger)\n self.update_trigger(trigger=trigger)\n\n def _handle_delete_trigger(self, trigger):\n LOG.debug('Calling \"remove_trigger\" method (trigger.type=%s)' % (trigger.type))\n trigger = self._sanitize_trigger(trigger=trigger)\n self.remove_trigger(trigger=trigger)\n\n def _sanitize_trigger(self, trigger):\n sanitized = trigger._data\n if 'id' in sanitized:\n # Friendly objectid rather than the MongoEngine representation.\n sanitized['id'] = str(sanitized['id'])\n return sanitized\n", "path": "st2api/st2api/controllers/v1/webhooks.py"}]}
| 3,337 | 560 |
gh_patches_debug_15247
|
rasdani/github-patches
|
git_diff
|
ycm-core__ycmd-397
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LookupError: unknown encoding on Mac with Python 2
With latest `ycmd` I get the following error on Mac with Py2:
``` python
Traceback (most recent call last):
File "/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py", line 162, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/Users/peter/src/ycmd/ycmd/__main__.py", line 181, in <module>
Main()
File "/Users/peter/src/ycmd/ycmd/__main__.py", line 151, in Main
options, hmac_secret = SetupOptions( args.options_file )
File "/Users/peter/src/ycmd/ycmd/__main__.py", line 127, in SetupOptions
options = user_options_store.DefaultOptions()
File "/Users/peter/src/ycmd/ycmd/../ycmd/user_options_store.py", line 52, in DefaultOptions
with open( settings_path ) as f:
LookupError: unknown encoding:
Process ycmd-server exited abnormally with code 1
```
I have a python 2 installation from homebrew
</issue>
<code>
[start of ycmd/user_options_store.py]
1 # Copyright (C) 2013 Google Inc.
2 #
3 # This file is part of ycmd.
4 #
5 # ycmd is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # ycmd is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with ycmd. If not, see <http://www.gnu.org/licenses/>.
17
18 from __future__ import unicode_literals
19 from __future__ import print_function
20 from __future__ import division
21 from __future__ import absolute_import
22 from future import standard_library
23 standard_library.install_aliases()
24 from builtins import * # noqa
25
26 import json
27 import os
28 from frozendict import frozendict
29
30 _USER_OPTIONS = {}
31
32 def SetAll( new_options ):
33 global _USER_OPTIONS
34 _USER_OPTIONS = frozendict( new_options )
35
36
37 def GetAll():
38 return _USER_OPTIONS
39
40
41 def Value( key ):
42 return _USER_OPTIONS[ key ]
43
44
45 def LoadDefaults():
46 SetAll( DefaultOptions() )
47
48
49 def DefaultOptions():
50 settings_path = os.path.join(
51 os.path.dirname( os.path.abspath( __file__ ) ), 'default_settings.json' )
52 with open( settings_path ) as f:
53 options = json.loads( f.read() )
54 options.pop( 'hmac_secret', None )
55 return options
56
57
[end of ycmd/user_options_store.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ycmd/user_options_store.py b/ycmd/user_options_store.py
--- a/ycmd/user_options_store.py
+++ b/ycmd/user_options_store.py
@@ -27,8 +27,11 @@
import os
from frozendict import frozendict
+from ycmd.utils import ReadFile
+
_USER_OPTIONS = {}
+
def SetAll( new_options ):
global _USER_OPTIONS
_USER_OPTIONS = frozendict( new_options )
@@ -49,8 +52,6 @@
def DefaultOptions():
settings_path = os.path.join(
os.path.dirname( os.path.abspath( __file__ ) ), 'default_settings.json' )
- with open( settings_path ) as f:
- options = json.loads( f.read() )
- options.pop( 'hmac_secret', None )
- return options
-
+ options = json.loads( ReadFile( settings_path ) )
+ options.pop( 'hmac_secret', None )
+ return options
|
{"golden_diff": "diff --git a/ycmd/user_options_store.py b/ycmd/user_options_store.py\n--- a/ycmd/user_options_store.py\n+++ b/ycmd/user_options_store.py\n@@ -27,8 +27,11 @@\n import os\n from frozendict import frozendict\n \n+from ycmd.utils import ReadFile\n+\n _USER_OPTIONS = {}\n \n+\n def SetAll( new_options ):\n global _USER_OPTIONS\n _USER_OPTIONS = frozendict( new_options )\n@@ -49,8 +52,6 @@\n def DefaultOptions():\n settings_path = os.path.join(\n os.path.dirname( os.path.abspath( __file__ ) ), 'default_settings.json' )\n- with open( settings_path ) as f:\n- options = json.loads( f.read() )\n- options.pop( 'hmac_secret', None )\n- return options\n-\n+ options = json.loads( ReadFile( settings_path ) )\n+ options.pop( 'hmac_secret', None )\n+ return options\n", "issue": "LookupError: unknown encoding on Mac with Python 2\nWith latest `ycmd` I get the following error on Mac with Py2:\n\n``` python\nTraceback (most recent call last):\n File \"/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py\", line 162, in _run_module_as_main\n \"__main__\", fname, loader, pkg_name)\n File \"/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py\", line 72, in _run_code\n exec code in run_globals\n File \"/Users/peter/src/ycmd/ycmd/__main__.py\", line 181, in <module>\n Main()\n File \"/Users/peter/src/ycmd/ycmd/__main__.py\", line 151, in Main\n options, hmac_secret = SetupOptions( args.options_file )\n File \"/Users/peter/src/ycmd/ycmd/__main__.py\", line 127, in SetupOptions\n options = user_options_store.DefaultOptions()\n File \"/Users/peter/src/ycmd/ycmd/../ycmd/user_options_store.py\", line 52, in DefaultOptions\n with open( settings_path ) as f:\nLookupError: unknown encoding: \n\nProcess ycmd-server exited abnormally with code 1\n```\n\nI have a python 2 installation from homebrew\n\n", "before_files": [{"content": "# Copyright (C) 2013 Google Inc.\n#\n# This file is part of ycmd.\n#\n# ycmd is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# ycmd is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with ycmd. If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import * # noqa\n\nimport json\nimport os\nfrom frozendict import frozendict\n\n_USER_OPTIONS = {}\n\ndef SetAll( new_options ):\n global _USER_OPTIONS\n _USER_OPTIONS = frozendict( new_options )\n\n\ndef GetAll():\n return _USER_OPTIONS\n\n\ndef Value( key ):\n return _USER_OPTIONS[ key ]\n\n\ndef LoadDefaults():\n SetAll( DefaultOptions() )\n\n\ndef DefaultOptions():\n settings_path = os.path.join(\n os.path.dirname( os.path.abspath( __file__ ) ), 'default_settings.json' )\n with open( settings_path ) as f:\n options = json.loads( f.read() )\n options.pop( 'hmac_secret', None )\n return options\n\n", "path": "ycmd/user_options_store.py"}]}
| 1,350 | 222 |
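The ycmd fix above swaps the bare `open()` call for `ycmd.utils.ReadFile`, so the settings file is no longer decoded with the locale-derived default codec that shows up empty on the reporter's macOS Python 2 setup. The sketch below only illustrates that idea and is not ycmd's actual helper: `read_file` stands in for `ReadFile`, whose real implementation may differ.

```python
import io
import json
import os


def read_file(path):
    # Decode with an explicit codec instead of the locale default, which can
    # be empty on macOS under Python 2 and raise "LookupError: unknown encoding".
    with io.open(path, encoding='utf-8') as handle:
        return handle.read()


def default_options(settings_dir):
    # Mirrors the shape of DefaultOptions() from user_options_store.py,
    # but built on the explicit-encoding helper above.
    settings_path = os.path.join(settings_dir, 'default_settings.json')
    options = json.loads(read_file(settings_path))
    options.pop('hmac_secret', None)
    return options
```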
gh_patches_debug_17035
|
rasdani/github-patches
|
git_diff
|
pytorch__vision-3131
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DatasetFolder incorrectly specifies required folder structure
## 📚 Documentation
The documentation for [DatasetFolder](https://pytorch.org/docs/stable/torchvision/datasets.html#datasetfolder) specifies the following required folder organization:
```
root/class_x/xxx.ext
root/class_x/xxy.ext
root/class_x/xxz.ext
root/class_y/123.ext
root/class_y/nsdf3.ext
root/class_y/asd932_.ext
```
However, the [source code for DatasetFolder](https://pytorch.org/docs/stable/_modules/torchvision/datasets/folder.html#DatasetFolder) shows that `make_dataset()` uses `os.walk()` to discover the files in each class' folder. This means that any file within the class folder **or its subfolders** that passes `is_valid_file()` will be included. To me, this behavior was unexpected.
A more complete specification of the format could be:
```
root/class_x/xxx.ext
root/class_x/xxy.ext
root/class_x/[...]/xxz.ext
root/class_y/123.ext
root/class_y/nsdf3.ext
root/class_y/[...]/asd932_.ext
```
cc @pmeier
</issue>
<code>
[start of torchvision/datasets/folder.py]
1 from .vision import VisionDataset
2
3 from PIL import Image
4
5 import os
6 import os.path
7 from typing import Any, Callable, cast, Dict, List, Optional, Tuple
8
9
10 def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:
11 """Checks if a file is an allowed extension.
12
13 Args:
14 filename (string): path to a file
15 extensions (tuple of strings): extensions to consider (lowercase)
16
17 Returns:
18 bool: True if the filename ends with one of given extensions
19 """
20 return filename.lower().endswith(extensions)
21
22
23 def is_image_file(filename: str) -> bool:
24 """Checks if a file is an allowed image extension.
25
26 Args:
27 filename (string): path to a file
28
29 Returns:
30 bool: True if the filename ends with a known image extension
31 """
32 return has_file_allowed_extension(filename, IMG_EXTENSIONS)
33
34
35 def make_dataset(
36 directory: str,
37 class_to_idx: Dict[str, int],
38 extensions: Optional[Tuple[str, ...]] = None,
39 is_valid_file: Optional[Callable[[str], bool]] = None,
40 ) -> List[Tuple[str, int]]:
41 """Generates a list of samples of a form (path_to_sample, class).
42
43 Args:
44 directory (str): root dataset directory
45 class_to_idx (Dict[str, int]): dictionary mapping class name to class index
46 extensions (optional): A list of allowed extensions.
47 Either extensions or is_valid_file should be passed. Defaults to None.
48 is_valid_file (optional): A function that takes path of a file
49 and checks if the file is a valid file
50 (used to check of corrupt files) both extensions and
51 is_valid_file should not be passed. Defaults to None.
52
53 Raises:
54 ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None.
55
56 Returns:
57 List[Tuple[str, int]]: samples of a form (path_to_sample, class)
58 """
59 instances = []
60 directory = os.path.expanduser(directory)
61 both_none = extensions is None and is_valid_file is None
62 both_something = extensions is not None and is_valid_file is not None
63 if both_none or both_something:
64 raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
65 if extensions is not None:
66 def is_valid_file(x: str) -> bool:
67 return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
68 is_valid_file = cast(Callable[[str], bool], is_valid_file)
69 for target_class in sorted(class_to_idx.keys()):
70 class_index = class_to_idx[target_class]
71 target_dir = os.path.join(directory, target_class)
72 if not os.path.isdir(target_dir):
73 continue
74 for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
75 for fname in sorted(fnames):
76 path = os.path.join(root, fname)
77 if is_valid_file(path):
78 item = path, class_index
79 instances.append(item)
80 return instances
81
82
83 class DatasetFolder(VisionDataset):
84 """A generic data loader where the samples are arranged in this way: ::
85
86 root/class_x/xxx.ext
87 root/class_x/xxy.ext
88 root/class_x/xxz.ext
89
90 root/class_y/123.ext
91 root/class_y/nsdf3.ext
92 root/class_y/asd932_.ext
93
94 Args:
95 root (string): Root directory path.
96 loader (callable): A function to load a sample given its path.
97 extensions (tuple[string]): A list of allowed extensions.
98 both extensions and is_valid_file should not be passed.
99 transform (callable, optional): A function/transform that takes in
100 a sample and returns a transformed version.
101 E.g, ``transforms.RandomCrop`` for images.
102 target_transform (callable, optional): A function/transform that takes
103 in the target and transforms it.
104 is_valid_file (callable, optional): A function that takes path of a file
105 and check if the file is a valid file (used to check of corrupt files)
106 both extensions and is_valid_file should not be passed.
107
108 Attributes:
109 classes (list): List of the class names sorted alphabetically.
110 class_to_idx (dict): Dict with items (class_name, class_index).
111 samples (list): List of (sample path, class_index) tuples
112 targets (list): The class_index value for each image in the dataset
113 """
114
115 def __init__(
116 self,
117 root: str,
118 loader: Callable[[str], Any],
119 extensions: Optional[Tuple[str, ...]] = None,
120 transform: Optional[Callable] = None,
121 target_transform: Optional[Callable] = None,
122 is_valid_file: Optional[Callable[[str], bool]] = None,
123 ) -> None:
124 super(DatasetFolder, self).__init__(root, transform=transform,
125 target_transform=target_transform)
126 classes, class_to_idx = self._find_classes(self.root)
127 samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
128 if len(samples) == 0:
129 msg = "Found 0 files in subfolders of: {}\n".format(self.root)
130 if extensions is not None:
131 msg += "Supported extensions are: {}".format(",".join(extensions))
132 raise RuntimeError(msg)
133
134 self.loader = loader
135 self.extensions = extensions
136
137 self.classes = classes
138 self.class_to_idx = class_to_idx
139 self.samples = samples
140 self.targets = [s[1] for s in samples]
141
142 def _find_classes(self, dir: str) -> Tuple[List[str], Dict[str, int]]:
143 """
144 Finds the class folders in a dataset.
145
146 Args:
147 dir (string): Root directory path.
148
149 Returns:
150 tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
151
152 Ensures:
153 No class is a subdirectory of another.
154 """
155 classes = [d.name for d in os.scandir(dir) if d.is_dir()]
156 classes.sort()
157 class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
158 return classes, class_to_idx
159
160 def __getitem__(self, index: int) -> Tuple[Any, Any]:
161 """
162 Args:
163 index (int): Index
164
165 Returns:
166 tuple: (sample, target) where target is class_index of the target class.
167 """
168 path, target = self.samples[index]
169 sample = self.loader(path)
170 if self.transform is not None:
171 sample = self.transform(sample)
172 if self.target_transform is not None:
173 target = self.target_transform(target)
174
175 return sample, target
176
177 def __len__(self) -> int:
178 return len(self.samples)
179
180
181 IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
182
183
184 def pil_loader(path: str) -> Image.Image:
185 # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
186 with open(path, 'rb') as f:
187 img = Image.open(f)
188 return img.convert('RGB')
189
190
191 # TODO: specify the return type
192 def accimage_loader(path: str) -> Any:
193 import accimage
194 try:
195 return accimage.Image(path)
196 except IOError:
197 # Potentially a decoding problem, fall back to PIL.Image
198 return pil_loader(path)
199
200
201 def default_loader(path: str) -> Any:
202 from torchvision import get_image_backend
203 if get_image_backend() == 'accimage':
204 return accimage_loader(path)
205 else:
206 return pil_loader(path)
207
208
209 class ImageFolder(DatasetFolder):
210 """A generic data loader where the images are arranged in this way: ::
211
212 root/dog/xxx.png
213 root/dog/xxy.png
214 root/dog/xxz.png
215
216 root/cat/123.png
217 root/cat/nsdf3.png
218 root/cat/asd932_.png
219
220 Args:
221 root (string): Root directory path.
222 transform (callable, optional): A function/transform that takes in an PIL image
223 and returns a transformed version. E.g, ``transforms.RandomCrop``
224 target_transform (callable, optional): A function/transform that takes in the
225 target and transforms it.
226 loader (callable, optional): A function to load an image given its path.
227 is_valid_file (callable, optional): A function that takes path of an Image file
228 and check if the file is a valid file (used to check of corrupt files)
229
230 Attributes:
231 classes (list): List of the class names sorted alphabetically.
232 class_to_idx (dict): Dict with items (class_name, class_index).
233 imgs (list): List of (image path, class_index) tuples
234 """
235
236 def __init__(
237 self,
238 root: str,
239 transform: Optional[Callable] = None,
240 target_transform: Optional[Callable] = None,
241 loader: Callable[[str], Any] = default_loader,
242 is_valid_file: Optional[Callable[[str], bool]] = None,
243 ):
244 super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,
245 transform=transform,
246 target_transform=target_transform,
247 is_valid_file=is_valid_file)
248 self.imgs = self.samples
249
[end of torchvision/datasets/folder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torchvision/datasets/folder.py b/torchvision/datasets/folder.py
--- a/torchvision/datasets/folder.py
+++ b/torchvision/datasets/folder.py
@@ -85,11 +85,11 @@
root/class_x/xxx.ext
root/class_x/xxy.ext
- root/class_x/xxz.ext
+ root/class_x/[...]/xxz.ext
root/class_y/123.ext
root/class_y/nsdf3.ext
- root/class_y/asd932_.ext
+ root/class_y/[...]/asd932_.ext
Args:
root (string): Root directory path.
@@ -211,11 +211,11 @@
root/dog/xxx.png
root/dog/xxy.png
- root/dog/xxz.png
+ root/dog/[...]/xxz.png
root/cat/123.png
root/cat/nsdf3.png
- root/cat/asd932_.png
+ root/cat/[...]/asd932_.png
Args:
root (string): Root directory path.
|
{"golden_diff": "diff --git a/torchvision/datasets/folder.py b/torchvision/datasets/folder.py\n--- a/torchvision/datasets/folder.py\n+++ b/torchvision/datasets/folder.py\n@@ -85,11 +85,11 @@\n \n root/class_x/xxx.ext\n root/class_x/xxy.ext\n- root/class_x/xxz.ext\n+ root/class_x/[...]/xxz.ext\n \n root/class_y/123.ext\n root/class_y/nsdf3.ext\n- root/class_y/asd932_.ext\n+ root/class_y/[...]/asd932_.ext\n \n Args:\n root (string): Root directory path.\n@@ -211,11 +211,11 @@\n \n root/dog/xxx.png\n root/dog/xxy.png\n- root/dog/xxz.png\n+ root/dog/[...]/xxz.png\n \n root/cat/123.png\n root/cat/nsdf3.png\n- root/cat/asd932_.png\n+ root/cat/[...]/asd932_.png\n \n Args:\n root (string): Root directory path.\n", "issue": "DatasetFolder incorrectly specifies required folder structure\n## \ud83d\udcda Documentation\r\n\r\n<!-- A clear and concise description of what content in https://pytorch.org/docs is an issue. If this has to do with the general https://pytorch.org website, please file an issue at https://github.com/pytorch/pytorch.github.io/issues/new/choose instead. If this has to do with https://pytorch.org/tutorials, please file an issue at https://github.com/pytorch/tutorials/issues/new -->\r\n\r\nThe documentation for [DatasetFolder](https://pytorch.org/docs/stable/torchvision/datasets.html#datasetfolder) specifies the following required folder organization:\r\n\r\n```\r\nroot/class_x/xxx.ext\r\nroot/class_x/xxy.ext\r\nroot/class_x/xxz.ext\r\n\r\nroot/class_y/123.ext\r\nroot/class_y/nsdf3.ext\r\nroot/class_y/asd932_.ext\r\n```\r\n\r\nHowever, the [source code for DatasetFolder](https://pytorch.org/docs/stable/_modules/torchvision/datasets/folder.html#DatasetFolder) shows that `make_dataset()` uses `os.walk()` to discover the files in each class' folder. This means that any file within the class folder **or its subfolders** that passes `is_valid_file()` will be included. To me, this behavior was unexpected.\r\n\r\nA more complete specification of the format could be:\r\n\r\n```\r\nroot/class_x/xxx.ext\r\nroot/class_x/xxy.ext\r\nroot/class_x/[...]/xxz.ext\r\n\r\nroot/class_y/123.ext\r\nroot/class_y/nsdf3.ext\r\nroot/class_y/[...]/asd932_.ext\r\n```\n\ncc @pmeier\nDatasetFolder incorrectly specifies required folder structure\n## \ud83d\udcda Documentation\r\n\r\n<!-- A clear and concise description of what content in https://pytorch.org/docs is an issue. If this has to do with the general https://pytorch.org website, please file an issue at https://github.com/pytorch/pytorch.github.io/issues/new/choose instead. If this has to do with https://pytorch.org/tutorials, please file an issue at https://github.com/pytorch/tutorials/issues/new -->\r\n\r\nThe documentation for [DatasetFolder](https://pytorch.org/docs/stable/torchvision/datasets.html#datasetfolder) specifies the following required folder organization:\r\n\r\n```\r\nroot/class_x/xxx.ext\r\nroot/class_x/xxy.ext\r\nroot/class_x/xxz.ext\r\n\r\nroot/class_y/123.ext\r\nroot/class_y/nsdf3.ext\r\nroot/class_y/asd932_.ext\r\n```\r\n\r\nHowever, the [source code for DatasetFolder](https://pytorch.org/docs/stable/_modules/torchvision/datasets/folder.html#DatasetFolder) shows that `make_dataset()` uses `os.walk()` to discover the files in each class' folder. This means that any file within the class folder **or its subfolders** that passes `is_valid_file()` will be included. 
To me, this behavior was unexpected.\r\n\r\nA more complete specification of the format could be:\r\n\r\n```\r\nroot/class_x/xxx.ext\r\nroot/class_x/xxy.ext\r\nroot/class_x/[...]/xxz.ext\r\n\r\nroot/class_y/123.ext\r\nroot/class_y/nsdf3.ext\r\nroot/class_y/[...]/asd932_.ext\r\n```\n\ncc @pmeier\n", "before_files": [{"content": "from .vision import VisionDataset\n\nfrom PIL import Image\n\nimport os\nimport os.path\nfrom typing import Any, Callable, cast, Dict, List, Optional, Tuple\n\n\ndef has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:\n \"\"\"Checks if a file is an allowed extension.\n\n Args:\n filename (string): path to a file\n extensions (tuple of strings): extensions to consider (lowercase)\n\n Returns:\n bool: True if the filename ends with one of given extensions\n \"\"\"\n return filename.lower().endswith(extensions)\n\n\ndef is_image_file(filename: str) -> bool:\n \"\"\"Checks if a file is an allowed image extension.\n\n Args:\n filename (string): path to a file\n\n Returns:\n bool: True if the filename ends with a known image extension\n \"\"\"\n return has_file_allowed_extension(filename, IMG_EXTENSIONS)\n\n\ndef make_dataset(\n directory: str,\n class_to_idx: Dict[str, int],\n extensions: Optional[Tuple[str, ...]] = None,\n is_valid_file: Optional[Callable[[str], bool]] = None,\n) -> List[Tuple[str, int]]:\n \"\"\"Generates a list of samples of a form (path_to_sample, class).\n\n Args:\n directory (str): root dataset directory\n class_to_idx (Dict[str, int]): dictionary mapping class name to class index\n extensions (optional): A list of allowed extensions.\n Either extensions or is_valid_file should be passed. Defaults to None.\n is_valid_file (optional): A function that takes path of a file\n and checks if the file is a valid file\n (used to check of corrupt files) both extensions and\n is_valid_file should not be passed. 
Defaults to None.\n\n Raises:\n ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None.\n\n Returns:\n List[Tuple[str, int]]: samples of a form (path_to_sample, class)\n \"\"\"\n instances = []\n directory = os.path.expanduser(directory)\n both_none = extensions is None and is_valid_file is None\n both_something = extensions is not None and is_valid_file is not None\n if both_none or both_something:\n raise ValueError(\"Both extensions and is_valid_file cannot be None or not None at the same time\")\n if extensions is not None:\n def is_valid_file(x: str) -> bool:\n return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))\n is_valid_file = cast(Callable[[str], bool], is_valid_file)\n for target_class in sorted(class_to_idx.keys()):\n class_index = class_to_idx[target_class]\n target_dir = os.path.join(directory, target_class)\n if not os.path.isdir(target_dir):\n continue\n for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):\n for fname in sorted(fnames):\n path = os.path.join(root, fname)\n if is_valid_file(path):\n item = path, class_index\n instances.append(item)\n return instances\n\n\nclass DatasetFolder(VisionDataset):\n \"\"\"A generic data loader where the samples are arranged in this way: ::\n\n root/class_x/xxx.ext\n root/class_x/xxy.ext\n root/class_x/xxz.ext\n\n root/class_y/123.ext\n root/class_y/nsdf3.ext\n root/class_y/asd932_.ext\n\n Args:\n root (string): Root directory path.\n loader (callable): A function to load a sample given its path.\n extensions (tuple[string]): A list of allowed extensions.\n both extensions and is_valid_file should not be passed.\n transform (callable, optional): A function/transform that takes in\n a sample and returns a transformed version.\n E.g, ``transforms.RandomCrop`` for images.\n target_transform (callable, optional): A function/transform that takes\n in the target and transforms it.\n is_valid_file (callable, optional): A function that takes path of a file\n and check if the file is a valid file (used to check of corrupt files)\n both extensions and is_valid_file should not be passed.\n\n Attributes:\n classes (list): List of the class names sorted alphabetically.\n class_to_idx (dict): Dict with items (class_name, class_index).\n samples (list): List of (sample path, class_index) tuples\n targets (list): The class_index value for each image in the dataset\n \"\"\"\n\n def __init__(\n self,\n root: str,\n loader: Callable[[str], Any],\n extensions: Optional[Tuple[str, ...]] = None,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n is_valid_file: Optional[Callable[[str], bool]] = None,\n ) -> None:\n super(DatasetFolder, self).__init__(root, transform=transform,\n target_transform=target_transform)\n classes, class_to_idx = self._find_classes(self.root)\n samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)\n if len(samples) == 0:\n msg = \"Found 0 files in subfolders of: {}\\n\".format(self.root)\n if extensions is not None:\n msg += \"Supported extensions are: {}\".format(\",\".join(extensions))\n raise RuntimeError(msg)\n\n self.loader = loader\n self.extensions = extensions\n\n self.classes = classes\n self.class_to_idx = class_to_idx\n self.samples = samples\n self.targets = [s[1] for s in samples]\n\n def _find_classes(self, dir: str) -> Tuple[List[str], Dict[str, int]]:\n \"\"\"\n Finds the class folders in a dataset.\n\n Args:\n dir (string): Root directory path.\n\n Returns:\n tuple: (classes, 
class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.\n\n Ensures:\n No class is a subdirectory of another.\n \"\"\"\n classes = [d.name for d in os.scandir(dir) if d.is_dir()]\n classes.sort()\n class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}\n return classes, class_to_idx\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (sample, target) where target is class_index of the target class.\n \"\"\"\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform is not None:\n sample = self.transform(sample)\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return sample, target\n\n def __len__(self) -> int:\n return len(self.samples)\n\n\nIMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')\n\n\ndef pil_loader(path: str) -> Image.Image:\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\n\n# TODO: specify the return type\ndef accimage_loader(path: str) -> Any:\n import accimage\n try:\n return accimage.Image(path)\n except IOError:\n # Potentially a decoding problem, fall back to PIL.Image\n return pil_loader(path)\n\n\ndef default_loader(path: str) -> Any:\n from torchvision import get_image_backend\n if get_image_backend() == 'accimage':\n return accimage_loader(path)\n else:\n return pil_loader(path)\n\n\nclass ImageFolder(DatasetFolder):\n \"\"\"A generic data loader where the images are arranged in this way: ::\n\n root/dog/xxx.png\n root/dog/xxy.png\n root/dog/xxz.png\n\n root/cat/123.png\n root/cat/nsdf3.png\n root/cat/asd932_.png\n\n Args:\n root (string): Root directory path.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n is_valid_file (callable, optional): A function that takes path of an Image file\n and check if the file is a valid file (used to check of corrupt files)\n\n Attributes:\n classes (list): List of the class names sorted alphabetically.\n class_to_idx (dict): Dict with items (class_name, class_index).\n imgs (list): List of (image path, class_index) tuples\n \"\"\"\n\n def __init__(\n self,\n root: str,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n loader: Callable[[str], Any] = default_loader,\n is_valid_file: Optional[Callable[[str], bool]] = None,\n ):\n super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,\n transform=transform,\n target_transform=target_transform,\n is_valid_file=is_valid_file)\n self.imgs = self.samples\n", "path": "torchvision/datasets/folder.py"}]}
| 3,941 | 266 |
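The torchvision patch above only corrects the documented folder layout; the recursive behaviour it documents comes from `make_dataset()` walking each class directory with `os.walk()`, as shown in the quoted `folder.py`. The following self-contained comparison, with made-up directory names, shows why files in nested subfolders are picked up even though the old docstring suggested otherwise.

```python
import os


def files_recursive(class_dir):
    # What DatasetFolder actually does: os.walk() descends into every
    # subdirectory, so root/class_x/subdir/img.png is collected too.
    found = []
    for root, _, fnames in sorted(os.walk(class_dir, followlinks=True)):
        for fname in sorted(fnames):
            found.append(os.path.join(root, fname))
    return found


def files_top_level_only(class_dir):
    # What the original docs implied: only direct children of the class folder.
    return sorted(
        os.path.join(class_dir, entry.name)
        for entry in os.scandir(class_dir)
        if entry.is_file()
    )
```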
gh_patches_debug_39887
|
rasdani/github-patches
|
git_diff
|
translate__pootle-6699
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pootle FS file system backends list isn't sorted
</issue>
<code>
[start of pootle/apps/pootle_fs/forms.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import uuid
10 from collections import Counter, OrderedDict
11
12 from django import forms
13 from django.utils.functional import cached_property
14
15 from pootle.core.delegate import revision
16 from pootle.i18n.gettext import ugettext_lazy as _
17 from pootle_language.models import Language
18
19 from .delegate import (
20 fs_plugins, fs_translation_mapping_validator, fs_url_validator)
21
22
23 FS_CHOICES = (
24 ("gnu", _("GNU-style"), "/po/<language_code>.<ext>"),
25 ("non-gnu",
26 _("non GNU-style"),
27 "/<language_code>/<dir_path>/<filename>.<ext>"),
28 ("django",
29 _("Django-style"),
30 "/locale/<language_code>/LC_MESSAGES/<filename>.<ext>"),
31 ("custom", _("Custom"), ""))
32
33
34 class ProjectFSAdminForm(forms.Form):
35
36 fs_type = forms.ChoiceField(
37 label=_("Filesystem backend"),
38 help_text=_("Select a filesystem backend"),
39 choices=(),
40 widget=forms.Select(
41 attrs={'class': 'js-select2'}))
42 fs_url = forms.CharField(
43 label=_("Backend URL or path"),
44 help_text=_(
45 "The URL or path to your translation files"))
46 translation_mapping_presets = forms.ChoiceField(
47 label=_("Translation mapping presets"),
48 required=False,
49 choices=(
50 [("", "-----"), ]
51 + [(x[0], x[1]) for x in FS_CHOICES]),
52 widget=forms.Select(
53 attrs={'class': 'js-select2 js-select-fs-mapping'}))
54 translation_mapping = forms.CharField(
55 label=_("Translation path mapping"),
56 help_text=_("Translation path mapping that maps the localisation "
57 "files on the filesystem to files on Pootle."),
58 widget=forms.TextInput(
59 attrs={'class': 'js-select-fs-mapping-target'}))
60
61 def should_save(self):
62 return self.is_valid()
63
64 @property
65 def fs_type_choices(self):
66 return (
67 (plugin_type, plugin.name or plugin.fs_type)
68 for plugin_type, plugin
69 in fs_plugins.gather().items())
70
71 def __init__(self, *args, **kwargs):
72 self.project = kwargs.pop("project")
73 super(ProjectFSAdminForm, self).__init__(*args, **kwargs)
74 self.fields["fs_type"].choices = self.fs_type_choices
75
76 self.fields["fs_url"].initial = self.project.config.get("pootle_fs.fs_url")
77 self.fields["fs_type"].initial = (
78 self.project.config.get("pootle_fs.fs_type"))
79 translation_mapping = (
80 self.project.config.get("pootle_fs.translation_mappings"))
81 if translation_mapping:
82 self.fields["translation_mapping"].initial = (
83 translation_mapping.get("default"))
84
85 @property
86 def fs_path_validator(self):
87 return fs_translation_mapping_validator.get()
88
89 @cached_property
90 def fs_plugin(self):
91 if self.cleaned_data.get("fs_type"):
92 return fs_plugins.gather()[self.cleaned_data["fs_type"]]
93
94 @cached_property
95 def fs_url_validator(self):
96 validator = fs_url_validator.get(self.fs_plugin)
97 return validator and validator()
98
99 def clean(self):
100 if not hasattr(self, "cleaned_data") or not self.cleaned_data:
101 return
102 if self.cleaned_data.get("translation_mapping"):
103 try:
104 self.fs_path_validator(
105 self.cleaned_data["translation_mapping"]).validate()
106 except ValueError as e:
107 self.add_error("translation_mapping", e)
108 if not self.fs_url_validator or not self.cleaned_data.get("fs_url"):
109 return
110 try:
111 self.fs_url_validator.validate(self.cleaned_data["fs_url"])
112 except forms.ValidationError as e:
113 self.add_error(
114 "fs_url",
115 forms.ValidationError(
116 "Incorrect URL or path ('%s') for plugin type '%s': %s"
117 % (self.cleaned_data.get("fs_url"),
118 self.cleaned_data.get("fs_type"),
119 e)))
120
121 def save(self):
122 self.project.config["pootle_fs.fs_type"] = self.cleaned_data["fs_type"]
123 self.project.config["pootle_fs.fs_url"] = self.cleaned_data["fs_url"]
124 self.project.config["pootle_fs.translation_mappings"] = dict(
125 default=self.cleaned_data["translation_mapping"])
126
127
128 class LangMappingForm(forms.Form):
129 remove = forms.BooleanField(required=False)
130 pootle_code = forms.ModelChoiceField(
131 Language.objects.all(),
132 to_field_name="code",
133 widget=forms.Select(attrs={'class': 'js-select2'}))
134 fs_code = forms.CharField(
135 max_length=32)
136
137 def __init__(self, *args, **kwargs):
138 self.project = kwargs.pop("project")
139 existing_codes = kwargs.pop("existing_codes")
140 super(LangMappingForm, self).__init__(*args, **kwargs)
141 if existing_codes:
142 excluded_codes = (
143 [c for c in existing_codes if c != self.initial["pootle_code"]]
144 if self.initial and self.initial.get("pootle_code")
145 else existing_codes)
146 self.fields["pootle_code"].queryset = (
147 self.fields["pootle_code"].queryset.exclude(
148 code__in=excluded_codes))
149
150
151 class BaseLangMappingFormSet(forms.BaseFormSet):
152
153 def __init__(self, *args, **kwargs):
154 self.project = kwargs.pop("project")
155 mappings = self.project.config.get("pootle.core.lang_mapping", {})
156 if mappings:
157 kwargs["initial"] = [
158 dict(pootle_code=v, fs_code=k)
159 for k, v in mappings.items()]
160 super(BaseLangMappingFormSet, self).__init__(*args, **kwargs)
161
162 @property
163 def cleaned_mapping(self):
164 mapping = OrderedDict()
165 for mapped in self.cleaned_data:
166 if not mapped or mapped["remove"]:
167 continue
168 mapping[mapped["fs_code"]] = mapped["pootle_code"].code
169 return mapping
170
171 def save(self):
172 self.project.config["pootle.core.lang_mapping"] = self.cleaned_mapping
173 revision.get(self.project.__class__)(self.project).set(
174 keys=["pootle.fs.sync"], value=uuid.uuid4().hex)
175
176 def clean(self):
177 if any(self.errors):
178 return
179 fs_counter = Counter([v["fs_code"] for v in self.cleaned_data if v])
180 if set(fs_counter.values()) != set([1]):
181 raise forms.ValidationError(
182 _("Filesystem language codes must be unique"))
183 pootle_counter = Counter([v["pootle_code"] for v in self.cleaned_data if v])
184 if set(pootle_counter.values()) != set([1]):
185 raise forms.ValidationError(
186 _("Pootle language mappings must be unique"))
187
188 def get_form_kwargs(self, index):
189 kwargs = super(BaseLangMappingFormSet, self).get_form_kwargs(index)
190 kwargs["project"] = self.project
191 kwargs["existing_codes"] = (
192 [i["pootle_code"] for i in self.initial]
193 if self.initial
194 else [])
195 return kwargs
196
197
198 LangMappingFormSet = forms.formset_factory(
199 LangMappingForm,
200 formset=BaseLangMappingFormSet)
201
[end of pootle/apps/pootle_fs/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pootle/apps/pootle_fs/forms.py b/pootle/apps/pootle_fs/forms.py
--- a/pootle/apps/pootle_fs/forms.py
+++ b/pootle/apps/pootle_fs/forms.py
@@ -33,12 +33,6 @@
class ProjectFSAdminForm(forms.Form):
- fs_type = forms.ChoiceField(
- label=_("Filesystem backend"),
- help_text=_("Select a filesystem backend"),
- choices=(),
- widget=forms.Select(
- attrs={'class': 'js-select2'}))
fs_url = forms.CharField(
label=_("Backend URL or path"),
help_text=_(
@@ -61,36 +55,26 @@
def should_save(self):
return self.is_valid()
- @property
- def fs_type_choices(self):
- return (
- (plugin_type, plugin.name or plugin.fs_type)
- for plugin_type, plugin
- in fs_plugins.gather().items())
-
def __init__(self, *args, **kwargs):
self.project = kwargs.pop("project")
super(ProjectFSAdminForm, self).__init__(*args, **kwargs)
- self.fields["fs_type"].choices = self.fs_type_choices
self.fields["fs_url"].initial = self.project.config.get("pootle_fs.fs_url")
- self.fields["fs_type"].initial = (
- self.project.config.get("pootle_fs.fs_type"))
translation_mapping = (
self.project.config.get("pootle_fs.translation_mappings"))
if translation_mapping:
self.fields["translation_mapping"].initial = (
translation_mapping.get("default"))
+ @cached_property
+ def fs_plugin(self):
+ return fs_plugins.gather()[
+ self.project.config["pootle_fs.fs_type"]]
+
@property
def fs_path_validator(self):
return fs_translation_mapping_validator.get()
- @cached_property
- def fs_plugin(self):
- if self.cleaned_data.get("fs_type"):
- return fs_plugins.gather()[self.cleaned_data["fs_type"]]
-
@cached_property
def fs_url_validator(self):
validator = fs_url_validator.get(self.fs_plugin)
@@ -115,11 +99,10 @@
forms.ValidationError(
"Incorrect URL or path ('%s') for plugin type '%s': %s"
% (self.cleaned_data.get("fs_url"),
- self.cleaned_data.get("fs_type"),
+ self.fs_plugin.fs_type,
e)))
def save(self):
- self.project.config["pootle_fs.fs_type"] = self.cleaned_data["fs_type"]
self.project.config["pootle_fs.fs_url"] = self.cleaned_data["fs_url"]
self.project.config["pootle_fs.translation_mappings"] = dict(
default=self.cleaned_data["translation_mapping"])
|
{"golden_diff": "diff --git a/pootle/apps/pootle_fs/forms.py b/pootle/apps/pootle_fs/forms.py\n--- a/pootle/apps/pootle_fs/forms.py\n+++ b/pootle/apps/pootle_fs/forms.py\n@@ -33,12 +33,6 @@\n \n class ProjectFSAdminForm(forms.Form):\n \n- fs_type = forms.ChoiceField(\n- label=_(\"Filesystem backend\"),\n- help_text=_(\"Select a filesystem backend\"),\n- choices=(),\n- widget=forms.Select(\n- attrs={'class': 'js-select2'}))\n fs_url = forms.CharField(\n label=_(\"Backend URL or path\"),\n help_text=_(\n@@ -61,36 +55,26 @@\n def should_save(self):\n return self.is_valid()\n \n- @property\n- def fs_type_choices(self):\n- return (\n- (plugin_type, plugin.name or plugin.fs_type)\n- for plugin_type, plugin\n- in fs_plugins.gather().items())\n-\n def __init__(self, *args, **kwargs):\n self.project = kwargs.pop(\"project\")\n super(ProjectFSAdminForm, self).__init__(*args, **kwargs)\n- self.fields[\"fs_type\"].choices = self.fs_type_choices\n \n self.fields[\"fs_url\"].initial = self.project.config.get(\"pootle_fs.fs_url\")\n- self.fields[\"fs_type\"].initial = (\n- self.project.config.get(\"pootle_fs.fs_type\"))\n translation_mapping = (\n self.project.config.get(\"pootle_fs.translation_mappings\"))\n if translation_mapping:\n self.fields[\"translation_mapping\"].initial = (\n translation_mapping.get(\"default\"))\n \n+ @cached_property\n+ def fs_plugin(self):\n+ return fs_plugins.gather()[\n+ self.project.config[\"pootle_fs.fs_type\"]]\n+\n @property\n def fs_path_validator(self):\n return fs_translation_mapping_validator.get()\n \n- @cached_property\n- def fs_plugin(self):\n- if self.cleaned_data.get(\"fs_type\"):\n- return fs_plugins.gather()[self.cleaned_data[\"fs_type\"]]\n-\n @cached_property\n def fs_url_validator(self):\n validator = fs_url_validator.get(self.fs_plugin)\n@@ -115,11 +99,10 @@\n forms.ValidationError(\n \"Incorrect URL or path ('%s') for plugin type '%s': %s\"\n % (self.cleaned_data.get(\"fs_url\"),\n- self.cleaned_data.get(\"fs_type\"),\n+ self.fs_plugin.fs_type,\n e)))\n \n def save(self):\n- self.project.config[\"pootle_fs.fs_type\"] = self.cleaned_data[\"fs_type\"]\n self.project.config[\"pootle_fs.fs_url\"] = self.cleaned_data[\"fs_url\"]\n self.project.config[\"pootle_fs.translation_mappings\"] = dict(\n default=self.cleaned_data[\"translation_mapping\"])\n", "issue": "Pootle FS file system backends list isn't sorted\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport uuid\nfrom collections import Counter, OrderedDict\n\nfrom django import forms\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.delegate import revision\nfrom pootle.i18n.gettext import ugettext_lazy as _\nfrom pootle_language.models import Language\n\nfrom .delegate import (\n fs_plugins, fs_translation_mapping_validator, fs_url_validator)\n\n\nFS_CHOICES = (\n (\"gnu\", _(\"GNU-style\"), \"/po/<language_code>.<ext>\"),\n (\"non-gnu\",\n _(\"non GNU-style\"),\n \"/<language_code>/<dir_path>/<filename>.<ext>\"),\n (\"django\",\n _(\"Django-style\"),\n \"/locale/<language_code>/LC_MESSAGES/<filename>.<ext>\"),\n (\"custom\", _(\"Custom\"), \"\"))\n\n\nclass ProjectFSAdminForm(forms.Form):\n\n fs_type = forms.ChoiceField(\n label=_(\"Filesystem backend\"),\n help_text=_(\"Select a filesystem backend\"),\n choices=(),\n widget=forms.Select(\n attrs={'class': 'js-select2'}))\n fs_url = forms.CharField(\n label=_(\"Backend URL or path\"),\n help_text=_(\n \"The URL or path to your translation files\"))\n translation_mapping_presets = forms.ChoiceField(\n label=_(\"Translation mapping presets\"),\n required=False,\n choices=(\n [(\"\", \"-----\"), ]\n + [(x[0], x[1]) for x in FS_CHOICES]),\n widget=forms.Select(\n attrs={'class': 'js-select2 js-select-fs-mapping'}))\n translation_mapping = forms.CharField(\n label=_(\"Translation path mapping\"),\n help_text=_(\"Translation path mapping that maps the localisation \"\n \"files on the filesystem to files on Pootle.\"),\n widget=forms.TextInput(\n attrs={'class': 'js-select-fs-mapping-target'}))\n\n def should_save(self):\n return self.is_valid()\n\n @property\n def fs_type_choices(self):\n return (\n (plugin_type, plugin.name or plugin.fs_type)\n for plugin_type, plugin\n in fs_plugins.gather().items())\n\n def __init__(self, *args, **kwargs):\n self.project = kwargs.pop(\"project\")\n super(ProjectFSAdminForm, self).__init__(*args, **kwargs)\n self.fields[\"fs_type\"].choices = self.fs_type_choices\n\n self.fields[\"fs_url\"].initial = self.project.config.get(\"pootle_fs.fs_url\")\n self.fields[\"fs_type\"].initial = (\n self.project.config.get(\"pootle_fs.fs_type\"))\n translation_mapping = (\n self.project.config.get(\"pootle_fs.translation_mappings\"))\n if translation_mapping:\n self.fields[\"translation_mapping\"].initial = (\n translation_mapping.get(\"default\"))\n\n @property\n def fs_path_validator(self):\n return fs_translation_mapping_validator.get()\n\n @cached_property\n def fs_plugin(self):\n if self.cleaned_data.get(\"fs_type\"):\n return fs_plugins.gather()[self.cleaned_data[\"fs_type\"]]\n\n @cached_property\n def fs_url_validator(self):\n validator = fs_url_validator.get(self.fs_plugin)\n return validator and validator()\n\n def clean(self):\n if not hasattr(self, \"cleaned_data\") or not self.cleaned_data:\n return\n if self.cleaned_data.get(\"translation_mapping\"):\n try:\n self.fs_path_validator(\n self.cleaned_data[\"translation_mapping\"]).validate()\n except ValueError as e:\n self.add_error(\"translation_mapping\", e)\n if not self.fs_url_validator or not self.cleaned_data.get(\"fs_url\"):\n return\n try:\n self.fs_url_validator.validate(self.cleaned_data[\"fs_url\"])\n except forms.ValidationError as e:\n self.add_error(\n \"fs_url\",\n forms.ValidationError(\n \"Incorrect URL or path ('%s') for plugin type '%s': %s\"\n % (self.cleaned_data.get(\"fs_url\"),\n 
self.cleaned_data.get(\"fs_type\"),\n e)))\n\n def save(self):\n self.project.config[\"pootle_fs.fs_type\"] = self.cleaned_data[\"fs_type\"]\n self.project.config[\"pootle_fs.fs_url\"] = self.cleaned_data[\"fs_url\"]\n self.project.config[\"pootle_fs.translation_mappings\"] = dict(\n default=self.cleaned_data[\"translation_mapping\"])\n\n\nclass LangMappingForm(forms.Form):\n remove = forms.BooleanField(required=False)\n pootle_code = forms.ModelChoiceField(\n Language.objects.all(),\n to_field_name=\"code\",\n widget=forms.Select(attrs={'class': 'js-select2'}))\n fs_code = forms.CharField(\n max_length=32)\n\n def __init__(self, *args, **kwargs):\n self.project = kwargs.pop(\"project\")\n existing_codes = kwargs.pop(\"existing_codes\")\n super(LangMappingForm, self).__init__(*args, **kwargs)\n if existing_codes:\n excluded_codes = (\n [c for c in existing_codes if c != self.initial[\"pootle_code\"]]\n if self.initial and self.initial.get(\"pootle_code\")\n else existing_codes)\n self.fields[\"pootle_code\"].queryset = (\n self.fields[\"pootle_code\"].queryset.exclude(\n code__in=excluded_codes))\n\n\nclass BaseLangMappingFormSet(forms.BaseFormSet):\n\n def __init__(self, *args, **kwargs):\n self.project = kwargs.pop(\"project\")\n mappings = self.project.config.get(\"pootle.core.lang_mapping\", {})\n if mappings:\n kwargs[\"initial\"] = [\n dict(pootle_code=v, fs_code=k)\n for k, v in mappings.items()]\n super(BaseLangMappingFormSet, self).__init__(*args, **kwargs)\n\n @property\n def cleaned_mapping(self):\n mapping = OrderedDict()\n for mapped in self.cleaned_data:\n if not mapped or mapped[\"remove\"]:\n continue\n mapping[mapped[\"fs_code\"]] = mapped[\"pootle_code\"].code\n return mapping\n\n def save(self):\n self.project.config[\"pootle.core.lang_mapping\"] = self.cleaned_mapping\n revision.get(self.project.__class__)(self.project).set(\n keys=[\"pootle.fs.sync\"], value=uuid.uuid4().hex)\n\n def clean(self):\n if any(self.errors):\n return\n fs_counter = Counter([v[\"fs_code\"] for v in self.cleaned_data if v])\n if set(fs_counter.values()) != set([1]):\n raise forms.ValidationError(\n _(\"Filesystem language codes must be unique\"))\n pootle_counter = Counter([v[\"pootle_code\"] for v in self.cleaned_data if v])\n if set(pootle_counter.values()) != set([1]):\n raise forms.ValidationError(\n _(\"Pootle language mappings must be unique\"))\n\n def get_form_kwargs(self, index):\n kwargs = super(BaseLangMappingFormSet, self).get_form_kwargs(index)\n kwargs[\"project\"] = self.project\n kwargs[\"existing_codes\"] = (\n [i[\"pootle_code\"] for i in self.initial]\n if self.initial\n else [])\n return kwargs\n\n\nLangMappingFormSet = forms.formset_factory(\n LangMappingForm,\n formset=BaseLangMappingFormSet)\n", "path": "pootle/apps/pootle_fs/forms.py"}]}
| 2,650 | 622 |
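The accepted Pootle patch above resolves the unsorted-backends report indirectly: it drops the `fs_type` choice field and reads the backend type from the project configuration, so the unsorted list no longer appears in the form. Had the field been kept, a narrower fix could simply have sorted the gathered plugins by display name, roughly as sketched below; the shape of `fs_plugins.gather()` is assumed from the surrounding form code.

```python
def sorted_fs_type_choices(plugins):
    """Return (value, label) choice pairs ordered case-insensitively by label.

    `plugins` is assumed to be a mapping like the one fs_plugins.gather()
    returns in forms.py: {plugin_type: plugin}, where each plugin exposes
    `name` and `fs_type` attributes.
    """
    choices = [
        (plugin_type, plugin.name or plugin.fs_type)
        for plugin_type, plugin in plugins.items()
    ]
    return sorted(choices, key=lambda choice: choice[1].lower())
```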
gh_patches_debug_46628
|
rasdani/github-patches
|
git_diff
|
nautobot__nautobot-1028
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
1.2 Homepage object counts for DCIM all point to Rack model
### Environment
* Python version: 3.9
* Nautobot version: 1.2.0-alpha1
### Steps to Reproduce
1. Create a set of each DCIM object which is listed on the homepage, like devices, interfaces, etc., and exactly 1 Rack
2. Navigate to the home page
### Expected Behavior
The home page widgets for object counts for DCIM should list correct counts for each object type.
### Observed Behavior
Other than sites, all other DCIM object counts show the number of racks (in this case, exactly 1).
</issue>
<code>
[start of nautobot/dcim/homepage.py]
1 from nautobot.dcim.models.power import PowerFeed, PowerPanel
2 from nautobot.dcim.models import Cable, ConsolePort, Interface, PowerOutlet, Rack, Site
3 from nautobot.core.apps import HomePageGroup, HomePageItem, HomePagePanel
4
5
6 layout = (
7 HomePagePanel(
8 name="Organization",
9 weight=100,
10 items=(
11 HomePageItem(
12 name="Sites",
13 link="dcim:site_list",
14 model=Site,
15 description="Geographic location",
16 permissions=["dcim.view_site"],
17 weight=100,
18 ),
19 ),
20 ),
21 HomePagePanel(
22 name="DCIM",
23 weight=200,
24 items=(
25 HomePageItem(
26 name="Racks",
27 link="dcim:rack_list",
28 model=Rack,
29 description="Equipment racks, optionally organized by group",
30 permissions=["dcim.view_rack"],
31 weight=100,
32 ),
33 HomePageItem(
34 name="Device Types",
35 link="dcim:devicetype_list",
36 model=Rack,
37 description="Physical hardware models by manufacturer",
38 permissions=["dcim.view_devicetype"],
39 weight=200,
40 ),
41 HomePageItem(
42 name="Devices",
43 link="dcim:device_list",
44 model=Rack,
45 description="Rack-mounted network equipment, servers, and other devices",
46 permissions=["dcim.view_device"],
47 weight=300,
48 ),
49 HomePageItem(
50 name="Virtual Chassis",
51 link="dcim:virtualchassis_list",
52 model=Rack,
53 permissions=["dcim.view_virtualchassis"],
54 description="Represents a set of devices which share a common control plane",
55 weight=400,
56 ),
57 HomePageGroup(
58 name="Connections",
59 weight=500,
60 items=(
61 HomePageItem(
62 name="Cables",
63 link="dcim:cable_list",
64 model=Cable,
65 permissions=["dcim.view_cable"],
66 weight=100,
67 ),
68 HomePageItem(
69 name="Interfaces",
70 link="dcim:interface_connections_list",
71 model=Interface,
72 permissions=["dcim.view_interface"],
73 weight=200,
74 ),
75 HomePageItem(
76 name="Console",
77 link="dcim:console_connections_list",
78 model=ConsolePort,
79 permissions=["dcim.view_consoleport", "dcim.view_consoleserverport"],
80 weight=300,
81 ),
82 HomePageItem(
83 name="Power",
84 link="dcim:power_connections_list",
85 model=PowerOutlet,
86 permissions=["dcim.view_powerport", "dcim.view_poweroutlet"],
87 weight=400,
88 ),
89 ),
90 ),
91 ),
92 ),
93 HomePagePanel(
94 name="Power",
95 weight=300,
96 items=(
97 HomePageItem(
98 name="Power Feeds",
99 link="dcim:powerfeed_list",
100 model=PowerFeed,
101 description="Electrical circuits delivering power from panels",
102 permissions=["dcim.view_powerfeed"],
103 weight=100,
104 ),
105 HomePageItem(
106 name="Power Panels",
107 link="dcim:powerpanel_list",
108 model=PowerPanel,
109 description="Electrical panels receiving utility power",
110 permissions=["dcim.view_powerpanel"],
111 weight=200,
112 ),
113 ),
114 ),
115 )
116
[end of nautobot/dcim/homepage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nautobot/dcim/homepage.py b/nautobot/dcim/homepage.py
--- a/nautobot/dcim/homepage.py
+++ b/nautobot/dcim/homepage.py
@@ -1,6 +1,5 @@
-from nautobot.dcim.models.power import PowerFeed, PowerPanel
-from nautobot.dcim.models import Cable, ConsolePort, Interface, PowerOutlet, Rack, Site
from nautobot.core.apps import HomePageGroup, HomePageItem, HomePagePanel
+from nautobot.dcim import models
layout = (
@@ -11,7 +10,7 @@
HomePageItem(
name="Sites",
link="dcim:site_list",
- model=Site,
+ model=models.Site,
description="Geographic location",
permissions=["dcim.view_site"],
weight=100,
@@ -25,7 +24,7 @@
HomePageItem(
name="Racks",
link="dcim:rack_list",
- model=Rack,
+ model=models.Rack,
description="Equipment racks, optionally organized by group",
permissions=["dcim.view_rack"],
weight=100,
@@ -33,7 +32,7 @@
HomePageItem(
name="Device Types",
link="dcim:devicetype_list",
- model=Rack,
+ model=models.DeviceType,
description="Physical hardware models by manufacturer",
permissions=["dcim.view_devicetype"],
weight=200,
@@ -41,7 +40,7 @@
HomePageItem(
name="Devices",
link="dcim:device_list",
- model=Rack,
+ model=models.Device,
description="Rack-mounted network equipment, servers, and other devices",
permissions=["dcim.view_device"],
weight=300,
@@ -49,7 +48,7 @@
HomePageItem(
name="Virtual Chassis",
link="dcim:virtualchassis_list",
- model=Rack,
+ model=models.VirtualChassis,
permissions=["dcim.view_virtualchassis"],
description="Represents a set of devices which share a common control plane",
weight=400,
@@ -61,28 +60,28 @@
HomePageItem(
name="Cables",
link="dcim:cable_list",
- model=Cable,
+ model=models.Cable,
permissions=["dcim.view_cable"],
weight=100,
),
HomePageItem(
name="Interfaces",
link="dcim:interface_connections_list",
- model=Interface,
+ model=models.Interface,
permissions=["dcim.view_interface"],
weight=200,
),
HomePageItem(
name="Console",
link="dcim:console_connections_list",
- model=ConsolePort,
+ model=models.ConsolePort,
permissions=["dcim.view_consoleport", "dcim.view_consoleserverport"],
weight=300,
),
HomePageItem(
name="Power",
link="dcim:power_connections_list",
- model=PowerOutlet,
+ model=models.PowerOutlet,
permissions=["dcim.view_powerport", "dcim.view_poweroutlet"],
weight=400,
),
@@ -97,7 +96,7 @@
HomePageItem(
name="Power Feeds",
link="dcim:powerfeed_list",
- model=PowerFeed,
+ model=models.PowerFeed,
description="Electrical circuits delivering power from panels",
permissions=["dcim.view_powerfeed"],
weight=100,
@@ -105,7 +104,7 @@
HomePageItem(
name="Power Panels",
link="dcim:powerpanel_list",
- model=PowerPanel,
+ model=models.PowerPanel,
description="Electrical panels receiving utility power",
permissions=["dcim.view_powerpanel"],
weight=200,
|
{"golden_diff": "diff --git a/nautobot/dcim/homepage.py b/nautobot/dcim/homepage.py\n--- a/nautobot/dcim/homepage.py\n+++ b/nautobot/dcim/homepage.py\n@@ -1,6 +1,5 @@\n-from nautobot.dcim.models.power import PowerFeed, PowerPanel\n-from nautobot.dcim.models import Cable, ConsolePort, Interface, PowerOutlet, Rack, Site\n from nautobot.core.apps import HomePageGroup, HomePageItem, HomePagePanel\n+from nautobot.dcim import models\n \n \n layout = (\n@@ -11,7 +10,7 @@\n HomePageItem(\n name=\"Sites\",\n link=\"dcim:site_list\",\n- model=Site,\n+ model=models.Site,\n description=\"Geographic location\",\n permissions=[\"dcim.view_site\"],\n weight=100,\n@@ -25,7 +24,7 @@\n HomePageItem(\n name=\"Racks\",\n link=\"dcim:rack_list\",\n- model=Rack,\n+ model=models.Rack,\n description=\"Equipment racks, optionally organized by group\",\n permissions=[\"dcim.view_rack\"],\n weight=100,\n@@ -33,7 +32,7 @@\n HomePageItem(\n name=\"Device Types\",\n link=\"dcim:devicetype_list\",\n- model=Rack,\n+ model=models.DeviceType,\n description=\"Physical hardware models by manufacturer\",\n permissions=[\"dcim.view_devicetype\"],\n weight=200,\n@@ -41,7 +40,7 @@\n HomePageItem(\n name=\"Devices\",\n link=\"dcim:device_list\",\n- model=Rack,\n+ model=models.Device,\n description=\"Rack-mounted network equipment, servers, and other devices\",\n permissions=[\"dcim.view_device\"],\n weight=300,\n@@ -49,7 +48,7 @@\n HomePageItem(\n name=\"Virtual Chassis\",\n link=\"dcim:virtualchassis_list\",\n- model=Rack,\n+ model=models.VirtualChassis,\n permissions=[\"dcim.view_virtualchassis\"],\n description=\"Represents a set of devices which share a common control plane\",\n weight=400,\n@@ -61,28 +60,28 @@\n HomePageItem(\n name=\"Cables\",\n link=\"dcim:cable_list\",\n- model=Cable,\n+ model=models.Cable,\n permissions=[\"dcim.view_cable\"],\n weight=100,\n ),\n HomePageItem(\n name=\"Interfaces\",\n link=\"dcim:interface_connections_list\",\n- model=Interface,\n+ model=models.Interface,\n permissions=[\"dcim.view_interface\"],\n weight=200,\n ),\n HomePageItem(\n name=\"Console\",\n link=\"dcim:console_connections_list\",\n- model=ConsolePort,\n+ model=models.ConsolePort,\n permissions=[\"dcim.view_consoleport\", \"dcim.view_consoleserverport\"],\n weight=300,\n ),\n HomePageItem(\n name=\"Power\",\n link=\"dcim:power_connections_list\",\n- model=PowerOutlet,\n+ model=models.PowerOutlet,\n permissions=[\"dcim.view_powerport\", \"dcim.view_poweroutlet\"],\n weight=400,\n ),\n@@ -97,7 +96,7 @@\n HomePageItem(\n name=\"Power Feeds\",\n link=\"dcim:powerfeed_list\",\n- model=PowerFeed,\n+ model=models.PowerFeed,\n description=\"Electrical circuits delivering power from panels\",\n permissions=[\"dcim.view_powerfeed\"],\n weight=100,\n@@ -105,7 +104,7 @@\n HomePageItem(\n name=\"Power Panels\",\n link=\"dcim:powerpanel_list\",\n- model=PowerPanel,\n+ model=models.PowerPanel,\n description=\"Electrical panels receiving utility power\",\n permissions=[\"dcim.view_powerpanel\"],\n weight=200,\n", "issue": "1.2 Homepage object counts for DCIM all point to Rack model\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. 
Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Python version: 3.9\r\n* Nautobot version: 1.2.0-alpha1\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\n1. Create a set of each DCIM object which is listed on the homepage, like devices, interfaces, etc., and exactly 1 Rack\r\n2. Navigate to the home page\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nThe home page widgets for object counts for DCIM should list correct counts for each object type.\r\n\r\n<!-- What happened instead? -->\r\n### Observed Behavior\r\nOther than sites, all other DCIM object counts show the number of racks (in this case, exactly 1).\n", "before_files": [{"content": "from nautobot.dcim.models.power import PowerFeed, PowerPanel\nfrom nautobot.dcim.models import Cable, ConsolePort, Interface, PowerOutlet, Rack, Site\nfrom nautobot.core.apps import HomePageGroup, HomePageItem, HomePagePanel\n\n\nlayout = (\n HomePagePanel(\n name=\"Organization\",\n weight=100,\n items=(\n HomePageItem(\n name=\"Sites\",\n link=\"dcim:site_list\",\n model=Site,\n description=\"Geographic location\",\n permissions=[\"dcim.view_site\"],\n weight=100,\n ),\n ),\n ),\n HomePagePanel(\n name=\"DCIM\",\n weight=200,\n items=(\n HomePageItem(\n name=\"Racks\",\n link=\"dcim:rack_list\",\n model=Rack,\n description=\"Equipment racks, optionally organized by group\",\n permissions=[\"dcim.view_rack\"],\n weight=100,\n ),\n HomePageItem(\n name=\"Device Types\",\n link=\"dcim:devicetype_list\",\n model=Rack,\n description=\"Physical hardware models by manufacturer\",\n permissions=[\"dcim.view_devicetype\"],\n weight=200,\n ),\n HomePageItem(\n name=\"Devices\",\n link=\"dcim:device_list\",\n model=Rack,\n description=\"Rack-mounted network equipment, servers, and other devices\",\n permissions=[\"dcim.view_device\"],\n weight=300,\n ),\n HomePageItem(\n name=\"Virtual Chassis\",\n link=\"dcim:virtualchassis_list\",\n model=Rack,\n permissions=[\"dcim.view_virtualchassis\"],\n description=\"Represents a set of devices which share a common control plane\",\n weight=400,\n ),\n HomePageGroup(\n name=\"Connections\",\n weight=500,\n items=(\n HomePageItem(\n name=\"Cables\",\n link=\"dcim:cable_list\",\n model=Cable,\n permissions=[\"dcim.view_cable\"],\n weight=100,\n ),\n HomePageItem(\n name=\"Interfaces\",\n link=\"dcim:interface_connections_list\",\n model=Interface,\n permissions=[\"dcim.view_interface\"],\n weight=200,\n ),\n HomePageItem(\n name=\"Console\",\n link=\"dcim:console_connections_list\",\n model=ConsolePort,\n permissions=[\"dcim.view_consoleport\", \"dcim.view_consoleserverport\"],\n weight=300,\n ),\n HomePageItem(\n name=\"Power\",\n link=\"dcim:power_connections_list\",\n model=PowerOutlet,\n permissions=[\"dcim.view_powerport\", \"dcim.view_poweroutlet\"],\n weight=400,\n ),\n ),\n ),\n ),\n ),\n HomePagePanel(\n name=\"Power\",\n weight=300,\n items=(\n HomePageItem(\n name=\"Power Feeds\",\n link=\"dcim:powerfeed_list\",\n model=PowerFeed,\n 
description=\"Electrical circuits delivering power from panels\",\n permissions=[\"dcim.view_powerfeed\"],\n weight=100,\n ),\n HomePageItem(\n name=\"Power Panels\",\n link=\"dcim:powerpanel_list\",\n model=PowerPanel,\n description=\"Electrical panels receiving utility power\",\n permissions=[\"dcim.view_powerpanel\"],\n weight=200,\n ),\n ),\n ),\n)\n", "path": "nautobot/dcim/homepage.py"}]}
| 1,865 | 860 |
gh_patches_debug_19669 | rasdani/github-patches | git_diff | PrefectHQ__prefect-10059 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
2.10.17 failure starting prefect agent on a windows machine
### First check
- [X] I added a descriptive title to this issue.
- [X] I used the GitHub search to find a similar issue and didn't find it.
- [X] I searched the Prefect documentation for this issue.
- [X] I checked that this issue is related to Prefect and not one of its dependencies.
### Bug summary
prefect agent won't start in 2.10.17 but it does in 2.10.16
### Reproduction
```python3
(venv) C:\Users\jkang1\PycharmProjects\site_analytics\venv\Scripts>prefect agent start --pool default-agent-pool
```
### Error
```python3
Traceback (most recent call last):
File "<frozen runpy>", line 198, in _run_module_as_main
File "<frozen runpy>", line 88, in _run_code
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Scripts\prefect.exe\__main__.py", line 4, in <module>
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\__init__.py", line 37, in <module>
from prefect.states import State
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\states.py", line 14, in <module>
from prefect.client.schemas import State as State
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\client\schemas\__init__.py", line 4, in <module>
from .actions import BlockTypeUpdate, StateCreate
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\client\schemas\actions.py", line 9, in <module>
import prefect.client.schemas.objects as objects
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\client\schemas\objects.py", line 20, in <module>
from prefect._internal.schemas.bases import ObjectBaseModel, PrefectBaseModel
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\_internal\schemas\bases.py", line 18, in <module>
from prefect._internal.compatibility.experimental import experiment_enabled
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\_internal\compatibility\experimental.py", line 19, in <module>
from prefect.settings import PREFECT_EXPERIMENTAL_WARN, SETTING_VARIABLES, Setting
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\settings.py", line 70, in <module>
from prefect._internal.compatibility.deprecated import generate_deprecation_message
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\_internal\compatibility\deprecated.py", line 21, in <module>
from prefect.utilities.importtools import (
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\utilities\importtools.py", line 17, in <module>
from prefect.utilities.filesystem import filename, is_local_path, tmpchdir
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\utilities\filesystem.py", line 5, in <module>
import resource
ModuleNotFoundError: No module named 'resource'
```
### Versions
```Text
If I try to call it I receive the same error:
(venv) C:\Users\jkang1\PycharmProjects\site_analytics\venv\Scripts>prefect version
Traceback (most recent call last):
File "<frozen runpy>", line 198, in _run_module_as_main
File "<frozen runpy>", line 88, in _run_code
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Scripts\prefect.exe\__main__.py", line 4, in <module>
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\__init__.py", line 37, in <module>
from prefect.states import State
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\states.py", line 14, in <module>
from prefect.client.schemas import State as State
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\client\schemas\__init__.py", line 4, in <module>
from .actions import BlockTypeUpdate, StateCreate
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\client\schemas\actions.py", line 9, in <module>
import prefect.client.schemas.objects as objects
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\client\schemas\objects.py", line 20, in <module>
from prefect._internal.schemas.bases import ObjectBaseModel, PrefectBaseModel
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\_internal\schemas\bases.py", line 18, in <module>
from prefect._internal.compatibility.experimental import experiment_enabled
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\_internal\compatibility\experimental.py", line 19, in <module>
from prefect.settings import PREFECT_EXPERIMENTAL_WARN, SETTING_VARIABLES, Setting
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\settings.py", line 70, in <module>
from prefect._internal.compatibility.deprecated import generate_deprecation_message
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\_internal\compatibility\deprecated.py", line 21, in <module>
from prefect.utilities.importtools import (
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\utilities\importtools.py", line 17, in <module>
from prefect.utilities.filesystem import filename, is_local_path, tmpchdir
File "C:\Users\jkang1\PycharmProjects\site_analytics\venv\Lib\site-packages\prefect\utilities\filesystem.py", line 5, in <module>
import resource
ModuleNotFoundError: No module named 'resource'
When I downgrade to 2.10.16 and run it:
(venv) C:\Users\jkang1\PycharmProjects\site_analytics\venv\Scripts>prefect version
Version: 2.10.16
API version: 0.8.4
Python version: 3.11.3
Git commit: 6cd7c3ee
Built: Tue, Jun 20, 2023 2:59 PM
OS/Arch: win32/AMD64
Profile: remote-desktop
Server type: cloud
```
### Additional context
_No response_
</issue>
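The traceback bottoms out in the module-level `import resource` inside `prefect/utilities/filesystem.py` (listed below); `resource` is a POSIX-only standard-library module, so simply importing Prefect fails on Windows, which matches the report that 2.10.16 still works. A minimal sketch of a platform-guarded lookup, essentially what the accompanying patch does, could look like this:

```python
# Sketch only: guard the POSIX-only `resource` import so Windows can still
# import the module, falling back to the CRT's stdio limit there.
import os


def get_open_file_limit() -> int:
    """Best-effort soft limit on open files for the current process."""
    try:
        if os.name == "nt":
            import ctypes

            return ctypes.cdll.ucrtbase._getmaxstdio()
        else:
            import resource

            soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
            return soft_limit
    except Exception:
        # ctypes can fail in several ways; return a safe default instead.
        return 200
```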
<code>
[start of src/prefect/utilities/filesystem.py]
1 """
2 Utilities for working with file systems
3 """
4 import os
5 import resource
6 import pathlib
7 from contextlib import contextmanager
8 from pathlib import Path, PureWindowsPath
9 from typing import Union
10
11 import fsspec
12 import pathspec
13 import prefect
14 from fsspec.core import OpenFile
15 from fsspec.implementations.local import LocalFileSystem
16
17
18 def create_default_ignore_file(path: str) -> bool:
19 """
20 Creates default ignore file in the provided path if one does not already exist; returns boolean specifying
21 whether a file was created.
22 """
23 path = pathlib.Path(path)
24 ignore_file = path / ".prefectignore"
25 if ignore_file.exists():
26 return False
27 default_file = pathlib.Path(prefect.__module_path__) / ".prefectignore"
28 with ignore_file.open(mode="w") as f:
29 f.write(default_file.read_text())
30 return True
31
32
33 def filter_files(
34 root: str = ".", ignore_patterns: list = None, include_dirs: bool = True
35 ) -> set:
36 """
37 This function accepts a root directory path and a list of file patterns to ignore, and returns
38 a list of files that excludes those that should be ignored.
39
40 The specification matches that of [.gitignore files](https://git-scm.com/docs/gitignore).
41 """
42 if ignore_patterns is None:
43 ignore_patterns = []
44 spec = pathspec.PathSpec.from_lines("gitwildmatch", ignore_patterns)
45 ignored_files = {p.path for p in spec.match_tree_entries(root)}
46 if include_dirs:
47 all_files = {p.path for p in pathspec.util.iter_tree_entries(root)}
48 else:
49 all_files = set(pathspec.util.iter_tree_files(root))
50 included_files = all_files - ignored_files
51 return included_files
52
53
54 @contextmanager
55 def tmpchdir(path: str):
56 """
57 Change current-working directories for the duration of the context
58 """
59 path = os.path.abspath(path)
60 if os.path.isfile(path) or (not os.path.exists(path) and not path.endswith("/")):
61 path = os.path.dirname(path)
62
63 owd = os.getcwd()
64
65 try:
66 os.chdir(path)
67 yield path
68 finally:
69 os.chdir(owd)
70
71
72 def filename(path: str) -> str:
73 """Extract the file name from a path with remote file system support"""
74 try:
75 of: OpenFile = fsspec.open(path)
76 sep = of.fs.sep
77 except (ImportError, AttributeError):
78 sep = "\\" if "\\" in path else "/"
79 return path.split(sep)[-1]
80
81
82 def is_local_path(path: Union[str, pathlib.Path, OpenFile]):
83 """Check if the given path points to a local or remote file system"""
84 if isinstance(path, str):
85 try:
86 of = fsspec.open(path)
87 except ImportError:
88 # The path is a remote file system that uses a lib that is not installed
89 return False
90 elif isinstance(path, pathlib.Path):
91 return True
92 elif isinstance(path, OpenFile):
93 of = path
94 else:
95 raise TypeError(f"Invalid path of type {type(path).__name__!r}")
96
97 return type(of.fs) == LocalFileSystem
98
99
100 def to_display_path(
101 path: Union[pathlib.Path, str], relative_to: Union[pathlib.Path, str] = None
102 ) -> str:
103 """
104 Convert a path to a displayable path. The absolute path or relative path to the
105 current (or given) directory will be returned, whichever is shorter.
106 """
107 path, relative_to = (
108 pathlib.Path(path).resolve(),
109 pathlib.Path(relative_to or ".").resolve(),
110 )
111 relative_path = str(path.relative_to(relative_to))
112 absolute_path = str(path)
113 return relative_path if len(relative_path) < len(absolute_path) else absolute_path
114
115
116 def relative_path_to_current_platform(path_str: str) -> Path:
117 """
118 Converts a relative path generated on any platform to a relative path for the
119 current platform.
120 """
121
122 return Path(PureWindowsPath(path_str).as_posix())
123
124
125 def get_open_file_limit():
126 soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
127 return soft_limit
128
[end of src/prefect/utilities/filesystem.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/prefect/utilities/filesystem.py b/src/prefect/utilities/filesystem.py
--- a/src/prefect/utilities/filesystem.py
+++ b/src/prefect/utilities/filesystem.py
@@ -2,7 +2,6 @@
Utilities for working with file systems
"""
import os
-import resource
import pathlib
from contextlib import contextmanager
from pathlib import Path, PureWindowsPath
@@ -122,6 +121,21 @@
return Path(PureWindowsPath(path_str).as_posix())
-def get_open_file_limit():
- soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
- return soft_limit
+def get_open_file_limit() -> int:
+ """Get the maximum number of open files allowed for the current process"""
+
+ try:
+ if os.name == "nt":
+ import ctypes
+
+ return ctypes.cdll.ucrtbase._getmaxstdio()
+ else:
+ import resource
+
+ soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
+ return soft_limit
+ except Exception:
+ # Catch all exceptions, as ctypes can raise several errors
+ # depending on what went wrong. Return a safe default if we
+ # can't get the limit from the OS.
+ return 200
|
{"golden_diff": "diff --git a/src/prefect/utilities/filesystem.py b/src/prefect/utilities/filesystem.py\n--- a/src/prefect/utilities/filesystem.py\n+++ b/src/prefect/utilities/filesystem.py\n@@ -2,7 +2,6 @@\n Utilities for working with file systems\n \"\"\"\n import os\n-import resource\n import pathlib\n from contextlib import contextmanager\n from pathlib import Path, PureWindowsPath\n@@ -122,6 +121,21 @@\n return Path(PureWindowsPath(path_str).as_posix())\n \n \n-def get_open_file_limit():\n- soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)\n- return soft_limit\n+def get_open_file_limit() -> int:\n+ \"\"\"Get the maximum number of open files allowed for the current process\"\"\"\n+\n+ try:\n+ if os.name == \"nt\":\n+ import ctypes\n+\n+ return ctypes.cdll.ucrtbase._getmaxstdio()\n+ else:\n+ import resource\n+\n+ soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)\n+ return soft_limit\n+ except Exception:\n+ # Catch all exceptions, as ctypes can raise several errors\n+ # depending on what went wrong. Return a safe default if we\n+ # can't get the limit from the OS.\n+ return 200\n", "issue": "2.10.17 failure starting prefect agent on a windows machine\n### First check\n\n- [X] I added a descriptive title to this issue.\n- [X] I used the GitHub search to find a similar issue and didn't find it.\n- [X] I searched the Prefect documentation for this issue.\n- [X] I checked that this issue is related to Prefect and not one of its dependencies.\n\n### Bug summary\n\nprefect agent won't start in 2.10.17 but it does in 2.10.16\n\n### Reproduction\n\n```python3\n(venv) C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Scripts>prefect agent start --pool default-agent-pool\n```\n\n\n### Error\n\n```python3\nTraceback (most recent call last):\r\n File \"<frozen runpy>\", line 198, in _run_module_as_main\r\n File \"<frozen runpy>\", line 88, in _run_code\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Scripts\\prefect.exe\\__main__.py\", line 4, in <module>\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\__init__.py\", line 37, in <module>\r\n from prefect.states import State\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\states.py\", line 14, in <module>\r\n from prefect.client.schemas import State as State\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\client\\schemas\\__init__.py\", line 4, in <module>\r\n from .actions import BlockTypeUpdate, StateCreate\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\client\\schemas\\actions.py\", line 9, in <module>\r\n import prefect.client.schemas.objects as objects\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\client\\schemas\\objects.py\", line 20, in <module>\r\n from prefect._internal.schemas.bases import ObjectBaseModel, PrefectBaseModel\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\_internal\\schemas\\bases.py\", line 18, in <module>\r\n from prefect._internal.compatibility.experimental import experiment_enabled\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\_internal\\compatibility\\experimental.py\", line 19, in <module>\r\n from prefect.settings import PREFECT_EXPERIMENTAL_WARN, SETTING_VARIABLES, Setting\r\n File 
\"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\settings.py\", line 70, in <module>\r\n from prefect._internal.compatibility.deprecated import generate_deprecation_message\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\_internal\\compatibility\\deprecated.py\", line 21, in <module>\r\n from prefect.utilities.importtools import (\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\utilities\\importtools.py\", line 17, in <module>\r\n from prefect.utilities.filesystem import filename, is_local_path, tmpchdir\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\utilities\\filesystem.py\", line 5, in <module>\r\n import resource\r\nModuleNotFoundError: No module named 'resource'\n```\n\n\n### Versions\n\n```Text\nIf I try to call it I receive the same error:\r\n\r\n(venv) C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Scripts>prefect version\r\nTraceback (most recent call last):\r\n File \"<frozen runpy>\", line 198, in _run_module_as_main\r\n File \"<frozen runpy>\", line 88, in _run_code\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Scripts\\prefect.exe\\__main__.py\", line 4, in <module>\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\__init__.py\", line 37, in <module>\r\n from prefect.states import State\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\states.py\", line 14, in <module>\r\n from prefect.client.schemas import State as State\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\client\\schemas\\__init__.py\", line 4, in <module>\r\n from .actions import BlockTypeUpdate, StateCreate\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\client\\schemas\\actions.py\", line 9, in <module>\r\n import prefect.client.schemas.objects as objects\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\client\\schemas\\objects.py\", line 20, in <module>\r\n from prefect._internal.schemas.bases import ObjectBaseModel, PrefectBaseModel\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\_internal\\schemas\\bases.py\", line 18, in <module>\r\n from prefect._internal.compatibility.experimental import experiment_enabled\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\_internal\\compatibility\\experimental.py\", line 19, in <module>\r\n from prefect.settings import PREFECT_EXPERIMENTAL_WARN, SETTING_VARIABLES, Setting\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\settings.py\", line 70, in <module>\r\n from prefect._internal.compatibility.deprecated import generate_deprecation_message\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\_internal\\compatibility\\deprecated.py\", line 21, in <module>\r\n from prefect.utilities.importtools import (\r\n File \"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\utilities\\importtools.py\", line 17, in <module>\r\n from prefect.utilities.filesystem import filename, is_local_path, tmpchdir\r\n File 
\"C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Lib\\site-packages\\prefect\\utilities\\filesystem.py\", line 5, in <module>\r\n import resource\r\nModuleNotFoundError: No module named 'resource'\r\n\r\nWhen I downgrade to 2.10.16 and run it:\r\n(venv) C:\\Users\\jkang1\\PycharmProjects\\site_analytics\\venv\\Scripts>prefect version\r\nVersion: 2.10.16\r\nAPI version: 0.8.4\r\nPython version: 3.11.3\r\nGit commit: 6cd7c3ee\r\nBuilt: Tue, Jun 20, 2023 2:59 PM\r\nOS/Arch: win32/AMD64\r\nProfile: remote-desktop\r\nServer type: cloud\n```\n\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "\"\"\"\nUtilities for working with file systems\n\"\"\"\nimport os\nimport resource\nimport pathlib\nfrom contextlib import contextmanager\nfrom pathlib import Path, PureWindowsPath\nfrom typing import Union\n\nimport fsspec\nimport pathspec\nimport prefect\nfrom fsspec.core import OpenFile\nfrom fsspec.implementations.local import LocalFileSystem\n\n\ndef create_default_ignore_file(path: str) -> bool:\n \"\"\"\n Creates default ignore file in the provided path if one does not already exist; returns boolean specifying\n whether a file was created.\n \"\"\"\n path = pathlib.Path(path)\n ignore_file = path / \".prefectignore\"\n if ignore_file.exists():\n return False\n default_file = pathlib.Path(prefect.__module_path__) / \".prefectignore\"\n with ignore_file.open(mode=\"w\") as f:\n f.write(default_file.read_text())\n return True\n\n\ndef filter_files(\n root: str = \".\", ignore_patterns: list = None, include_dirs: bool = True\n) -> set:\n \"\"\"\n This function accepts a root directory path and a list of file patterns to ignore, and returns\n a list of files that excludes those that should be ignored.\n\n The specification matches that of [.gitignore files](https://git-scm.com/docs/gitignore).\n \"\"\"\n if ignore_patterns is None:\n ignore_patterns = []\n spec = pathspec.PathSpec.from_lines(\"gitwildmatch\", ignore_patterns)\n ignored_files = {p.path for p in spec.match_tree_entries(root)}\n if include_dirs:\n all_files = {p.path for p in pathspec.util.iter_tree_entries(root)}\n else:\n all_files = set(pathspec.util.iter_tree_files(root))\n included_files = all_files - ignored_files\n return included_files\n\n\n@contextmanager\ndef tmpchdir(path: str):\n \"\"\"\n Change current-working directories for the duration of the context\n \"\"\"\n path = os.path.abspath(path)\n if os.path.isfile(path) or (not os.path.exists(path) and not path.endswith(\"/\")):\n path = os.path.dirname(path)\n\n owd = os.getcwd()\n\n try:\n os.chdir(path)\n yield path\n finally:\n os.chdir(owd)\n\n\ndef filename(path: str) -> str:\n \"\"\"Extract the file name from a path with remote file system support\"\"\"\n try:\n of: OpenFile = fsspec.open(path)\n sep = of.fs.sep\n except (ImportError, AttributeError):\n sep = \"\\\\\" if \"\\\\\" in path else \"/\"\n return path.split(sep)[-1]\n\n\ndef is_local_path(path: Union[str, pathlib.Path, OpenFile]):\n \"\"\"Check if the given path points to a local or remote file system\"\"\"\n if isinstance(path, str):\n try:\n of = fsspec.open(path)\n except ImportError:\n # The path is a remote file system that uses a lib that is not installed\n return False\n elif isinstance(path, pathlib.Path):\n return True\n elif isinstance(path, OpenFile):\n of = path\n else:\n raise TypeError(f\"Invalid path of type {type(path).__name__!r}\")\n\n return type(of.fs) == LocalFileSystem\n\n\ndef to_display_path(\n path: Union[pathlib.Path, str], relative_to: 
Union[pathlib.Path, str] = None\n) -> str:\n \"\"\"\n Convert a path to a displayable path. The absolute path or relative path to the\n current (or given) directory will be returned, whichever is shorter.\n \"\"\"\n path, relative_to = (\n pathlib.Path(path).resolve(),\n pathlib.Path(relative_to or \".\").resolve(),\n )\n relative_path = str(path.relative_to(relative_to))\n absolute_path = str(path)\n return relative_path if len(relative_path) < len(absolute_path) else absolute_path\n\n\ndef relative_path_to_current_platform(path_str: str) -> Path:\n \"\"\"\n Converts a relative path generated on any platform to a relative path for the\n current platform.\n \"\"\"\n\n return Path(PureWindowsPath(path_str).as_posix())\n\n\ndef get_open_file_limit():\n soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)\n return soft_limit\n", "path": "src/prefect/utilities/filesystem.py"}]}
| 3,523 | 298 |
gh_patches_debug_15188 | rasdani/github-patches | git_diff | celery__celery-4870 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to save pickled objects with couchbase as result backend
Hi it seems like when I attempt to process groups of chords, the couchbase result backend is consistently failing to unlock the chord when reading from the db:
`celery.chord_unlock[e3139ae5-a67d-4f0c-8c54-73b1e19433d2] retry: Retry in 1s: ValueFormatError()`
This behavior does not occur with the redis result backend, i can switch between them and see that the error unlocking only occurs on couchbase.
## Steps to reproduce
Attempt to process a chord with couchbase backend using pickle serialization.
## Expected behavior
Chords process correctly, and resulting data is fed to the next task
## Actual behavior
Celery is unable to unlock the chord from the result backend
## Celery project info:
```
celery -A ipaassteprunner report
software -> celery:4.1.0 (latentcall) kombu:4.1.0 py:2.7.10
billiard:3.5.0.3 py-amqp:2.2.2
platform -> system:Darwin arch:64bit imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:pyamqp results:couchbase://isadmin:**@localhost:8091/tasks
task_serializer: 'pickle'
result_serializer: 'pickle'
dbconfig: <ipaascommon.ipaas_config.DatabaseConfig object at 0x10fbbfe10>
db_pass: u'********'
IpaasConfig: <class 'ipaascommon.ipaas_config.IpaasConfig'>
imports:
('ipaassteprunner.tasks',)
worker_redirect_stdouts: False
DatabaseConfig: u'********'
db_port: '8091'
ipaas_constants: <module 'ipaascommon.ipaas_constants' from '/Library/Python/2.7/site-packages/ipaascommon/ipaas_constants.pyc'>
enable_utc: True
db_user: 'isadmin'
db_host: 'localhost'
result_backend: u'couchbase://isadmin:********@localhost:8091/tasks'
result_expires: 3600
iconfig: <ipaascommon.ipaas_config.IpaasConfig object at 0x10fbbfd90>
broker_url: u'amqp://guest:********@localhost:5672//'
task_bucket: 'tasks'
accept_content: ['pickle']
```
### Additional Debug output
```
[2017-12-13 15:39:57,860: INFO/MainProcess] Received task: celery.chord_unlock[e3139ae5-a67d-4f0c-8c54-73b1e19433d2] ETA:[2017-12-13 20:39:58.853535+00:00]
[2017-12-13 15:39:57,861: DEBUG/MainProcess] basic.qos: prefetch_count->27
[2017-12-13 15:39:58,859: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x10b410b90> (args:('celery.chord_unlock', 'e3139ae5-a67d-4f0c-8c54-73b1e19433d2', {'origin': 'gen53678@silo2460', 'lang': 'py', 'task': 'celery.chord_unlock', 'group': None, 'root_id': '0acd3e0d-7532-445c-8916-b5fc8a6395ab', u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'celery', u'exchange': u''}, 'expires': None, u'correlation_id': 'e3139ae5-a67d-4f0c-8c54-73b1e19433d2', 'retries': 311, 'timelimit': [None, None], 'argsrepr': "('90c64bef-21ba-42f9-be75-fdd724375a7a', {'chord_size': 2, 'task': 'ipaassteprunner.tasks.transfer_data', 'subtask_type': None, 'kwargs': {}, 'args': (), 'options': {'chord_size': None, 'chain': [...], 'task_id': '9c6b5e1c-2089-4db7-9590-117aeaf782c7', 'root_id': '0acd3e0d-7532-445c-8916-b5fc8a6395ab', 'parent_id': 'c27c9565-19a6-4683-8180-60f0c25007e9', 'reply_to': '0a58093c-6fdd-3458-9a34-7d5e094ac6a8'}, 'immutable': False})", 'eta': '2017-12-13T20:39:58.853535+00:00', 'parent_id': 'c27c9565-19a6-4683-8180-60f0c25007e9', u'reply_to':... kwargs:{})
[2017-12-13 15:40:00,061: DEBUG/MainProcess] basic.qos: prefetch_count->26
[2017-12-13 15:40:00,065: DEBUG/MainProcess] Task accepted: celery.chord_unlock[e3139ae5-a67d-4f0c-8c54-73b1e19433d2] pid:53679
[2017-12-13 15:40:00,076: INFO/ForkPoolWorker-6] Task celery.chord_unlock[e3139ae5-a67d-4f0c-8c54-73b1e19433d2] retry: Retry in 1s: ValueFormatError()
```
### Stack trace from chord unlocking failure
```python
Traceback (most recent call last):
File "/Library/Python/2.7/site-packages/celery/app/trace.py", line 374, in trace_task
R = retval = fun(*args, **kwargs)
File "/Library/Python/2.7/site-packages/celery/app/trace.py", line 629, in __protected_call__
return self.run(*args, **kwargs)
File "/Library/Python/2.7/site-packages/celery/app/builtins.py", line 75, in unlock_chord
raise self.retry(countdown=interval, max_retries=max_retries)
File "/Library/Python/2.7/site-packages/celery/app/task.py", line 689, in retry
raise ret
Retry: Retry in 1s
```
</issue>
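The `ValueFormatError` in the retry loop is raised by the Couchbase Python SDK rather than by Celery: with `result_serializer: 'pickle'` the backend hands `Connection.set()` a pickled blob that the SDK's default JSON value format cannot encode. A small illustrative repro against the SDK (bucket name and key are invented for the example):

```python
# Illustrative sketch, not the shipped backend code: storing a pickled payload
# with the default (JSON) format raises ValueFormatError, while an explicit
# format such as FMT_AUTO lets the SDK pick a binary-safe encoding.
import pickle

from couchbase import FMT_AUTO
from couchbase.connection import Connection

conn = Connection(bucket="tasks", host="localhost", port=8091)
blob = pickle.dumps({"status": "SUCCESS", "result": 42})

# conn.set("celery-task-meta-example", blob)  # fails under the default format
conn.set("celery-task-meta-example", blob, ttl=3600, format=FMT_AUTO)
print(conn.get("celery-task-meta-example").value[:10])
```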
<code>
[start of celery/backends/couchbase.py]
1 # -*- coding: utf-8 -*-
2 """Couchbase result store backend."""
3 from __future__ import absolute_import, unicode_literals
4
5 import logging
6
7 from kombu.utils.encoding import str_t
8 from kombu.utils.url import _parse_url
9
10 from celery.exceptions import ImproperlyConfigured
11
12 from .base import KeyValueStoreBackend
13
14 try:
15 import couchbase_ffi # noqa
16 except ImportError:
17 pass # noqa
18 try:
19 from couchbase import Couchbase
20 from couchbase.connection import Connection
21 from couchbase.exceptions import NotFoundError
22 except ImportError:
23 Couchbase = Connection = NotFoundError = None # noqa
24
25 __all__ = ('CouchbaseBackend',)
26
27
28 class CouchbaseBackend(KeyValueStoreBackend):
29 """Couchbase backend.
30
31 Raises:
32 celery.exceptions.ImproperlyConfigured:
33 if module :pypi:`couchbase` is not available.
34 """
35
36 bucket = 'default'
37 host = 'localhost'
38 port = 8091
39 username = None
40 password = None
41 quiet = False
42 supports_autoexpire = True
43
44 timeout = 2.5
45
46 # Use str as couchbase key not bytes
47 key_t = str_t
48
49 def __init__(self, url=None, *args, **kwargs):
50 kwargs.setdefault('expires_type', int)
51 super(CouchbaseBackend, self).__init__(*args, **kwargs)
52 self.url = url
53
54 if Couchbase is None:
55 raise ImproperlyConfigured(
56 'You need to install the couchbase library to use the '
57 'Couchbase backend.',
58 )
59
60 uhost = uport = uname = upass = ubucket = None
61 if url:
62 _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)
63 ubucket = ubucket.strip('/') if ubucket else None
64
65 config = self.app.conf.get('couchbase_backend_settings', None)
66 if config is not None:
67 if not isinstance(config, dict):
68 raise ImproperlyConfigured(
69 'Couchbase backend settings should be grouped in a dict',
70 )
71 else:
72 config = {}
73
74 self.host = uhost or config.get('host', self.host)
75 self.port = int(uport or config.get('port', self.port))
76 self.bucket = ubucket or config.get('bucket', self.bucket)
77 self.username = uname or config.get('username', self.username)
78 self.password = upass or config.get('password', self.password)
79
80 self._connection = None
81
82 def _get_connection(self):
83 """Connect to the Couchbase server."""
84 if self._connection is None:
85 kwargs = {'bucket': self.bucket, 'host': self.host}
86
87 if self.port:
88 kwargs.update({'port': self.port})
89 if self.username:
90 kwargs.update({'username': self.username})
91 if self.password:
92 kwargs.update({'password': self.password})
93
94 logging.debug('couchbase settings %r', kwargs)
95 self._connection = Connection(**kwargs)
96 return self._connection
97
98 @property
99 def connection(self):
100 return self._get_connection()
101
102 def get(self, key):
103 try:
104 return self.connection.get(key).value
105 except NotFoundError:
106 return None
107
108 def set(self, key, value):
109 self.connection.set(key, value, ttl=self.expires)
110
111 def mget(self, keys):
112 return [self.get(key) for key in keys]
113
114 def delete(self, key):
115 self.connection.delete(key)
116
[end of celery/backends/couchbase.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/celery/backends/couchbase.py b/celery/backends/couchbase.py
--- a/celery/backends/couchbase.py
+++ b/celery/backends/couchbase.py
@@ -19,6 +19,7 @@
from couchbase import Couchbase
from couchbase.connection import Connection
from couchbase.exceptions import NotFoundError
+ from couchbase import FMT_AUTO
except ImportError:
Couchbase = Connection = NotFoundError = None # noqa
@@ -106,7 +107,7 @@
return None
def set(self, key, value):
- self.connection.set(key, value, ttl=self.expires)
+ self.connection.set(key, value, ttl=self.expires, format=FMT_AUTO)
def mget(self, keys):
return [self.get(key) for key in keys]
|
{"golden_diff": "diff --git a/celery/backends/couchbase.py b/celery/backends/couchbase.py\n--- a/celery/backends/couchbase.py\n+++ b/celery/backends/couchbase.py\n@@ -19,6 +19,7 @@\n from couchbase import Couchbase\n from couchbase.connection import Connection\n from couchbase.exceptions import NotFoundError\n+ from couchbase import FMT_AUTO\n except ImportError:\n Couchbase = Connection = NotFoundError = None # noqa\n \n@@ -106,7 +107,7 @@\n return None\n \n def set(self, key, value):\n- self.connection.set(key, value, ttl=self.expires)\n+ self.connection.set(key, value, ttl=self.expires, format=FMT_AUTO)\n \n def mget(self, keys):\n return [self.get(key) for key in keys]\n", "issue": "Unable to save pickled objects with couchbase as result backend\nHi it seems like when I attempt to process groups of chords, the couchbase result backend is consistently failing to unlock the chord when reading from the db:\r\n\r\n`celery.chord_unlock[e3139ae5-a67d-4f0c-8c54-73b1e19433d2] retry: Retry in 1s: ValueFormatError()`\r\n\r\nThis behavior does not occur with the redis result backend, i can switch between them and see that the error unlocking only occurs on couchbase.\r\n\r\n## Steps to reproduce\r\nAttempt to process a chord with couchbase backend using pickle serialization.\r\n\r\n## Expected behavior\r\nChords process correctly, and resulting data is fed to the next task\r\n\r\n## Actual behavior\r\nCelery is unable to unlock the chord from the result backend\r\n\r\n## Celery project info: \r\n```\r\ncelery -A ipaassteprunner report\r\n\r\nsoftware -> celery:4.1.0 (latentcall) kombu:4.1.0 py:2.7.10\r\n billiard:3.5.0.3 py-amqp:2.2.2\r\nplatform -> system:Darwin arch:64bit imp:CPython\r\nloader -> celery.loaders.app.AppLoader\r\nsettings -> transport:pyamqp results:couchbase://isadmin:**@localhost:8091/tasks\r\n\r\ntask_serializer: 'pickle'\r\nresult_serializer: 'pickle'\r\ndbconfig: <ipaascommon.ipaas_config.DatabaseConfig object at 0x10fbbfe10>\r\ndb_pass: u'********'\r\nIpaasConfig: <class 'ipaascommon.ipaas_config.IpaasConfig'>\r\nimports:\r\n ('ipaassteprunner.tasks',)\r\nworker_redirect_stdouts: False\r\nDatabaseConfig: u'********'\r\ndb_port: '8091'\r\nipaas_constants: <module 'ipaascommon.ipaas_constants' from '/Library/Python/2.7/site-packages/ipaascommon/ipaas_constants.pyc'>\r\nenable_utc: True\r\ndb_user: 'isadmin'\r\ndb_host: 'localhost'\r\nresult_backend: u'couchbase://isadmin:********@localhost:8091/tasks'\r\nresult_expires: 3600\r\niconfig: <ipaascommon.ipaas_config.IpaasConfig object at 0x10fbbfd90>\r\nbroker_url: u'amqp://guest:********@localhost:5672//'\r\ntask_bucket: 'tasks'\r\naccept_content: ['pickle']\r\n```\r\n### Additional Debug output\r\n```\r\n[2017-12-13 15:39:57,860: INFO/MainProcess] Received task: celery.chord_unlock[e3139ae5-a67d-4f0c-8c54-73b1e19433d2] ETA:[2017-12-13 20:39:58.853535+00:00] \r\n[2017-12-13 15:39:57,861: DEBUG/MainProcess] basic.qos: prefetch_count->27\r\n[2017-12-13 15:39:58,859: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x10b410b90> (args:('celery.chord_unlock', 'e3139ae5-a67d-4f0c-8c54-73b1e19433d2', {'origin': 'gen53678@silo2460', 'lang': 'py', 'task': 'celery.chord_unlock', 'group': None, 'root_id': '0acd3e0d-7532-445c-8916-b5fc8a6395ab', u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'celery', u'exchange': u''}, 'expires': None, u'correlation_id': 'e3139ae5-a67d-4f0c-8c54-73b1e19433d2', 'retries': 311, 'timelimit': [None, None], 'argsrepr': \"('90c64bef-21ba-42f9-be75-fdd724375a7a', 
{'chord_size': 2, 'task': 'ipaassteprunner.tasks.transfer_data', 'subtask_type': None, 'kwargs': {}, 'args': (), 'options': {'chord_size': None, 'chain': [...], 'task_id': '9c6b5e1c-2089-4db7-9590-117aeaf782c7', 'root_id': '0acd3e0d-7532-445c-8916-b5fc8a6395ab', 'parent_id': 'c27c9565-19a6-4683-8180-60f0c25007e9', 'reply_to': '0a58093c-6fdd-3458-9a34-7d5e094ac6a8'}, 'immutable': False})\", 'eta': '2017-12-13T20:39:58.853535+00:00', 'parent_id': 'c27c9565-19a6-4683-8180-60f0c25007e9', u'reply_to':... kwargs:{})\r\n[2017-12-13 15:40:00,061: DEBUG/MainProcess] basic.qos: prefetch_count->26\r\n[2017-12-13 15:40:00,065: DEBUG/MainProcess] Task accepted: celery.chord_unlock[e3139ae5-a67d-4f0c-8c54-73b1e19433d2] pid:53679\r\n[2017-12-13 15:40:00,076: INFO/ForkPoolWorker-6] Task celery.chord_unlock[e3139ae5-a67d-4f0c-8c54-73b1e19433d2] retry: Retry in 1s: ValueFormatError()\r\n```\r\n\r\n### Stack trace from chord unlocking failure\r\n```python\r\nTraceback (most recent call last):\r\n File \"/Library/Python/2.7/site-packages/celery/app/trace.py\", line 374, in trace_task\r\n R = retval = fun(*args, **kwargs)\r\n File \"/Library/Python/2.7/site-packages/celery/app/trace.py\", line 629, in __protected_call__\r\n return self.run(*args, **kwargs)\r\n File \"/Library/Python/2.7/site-packages/celery/app/builtins.py\", line 75, in unlock_chord\r\n raise self.retry(countdown=interval, max_retries=max_retries)\r\n File \"/Library/Python/2.7/site-packages/celery/app/task.py\", line 689, in retry\r\n raise ret\r\nRetry: Retry in 1s\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Couchbase result store backend.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport logging\n\nfrom kombu.utils.encoding import str_t\nfrom kombu.utils.url import _parse_url\n\nfrom celery.exceptions import ImproperlyConfigured\n\nfrom .base import KeyValueStoreBackend\n\ntry:\n import couchbase_ffi # noqa\nexcept ImportError:\n pass # noqa\ntry:\n from couchbase import Couchbase\n from couchbase.connection import Connection\n from couchbase.exceptions import NotFoundError\nexcept ImportError:\n Couchbase = Connection = NotFoundError = None # noqa\n\n__all__ = ('CouchbaseBackend',)\n\n\nclass CouchbaseBackend(KeyValueStoreBackend):\n \"\"\"Couchbase backend.\n\n Raises:\n celery.exceptions.ImproperlyConfigured:\n if module :pypi:`couchbase` is not available.\n \"\"\"\n\n bucket = 'default'\n host = 'localhost'\n port = 8091\n username = None\n password = None\n quiet = False\n supports_autoexpire = True\n\n timeout = 2.5\n\n # Use str as couchbase key not bytes\n key_t = str_t\n\n def __init__(self, url=None, *args, **kwargs):\n kwargs.setdefault('expires_type', int)\n super(CouchbaseBackend, self).__init__(*args, **kwargs)\n self.url = url\n\n if Couchbase is None:\n raise ImproperlyConfigured(\n 'You need to install the couchbase library to use the '\n 'Couchbase backend.',\n )\n\n uhost = uport = uname = upass = ubucket = None\n if url:\n _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)\n ubucket = ubucket.strip('/') if ubucket else None\n\n config = self.app.conf.get('couchbase_backend_settings', None)\n if config is not None:\n if not isinstance(config, dict):\n raise ImproperlyConfigured(\n 'Couchbase backend settings should be grouped in a dict',\n )\n else:\n config = {}\n\n self.host = uhost or config.get('host', self.host)\n self.port = int(uport or config.get('port', self.port))\n self.bucket = ubucket or config.get('bucket', self.bucket)\n self.username = uname or 
config.get('username', self.username)\n self.password = upass or config.get('password', self.password)\n\n self._connection = None\n\n def _get_connection(self):\n \"\"\"Connect to the Couchbase server.\"\"\"\n if self._connection is None:\n kwargs = {'bucket': self.bucket, 'host': self.host}\n\n if self.port:\n kwargs.update({'port': self.port})\n if self.username:\n kwargs.update({'username': self.username})\n if self.password:\n kwargs.update({'password': self.password})\n\n logging.debug('couchbase settings %r', kwargs)\n self._connection = Connection(**kwargs)\n return self._connection\n\n @property\n def connection(self):\n return self._get_connection()\n\n def get(self, key):\n try:\n return self.connection.get(key).value\n except NotFoundError:\n return None\n\n def set(self, key, value):\n self.connection.set(key, value, ttl=self.expires)\n\n def mget(self, keys):\n return [self.get(key) for key in keys]\n\n def delete(self, key):\n self.connection.delete(key)\n", "path": "celery/backends/couchbase.py"}]}
| 3,268 | 191 |
gh_patches_debug_33144 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-358 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Instrument Starlette authentication
Starlette has a basic authentication framework: https://www.starlette.io/authentication/
We should probably copy what we do for Django here, tagging user ID's/ display names on traces.
</issue>
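For background, Starlette's `AuthenticationMiddleware` puts the authenticated user on the ASGI scope, so the integration can read a display name without importing anything from the auth framework. A minimal sketch of the tagging step, mirroring the accompanying patch (the helper name here is invented):

```python
# Sketch: read the user installed by starlette.middleware.authentication and
# tag the trace, staying silent when no auth middleware is configured.
from scout_apm.core.tracked_request import TrackedRequest


def tag_username(scope: dict, tracked_request: TrackedRequest) -> None:
    # Avoid request.user: it raises AssertionError when auth middleware didn't run.
    try:
        username = scope["user"].display_name
    except (KeyError, AttributeError):
        pass  # anonymous request or AuthenticationMiddleware not installed
    else:
        tracked_request.tag("username", username)
```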
<code>
[start of src/scout_apm/async_/starlette.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import wrapt
5 from starlette.background import BackgroundTask
6 from starlette.requests import Request
7
8 import scout_apm.core
9 from scout_apm.core.tracked_request import TrackedRequest
10 from scout_apm.core.web_requests import (
11 create_filtered_path,
12 ignore_path,
13 track_amazon_request_queue_time,
14 track_request_queue_time,
15 )
16
17
18 class ScoutMiddleware:
19 def __init__(self, app):
20 self.app = app
21 installed = scout_apm.core.install()
22 self._do_nothing = not installed
23 if installed:
24 install_background_instrumentation()
25
26 async def __call__(self, scope, receive, send):
27 if self._do_nothing or scope["type"] != "http":
28 await self.app(scope, receive, send)
29 return
30
31 request = Request(scope)
32 tracked_request = TrackedRequest.instance()
33 # Can't name controller until post-routing - see final clause
34 controller_span = tracked_request.start_span(operation="Controller/Unknown")
35
36 tracked_request.tag(
37 "path",
38 create_filtered_path(request.url.path, request.query_params.multi_items()),
39 )
40 if ignore_path(request.url.path):
41 tracked_request.tag("ignore_transaction", True)
42
43 user_ip = (
44 request.headers.get("x-forwarded-for", default="").split(",")[0]
45 or request.headers.get("client-ip", default="").split(",")[0]
46 or request.client.host
47 )
48 tracked_request.tag("user_ip", user_ip)
49
50 queue_time = request.headers.get(
51 "x-queue-start", default=""
52 ) or request.headers.get("x-request-start", default="")
53 tracked_queue_time = track_request_queue_time(queue_time, tracked_request)
54 if not tracked_queue_time:
55 amazon_queue_time = request.headers.get("x-amzn-trace-id", default="")
56 track_amazon_request_queue_time(amazon_queue_time, tracked_request)
57
58 def rename_controller_span_from_endpoint():
59 if "endpoint" in scope:
60 # Rename top span
61 endpoint = scope["endpoint"]
62 controller_span.operation = "Controller/{}.{}".format(
63 endpoint.__module__, endpoint.__qualname__
64 )
65 tracked_request.is_real_request = True
66
67 async def wrapped_send(data):
68 # Finish HTTP span when body finishes sending, not later (e.g.
69 # after background tasks)
70 if data.get("type", None) == "http.response.body" and not data.get(
71 "more_body", False
72 ):
73 rename_controller_span_from_endpoint()
74 tracked_request.stop_span()
75 return await send(data)
76
77 try:
78 await self.app(scope, receive, wrapped_send)
79 except Exception as exc:
80 tracked_request.tag("error", "true")
81 raise exc
82 finally:
83 if tracked_request.end_time is None:
84 rename_controller_span_from_endpoint()
85 tracked_request.stop_span()
86
87
88 background_instrumentation_installed = False
89
90
91 def install_background_instrumentation():
92 global background_instrumentation_installed
93 if background_instrumentation_installed:
94 return
95 background_instrumentation_installed = True
96
97 @wrapt.decorator
98 async def wrapped_background_call(wrapped, instance, args, kwargs):
99 tracked_request = TrackedRequest.instance()
100 tracked_request.is_real_request = True
101 tracked_request.start_span(
102 operation="Job/{}.{}".format(
103 instance.func.__module__, instance.func.__qualname__
104 )
105 )
106 try:
107 return await wrapped(*args, **kwargs)
108 finally:
109 tracked_request.stop_span()
110
111 BackgroundTask.__call__ = wrapped_background_call(BackgroundTask.__call__)
112
[end of src/scout_apm/async_/starlette.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/scout_apm/async_/starlette.py b/src/scout_apm/async_/starlette.py
--- a/src/scout_apm/async_/starlette.py
+++ b/src/scout_apm/async_/starlette.py
@@ -55,7 +55,7 @@
amazon_queue_time = request.headers.get("x-amzn-trace-id", default="")
track_amazon_request_queue_time(amazon_queue_time, tracked_request)
- def rename_controller_span_from_endpoint():
+ def grab_extra_data():
if "endpoint" in scope:
# Rename top span
endpoint = scope["endpoint"]
@@ -64,13 +64,23 @@
)
tracked_request.is_real_request = True
+ # From AuthenticationMiddleware - bypass request.user because it
+ # throws AssertionError if 'user' is not in Scope, and we need a
+ # try/except already
+ try:
+ username = scope["user"].display_name
+ except (KeyError, AttributeError):
+ pass
+ else:
+ tracked_request.tag("username", username)
+
async def wrapped_send(data):
# Finish HTTP span when body finishes sending, not later (e.g.
# after background tasks)
if data.get("type", None) == "http.response.body" and not data.get(
"more_body", False
):
- rename_controller_span_from_endpoint()
+ grab_extra_data()
tracked_request.stop_span()
return await send(data)
@@ -81,7 +91,7 @@
raise exc
finally:
if tracked_request.end_time is None:
- rename_controller_span_from_endpoint()
+ grab_extra_data()
tracked_request.stop_span()
|
{"golden_diff": "diff --git a/src/scout_apm/async_/starlette.py b/src/scout_apm/async_/starlette.py\n--- a/src/scout_apm/async_/starlette.py\n+++ b/src/scout_apm/async_/starlette.py\n@@ -55,7 +55,7 @@\n amazon_queue_time = request.headers.get(\"x-amzn-trace-id\", default=\"\")\n track_amazon_request_queue_time(amazon_queue_time, tracked_request)\n \n- def rename_controller_span_from_endpoint():\n+ def grab_extra_data():\n if \"endpoint\" in scope:\n # Rename top span\n endpoint = scope[\"endpoint\"]\n@@ -64,13 +64,23 @@\n )\n tracked_request.is_real_request = True\n \n+ # From AuthenticationMiddleware - bypass request.user because it\n+ # throws AssertionError if 'user' is not in Scope, and we need a\n+ # try/except already\n+ try:\n+ username = scope[\"user\"].display_name\n+ except (KeyError, AttributeError):\n+ pass\n+ else:\n+ tracked_request.tag(\"username\", username)\n+\n async def wrapped_send(data):\n # Finish HTTP span when body finishes sending, not later (e.g.\n # after background tasks)\n if data.get(\"type\", None) == \"http.response.body\" and not data.get(\n \"more_body\", False\n ):\n- rename_controller_span_from_endpoint()\n+ grab_extra_data()\n tracked_request.stop_span()\n return await send(data)\n \n@@ -81,7 +91,7 @@\n raise exc\n finally:\n if tracked_request.end_time is None:\n- rename_controller_span_from_endpoint()\n+ grab_extra_data()\n tracked_request.stop_span()\n", "issue": "Instrument Starlette authentication\nStarlette has a basic authentication framework: https://www.starlette.io/authentication/\r\n\r\nWe should probably copy what we do for Django here, tagging user ID's/ display names on traces.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport wrapt\nfrom starlette.background import BackgroundTask\nfrom starlette.requests import Request\n\nimport scout_apm.core\nfrom scout_apm.core.tracked_request import TrackedRequest\nfrom scout_apm.core.web_requests import (\n create_filtered_path,\n ignore_path,\n track_amazon_request_queue_time,\n track_request_queue_time,\n)\n\n\nclass ScoutMiddleware:\n def __init__(self, app):\n self.app = app\n installed = scout_apm.core.install()\n self._do_nothing = not installed\n if installed:\n install_background_instrumentation()\n\n async def __call__(self, scope, receive, send):\n if self._do_nothing or scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n return\n\n request = Request(scope)\n tracked_request = TrackedRequest.instance()\n # Can't name controller until post-routing - see final clause\n controller_span = tracked_request.start_span(operation=\"Controller/Unknown\")\n\n tracked_request.tag(\n \"path\",\n create_filtered_path(request.url.path, request.query_params.multi_items()),\n )\n if ignore_path(request.url.path):\n tracked_request.tag(\"ignore_transaction\", True)\n\n user_ip = (\n request.headers.get(\"x-forwarded-for\", default=\"\").split(\",\")[0]\n or request.headers.get(\"client-ip\", default=\"\").split(\",\")[0]\n or request.client.host\n )\n tracked_request.tag(\"user_ip\", user_ip)\n\n queue_time = request.headers.get(\n \"x-queue-start\", default=\"\"\n ) or request.headers.get(\"x-request-start\", default=\"\")\n tracked_queue_time = track_request_queue_time(queue_time, tracked_request)\n if not tracked_queue_time:\n amazon_queue_time = request.headers.get(\"x-amzn-trace-id\", default=\"\")\n track_amazon_request_queue_time(amazon_queue_time, tracked_request)\n\n def 
rename_controller_span_from_endpoint():\n if \"endpoint\" in scope:\n # Rename top span\n endpoint = scope[\"endpoint\"]\n controller_span.operation = \"Controller/{}.{}\".format(\n endpoint.__module__, endpoint.__qualname__\n )\n tracked_request.is_real_request = True\n\n async def wrapped_send(data):\n # Finish HTTP span when body finishes sending, not later (e.g.\n # after background tasks)\n if data.get(\"type\", None) == \"http.response.body\" and not data.get(\n \"more_body\", False\n ):\n rename_controller_span_from_endpoint()\n tracked_request.stop_span()\n return await send(data)\n\n try:\n await self.app(scope, receive, wrapped_send)\n except Exception as exc:\n tracked_request.tag(\"error\", \"true\")\n raise exc\n finally:\n if tracked_request.end_time is None:\n rename_controller_span_from_endpoint()\n tracked_request.stop_span()\n\n\nbackground_instrumentation_installed = False\n\n\ndef install_background_instrumentation():\n global background_instrumentation_installed\n if background_instrumentation_installed:\n return\n background_instrumentation_installed = True\n\n @wrapt.decorator\n async def wrapped_background_call(wrapped, instance, args, kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.is_real_request = True\n tracked_request.start_span(\n operation=\"Job/{}.{}\".format(\n instance.func.__module__, instance.func.__qualname__\n )\n )\n try:\n return await wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n\n BackgroundTask.__call__ = wrapped_background_call(BackgroundTask.__call__)\n", "path": "src/scout_apm/async_/starlette.py"}]}
| 1,600 | 380 |
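The fix in the record above reads the authenticated user straight from the ASGI scope rather than through `request.user`. A minimal sketch of that pattern, assuming Starlette's `AuthenticationMiddleware` populates `scope["user"]` and that `tracked_request` exposes a `tag()` method as in the instrumented code:

```python
def tag_username(scope, tracked_request):
    """Tag the display name of the authenticated user, if any.

    Starlette's AuthenticationMiddleware stores a BaseUser under scope["user"];
    when no authentication backend ran, the key is absent, so fail quietly.
    """
    try:
        username = scope["user"].display_name
    except (KeyError, AttributeError):
        return  # unauthenticated request or no auth middleware installed
    tracked_request.tag("username", username)
```

Reading the scope directly avoids the `AssertionError` that `request.user` raises when the authentication middleware is not installed.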
gh_patches_debug_25796
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-3426
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Results framework loads very slowly for projects with a lot of indicator dimensions
</issue>
<code>
[start of akvo/rest/views/indicator_dimension.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import IndicatorDimension
9
10 from ..serializers import IndicatorDimensionSerializer
11 from ..viewsets import PublicProjectViewSet
12
13
14 class IndicatorDimensionViewSet(PublicProjectViewSet):
15 """
16 """
17 queryset = IndicatorDimension.objects.all()
18 serializer_class = IndicatorDimensionSerializer
19 project_relation = 'indicator__result__project__'
20
[end of akvo/rest/views/indicator_dimension.py]
[start of akvo/rest/pagination.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from rest_framework import pagination
8 from rest_framework.response import Response
9
10
11 class LimitSizePageNumberPagination(pagination.PageNumberPagination):
12 page_size = 30
13 page_size_query_param = 'limit'
14 max_page_size = 100
15
16
17 class TastypieOffsetPagination(pagination.LimitOffsetPagination):
18
19 def get_paginated_response(self, data):
20 """ Emulate the old style Tastypie format if the URL contains /api/v1/
21 """
22 return Response({
23 'meta': {
24 'next': self.get_next_link(),
25 'previous': self.get_previous_link(),
26 'total_count': self.count,
27 'limit': self.limit,
28 'offset': self.offset,
29 },
30 'objects': data
31 })
32
[end of akvo/rest/pagination.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/akvo/rest/pagination.py b/akvo/rest/pagination.py
--- a/akvo/rest/pagination.py
+++ b/akvo/rest/pagination.py
@@ -8,12 +8,18 @@
from rest_framework.response import Response
-class LimitSizePageNumberPagination(pagination.PageNumberPagination):
+class StandardSizePageNumberPagination(pagination.PageNumberPagination):
page_size = 30
page_size_query_param = 'limit'
max_page_size = 100
+class LargeSizePageNumberPagination(pagination.PageNumberPagination):
+ page_size = 100
+ page_size_query_param = 'limit'
+ max_page_size = 1000
+
+
class TastypieOffsetPagination(pagination.LimitOffsetPagination):
def get_paginated_response(self, data):
diff --git a/akvo/rest/views/indicator_dimension.py b/akvo/rest/views/indicator_dimension.py
--- a/akvo/rest/views/indicator_dimension.py
+++ b/akvo/rest/views/indicator_dimension.py
@@ -6,6 +6,7 @@
from akvo.rsr.models import IndicatorDimension
+from akvo.rest.pagination import LargeSizePageNumberPagination
from ..serializers import IndicatorDimensionSerializer
from ..viewsets import PublicProjectViewSet
@@ -17,3 +18,4 @@
queryset = IndicatorDimension.objects.all()
serializer_class = IndicatorDimensionSerializer
project_relation = 'indicator__result__project__'
+ pagination_class = LargeSizePageNumberPagination
|
{"golden_diff": "diff --git a/akvo/rest/pagination.py b/akvo/rest/pagination.py\n--- a/akvo/rest/pagination.py\n+++ b/akvo/rest/pagination.py\n@@ -8,12 +8,18 @@\n from rest_framework.response import Response\n \n \n-class LimitSizePageNumberPagination(pagination.PageNumberPagination):\n+class StandardSizePageNumberPagination(pagination.PageNumberPagination):\n page_size = 30\n page_size_query_param = 'limit'\n max_page_size = 100\n \n \n+class LargeSizePageNumberPagination(pagination.PageNumberPagination):\n+ page_size = 100\n+ page_size_query_param = 'limit'\n+ max_page_size = 1000\n+\n+\n class TastypieOffsetPagination(pagination.LimitOffsetPagination):\n \n def get_paginated_response(self, data):\ndiff --git a/akvo/rest/views/indicator_dimension.py b/akvo/rest/views/indicator_dimension.py\n--- a/akvo/rest/views/indicator_dimension.py\n+++ b/akvo/rest/views/indicator_dimension.py\n@@ -6,6 +6,7 @@\n \n \n from akvo.rsr.models import IndicatorDimension\n+from akvo.rest.pagination import LargeSizePageNumberPagination\n \n from ..serializers import IndicatorDimensionSerializer\n from ..viewsets import PublicProjectViewSet\n@@ -17,3 +18,4 @@\n queryset = IndicatorDimension.objects.all()\n serializer_class = IndicatorDimensionSerializer\n project_relation = 'indicator__result__project__'\n+ pagination_class = LargeSizePageNumberPagination\n", "issue": "Results framework loads very slowly for projects with lot of indicator dimensions\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorDimension\n\nfrom ..serializers import IndicatorDimensionSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass IndicatorDimensionViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorDimension.objects.all()\n serializer_class = IndicatorDimensionSerializer\n project_relation = 'indicator__result__project__'\n", "path": "akvo/rest/views/indicator_dimension.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom rest_framework import pagination\nfrom rest_framework.response import Response\n\n\nclass LimitSizePageNumberPagination(pagination.PageNumberPagination):\n page_size = 30\n page_size_query_param = 'limit'\n max_page_size = 100\n\n\nclass TastypieOffsetPagination(pagination.LimitOffsetPagination):\n\n def get_paginated_response(self, data):\n \"\"\" Emulate the old style Tastypie format if the URL contains /api/v1/\n \"\"\"\n return Response({\n 'meta': {\n 'next': self.get_next_link(),\n 'previous': self.get_previous_link(),\n 'total_count': self.count,\n 'limit': self.limit,\n 'offset': self.offset,\n },\n 'objects': data\n })\n", "path": "akvo/rest/pagination.py"}]}
| 1,033 | 336 |
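Outside the diff context, the change above boils down to a second, larger Django REST Framework pagination class plus opting the dimension endpoint into it. A minimal sketch, assuming DRF is installed; the class and attribute values follow the golden diff:

```python
from rest_framework import pagination


class LargeSizePageNumberPagination(pagination.PageNumberPagination):
    """Bigger pages so indicator-dimension-heavy projects need fewer round trips."""
    page_size = 100
    page_size_query_param = "limit"
    max_page_size = 1000


# Usage on the viewset (mirrors the change to IndicatorDimensionViewSet):
# class IndicatorDimensionViewSet(PublicProjectViewSet):
#     pagination_class = LargeSizePageNumberPagination
```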
gh_patches_debug_9421
|
rasdani/github-patches
|
git_diff
|
arviz-devs__arviz-374
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add API docs for plot_ppc
https://arviz-devs.github.io/arviz/generated/arviz.plot_ppc.html#arviz.plot_ppc
See Posterior Docs for example
https://arviz-devs.github.io/arviz/generated/arviz.plot_posterior.html#arviz.plot_posterior
</issue>
<code>
[start of arviz/plots/ppcplot.py]
1 """Posterior predictive plot."""
2 import numpy as np
3 from .kdeplot import plot_kde, _fast_kde
4 from .plot_utils import _scale_fig_size, _create_axes_grid, default_grid
5
6
7 def plot_ppc(
8 data, kind="density", alpha=0.2, mean=True, figsize=None, textsize=None, data_pairs=None
9 ):
10 """
11 Plot for Posterior Predictive checks.
12
13 Note that this plot will flatten out any dimensions in the posterior predictive variables.
14
15 Parameters
16 ----------
17 data : Array-like
18 Observed values
19 kind : str
20 Type of plot to display (density or cumulative)
21 alpha : float
22 Opacity of posterior predictive density curves
23 mean : bool
24 Whether or not to plot the mean posterior predictive distribution. Defaults to True
25 figsize : tuple
26 Figure size. If None it will be defined automatically.
27 textsize: float
28 Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
29 on figsize.
30 data_pairs : dict
31 Dictionary containing relations between observed data and posterior predictive data.
32 Dictionary structure:
33 Key = data var_name
34 Value = posterior predictive var_name
35 Example: `data_pairs = {'y' : 'y_hat'}`
36
37 Returns
38 -------
39 axes : matplotlib axes
40 """
41 for group in ("posterior_predictive", "observed_data"):
42 if not hasattr(data, group):
43 raise TypeError(
44 '`data` argument must have the group "{group}" for ppcplot'.format(group=group)
45 )
46
47 if kind.lower() not in ("density", "cumulative"):
48 raise TypeError("`kind` argument must be either `density` or `cumulative`")
49
50 if data_pairs is None:
51 data_pairs = {}
52
53 observed = data.observed_data
54 posterior_predictive = data.posterior_predictive
55
56 rows, cols = default_grid(len(observed.data_vars))
57
58 (figsize, ax_labelsize, _, xt_labelsize, linewidth, _) = _scale_fig_size(
59 figsize, textsize, rows, cols
60 )
61
62 _, axes = _create_axes_grid(len(observed.data_vars), rows, cols, figsize=figsize)
63
64 for ax, var_name in zip(np.atleast_1d(axes), observed.data_vars):
65 dtype = observed[var_name].dtype.kind
66 if kind == "density":
67 if dtype == "f":
68 plot_kde(
69 observed[var_name].values.flatten(),
70 label="Observed {}".format(var_name),
71 plot_kwargs={"color": "k", "linewidth": linewidth, "zorder": 3},
72 fill_kwargs={"alpha": 0},
73 ax=ax,
74 )
75 else:
76 vals = observed[var_name].values.flatten()
77 nbins = round(len(vals) ** 0.5)
78 hist, bin_edges = np.histogram(vals, bins=nbins, density=True)
79 hist = np.concatenate((hist[:1], hist))
80 ax.plot(
81 bin_edges,
82 hist,
83 label="Observed {}".format(var_name),
84 color="k",
85 linewidth=linewidth,
86 zorder=3,
87 drawstyle="steps-pre",
88 )
89 pp_var_name = data_pairs.get(var_name, var_name)
90 # run plot_kde manually with one plot call
91 pp_densities = []
92 for _, chain_vals in posterior_predictive[pp_var_name].groupby("chain"):
93 for _, vals in chain_vals.groupby("draw"):
94 if dtype == "f":
95 pp_density, lower, upper = _fast_kde(vals)
96 pp_x = np.linspace(lower, upper, len(pp_density))
97 pp_densities.extend([pp_x, pp_density])
98 else:
99 nbins = round(len(vals) ** 0.5)
100 hist, bin_edges = np.histogram(vals, bins=nbins, density=True)
101 hist = np.concatenate((hist[:1], hist))
102 pp_densities.extend([bin_edges, hist])
103 plot_kwargs = {"color": "C5", "alpha": alpha, "linewidth": 0.5 * linewidth}
104 if dtype == "i":
105 plot_kwargs["drawstyle"] = "steps-pre"
106 ax.plot(*pp_densities, **plot_kwargs)
107 ax.plot([], color="C5", label="Posterior predictive {}".format(pp_var_name))
108 if mean:
109 if dtype == "f":
110 plot_kde(
111 posterior_predictive[pp_var_name].values.flatten(),
112 plot_kwargs={
113 "color": "C0",
114 "linestyle": "--",
115 "linewidth": linewidth,
116 "zorder": 2,
117 },
118 label="Posterior predictive mean {}".format(pp_var_name),
119 ax=ax,
120 )
121 else:
122 vals = posterior_predictive[pp_var_name].values.flatten()
123 nbins = round(len(vals) ** 0.5)
124 hist, bin_edges = np.histogram(vals, bins=nbins, density=True)
125 hist = np.concatenate((hist[:1], hist))
126 ax.plot(
127 bin_edges,
128 hist,
129 color="C0",
130 linewidth=linewidth,
131 label="Posterior predictive mean {}".format(pp_var_name),
132 zorder=2,
133 linestyle="--",
134 drawstyle="steps-pre",
135 )
136 if var_name != pp_var_name:
137 xlabel = "{} / {}".format(var_name, pp_var_name)
138 else:
139 xlabel = var_name
140 ax.set_xlabel(xlabel, fontsize=ax_labelsize)
141 ax.tick_params(labelsize=xt_labelsize)
142 ax.set_yticks([])
143
144 elif kind == "cumulative":
145 if dtype == "f":
146 ax.plot(
147 *_empirical_cdf(observed[var_name].values.flatten()),
148 color="k",
149 linewidth=linewidth,
150 label="Observed {}".format(var_name),
151 zorder=3
152 )
153 else:
154 ax.plot(
155 *_empirical_cdf(observed[var_name].values.flatten()),
156 color="k",
157 linewidth=linewidth,
158 label="Observed {}".format(var_name),
159 drawstyle="steps-pre",
160 zorder=3
161 )
162 pp_var_name = data_pairs.get(var_name, var_name)
163 # run plot_kde manually with one plot call
164 pp_densities = []
165 for _, chain_vals in posterior_predictive[pp_var_name].groupby("chain"):
166 for _, vals in chain_vals.groupby("draw"):
167 pp_x, pp_density = _empirical_cdf(vals)
168 pp_densities.extend([pp_x, pp_density])
169 if dtype == "f":
170 ax.plot(*pp_densities, alpha=alpha, color="C5", linewidth=linewidth)
171 else:
172 ax.plot(
173 *pp_densities,
174 alpha=alpha,
175 color="C5",
176 drawstyle="steps-pre",
177 linewidth=linewidth
178 )
179 ax.plot([], color="C5", label="Posterior predictive {}".format(pp_var_name))
180 if mean:
181 if dtype == "f":
182 ax.plot(
183 *_empirical_cdf(posterior_predictive[pp_var_name].values.flatten()),
184 color="C0",
185 linestyle="--",
186 linewidth=linewidth,
187 label="Posterior predictive mean {}".format(pp_var_name)
188 )
189 else:
190 ax.plot(
191 *_empirical_cdf(posterior_predictive[pp_var_name].values.flatten()),
192 color="C0",
193 linestyle="--",
194 linewidth=linewidth,
195 drawstyle="steps-pre",
196 label="Posterior predictive mean {}".format(pp_var_name)
197 )
198 if var_name != pp_var_name:
199 xlabel = "{} / {}".format(var_name, pp_var_name)
200 else:
201 xlabel = var_name
202 ax.set_xlabel(var_name, fontsize=ax_labelsize)
203 ax.set_yticks([0, 0.5, 1])
204 ax.legend(fontsize=xt_labelsize)
205 return axes
206
207
208 def _empirical_cdf(data):
209 """Compute empirical cdf of a numpy array.
210
211 Parameters
212 ----------
213 data : np.array
214 1d array
215
216 Returns
217 -------
218 np.array, np.array
219 x and y coordinates for the empirical cdf of the data
220 """
221 return np.sort(data), np.linspace(0, 1, len(data))
222
[end of arviz/plots/ppcplot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/arviz/plots/ppcplot.py b/arviz/plots/ppcplot.py
--- a/arviz/plots/ppcplot.py
+++ b/arviz/plots/ppcplot.py
@@ -37,6 +37,25 @@
Returns
-------
axes : matplotlib axes
+
+ Examples
+ --------
+ Plot the observed data KDE overlaid on posterior predictive KDEs.
+
+ .. plot::
+ :context: close-figs
+
+ >>> import arviz as az
+ >>> data = az.load_arviz_data('radon')
+ >>> az.plot_ppc(data)
+
+ Plot the overlay with empirical CDFs.
+
+ .. plot::
+ :context: close-figs
+
+ >>> az.plot_ppc(data, kind='cumulative')
+
"""
for group in ("posterior_predictive", "observed_data"):
if not hasattr(data, group):
|
{"golden_diff": "diff --git a/arviz/plots/ppcplot.py b/arviz/plots/ppcplot.py\n--- a/arviz/plots/ppcplot.py\n+++ b/arviz/plots/ppcplot.py\n@@ -37,6 +37,25 @@\n Returns\n -------\n axes : matplotlib axes\n+\n+ Examples\n+ --------\n+ Plot the observed data KDE overlaid on posterior predictive KDEs.\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> import arviz as az\n+ >>> data = az.load_arviz_data('radon')\n+ >>> az.plot_ppc(data)\n+\n+ Plot the overlay with empirical CDFs.\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> az.plot_ppc(data, kind='cumulative')\n+\n \"\"\"\n for group in (\"posterior_predictive\", \"observed_data\"):\n if not hasattr(data, group):\n", "issue": "Add API docs for plot_ppc\nhttps://arviz-devs.github.io/arviz/generated/arviz.plot_ppc.html#arviz.plot_ppc\r\n\r\nSee Posterior Docs for example\r\nhttps://arviz-devs.github.io/arviz/generated/arviz.plot_posterior.html#arviz.plot_posterior\n", "before_files": [{"content": "\"\"\"Posterior predictive plot.\"\"\"\nimport numpy as np\nfrom .kdeplot import plot_kde, _fast_kde\nfrom .plot_utils import _scale_fig_size, _create_axes_grid, default_grid\n\n\ndef plot_ppc(\n data, kind=\"density\", alpha=0.2, mean=True, figsize=None, textsize=None, data_pairs=None\n):\n \"\"\"\n Plot for Posterior Predictive checks.\n\n Note that this plot will flatten out any dimensions in the posterior predictive variables.\n\n Parameters\n ----------\n data : Array-like\n Observed values\n kind : str\n Type of plot to display (density or cumulative)\n alpha : float\n Opacity of posterior predictive density curves\n mean : bool\n Whether or not to plot the mean posterior predictive distribution. Defaults to True\n figsize : tuple\n Figure size. If None it will be defined automatically.\n textsize: float\n Text size scaling factor for labels, titles and lines. 
If None it will be autoscaled based\n on figsize.\n data_pairs : dict\n Dictionary containing relations between observed data and posterior predictive data.\n Dictionary structure:\n Key = data var_name\n Value = posterior predictive var_name\n Example: `data_pairs = {'y' : 'y_hat'}`\n\n Returns\n -------\n axes : matplotlib axes\n \"\"\"\n for group in (\"posterior_predictive\", \"observed_data\"):\n if not hasattr(data, group):\n raise TypeError(\n '`data` argument must have the group \"{group}\" for ppcplot'.format(group=group)\n )\n\n if kind.lower() not in (\"density\", \"cumulative\"):\n raise TypeError(\"`kind` argument must be either `density` or `cumulative`\")\n\n if data_pairs is None:\n data_pairs = {}\n\n observed = data.observed_data\n posterior_predictive = data.posterior_predictive\n\n rows, cols = default_grid(len(observed.data_vars))\n\n (figsize, ax_labelsize, _, xt_labelsize, linewidth, _) = _scale_fig_size(\n figsize, textsize, rows, cols\n )\n\n _, axes = _create_axes_grid(len(observed.data_vars), rows, cols, figsize=figsize)\n\n for ax, var_name in zip(np.atleast_1d(axes), observed.data_vars):\n dtype = observed[var_name].dtype.kind\n if kind == \"density\":\n if dtype == \"f\":\n plot_kde(\n observed[var_name].values.flatten(),\n label=\"Observed {}\".format(var_name),\n plot_kwargs={\"color\": \"k\", \"linewidth\": linewidth, \"zorder\": 3},\n fill_kwargs={\"alpha\": 0},\n ax=ax,\n )\n else:\n vals = observed[var_name].values.flatten()\n nbins = round(len(vals) ** 0.5)\n hist, bin_edges = np.histogram(vals, bins=nbins, density=True)\n hist = np.concatenate((hist[:1], hist))\n ax.plot(\n bin_edges,\n hist,\n label=\"Observed {}\".format(var_name),\n color=\"k\",\n linewidth=linewidth,\n zorder=3,\n drawstyle=\"steps-pre\",\n )\n pp_var_name = data_pairs.get(var_name, var_name)\n # run plot_kde manually with one plot call\n pp_densities = []\n for _, chain_vals in posterior_predictive[pp_var_name].groupby(\"chain\"):\n for _, vals in chain_vals.groupby(\"draw\"):\n if dtype == \"f\":\n pp_density, lower, upper = _fast_kde(vals)\n pp_x = np.linspace(lower, upper, len(pp_density))\n pp_densities.extend([pp_x, pp_density])\n else:\n nbins = round(len(vals) ** 0.5)\n hist, bin_edges = np.histogram(vals, bins=nbins, density=True)\n hist = np.concatenate((hist[:1], hist))\n pp_densities.extend([bin_edges, hist])\n plot_kwargs = {\"color\": \"C5\", \"alpha\": alpha, \"linewidth\": 0.5 * linewidth}\n if dtype == \"i\":\n plot_kwargs[\"drawstyle\"] = \"steps-pre\"\n ax.plot(*pp_densities, **plot_kwargs)\n ax.plot([], color=\"C5\", label=\"Posterior predictive {}\".format(pp_var_name))\n if mean:\n if dtype == \"f\":\n plot_kde(\n posterior_predictive[pp_var_name].values.flatten(),\n plot_kwargs={\n \"color\": \"C0\",\n \"linestyle\": \"--\",\n \"linewidth\": linewidth,\n \"zorder\": 2,\n },\n label=\"Posterior predictive mean {}\".format(pp_var_name),\n ax=ax,\n )\n else:\n vals = posterior_predictive[pp_var_name].values.flatten()\n nbins = round(len(vals) ** 0.5)\n hist, bin_edges = np.histogram(vals, bins=nbins, density=True)\n hist = np.concatenate((hist[:1], hist))\n ax.plot(\n bin_edges,\n hist,\n color=\"C0\",\n linewidth=linewidth,\n label=\"Posterior predictive mean {}\".format(pp_var_name),\n zorder=2,\n linestyle=\"--\",\n drawstyle=\"steps-pre\",\n )\n if var_name != pp_var_name:\n xlabel = \"{} / {}\".format(var_name, pp_var_name)\n else:\n xlabel = var_name\n ax.set_xlabel(xlabel, fontsize=ax_labelsize)\n ax.tick_params(labelsize=xt_labelsize)\n 
ax.set_yticks([])\n\n elif kind == \"cumulative\":\n if dtype == \"f\":\n ax.plot(\n *_empirical_cdf(observed[var_name].values.flatten()),\n color=\"k\",\n linewidth=linewidth,\n label=\"Observed {}\".format(var_name),\n zorder=3\n )\n else:\n ax.plot(\n *_empirical_cdf(observed[var_name].values.flatten()),\n color=\"k\",\n linewidth=linewidth,\n label=\"Observed {}\".format(var_name),\n drawstyle=\"steps-pre\",\n zorder=3\n )\n pp_var_name = data_pairs.get(var_name, var_name)\n # run plot_kde manually with one plot call\n pp_densities = []\n for _, chain_vals in posterior_predictive[pp_var_name].groupby(\"chain\"):\n for _, vals in chain_vals.groupby(\"draw\"):\n pp_x, pp_density = _empirical_cdf(vals)\n pp_densities.extend([pp_x, pp_density])\n if dtype == \"f\":\n ax.plot(*pp_densities, alpha=alpha, color=\"C5\", linewidth=linewidth)\n else:\n ax.plot(\n *pp_densities,\n alpha=alpha,\n color=\"C5\",\n drawstyle=\"steps-pre\",\n linewidth=linewidth\n )\n ax.plot([], color=\"C5\", label=\"Posterior predictive {}\".format(pp_var_name))\n if mean:\n if dtype == \"f\":\n ax.plot(\n *_empirical_cdf(posterior_predictive[pp_var_name].values.flatten()),\n color=\"C0\",\n linestyle=\"--\",\n linewidth=linewidth,\n label=\"Posterior predictive mean {}\".format(pp_var_name)\n )\n else:\n ax.plot(\n *_empirical_cdf(posterior_predictive[pp_var_name].values.flatten()),\n color=\"C0\",\n linestyle=\"--\",\n linewidth=linewidth,\n drawstyle=\"steps-pre\",\n label=\"Posterior predictive mean {}\".format(pp_var_name)\n )\n if var_name != pp_var_name:\n xlabel = \"{} / {}\".format(var_name, pp_var_name)\n else:\n xlabel = var_name\n ax.set_xlabel(var_name, fontsize=ax_labelsize)\n ax.set_yticks([0, 0.5, 1])\n ax.legend(fontsize=xt_labelsize)\n return axes\n\n\ndef _empirical_cdf(data):\n \"\"\"Compute empirical cdf of a numpy array.\n\n Parameters\n ----------\n data : np.array\n 1d array\n\n Returns\n -------\n np.array, np.array\n x and y coordinates for the empirical cdf of the data\n \"\"\"\n return np.sort(data), np.linspace(0, 1, len(data))\n", "path": "arviz/plots/ppcplot.py"}]}
| 2,940 | 209 |
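The docstring examples added by the diff above correspond to the following usage. A quick sketch, assuming ArviZ and matplotlib are installed and the bundled `radon` example dataset can be fetched:

```python
import matplotlib.pyplot as plt
import arviz as az

data = az.load_arviz_data("radon")    # example InferenceData shipped with ArviZ
az.plot_ppc(data)                     # KDE overlay; kind="density" is the default
az.plot_ppc(data, kind="cumulative")  # empirical-CDF overlay instead
plt.show()
```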
gh_patches_debug_23266
|
rasdani/github-patches
|
git_diff
|
feast-dev__feast-3203
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support requested features on Postgresql online store
**Is your feature request related to a problem? Please describe.**
As a user, I want to retrieve a subset of features from the PostgreSQL online store instead of fetching all features
**Describe the solution you'd like**
Update the online_read method to retrieve a subset of features
**Additional context**
I'm close to creating a pull request for this issue myself.
</issue>
<code>
[start of sdk/python/feast/infra/online_stores/contrib/postgres.py]
1 import logging
2 from collections import defaultdict
3 from datetime import datetime
4 from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
5
6 import psycopg2
7 import pytz
8 from psycopg2 import sql
9 from psycopg2.extras import execute_values
10 from pydantic.schema import Literal
11
12 from feast import Entity
13 from feast.feature_view import FeatureView
14 from feast.infra.key_encoding_utils import serialize_entity_key
15 from feast.infra.online_stores.online_store import OnlineStore
16 from feast.infra.utils.postgres.connection_utils import _get_conn
17 from feast.infra.utils.postgres.postgres_config import PostgreSQLConfig
18 from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
19 from feast.protos.feast.types.Value_pb2 import Value as ValueProto
20 from feast.repo_config import RepoConfig
21 from feast.usage import log_exceptions_and_usage
22
23
24 class PostgreSQLOnlineStoreConfig(PostgreSQLConfig):
25 type: Literal["postgres"] = "postgres"
26
27
28 class PostgreSQLOnlineStore(OnlineStore):
29 _conn: Optional[psycopg2._psycopg.connection] = None
30
31 def _get_conn(self, config: RepoConfig):
32 if not self._conn:
33 assert config.online_store.type == "postgres"
34 self._conn = _get_conn(config.online_store)
35 return self._conn
36
37 @log_exceptions_and_usage(online_store="postgres")
38 def online_write_batch(
39 self,
40 config: RepoConfig,
41 table: FeatureView,
42 data: List[
43 Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]
44 ],
45 progress: Optional[Callable[[int], Any]],
46 ) -> None:
47 project = config.project
48
49 with self._get_conn(config) as conn, conn.cursor() as cur:
50 insert_values = []
51 for entity_key, values, timestamp, created_ts in data:
52 entity_key_bin = serialize_entity_key(
53 entity_key,
54 entity_key_serialization_version=config.entity_key_serialization_version,
55 )
56 timestamp = _to_naive_utc(timestamp)
57 if created_ts is not None:
58 created_ts = _to_naive_utc(created_ts)
59
60 for feature_name, val in values.items():
61 insert_values.append(
62 (
63 entity_key_bin,
64 feature_name,
65 val.SerializeToString(),
66 timestamp,
67 created_ts,
68 )
69 )
70 # Control the batch so that we can update the progress
71 batch_size = 5000
72 for i in range(0, len(insert_values), batch_size):
73 cur_batch = insert_values[i : i + batch_size]
74 execute_values(
75 cur,
76 sql.SQL(
77 """
78 INSERT INTO {}
79 (entity_key, feature_name, value, event_ts, created_ts)
80 VALUES %s
81 ON CONFLICT (entity_key, feature_name) DO
82 UPDATE SET
83 value = EXCLUDED.value,
84 event_ts = EXCLUDED.event_ts,
85 created_ts = EXCLUDED.created_ts;
86 """,
87 ).format(sql.Identifier(_table_id(project, table))),
88 cur_batch,
89 page_size=batch_size,
90 )
91 if progress:
92 progress(len(cur_batch))
93
94 @log_exceptions_and_usage(online_store="postgres")
95 def online_read(
96 self,
97 config: RepoConfig,
98 table: FeatureView,
99 entity_keys: List[EntityKeyProto],
100 requested_features: Optional[List[str]] = None,
101 ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:
102 result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []
103
104 project = config.project
105 with self._get_conn(config) as conn, conn.cursor() as cur:
106 # Collecting all the keys to a list allows us to make fewer round trips
107 # to PostgreSQL
108 keys = []
109 for entity_key in entity_keys:
110 keys.append(
111 serialize_entity_key(
112 entity_key,
113 entity_key_serialization_version=config.entity_key_serialization_version,
114 )
115 )
116
117 cur.execute(
118 sql.SQL(
119 """
120 SELECT entity_key, feature_name, value, event_ts
121 FROM {} WHERE entity_key = ANY(%s);
122 """
123 ).format(
124 sql.Identifier(_table_id(project, table)),
125 ),
126 (keys,),
127 )
128
129 rows = cur.fetchall()
130
131 # Since we don't know the order returned from PostgreSQL we'll need
132 # to construct a dict to be able to quickly look up the correct row
133 # when we iterate through the keys since they are in the correct order
134 values_dict = defaultdict(list)
135 for row in rows if rows is not None else []:
136 values_dict[row[0].tobytes()].append(row[1:])
137
138 for key in keys:
139 if key in values_dict:
140 value = values_dict[key]
141 res = {}
142 for feature_name, value_bin, event_ts in value:
143 val = ValueProto()
144 val.ParseFromString(bytes(value_bin))
145 res[feature_name] = val
146 result.append((event_ts, res))
147 else:
148 result.append((None, None))
149
150 return result
151
152 @log_exceptions_and_usage(online_store="postgres")
153 def update(
154 self,
155 config: RepoConfig,
156 tables_to_delete: Sequence[FeatureView],
157 tables_to_keep: Sequence[FeatureView],
158 entities_to_delete: Sequence[Entity],
159 entities_to_keep: Sequence[Entity],
160 partial: bool,
161 ):
162 project = config.project
163 schema_name = config.online_store.db_schema or config.online_store.user
164 with self._get_conn(config) as conn, conn.cursor() as cur:
165 # If a db_schema is provided, then that schema gets created if it doesn't
166 # exist. Else a schema is created for the feature store user.
167
168 cur.execute(
169 """
170 SELECT schema_name
171 FROM information_schema.schemata
172 WHERE schema_name = %s
173 """,
174 (schema_name,),
175 )
176 schema_exists = cur.fetchone()
177 if not schema_exists:
178 cur.execute(
179 sql.SQL("CREATE SCHEMA IF NOT EXISTS {} AUTHORIZATION {}").format(
180 sql.Identifier(schema_name),
181 sql.Identifier(config.online_store.user),
182 ),
183 )
184
185 for table in tables_to_delete:
186 table_name = _table_id(project, table)
187 cur.execute(_drop_table_and_index(table_name))
188
189 for table in tables_to_keep:
190 table_name = _table_id(project, table)
191 cur.execute(
192 sql.SQL(
193 """
194 CREATE TABLE IF NOT EXISTS {}
195 (
196 entity_key BYTEA,
197 feature_name TEXT,
198 value BYTEA,
199 event_ts TIMESTAMPTZ,
200 created_ts TIMESTAMPTZ,
201 PRIMARY KEY(entity_key, feature_name)
202 );
203 CREATE INDEX IF NOT EXISTS {} ON {} (entity_key);
204 """
205 ).format(
206 sql.Identifier(table_name),
207 sql.Identifier(f"{table_name}_ek"),
208 sql.Identifier(table_name),
209 )
210 )
211
212 conn.commit()
213
214 def teardown(
215 self,
216 config: RepoConfig,
217 tables: Sequence[FeatureView],
218 entities: Sequence[Entity],
219 ):
220 project = config.project
221 try:
222 with self._get_conn(config) as conn, conn.cursor() as cur:
223 for table in tables:
224 table_name = _table_id(project, table)
225 cur.execute(_drop_table_and_index(table_name))
226 except Exception:
227 logging.exception("Teardown failed")
228 raise
229
230
231 def _table_id(project: str, table: FeatureView) -> str:
232 return f"{project}_{table.name}"
233
234
235 def _drop_table_and_index(table_name):
236 return sql.SQL(
237 """
238 DROP TABLE IF EXISTS {};
239 DROP INDEX IF EXISTS {};
240 """
241 ).format(
242 sql.Identifier(table_name),
243 sql.Identifier(f"{table_name}_ek"),
244 )
245
246
247 def _to_naive_utc(ts: datetime):
248 if ts.tzinfo is None:
249 return ts
250 else:
251 return ts.astimezone(pytz.utc).replace(tzinfo=None)
252
[end of sdk/python/feast/infra/online_stores/contrib/postgres.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sdk/python/feast/infra/online_stores/contrib/postgres.py b/sdk/python/feast/infra/online_stores/contrib/postgres.py
--- a/sdk/python/feast/infra/online_stores/contrib/postgres.py
+++ b/sdk/python/feast/infra/online_stores/contrib/postgres.py
@@ -114,17 +114,30 @@
)
)
- cur.execute(
- sql.SQL(
- """
- SELECT entity_key, feature_name, value, event_ts
- FROM {} WHERE entity_key = ANY(%s);
- """
- ).format(
- sql.Identifier(_table_id(project, table)),
- ),
- (keys,),
- )
+ if not requested_features:
+ cur.execute(
+ sql.SQL(
+ """
+ SELECT entity_key, feature_name, value, event_ts
+ FROM {} WHERE entity_key = ANY(%s);
+ """
+ ).format(
+ sql.Identifier(_table_id(project, table)),
+ ),
+ (keys,),
+ )
+ else:
+ cur.execute(
+ sql.SQL(
+ """
+ SELECT entity_key, feature_name, value, event_ts
+ FROM {} WHERE entity_key = ANY(%s) and feature_name = ANY(%s);
+ """
+ ).format(
+ sql.Identifier(_table_id(project, table)),
+ ),
+ (keys, requested_features),
+ )
rows = cur.fetchall()
|
{"golden_diff": "diff --git a/sdk/python/feast/infra/online_stores/contrib/postgres.py b/sdk/python/feast/infra/online_stores/contrib/postgres.py\n--- a/sdk/python/feast/infra/online_stores/contrib/postgres.py\n+++ b/sdk/python/feast/infra/online_stores/contrib/postgres.py\n@@ -114,17 +114,30 @@\n )\n )\n \n- cur.execute(\n- sql.SQL(\n- \"\"\"\n- SELECT entity_key, feature_name, value, event_ts\n- FROM {} WHERE entity_key = ANY(%s);\n- \"\"\"\n- ).format(\n- sql.Identifier(_table_id(project, table)),\n- ),\n- (keys,),\n- )\n+ if not requested_features:\n+ cur.execute(\n+ sql.SQL(\n+ \"\"\"\n+ SELECT entity_key, feature_name, value, event_ts\n+ FROM {} WHERE entity_key = ANY(%s);\n+ \"\"\"\n+ ).format(\n+ sql.Identifier(_table_id(project, table)),\n+ ),\n+ (keys,),\n+ )\n+ else:\n+ cur.execute(\n+ sql.SQL(\n+ \"\"\"\n+ SELECT entity_key, feature_name, value, event_ts\n+ FROM {} WHERE entity_key = ANY(%s) and feature_name = ANY(%s);\n+ \"\"\"\n+ ).format(\n+ sql.Identifier(_table_id(project, table)),\n+ ),\n+ (keys, requested_features),\n+ )\n \n rows = cur.fetchall()\n", "issue": "Support requested features on Postgresql online store\n**Is your feature request related to a problem? Please describe.**\r\nAs a user, I want to retrieve subset of features on Postgresql online store instead of fetching all features\r\n\r\n**Describe the solution you'd like**\r\nUpdate online_read method to retrieve subset of features\r\n\r\n**Additional context**\r\nI'm close to creating a pull request for this issue myself.\n", "before_files": [{"content": "import logging\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple\n\nimport psycopg2\nimport pytz\nfrom psycopg2 import sql\nfrom psycopg2.extras import execute_values\nfrom pydantic.schema import Literal\n\nfrom feast import Entity\nfrom feast.feature_view import FeatureView\nfrom feast.infra.key_encoding_utils import serialize_entity_key\nfrom feast.infra.online_stores.online_store import OnlineStore\nfrom feast.infra.utils.postgres.connection_utils import _get_conn\nfrom feast.infra.utils.postgres.postgres_config import PostgreSQLConfig\nfrom feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto\nfrom feast.protos.feast.types.Value_pb2 import Value as ValueProto\nfrom feast.repo_config import RepoConfig\nfrom feast.usage import log_exceptions_and_usage\n\n\nclass PostgreSQLOnlineStoreConfig(PostgreSQLConfig):\n type: Literal[\"postgres\"] = \"postgres\"\n\n\nclass PostgreSQLOnlineStore(OnlineStore):\n _conn: Optional[psycopg2._psycopg.connection] = None\n\n def _get_conn(self, config: RepoConfig):\n if not self._conn:\n assert config.online_store.type == \"postgres\"\n self._conn = _get_conn(config.online_store)\n return self._conn\n\n @log_exceptions_and_usage(online_store=\"postgres\")\n def online_write_batch(\n self,\n config: RepoConfig,\n table: FeatureView,\n data: List[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n project = config.project\n\n with self._get_conn(config) as conn, conn.cursor() as cur:\n insert_values = []\n for entity_key, values, timestamp, created_ts in data:\n entity_key_bin = serialize_entity_key(\n entity_key,\n entity_key_serialization_version=config.entity_key_serialization_version,\n )\n timestamp = _to_naive_utc(timestamp)\n if created_ts is not None:\n created_ts = _to_naive_utc(created_ts)\n\n for feature_name, val in 
values.items():\n insert_values.append(\n (\n entity_key_bin,\n feature_name,\n val.SerializeToString(),\n timestamp,\n created_ts,\n )\n )\n # Control the batch so that we can update the progress\n batch_size = 5000\n for i in range(0, len(insert_values), batch_size):\n cur_batch = insert_values[i : i + batch_size]\n execute_values(\n cur,\n sql.SQL(\n \"\"\"\n INSERT INTO {}\n (entity_key, feature_name, value, event_ts, created_ts)\n VALUES %s\n ON CONFLICT (entity_key, feature_name) DO\n UPDATE SET\n value = EXCLUDED.value,\n event_ts = EXCLUDED.event_ts,\n created_ts = EXCLUDED.created_ts;\n \"\"\",\n ).format(sql.Identifier(_table_id(project, table))),\n cur_batch,\n page_size=batch_size,\n )\n if progress:\n progress(len(cur_batch))\n\n @log_exceptions_and_usage(online_store=\"postgres\")\n def online_read(\n self,\n config: RepoConfig,\n table: FeatureView,\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []\n\n project = config.project\n with self._get_conn(config) as conn, conn.cursor() as cur:\n # Collecting all the keys to a list allows us to make fewer round trips\n # to PostgreSQL\n keys = []\n for entity_key in entity_keys:\n keys.append(\n serialize_entity_key(\n entity_key,\n entity_key_serialization_version=config.entity_key_serialization_version,\n )\n )\n\n cur.execute(\n sql.SQL(\n \"\"\"\n SELECT entity_key, feature_name, value, event_ts\n FROM {} WHERE entity_key = ANY(%s);\n \"\"\"\n ).format(\n sql.Identifier(_table_id(project, table)),\n ),\n (keys,),\n )\n\n rows = cur.fetchall()\n\n # Since we don't know the order returned from PostgreSQL we'll need\n # to construct a dict to be able to quickly look up the correct row\n # when we iterate through the keys since they are in the correct order\n values_dict = defaultdict(list)\n for row in rows if rows is not None else []:\n values_dict[row[0].tobytes()].append(row[1:])\n\n for key in keys:\n if key in values_dict:\n value = values_dict[key]\n res = {}\n for feature_name, value_bin, event_ts in value:\n val = ValueProto()\n val.ParseFromString(bytes(value_bin))\n res[feature_name] = val\n result.append((event_ts, res))\n else:\n result.append((None, None))\n\n return result\n\n @log_exceptions_and_usage(online_store=\"postgres\")\n def update(\n self,\n config: RepoConfig,\n tables_to_delete: Sequence[FeatureView],\n tables_to_keep: Sequence[FeatureView],\n entities_to_delete: Sequence[Entity],\n entities_to_keep: Sequence[Entity],\n partial: bool,\n ):\n project = config.project\n schema_name = config.online_store.db_schema or config.online_store.user\n with self._get_conn(config) as conn, conn.cursor() as cur:\n # If a db_schema is provided, then that schema gets created if it doesn't\n # exist. 
Else a schema is created for the feature store user.\n\n cur.execute(\n \"\"\"\n SELECT schema_name\n FROM information_schema.schemata\n WHERE schema_name = %s\n \"\"\",\n (schema_name,),\n )\n schema_exists = cur.fetchone()\n if not schema_exists:\n cur.execute(\n sql.SQL(\"CREATE SCHEMA IF NOT EXISTS {} AUTHORIZATION {}\").format(\n sql.Identifier(schema_name),\n sql.Identifier(config.online_store.user),\n ),\n )\n\n for table in tables_to_delete:\n table_name = _table_id(project, table)\n cur.execute(_drop_table_and_index(table_name))\n\n for table in tables_to_keep:\n table_name = _table_id(project, table)\n cur.execute(\n sql.SQL(\n \"\"\"\n CREATE TABLE IF NOT EXISTS {}\n (\n entity_key BYTEA,\n feature_name TEXT,\n value BYTEA,\n event_ts TIMESTAMPTZ,\n created_ts TIMESTAMPTZ,\n PRIMARY KEY(entity_key, feature_name)\n );\n CREATE INDEX IF NOT EXISTS {} ON {} (entity_key);\n \"\"\"\n ).format(\n sql.Identifier(table_name),\n sql.Identifier(f\"{table_name}_ek\"),\n sql.Identifier(table_name),\n )\n )\n\n conn.commit()\n\n def teardown(\n self,\n config: RepoConfig,\n tables: Sequence[FeatureView],\n entities: Sequence[Entity],\n ):\n project = config.project\n try:\n with self._get_conn(config) as conn, conn.cursor() as cur:\n for table in tables:\n table_name = _table_id(project, table)\n cur.execute(_drop_table_and_index(table_name))\n except Exception:\n logging.exception(\"Teardown failed\")\n raise\n\n\ndef _table_id(project: str, table: FeatureView) -> str:\n return f\"{project}_{table.name}\"\n\n\ndef _drop_table_and_index(table_name):\n return sql.SQL(\n \"\"\"\n DROP TABLE IF EXISTS {};\n DROP INDEX IF EXISTS {};\n \"\"\"\n ).format(\n sql.Identifier(table_name),\n sql.Identifier(f\"{table_name}_ek\"),\n )\n\n\ndef _to_naive_utc(ts: datetime):\n if ts.tzinfo is None:\n return ts\n else:\n return ts.astimezone(pytz.utc).replace(tzinfo=None)\n", "path": "sdk/python/feast/infra/online_stores/contrib/postgres.py"}]}
| 3,005 | 333 |
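The heart of the change above is composing the `SELECT` differently when a feature subset is requested. A standalone sketch of that query construction, assuming psycopg2; `build_online_read_query` is an illustrative helper rather than part of Feast's API:

```python
from typing import List, Optional

from psycopg2 import sql


def build_online_read_query(table_name: str,
                            requested_features: Optional[List[str]]) -> sql.Composed:
    """Compose the online_read SELECT, filtering by feature_name only when asked."""
    query = """
        SELECT entity_key, feature_name, value, event_ts
        FROM {} WHERE entity_key = ANY(%s)
    """
    if requested_features:
        query += " AND feature_name = ANY(%s)"
    return sql.SQL(query + ";").format(sql.Identifier(table_name))


# Execution then passes (keys,) or (keys, requested_features) as the parameters:
# cur.execute(build_online_read_query(table, feats), (keys, feats) if feats else (keys,))
```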
gh_patches_debug_37322
|
rasdani/github-patches
|
git_diff
|
quantumlib__Cirq-3218
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sqrtISWAP serializer doesn't like rz, ry, rx
The `cirq.rz` helper function ([cirq.ops.common_gates:1045](https://github.com/quantumlib/Cirq/blob/d998b3afe20dd6783e00dcc2590ba0f162b95af7/cirq/ops/common_gates.py#L1045)) converts inputs to half turns (radians?) by dividing by sympy.pi for parametrized inputs, but this division is actually expressed as `sympy.Mul(sympy.Pow(sympy.pi, sympy.Integer(-1)))`
The `sympy.Pow` part of that expression then causes problems with serializers that expect only `Mul` and `Add` in Sympy expressions (i.e. [cirq.google.arg_func_langs:143](https://github.com/quantumlib/Cirq/blob/d998b3afe20dd6783e00dcc2590ba0f162b95af7/cirq/google/arg_func_langs.py#L143))
Would it make more sense to convert all forms of `rz` inputs (symbolic or not) with division by `np.pi`?
reproducing code:
```
# This introduces `Pow`:
contains_pow = sympy.Symbol('x') / sympy.pi
for arg in contains_pow.args:
print(arg, type(arg))
>>> x <class 'sympy.core.symbol.Symbol'>
>>> 1/pi <class 'sympy.core.power.Pow'>
# This casts to float:
no_pow = sympy.Symbol('x') / np.pi
for arg in no_pow.args:
print(arg, type(arg))
>>> 0.318309886183791 <class 'sympy.core.numbers.Float'>
>>> x <class 'sympy.core.symbol.Symbol'>
# Serializer chokes up on Pow due to sympy.pi division
q0 = cirq.GridQubit(0, 0)
c0 = cirq.Circuit(cirq.rz(sympy.Symbol('x')).on(q0))
c1 = cirq.Circuit(cirq.ZPowGate(exponent=sympy.Symbol('x')/np.pi, global_shift=-0.5).on(q0) )
cirq.google.SQRT_ISWAP_GATESET.serialize(c0) # Throws ValueError: Unrecognized arg type: <class 'sympy.core.power.Pow'>
cirq.google.SQRT_ISWAP_GATESET.serialize(c1) # Works fine
```
@dstrain115
</issue>
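For illustration, the `1/pi` term really is a `sympy.Pow`, so a serializer limited to `Add`/`Mul` must either gain `Pow` support or avoid symbolic pi. A minimal, self-contained sketch of the recursive flattening with `Pow` handled symmetrically (names are illustrative; the real serializer writes into an `Arg` proto):

```python
import sympy


def flatten_expr(value):
    """Recursively flatten a sympy expression into (op, args) tuples."""
    if isinstance(value, sympy.Symbol):
        return ("symbol", str(value))
    if isinstance(value, (sympy.Integer, sympy.Float, sympy.Rational, sympy.NumberSymbol)):
        return ("value", float(value))
    for sympy_type, name in ((sympy.Add, "add"), (sympy.Mul, "mul"), (sympy.Pow, "pow")):
        if isinstance(value, sympy_type):
            return (name, [flatten_expr(arg) for arg in value.args])
    raise ValueError(f"Unrecognized arg type: {type(value)}")


# Symbol('x') / pi is Mul(x, Pow(pi, -1)), so 'pow' support is what unblocks rz/ry/rx:
print(flatten_expr(sympy.Symbol("x") / sympy.pi))
```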
<code>
[start of cirq/google/arg_func_langs.py]
1 # Copyright 2019 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import math
15 from typing import (
16 List,
17 Union,
18 Optional,
19 Iterator,
20 Iterable,
21 cast,
22 Set,
23 Dict,
24 FrozenSet,
25 )
26
27 import numpy as np
28 import sympy
29 from cirq.google.api import v2
30
31 SUPPORTED_FUNCTIONS_FOR_LANGUAGE: Dict[Optional[str], FrozenSet[str]] = {
32 '': frozenset(),
33 'linear': frozenset({'add', 'mul'}),
34 # None means any. Is used when inferring the language during serialization.
35 None: frozenset({'add', 'mul'}),
36 }
37
38 SUPPORTED_SYMPY_OPS = (sympy.Symbol, sympy.Add, sympy.Mul)
39
40 # Argument types for gates.
41 ARG_LIKE = Union[int, float, List[bool], str, sympy.Symbol, sympy.Add, sympy.
42 Mul]
43
44 # Supported function languages in order from least to most flexible.
45 # Clients should use the least flexible language they can, to make it easier
46 # to gradually roll out new capabilities to clients and servers.
47 LANGUAGE_ORDER = [
48 '',
49 'linear',
50 ]
51
52
53 def _max_lang(langs: Iterable[str]) -> str:
54 i = max((LANGUAGE_ORDER.index(e) for e in langs), default=0)
55 return LANGUAGE_ORDER[i]
56
57
58 def _infer_function_language_from_circuit(value: v2.program_pb2.Circuit) -> str:
59 return _max_lang({
60 e for moment in value.moments for op in moment.operations
61 for e in _function_languages_from_operation(op)
62 })
63
64
65 def _infer_function_language_from_schedule(value: v2.program_pb2.Schedule
66 ) -> str:
67 return _max_lang({
68 e for op in value.scheduled_operations
69 for e in _function_languages_from_operation(op.operation)
70 })
71
72
73 def _function_languages_from_operation(value: v2.program_pb2.Operation
74 ) -> Iterator[str]:
75 for arg in value.args.values():
76 yield from _function_languages_from_arg(arg)
77
78
79 def _function_languages_from_arg(arg_proto: v2.program_pb2.Arg
80 ) -> Iterator[str]:
81
82 which = arg_proto.WhichOneof('arg')
83 if which == 'func':
84 if arg_proto.func.type in ['add', 'mul']:
85 yield 'linear'
86 for a in arg_proto.func.args:
87 yield from _function_languages_from_arg(a)
88
89
90 def _arg_to_proto(value: ARG_LIKE,
91 *,
92 arg_function_language: Optional[str],
93 out: Optional[v2.program_pb2.Arg] = None
94 ) -> v2.program_pb2.Arg:
95 """Writes an argument value into an Arg proto.
96
97 Args:
98 value: The value to encode.
99 arg_function_language: The language to use when encoding functions. If
100 this is set to None, it will be set to the minimal language
101 necessary to support the features that were actually used.
102 out: The proto to write the result into. Defaults to a new instance.
103
104 Returns:
105 The proto that was written into as well as the `arg_function_language`
106 that was used.
107 """
108
109 if arg_function_language not in SUPPORTED_FUNCTIONS_FOR_LANGUAGE:
110 raise ValueError(f'Unrecognized arg_function_language: '
111 f'{arg_function_language!r}')
112 supported = SUPPORTED_FUNCTIONS_FOR_LANGUAGE[arg_function_language]
113
114 msg = v2.program_pb2.Arg() if out is None else out
115
116 def check_support(func_type: str) -> str:
117 if func_type not in supported:
118 lang = (repr(arg_function_language)
119 if arg_function_language is not None else '[any]')
120 raise ValueError(f'Function type {func_type!r} not supported by '
121 f'arg_function_language {lang}')
122 return func_type
123
124 if isinstance(value, (float, int, sympy.Integer, sympy.Float,
125 sympy.Rational, sympy.NumberSymbol)):
126 msg.arg_value.float_value = float(value)
127 elif isinstance(value, str):
128 msg.arg_value.string_value = value
129 elif (isinstance(value, (list, tuple, np.ndarray)) and
130 all(isinstance(x, (bool, np.bool_)) for x in value)):
131 # Some protobuf / numpy combinations do not support np.bool_, so cast.
132 msg.arg_value.bool_values.values.extend([bool(x) for x in value])
133 elif isinstance(value, sympy.Symbol):
134 msg.symbol = str(value.free_symbols.pop())
135 elif isinstance(value, sympy.Add):
136 msg.func.type = check_support('add')
137 for arg in value.args:
138 _arg_to_proto(arg,
139 arg_function_language=arg_function_language,
140 out=msg.func.args.add())
141 elif isinstance(value, sympy.Mul):
142 msg.func.type = check_support('mul')
143 for arg in value.args:
144 _arg_to_proto(arg,
145 arg_function_language=arg_function_language,
146 out=msg.func.args.add())
147 else:
148 raise ValueError(f'Unrecognized arg type: {type(value)}')
149
150 return msg
151
152
153 def _arg_from_proto(
154 arg_proto: v2.program_pb2.Arg,
155 *,
156 arg_function_language: str,
157 required_arg_name: Optional[str] = None,
158 ) -> Optional[ARG_LIKE]:
159 """Extracts a python value from an argument value proto.
160
161 Args:
162 arg_proto: The proto containing a serialized value.
163 arg_function_language: The `arg_function_language` field from
164 `Program.Language`.
165 required_arg_name: If set to `None`, the method will return `None` when
166 given an unset proto value. If set to a string, the method will
167 instead raise an error complaining that the value is missing in that
168 situation.
169
170 Returns:
171 The deserialized value, or else None if there was no set value and
172 `required_arg_name` was set to `None`.
173 """
174 supported = SUPPORTED_FUNCTIONS_FOR_LANGUAGE.get(arg_function_language)
175 if supported is None:
176 raise ValueError(f'Unrecognized arg_function_language: '
177 f'{arg_function_language!r}')
178
179 which = arg_proto.WhichOneof('arg')
180 if which == 'arg_value':
181 arg_value = arg_proto.arg_value
182 which_val = arg_value.WhichOneof('arg_value')
183 if which_val == 'float_value' or which_val == 'double_value':
184 if which_val == 'double_value':
185 result = float(arg_value.double_value)
186 else:
187 result = float(arg_value.float_value)
188 if math.ceil(result) == math.floor(result):
189 result = int(result)
190 return result
191 if which_val == 'bool_values':
192 return list(arg_value.bool_values.values)
193 if which_val == 'string_value':
194 return str(arg_value.string_value)
195 raise ValueError(f'Unrecognized value type: {which_val!r}')
196
197 if which == 'symbol':
198 return sympy.Symbol(arg_proto.symbol)
199
200 if which == 'func':
201 func = arg_proto.func
202
203 if func.type not in cast(Set[str], supported):
204 raise ValueError(
205 f'Unrecognized function type {func.type!r} '
206 f'for arg_function_language={arg_function_language!r}')
207
208 if func.type == 'add':
209 return sympy.Add(*[
210 _arg_from_proto(a,
211 arg_function_language=arg_function_language,
212 required_arg_name='An addition argument')
213 for a in func.args
214 ])
215
216 if func.type == 'mul':
217 return sympy.Mul(*[
218 _arg_from_proto(a,
219 arg_function_language=arg_function_language,
220 required_arg_name='A multiplication argument')
221 for a in func.args
222 ])
223
224 if required_arg_name is not None:
225 raise ValueError(
226 f'{required_arg_name} is missing or has an unrecognized '
227 f'argument type (WhichOneof("arg")={which!r}).')
228
229 return None
230
[end of cirq/google/arg_func_langs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cirq/google/arg_func_langs.py b/cirq/google/arg_func_langs.py
--- a/cirq/google/arg_func_langs.py
+++ b/cirq/google/arg_func_langs.py
@@ -31,11 +31,12 @@
SUPPORTED_FUNCTIONS_FOR_LANGUAGE: Dict[Optional[str], FrozenSet[str]] = {
'': frozenset(),
'linear': frozenset({'add', 'mul'}),
+ 'exp': frozenset({'add', 'mul', 'pow'}),
# None means any. Is used when inferring the language during serialization.
- None: frozenset({'add', 'mul'}),
+ None: frozenset({'add', 'mul', 'pow'}),
}
-SUPPORTED_SYMPY_OPS = (sympy.Symbol, sympy.Add, sympy.Mul)
+SUPPORTED_SYMPY_OPS = (sympy.Symbol, sympy.Add, sympy.Mul, sympy.Pow)
# Argument types for gates.
ARG_LIKE = Union[int, float, List[bool], str, sympy.Symbol, sympy.Add, sympy.
@@ -47,6 +48,7 @@
LANGUAGE_ORDER = [
'',
'linear',
+ 'exp',
]
@@ -85,6 +87,10 @@
yield 'linear'
for a in arg_proto.func.args:
yield from _function_languages_from_arg(a)
+ if arg_proto.func.type in ['pow']:
+ yield 'exp'
+ for a in arg_proto.func.args:
+ yield from _function_languages_from_arg(a)
def _arg_to_proto(value: ARG_LIKE,
@@ -144,6 +150,12 @@
_arg_to_proto(arg,
arg_function_language=arg_function_language,
out=msg.func.args.add())
+ elif isinstance(value, sympy.Pow):
+ msg.func.type = check_support('pow')
+ for arg in value.args:
+ _arg_to_proto(arg,
+ arg_function_language=arg_function_language,
+ out=msg.func.args.add())
else:
raise ValueError(f'Unrecognized arg type: {type(value)}')
@@ -221,6 +233,14 @@
for a in func.args
])
+ if func.type == 'pow':
+ return sympy.Pow(*[
+ _arg_from_proto(a,
+ arg_function_language=arg_function_language,
+ required_arg_name='A power argument')
+ for a in func.args
+ ])
+
if required_arg_name is not None:
raise ValueError(
f'{required_arg_name} is missing or has an unrecognized '
|
{"golden_diff": "diff --git a/cirq/google/arg_func_langs.py b/cirq/google/arg_func_langs.py\n--- a/cirq/google/arg_func_langs.py\n+++ b/cirq/google/arg_func_langs.py\n@@ -31,11 +31,12 @@\n SUPPORTED_FUNCTIONS_FOR_LANGUAGE: Dict[Optional[str], FrozenSet[str]] = {\n '': frozenset(),\n 'linear': frozenset({'add', 'mul'}),\n+ 'exp': frozenset({'add', 'mul', 'pow'}),\n # None means any. Is used when inferring the language during serialization.\n- None: frozenset({'add', 'mul'}),\n+ None: frozenset({'add', 'mul', 'pow'}),\n }\n \n-SUPPORTED_SYMPY_OPS = (sympy.Symbol, sympy.Add, sympy.Mul)\n+SUPPORTED_SYMPY_OPS = (sympy.Symbol, sympy.Add, sympy.Mul, sympy.Pow)\n \n # Argument types for gates.\n ARG_LIKE = Union[int, float, List[bool], str, sympy.Symbol, sympy.Add, sympy.\n@@ -47,6 +48,7 @@\n LANGUAGE_ORDER = [\n '',\n 'linear',\n+ 'exp',\n ]\n \n \n@@ -85,6 +87,10 @@\n yield 'linear'\n for a in arg_proto.func.args:\n yield from _function_languages_from_arg(a)\n+ if arg_proto.func.type in ['pow']:\n+ yield 'exp'\n+ for a in arg_proto.func.args:\n+ yield from _function_languages_from_arg(a)\n \n \n def _arg_to_proto(value: ARG_LIKE,\n@@ -144,6 +150,12 @@\n _arg_to_proto(arg,\n arg_function_language=arg_function_language,\n out=msg.func.args.add())\n+ elif isinstance(value, sympy.Pow):\n+ msg.func.type = check_support('pow')\n+ for arg in value.args:\n+ _arg_to_proto(arg,\n+ arg_function_language=arg_function_language,\n+ out=msg.func.args.add())\n else:\n raise ValueError(f'Unrecognized arg type: {type(value)}')\n \n@@ -221,6 +233,14 @@\n for a in func.args\n ])\n \n+ if func.type == 'pow':\n+ return sympy.Pow(*[\n+ _arg_from_proto(a,\n+ arg_function_language=arg_function_language,\n+ required_arg_name='A power argument')\n+ for a in func.args\n+ ])\n+\n if required_arg_name is not None:\n raise ValueError(\n f'{required_arg_name} is missing or has an unrecognized '\n", "issue": "sqrtISWAP serializer doesn't like rz, ry, rx\nThe `cirq.rz` helper function ([cirq.ops.common_gates:1045](https://github.com/quantumlib/Cirq/blob/d998b3afe20dd6783e00dcc2590ba0f162b95af7/cirq/ops/common_gates.py#L1045)) converts inputs to half turns (radians?) by dividing by sympy.pi for parametrized inputs, but this division is actually expressed as `sympy.Mul(sympy.Pow(sympy.pi, sympy.Integer(-1))`\r\n\r\nThe `sympy.Pow` part of that expression then causes problems with serializers that expect only `Mul` and `Add` in Sympy expressions (i.e. 
[cirq.google.arg_func_langs:143](https://github.com/quantumlib/Cirq/blob/d998b3afe20dd6783e00dcc2590ba0f162b95af7/cirq/google/arg_func_langs.py#L143)) \r\n\r\nWould it make more sense to convert all forms of `rz` inputs (symbolic or not) with division by `np.pi`?\r\n\r\nreproducing code:\r\n```\r\n# This introduces `Pow`:\r\ncontains_pow = sympy.Symbol('x') / sympy.pi\r\nfor arg in contains_pow.args:\r\n print(arg, type(arg))\r\n>>> x <class 'sympy.core.symbol.Symbol'>\r\n>>> 1/pi <class 'sympy.core.power.Pow'>\r\n\r\n# This casts to float:\r\nno_pow = sympy.Symbol('x') / np.pi\r\nfor arg in no_pow.args:\r\n print(arg, type(arg))\r\n\r\n>>> 0.318309886183791 <class 'sympy.core.numbers.Float'>\r\n>>> x <class 'sympy.core.symbol.Symbol'>\r\n\r\n# Serializer chokes up on Pow due to sympy.pi division\r\nq0 = cirq.GridQubit(0, 0)\r\nc0 = cirq.Circuit(cirq.rz(sympy.Symbol('x')).on(q0))\r\nc1 = cirq.Circuit(cirq.ZPowGate(exponent=sympy.Symbol('x')/np.pi, global_shift=-0.5).on(q0) )\r\n\r\ncirq.google.SQRT_ISWAP_GATESET.serialize(c0) # Throws ValueError: Unrecognized arg type: <class 'sympy.core.power.Pow'>\r\ncirq.google.SQRT_ISWAP_GATESET.serialize(c1) # Works fine\r\n```\r\n\r\n@dstrain115 \n", "before_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport math\nfrom typing import (\n List,\n Union,\n Optional,\n Iterator,\n Iterable,\n cast,\n Set,\n Dict,\n FrozenSet,\n)\n\nimport numpy as np\nimport sympy\nfrom cirq.google.api import v2\n\nSUPPORTED_FUNCTIONS_FOR_LANGUAGE: Dict[Optional[str], FrozenSet[str]] = {\n '': frozenset(),\n 'linear': frozenset({'add', 'mul'}),\n # None means any. 
Is used when inferring the language during serialization.\n None: frozenset({'add', 'mul'}),\n}\n\nSUPPORTED_SYMPY_OPS = (sympy.Symbol, sympy.Add, sympy.Mul)\n\n# Argument types for gates.\nARG_LIKE = Union[int, float, List[bool], str, sympy.Symbol, sympy.Add, sympy.\n Mul]\n\n# Supported function languages in order from least to most flexible.\n# Clients should use the least flexible language they can, to make it easier\n# to gradually roll out new capabilities to clients and servers.\nLANGUAGE_ORDER = [\n '',\n 'linear',\n]\n\n\ndef _max_lang(langs: Iterable[str]) -> str:\n i = max((LANGUAGE_ORDER.index(e) for e in langs), default=0)\n return LANGUAGE_ORDER[i]\n\n\ndef _infer_function_language_from_circuit(value: v2.program_pb2.Circuit) -> str:\n return _max_lang({\n e for moment in value.moments for op in moment.operations\n for e in _function_languages_from_operation(op)\n })\n\n\ndef _infer_function_language_from_schedule(value: v2.program_pb2.Schedule\n ) -> str:\n return _max_lang({\n e for op in value.scheduled_operations\n for e in _function_languages_from_operation(op.operation)\n })\n\n\ndef _function_languages_from_operation(value: v2.program_pb2.Operation\n ) -> Iterator[str]:\n for arg in value.args.values():\n yield from _function_languages_from_arg(arg)\n\n\ndef _function_languages_from_arg(arg_proto: v2.program_pb2.Arg\n ) -> Iterator[str]:\n\n which = arg_proto.WhichOneof('arg')\n if which == 'func':\n if arg_proto.func.type in ['add', 'mul']:\n yield 'linear'\n for a in arg_proto.func.args:\n yield from _function_languages_from_arg(a)\n\n\ndef _arg_to_proto(value: ARG_LIKE,\n *,\n arg_function_language: Optional[str],\n out: Optional[v2.program_pb2.Arg] = None\n ) -> v2.program_pb2.Arg:\n \"\"\"Writes an argument value into an Arg proto.\n\n Args:\n value: The value to encode.\n arg_function_language: The language to use when encoding functions. If\n this is set to None, it will be set to the minimal language\n necessary to support the features that were actually used.\n out: The proto to write the result into. 
Defaults to a new instance.\n\n Returns:\n The proto that was written into as well as the `arg_function_language`\n that was used.\n \"\"\"\n\n if arg_function_language not in SUPPORTED_FUNCTIONS_FOR_LANGUAGE:\n raise ValueError(f'Unrecognized arg_function_language: '\n f'{arg_function_language!r}')\n supported = SUPPORTED_FUNCTIONS_FOR_LANGUAGE[arg_function_language]\n\n msg = v2.program_pb2.Arg() if out is None else out\n\n def check_support(func_type: str) -> str:\n if func_type not in supported:\n lang = (repr(arg_function_language)\n if arg_function_language is not None else '[any]')\n raise ValueError(f'Function type {func_type!r} not supported by '\n f'arg_function_language {lang}')\n return func_type\n\n if isinstance(value, (float, int, sympy.Integer, sympy.Float,\n sympy.Rational, sympy.NumberSymbol)):\n msg.arg_value.float_value = float(value)\n elif isinstance(value, str):\n msg.arg_value.string_value = value\n elif (isinstance(value, (list, tuple, np.ndarray)) and\n all(isinstance(x, (bool, np.bool_)) for x in value)):\n # Some protobuf / numpy combinations do not support np.bool_, so cast.\n msg.arg_value.bool_values.values.extend([bool(x) for x in value])\n elif isinstance(value, sympy.Symbol):\n msg.symbol = str(value.free_symbols.pop())\n elif isinstance(value, sympy.Add):\n msg.func.type = check_support('add')\n for arg in value.args:\n _arg_to_proto(arg,\n arg_function_language=arg_function_language,\n out=msg.func.args.add())\n elif isinstance(value, sympy.Mul):\n msg.func.type = check_support('mul')\n for arg in value.args:\n _arg_to_proto(arg,\n arg_function_language=arg_function_language,\n out=msg.func.args.add())\n else:\n raise ValueError(f'Unrecognized arg type: {type(value)}')\n\n return msg\n\n\ndef _arg_from_proto(\n arg_proto: v2.program_pb2.Arg,\n *,\n arg_function_language: str,\n required_arg_name: Optional[str] = None,\n) -> Optional[ARG_LIKE]:\n \"\"\"Extracts a python value from an argument value proto.\n\n Args:\n arg_proto: The proto containing a serialized value.\n arg_function_language: The `arg_function_language` field from\n `Program.Language`.\n required_arg_name: If set to `None`, the method will return `None` when\n given an unset proto value. 
If set to a string, the method will\n instead raise an error complaining that the value is missing in that\n situation.\n\n Returns:\n The deserialized value, or else None if there was no set value and\n `required_arg_name` was set to `None`.\n \"\"\"\n supported = SUPPORTED_FUNCTIONS_FOR_LANGUAGE.get(arg_function_language)\n if supported is None:\n raise ValueError(f'Unrecognized arg_function_language: '\n f'{arg_function_language!r}')\n\n which = arg_proto.WhichOneof('arg')\n if which == 'arg_value':\n arg_value = arg_proto.arg_value\n which_val = arg_value.WhichOneof('arg_value')\n if which_val == 'float_value' or which_val == 'double_value':\n if which_val == 'double_value':\n result = float(arg_value.double_value)\n else:\n result = float(arg_value.float_value)\n if math.ceil(result) == math.floor(result):\n result = int(result)\n return result\n if which_val == 'bool_values':\n return list(arg_value.bool_values.values)\n if which_val == 'string_value':\n return str(arg_value.string_value)\n raise ValueError(f'Unrecognized value type: {which_val!r}')\n\n if which == 'symbol':\n return sympy.Symbol(arg_proto.symbol)\n\n if which == 'func':\n func = arg_proto.func\n\n if func.type not in cast(Set[str], supported):\n raise ValueError(\n f'Unrecognized function type {func.type!r} '\n f'for arg_function_language={arg_function_language!r}')\n\n if func.type == 'add':\n return sympy.Add(*[\n _arg_from_proto(a,\n arg_function_language=arg_function_language,\n required_arg_name='An addition argument')\n for a in func.args\n ])\n\n if func.type == 'mul':\n return sympy.Mul(*[\n _arg_from_proto(a,\n arg_function_language=arg_function_language,\n required_arg_name='A multiplication argument')\n for a in func.args\n ])\n\n if required_arg_name is not None:\n raise ValueError(\n f'{required_arg_name} is missing or has an unrecognized '\n f'argument type (WhichOneof(\"arg\")={which!r}).')\n\n return None\n", "path": "cirq/google/arg_func_langs.py"}]}
| 3,516 | 583 |
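For illustration, a minimal standalone sketch of the kind of extension the cirq patch above makes: encoding `sympy.Pow` recursively alongside `Add` and `Mul`. The dict-based output and the `expr_to_dict` helper are stand-ins invented for this sketch, not cirq's actual proto serializer:

```python
import sympy

def expr_to_dict(value):
    """Recursively encode a sympy expression into plain dicts (sketch only)."""
    if isinstance(value, (int, float, sympy.Integer, sympy.Float,
                          sympy.Rational, sympy.NumberSymbol)):
        return {"arg_value": float(value)}
    if isinstance(value, sympy.Symbol):
        return {"symbol": str(value)}
    if isinstance(value, (sympy.Add, sympy.Mul, sympy.Pow)):
        name = {sympy.Add: "add", sympy.Mul: "mul", sympy.Pow: "pow"}[type(value)]
        return {"func": {"type": name,
                         "args": [expr_to_dict(a) for a in value.args]}}
    raise ValueError(f"Unrecognized arg type: {type(value)}")

# Symbol('x') / sympy.pi is Mul(x, Pow(pi, -1)), so handling 'pow' is what
# lets the expression from the issue serialize at all.
print(expr_to_dict(sympy.Symbol("x") / sympy.pi))
```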
gh_patches_debug_15993
|
rasdani/github-patches
|
git_diff
|
certbot__certbot-4897
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add more context to "Easy vs Secure" prompt
Right now you get this prompt for redirect
```
Please choose whether HTTPS access is required or optional.
-------------------------------------------------------------------------------
1: Easy - Allow both HTTP and HTTPS access to these sites
2: Secure - Make all requests redirect to secure HTTPS access
-------------------------------------------------------------------------------
```
One confusing thing: The title asks "required or optional," but those words don't appear in either option.
Also, we should provide more advice on how to choose. I.e.
```
1: Easy - Allow both HTTP and HTTPS access to these sites. Choose this if you haven't yet tested that your sites function properly when visited on HTTPS.
2: Secure - Make all requests redirect to secure HTTPS access. Choose this for new sites, or if you're confident your site works on HTTPS. You can undo this change by editing your web server's configuration.
```
</issue>
<code>
[start of certbot/display/enhancements.py]
1 """Certbot Enhancement Display"""
2 import logging
3
4 import zope.component
5
6 from certbot import errors
7 from certbot import interfaces
8 from certbot.display import util as display_util
9
10
11 logger = logging.getLogger(__name__)
12
13 # Define a helper function to avoid verbose code
14 util = zope.component.getUtility
15
16
17 def ask(enhancement):
18 """Display the enhancement to the user.
19
20 :param str enhancement: One of the
21 :class:`certbot.CONFIG.ENHANCEMENTS` enhancements
22
23 :returns: True if feature is desired, False otherwise
24 :rtype: bool
25
26 :raises .errors.Error: if the enhancement provided is not supported
27
28 """
29 try:
30 # Call the appropriate function based on the enhancement
31 return DISPATCH[enhancement]()
32 except KeyError:
33 logger.error("Unsupported enhancement given to ask(): %s", enhancement)
34 raise errors.Error("Unsupported Enhancement")
35
36
37 def redirect_by_default():
38 """Determines whether the user would like to redirect to HTTPS.
39
40 :returns: True if redirect is desired, False otherwise
41 :rtype: bool
42
43 """
44 choices = [
45 ("Easy", "Allow both HTTP and HTTPS access to these sites"),
46 ("Secure", "Make all requests redirect to secure HTTPS access"),
47 ]
48
49 code, selection = util(interfaces.IDisplay).menu(
50 "Please choose whether HTTPS access is required or optional.",
51 choices, default=0,
52 cli_flag="--redirect / --no-redirect", force_interactive=True)
53
54 if code != display_util.OK:
55 return False
56
57 return selection == 1
58
59
60 DISPATCH = {
61 "redirect": redirect_by_default
62 }
63
[end of certbot/display/enhancements.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/certbot/display/enhancements.py b/certbot/display/enhancements.py
--- a/certbot/display/enhancements.py
+++ b/certbot/display/enhancements.py
@@ -42,12 +42,14 @@
"""
choices = [
- ("Easy", "Allow both HTTP and HTTPS access to these sites"),
- ("Secure", "Make all requests redirect to secure HTTPS access"),
+ ("No redirect", "Make no further changes to the webserver configuration."),
+ ("Redirect", "Make all requests redirect to secure HTTPS access. "
+ "Choose this for new sites, or if you're confident your site works on HTTPS. "
+ "You can undo this change by editing your web server's configuration."),
]
code, selection = util(interfaces.IDisplay).menu(
- "Please choose whether HTTPS access is required or optional.",
+ "Please choose whether or not to redirect HTTP traffic to HTTPS, removing HTTP access.",
choices, default=0,
cli_flag="--redirect / --no-redirect", force_interactive=True)
|
{"golden_diff": "diff --git a/certbot/display/enhancements.py b/certbot/display/enhancements.py\n--- a/certbot/display/enhancements.py\n+++ b/certbot/display/enhancements.py\n@@ -42,12 +42,14 @@\n \n \"\"\"\n choices = [\n- (\"Easy\", \"Allow both HTTP and HTTPS access to these sites\"),\n- (\"Secure\", \"Make all requests redirect to secure HTTPS access\"),\n+ (\"No redirect\", \"Make no further changes to the webserver configuration.\"),\n+ (\"Redirect\", \"Make all requests redirect to secure HTTPS access. \"\n+ \"Choose this for new sites, or if you're confident your site works on HTTPS. \"\n+ \"You can undo this change by editing your web server's configuration.\"),\n ]\n \n code, selection = util(interfaces.IDisplay).menu(\n- \"Please choose whether HTTPS access is required or optional.\",\n+ \"Please choose whether or not to redirect HTTP traffic to HTTPS, removing HTTP access.\",\n choices, default=0,\n cli_flag=\"--redirect / --no-redirect\", force_interactive=True)\n", "issue": "Add more context to \"Easy vs Secure\" prompt\nRight now you get this prompt for redirect\n\n```\nPlease choose whether HTTPS access is required or optional.\n-------------------------------------------------------------------------------\n1: Easy - Allow both HTTP and HTTPS access to these sites\n2: Secure - Make all requests redirect to secure HTTPS access\n-------------------------------------------------------------------------------\n```\n\nOne confusing thing: The title asks \"required or optional,\" but those words don't appear in either option.\n\nAlso, we should provide more advice on how to choose. I.e. \n\n```\n1: Easy - Allow both HTTP and HTTPS access to these sites. Choose this if you haven't yet tested that your sites function properly when visited on HTTPS.\n2: Secure - Make all requests redirect to secure HTTPS access. Choose this for new sites, or if you're confident your site works on HTTPS. 
You can undo this change by editing your web server's configuration.\n```\n\n", "before_files": [{"content": "\"\"\"Certbot Enhancement Display\"\"\"\nimport logging\n\nimport zope.component\n\nfrom certbot import errors\nfrom certbot import interfaces\nfrom certbot.display import util as display_util\n\n\nlogger = logging.getLogger(__name__)\n\n# Define a helper function to avoid verbose code\nutil = zope.component.getUtility\n\n\ndef ask(enhancement):\n \"\"\"Display the enhancement to the user.\n\n :param str enhancement: One of the\n :class:`certbot.CONFIG.ENHANCEMENTS` enhancements\n\n :returns: True if feature is desired, False otherwise\n :rtype: bool\n\n :raises .errors.Error: if the enhancement provided is not supported\n\n \"\"\"\n try:\n # Call the appropriate function based on the enhancement\n return DISPATCH[enhancement]()\n except KeyError:\n logger.error(\"Unsupported enhancement given to ask(): %s\", enhancement)\n raise errors.Error(\"Unsupported Enhancement\")\n\n\ndef redirect_by_default():\n \"\"\"Determines whether the user would like to redirect to HTTPS.\n\n :returns: True if redirect is desired, False otherwise\n :rtype: bool\n\n \"\"\"\n choices = [\n (\"Easy\", \"Allow both HTTP and HTTPS access to these sites\"),\n (\"Secure\", \"Make all requests redirect to secure HTTPS access\"),\n ]\n\n code, selection = util(interfaces.IDisplay).menu(\n \"Please choose whether HTTPS access is required or optional.\",\n choices, default=0,\n cli_flag=\"--redirect / --no-redirect\", force_interactive=True)\n\n if code != display_util.OK:\n return False\n\n return selection == 1\n\n\nDISPATCH = {\n \"redirect\": redirect_by_default\n}\n", "path": "certbot/display/enhancements.py"}]}
| 1,196 | 237 |
gh_patches_debug_13250
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-6099
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecate Reppy support
https://github.com/scrapy/scrapy/issues/5226#issuecomment-897621867
</issue>
<code>
[start of scrapy/robotstxt.py]
1 import logging
2 import sys
3 from abc import ABCMeta, abstractmethod
4
5 from scrapy.utils.python import to_unicode
6
7 logger = logging.getLogger(__name__)
8
9
10 def decode_robotstxt(robotstxt_body, spider, to_native_str_type=False):
11 try:
12 if to_native_str_type:
13 robotstxt_body = to_unicode(robotstxt_body)
14 else:
15 robotstxt_body = robotstxt_body.decode("utf-8")
16 except UnicodeDecodeError:
17 # If we found garbage or robots.txt in an encoding other than UTF-8, disregard it.
18 # Switch to 'allow all' state.
19 logger.warning(
20 "Failure while parsing robots.txt. File either contains garbage or "
21 "is in an encoding other than UTF-8, treating it as an empty file.",
22 exc_info=sys.exc_info(),
23 extra={"spider": spider},
24 )
25 robotstxt_body = ""
26 return robotstxt_body
27
28
29 class RobotParser(metaclass=ABCMeta):
30 @classmethod
31 @abstractmethod
32 def from_crawler(cls, crawler, robotstxt_body):
33 """Parse the content of a robots.txt_ file as bytes. This must be a class method.
34 It must return a new instance of the parser backend.
35
36 :param crawler: crawler which made the request
37 :type crawler: :class:`~scrapy.crawler.Crawler` instance
38
39 :param robotstxt_body: content of a robots.txt_ file.
40 :type robotstxt_body: bytes
41 """
42 pass
43
44 @abstractmethod
45 def allowed(self, url, user_agent):
46 """Return ``True`` if ``user_agent`` is allowed to crawl ``url``, otherwise return ``False``.
47
48 :param url: Absolute URL
49 :type url: str
50
51 :param user_agent: User agent
52 :type user_agent: str
53 """
54 pass
55
56
57 class PythonRobotParser(RobotParser):
58 def __init__(self, robotstxt_body, spider):
59 from urllib.robotparser import RobotFileParser
60
61 self.spider = spider
62 robotstxt_body = decode_robotstxt(
63 robotstxt_body, spider, to_native_str_type=True
64 )
65 self.rp = RobotFileParser()
66 self.rp.parse(robotstxt_body.splitlines())
67
68 @classmethod
69 def from_crawler(cls, crawler, robotstxt_body):
70 spider = None if not crawler else crawler.spider
71 o = cls(robotstxt_body, spider)
72 return o
73
74 def allowed(self, url, user_agent):
75 user_agent = to_unicode(user_agent)
76 url = to_unicode(url)
77 return self.rp.can_fetch(user_agent, url)
78
79
80 class ReppyRobotParser(RobotParser):
81 def __init__(self, robotstxt_body, spider):
82 from reppy.robots import Robots
83
84 self.spider = spider
85 self.rp = Robots.parse("", robotstxt_body)
86
87 @classmethod
88 def from_crawler(cls, crawler, robotstxt_body):
89 spider = None if not crawler else crawler.spider
90 o = cls(robotstxt_body, spider)
91 return o
92
93 def allowed(self, url, user_agent):
94 return self.rp.allowed(url, user_agent)
95
96
97 class RerpRobotParser(RobotParser):
98 def __init__(self, robotstxt_body, spider):
99 from robotexclusionrulesparser import RobotExclusionRulesParser
100
101 self.spider = spider
102 self.rp = RobotExclusionRulesParser()
103 robotstxt_body = decode_robotstxt(robotstxt_body, spider)
104 self.rp.parse(robotstxt_body)
105
106 @classmethod
107 def from_crawler(cls, crawler, robotstxt_body):
108 spider = None if not crawler else crawler.spider
109 o = cls(robotstxt_body, spider)
110 return o
111
112 def allowed(self, url, user_agent):
113 user_agent = to_unicode(user_agent)
114 url = to_unicode(url)
115 return self.rp.is_allowed(user_agent, url)
116
117
118 class ProtegoRobotParser(RobotParser):
119 def __init__(self, robotstxt_body, spider):
120 from protego import Protego
121
122 self.spider = spider
123 robotstxt_body = decode_robotstxt(robotstxt_body, spider)
124 self.rp = Protego.parse(robotstxt_body)
125
126 @classmethod
127 def from_crawler(cls, crawler, robotstxt_body):
128 spider = None if not crawler else crawler.spider
129 o = cls(robotstxt_body, spider)
130 return o
131
132 def allowed(self, url, user_agent):
133 user_agent = to_unicode(user_agent)
134 url = to_unicode(url)
135 return self.rp.can_fetch(url, user_agent)
136
[end of scrapy/robotstxt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scrapy/robotstxt.py b/scrapy/robotstxt.py
--- a/scrapy/robotstxt.py
+++ b/scrapy/robotstxt.py
@@ -1,7 +1,9 @@
import logging
import sys
from abc import ABCMeta, abstractmethod
+from warnings import warn
+from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.python import to_unicode
logger = logging.getLogger(__name__)
@@ -79,6 +81,7 @@
class ReppyRobotParser(RobotParser):
def __init__(self, robotstxt_body, spider):
+ warn("ReppyRobotParser is deprecated.", ScrapyDeprecationWarning, stacklevel=2)
from reppy.robots import Robots
self.spider = spider
|
{"golden_diff": "diff --git a/scrapy/robotstxt.py b/scrapy/robotstxt.py\n--- a/scrapy/robotstxt.py\n+++ b/scrapy/robotstxt.py\n@@ -1,7 +1,9 @@\n import logging\n import sys\n from abc import ABCMeta, abstractmethod\n+from warnings import warn\n \n+from scrapy.exceptions import ScrapyDeprecationWarning\n from scrapy.utils.python import to_unicode\n \n logger = logging.getLogger(__name__)\n@@ -79,6 +81,7 @@\n \n class ReppyRobotParser(RobotParser):\n def __init__(self, robotstxt_body, spider):\n+ warn(\"ReppyRobotParser is deprecated.\", ScrapyDeprecationWarning, stacklevel=2)\n from reppy.robots import Robots\n \n self.spider = spider\n", "issue": "Deprecate Reppy support\nhttps://github.com/scrapy/scrapy/issues/5226#issuecomment-897621867\n", "before_files": [{"content": "import logging\nimport sys\nfrom abc import ABCMeta, abstractmethod\n\nfrom scrapy.utils.python import to_unicode\n\nlogger = logging.getLogger(__name__)\n\n\ndef decode_robotstxt(robotstxt_body, spider, to_native_str_type=False):\n try:\n if to_native_str_type:\n robotstxt_body = to_unicode(robotstxt_body)\n else:\n robotstxt_body = robotstxt_body.decode(\"utf-8\")\n except UnicodeDecodeError:\n # If we found garbage or robots.txt in an encoding other than UTF-8, disregard it.\n # Switch to 'allow all' state.\n logger.warning(\n \"Failure while parsing robots.txt. File either contains garbage or \"\n \"is in an encoding other than UTF-8, treating it as an empty file.\",\n exc_info=sys.exc_info(),\n extra={\"spider\": spider},\n )\n robotstxt_body = \"\"\n return robotstxt_body\n\n\nclass RobotParser(metaclass=ABCMeta):\n @classmethod\n @abstractmethod\n def from_crawler(cls, crawler, robotstxt_body):\n \"\"\"Parse the content of a robots.txt_ file as bytes. This must be a class method.\n It must return a new instance of the parser backend.\n\n :param crawler: crawler which made the request\n :type crawler: :class:`~scrapy.crawler.Crawler` instance\n\n :param robotstxt_body: content of a robots.txt_ file.\n :type robotstxt_body: bytes\n \"\"\"\n pass\n\n @abstractmethod\n def allowed(self, url, user_agent):\n \"\"\"Return ``True`` if ``user_agent`` is allowed to crawl ``url``, otherwise return ``False``.\n\n :param url: Absolute URL\n :type url: str\n\n :param user_agent: User agent\n :type user_agent: str\n \"\"\"\n pass\n\n\nclass PythonRobotParser(RobotParser):\n def __init__(self, robotstxt_body, spider):\n from urllib.robotparser import RobotFileParser\n\n self.spider = spider\n robotstxt_body = decode_robotstxt(\n robotstxt_body, spider, to_native_str_type=True\n )\n self.rp = RobotFileParser()\n self.rp.parse(robotstxt_body.splitlines())\n\n @classmethod\n def from_crawler(cls, crawler, robotstxt_body):\n spider = None if not crawler else crawler.spider\n o = cls(robotstxt_body, spider)\n return o\n\n def allowed(self, url, user_agent):\n user_agent = to_unicode(user_agent)\n url = to_unicode(url)\n return self.rp.can_fetch(user_agent, url)\n\n\nclass ReppyRobotParser(RobotParser):\n def __init__(self, robotstxt_body, spider):\n from reppy.robots import Robots\n\n self.spider = spider\n self.rp = Robots.parse(\"\", robotstxt_body)\n\n @classmethod\n def from_crawler(cls, crawler, robotstxt_body):\n spider = None if not crawler else crawler.spider\n o = cls(robotstxt_body, spider)\n return o\n\n def allowed(self, url, user_agent):\n return self.rp.allowed(url, user_agent)\n\n\nclass RerpRobotParser(RobotParser):\n def __init__(self, robotstxt_body, spider):\n from robotexclusionrulesparser import 
RobotExclusionRulesParser\n\n self.spider = spider\n self.rp = RobotExclusionRulesParser()\n robotstxt_body = decode_robotstxt(robotstxt_body, spider)\n self.rp.parse(robotstxt_body)\n\n @classmethod\n def from_crawler(cls, crawler, robotstxt_body):\n spider = None if not crawler else crawler.spider\n o = cls(robotstxt_body, spider)\n return o\n\n def allowed(self, url, user_agent):\n user_agent = to_unicode(user_agent)\n url = to_unicode(url)\n return self.rp.is_allowed(user_agent, url)\n\n\nclass ProtegoRobotParser(RobotParser):\n def __init__(self, robotstxt_body, spider):\n from protego import Protego\n\n self.spider = spider\n robotstxt_body = decode_robotstxt(robotstxt_body, spider)\n self.rp = Protego.parse(robotstxt_body)\n\n @classmethod\n def from_crawler(cls, crawler, robotstxt_body):\n spider = None if not crawler else crawler.spider\n o = cls(robotstxt_body, spider)\n return o\n\n def allowed(self, url, user_agent):\n user_agent = to_unicode(user_agent)\n url = to_unicode(url)\n return self.rp.can_fetch(url, user_agent)\n", "path": "scrapy/robotstxt.py"}]}
| 1,903 | 173 |
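A minimal sketch of the deprecation pattern the scrapy patch above applies: warn from `__init__` so existing configurations keep working while signalling that the backend is going away. The class and warning names below are placeholders, not Scrapy's real objects:

```python
import warnings

class BackendDeprecationWarning(DeprecationWarning):
    """Stand-in for a project-specific deprecation warning class."""

class LegacyRobotsParser:
    def __init__(self, robotstxt_body):
        # stacklevel=2 points the warning at the caller, not this __init__
        warnings.warn("LegacyRobotsParser is deprecated.",
                      BackendDeprecationWarning, stacklevel=2)
        self.body = robotstxt_body

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    LegacyRobotsParser(b"User-agent: *")
    print(caught[0].category.__name__, "-", caught[0].message)
```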
gh_patches_debug_1829
|
rasdani/github-patches
|
git_diff
|
googleapis__google-api-python-client-1125
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect logging level for `oauth2client.contrib.locked_file` (ref #427)
This is - as per request in the original #427 - a follow-up issue.
The function documentation says:
> Detects an appropriate cache module and returns it. Returns `googleapiclient.discovery_cache.base.Cache`, a cache object which is auto detected, or `None` if no cache object is available.
Exceptions in this context only provide a pythonic way to do control flow, and do not indicate anomalies or malfunctioning of the code. `None` is a perfectly fine, as-per-specifications behaviour. The correct level should probably be `INFO`.
Also, even if you would stand by the `WARNING` level, you should tidy up the messy log message. This is what the user is met with:
```WARNING:googleapiclient.discovery_cache:file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth
Traceback (most recent call last):
File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/__init__.py", line 36, in autodetect
from google.appengine.api import memcache
ModuleNotFoundError: No module named 'google.appengine'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py", line 33, in <module>
from oauth2client.contrib.locked_file import LockedFile
ModuleNotFoundError: No module named 'oauth2client.contrib.locked_file'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py", line 37, in <module>
from oauth2client.locked_file import LockedFile
ModuleNotFoundError: No module named 'oauth2client.locked_file'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/__init__.py", line 42, in autodetect
from . import file_cache
File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py", line 40, in <module>
raise ImportError(
ImportError: file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth
```
A saner way to communicate with the user could potentially be:
`WARNING - No caching available (file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth)`
</issue>
<code>
[start of googleapiclient/discovery_cache/__init__.py]
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Caching utility for the discovery document."""
16
17 from __future__ import absolute_import
18
19 import logging
20 import datetime
21 import os
22
23 LOGGER = logging.getLogger(__name__)
24
25 DISCOVERY_DOC_MAX_AGE = 60 * 60 * 24 # 1 day
26
27
28 def autodetect():
29 """Detects an appropriate cache module and returns it.
30
31 Returns:
32 googleapiclient.discovery_cache.base.Cache, a cache object which
33 is auto detected, or None if no cache object is available.
34 """
35 if 'APPENGINE_RUNTIME' in os.environ:
36 try:
37 from google.appengine.api import memcache
38 from . import appengine_memcache
39
40 return appengine_memcache.cache
41 except Exception:
42 pass
43 try:
44 from . import file_cache
45
46 return file_cache.cache
47 except Exception as e:
48 LOGGER.warning(e, exc_info=True)
49 return None
50
[end of googleapiclient/discovery_cache/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/googleapiclient/discovery_cache/__init__.py b/googleapiclient/discovery_cache/__init__.py
--- a/googleapiclient/discovery_cache/__init__.py
+++ b/googleapiclient/discovery_cache/__init__.py
@@ -44,6 +44,7 @@
from . import file_cache
return file_cache.cache
- except Exception as e:
- LOGGER.warning(e, exc_info=True)
+ except Exception:
+ LOGGER.info("file_cache is only supported with oauth2client<4.0.0",
+ exc_info=False)
return None
|
{"golden_diff": "diff --git a/googleapiclient/discovery_cache/__init__.py b/googleapiclient/discovery_cache/__init__.py\n--- a/googleapiclient/discovery_cache/__init__.py\n+++ b/googleapiclient/discovery_cache/__init__.py\n@@ -44,6 +44,7 @@\n from . import file_cache\n \n return file_cache.cache\n- except Exception as e:\n- LOGGER.warning(e, exc_info=True)\n+ except Exception:\n+ LOGGER.info(\"file_cache is only supported with oauth2client<4.0.0\",\n+ exc_info=False)\n return None\n", "issue": "Incorrect logging level for `oauth2client.contrib.locked_file` (ref #427)\nThis is - as per request in the original #427 - a follow-up issue.\r\n\r\nThe function documentation says:\r\n\r\n> Detects an appropriate cache module and returns it. Returns `googleapiclient.discovery_cache.base.Cache`, a cache object which is auto detected, or `None` if no cache object is available.\r\n\r\nExceptions in this context only provide a pythonic way to do control flow, and do not indicate anomalies or malfunctioning of the code. `None` is a perfectly fine, as-per-specifications behaviour. The correct level should probably be `INFO`.\r\n\r\nAlso, even if you would stand by the `WARNING` level, you should tidy up the messy log message. This is what the user is met with:\r\n\r\n```WARNING:googleapiclient.discovery_cache:file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth\r\nTraceback (most recent call last):\r\n File \"/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/__init__.py\", line 36, in autodetect\r\n from google.appengine.api import memcache\r\nModuleNotFoundError: No module named 'google.appengine'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py\", line 33, in <module>\r\n from oauth2client.contrib.locked_file import LockedFile\r\nModuleNotFoundError: No module named 'oauth2client.contrib.locked_file'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py\", line 37, in <module>\r\n from oauth2client.locked_file import LockedFile\r\nModuleNotFoundError: No module named 'oauth2client.locked_file'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/__init__.py\", line 42, in autodetect\r\n from . import file_cache\r\n File \"/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py\", line 40, in <module>\r\n raise ImportError(\r\nImportError: file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth\r\n```\r\n\r\nA saner way to to communicate with the user could potentially be:\r\n\r\n`WARNING - No caching available (file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth)`\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Caching utility for the discovery document.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\nimport datetime\nimport os\n\nLOGGER = logging.getLogger(__name__)\n\nDISCOVERY_DOC_MAX_AGE = 60 * 60 * 24 # 1 day\n\n\ndef autodetect():\n \"\"\"Detects an appropriate cache module and returns it.\n\n Returns:\n googleapiclient.discovery_cache.base.Cache, a cache object which\n is auto detected, or None if no cache object is available.\n \"\"\"\n if 'APPENGINE_RUNTIME' in os.environ:\n try:\n from google.appengine.api import memcache\n from . import appengine_memcache\n\n return appengine_memcache.cache\n except Exception:\n pass\n try:\n from . import file_cache\n\n return file_cache.cache\n except Exception as e:\n LOGGER.warning(e, exc_info=True)\n return None\n", "path": "googleapiclient/discovery_cache/__init__.py"}]}
| 1,595 | 134 |
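A minimal sketch of the logging behaviour the patch above settles on: report the expected cache fallback at INFO with a short message instead of a WARNING carrying the full traceback. The missing-module name and logger wiring are placeholders, not the real googleapiclient imports:

```python
import logging

logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger("discovery_cache_demo")

def autodetect_cache():
    try:
        import missing_file_cache_backend  # stand-in for the optional file_cache
        return missing_file_cache_backend.cache
    except Exception:
        LOGGER.info("file_cache is only supported with oauth2client<4.0.0",
                    exc_info=False)
        return None

print(autodetect_cache())
```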
gh_patches_debug_54380
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-3284
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consider turning off template directories in Git for conan config install
To help us debug your issue please explain:
- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
This is with Conan 1.6.0 on Windows 10.
To see this in progress, have a Git installation set up with automatic ctags generation akin to what's described in [Effortless Ctags with Git](https://tbaggery.com/2011/08/08/effortless-ctags-with-git.html).
When doing `conan config install` with a Git URL, I get error messages like this:
```
Traceback (most recent call last):
File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\shutil.py", line 391, in _rmtree_unsafe
os.rmdir(path)
OSError: [WinError 145] The directory is not empty: 'C:\\Users\\kam\\.conan\\tmp_config_install\\config\\.git\\hooks'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\site-packages\conans\client\command.py", line 1230, in run
method(args[0][1:])
File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\site-packages\conans\client\command.py", line 379, in config
return self._conan.config_install(args.item, verify_ssl, args.type)
File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\site-packages\conans\client\conan_api.py", line 79, in wrapper
return f(*args, **kwargs)
File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\site-packages\conans\client\conan_api.py", line 510, in config_install
return configuration_install(item, self._client_cache, self._user_io.out, verify_ssl, config_type)
File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\site-packages\conans\client\conf\config_installer.py", line 135, in configuration_install
rmdir(tmp_folder)
File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\site-packages\conans\util\files.py", line 202, in rmdir
shutil.rmtree(path, onerror=_change_permissions)
File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\shutil.py", line 494, in rmtree
return _rmtree_unsafe(path, onerror)
File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\shutil.py", line 384, in _rmtree_unsafe
_rmtree_unsafe(fullname, onerror)
File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\shutil.py", line 384, in _rmtree_unsafe
_rmtree_unsafe(fullname, onerror)
File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\shutil.py", line 384, in _rmtree_unsafe
_rmtree_unsafe(fullname, onerror)
File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\shutil.py", line 393, in _rmtree_unsafe
onerror(os.rmdir, path, sys.exc_info())
File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\site-packages\conans\util\files.py", line 197, in _change_permissions
raise OSError("Cannot change permissions for {}! Exception info: {}".format(path, exc_info))
OSError: Cannot change permissions for C:\Users\kam\.conan\tmp_config_install\config\.git\hooks! Exception info: (<class 'OSError'>, OSError(41, 'The directory is not empty'), <traceback object at 0x0000016409078548>)
ERROR: Cannot change permissions for C:\Users\kam\.conan\tmp_config_install\config\.git\hooks! Exception info: (<class 'OSError'>, OSError(41, 'The directory is not empty'), <traceback object at 0x0000016409078548>)
```
The problem boils down to the way files are locked on Windows. It's not possible to delete open files. But the hook starts a background process which has the script files open, and the cleanup of the directory fails. Of course, a second later, the directory can be deleted as files are closed and unlocked.
I've since started reworking my Git template to not start background processes by default, and only run `ctags` in checkouts that I'm actually developing on. This fixes my problem with `conan config install`.
It may make sense, though, to add `-c init.templateDir=` to the `git` command line when working on temporary downloads. It's part of sanitizing the process against user configuration: In a hook, _anything_ can happen, and the errors are obscure and difficult to understand.
`conan config install` is mostly treating a Git repository as a sophisticated form of file archive, and probably doesn't want the user's hooks or other unusual setups from the template.
</issue>
<code>
[start of conans/client/conf/config_installer.py]
1 import os
2 import shutil
3 from six.moves.urllib.parse import urlparse
4
5 from conans.tools import unzip
6 from conans.util.files import rmdir, mkdir
7 from conans.client.remote_registry import RemoteRegistry
8 from conans import tools
9 from conans.errors import ConanException
10 import subprocess
11
12
13 def _hide_password(resource):
14 """
15 Hide password from url/file path
16
17 :param resource: string with url or file path
18 :return: resource with hidden password if present
19 """
20 password = urlparse(resource).password
21 return resource.replace(password, "<hidden>") if password else resource
22
23
24 def _handle_remotes(registry_path, remote_file, output):
25 registry = RemoteRegistry(registry_path, output)
26 new_registry = RemoteRegistry(remote_file, output)
27 registry.define_remotes(new_registry.remotes)
28
29
30 def _handle_profiles(source_folder, target_folder, output):
31 mkdir(target_folder)
32 for root, _, files in os.walk(source_folder):
33 relative_path = os.path.relpath(root, source_folder)
34 if relative_path == ".":
35 relative_path = ""
36 for f in files:
37 profile = os.path.join(relative_path, f)
38 output.info(" Installing profile %s" % profile)
39 shutil.copy(os.path.join(root, f), os.path.join(target_folder, profile))
40
41
42 def _process_git_repo(repo_url, client_cache, output, tmp_folder, verify_ssl):
43 output.info("Trying to clone repo %s" % repo_url)
44
45 with tools.chdir(tmp_folder):
46 try:
47 subprocess.check_output('git -c http.sslVerify=%s clone "%s" config' % (verify_ssl, repo_url),
48 shell=True)
49 output.info("Repo cloned")
50 except Exception as e:
51 raise ConanException("config install error. Can't clone repo: %s" % str(e))
52
53 tmp_folder = os.path.join(tmp_folder, "config")
54 _process_folder(tmp_folder, client_cache, output)
55
56
57 def _process_zip_file(zippath, client_cache, output, tmp_folder, remove=False):
58 unzip(zippath, tmp_folder)
59 if remove:
60 os.unlink(zippath)
61 _process_folder(tmp_folder, client_cache, output)
62
63
64 def _handle_conan_conf(current_conan_conf, new_conan_conf_path):
65 current_conan_conf.read(new_conan_conf_path)
66 with open(current_conan_conf.filename, "w") as f:
67 current_conan_conf.write(f)
68
69
70 def _process_folder(folder, client_cache, output):
71 for root, dirs, files in os.walk(folder):
72 for f in files:
73 if f == "settings.yml":
74 output.info("Installing settings.yml")
75 settings_path = client_cache.settings_path
76 shutil.copy(os.path.join(root, f), settings_path)
77 elif f == "conan.conf":
78 output.info("Processing conan.conf")
79 conan_conf = client_cache.conan_config
80 _handle_conan_conf(conan_conf, os.path.join(root, f))
81 elif f == "remotes.txt":
82 output.info("Defining remotes")
83 registry_path = client_cache.registry
84 _handle_remotes(registry_path, os.path.join(root, f), output)
85 else:
86 relpath = os.path.relpath(root, folder)
87 target_folder = os.path.join(client_cache.conan_folder, relpath)
88 mkdir(target_folder)
89 output.info("Copying file %s to %s" % (f, target_folder))
90 shutil.copy(os.path.join(root, f), target_folder)
91 for d in dirs:
92 if d == "profiles":
93 output.info("Installing profiles")
94 profiles_path = client_cache.profiles_path
95 _handle_profiles(os.path.join(root, d), profiles_path, output)
96 break
97 dirs[:] = [d for d in dirs if d not in ("profiles", ".git")]
98
99
100 def _process_download(item, client_cache, output, tmp_folder, verify_ssl):
101 output.info("Trying to download %s" % _hide_password(item))
102 zippath = os.path.join(tmp_folder, "config.zip")
103 try:
104 tools.download(item, zippath, out=output, verify=verify_ssl)
105 _process_zip_file(zippath, client_cache, output, tmp_folder, remove=True)
106 except Exception as e:
107 raise ConanException("Error while installing config from %s\n%s" % (item, str(e)))
108
109
110 def configuration_install(item, client_cache, output, verify_ssl, config_type=None):
111 tmp_folder = os.path.join(client_cache.conan_folder, "tmp_config_install")
112 # necessary for Mac OSX, where the temp folders in /var/ are symlinks to /private/var/
113 tmp_folder = os.path.realpath(tmp_folder)
114 mkdir(tmp_folder)
115 try:
116 if item is None:
117 try:
118 item = client_cache.conan_config.get_item("general.config_install")
119 except ConanException:
120 raise ConanException("Called config install without arguments and "
121 "'general.config_install' not defined in conan.conf")
122
123 if item.endswith(".git") or config_type == "git":
124 _process_git_repo(item, client_cache, output, tmp_folder, verify_ssl)
125 elif os.path.exists(item):
126 # is a local file
127 _process_zip_file(item, client_cache, output, tmp_folder)
128 elif item.startswith("http"):
129 _process_download(item, client_cache, output, tmp_folder, verify_ssl)
130 else:
131 raise ConanException("I don't know how to process %s" % item)
132 finally:
133 if item:
134 client_cache.conan_config.set_item("general.config_install", item)
135 rmdir(tmp_folder)
136
[end of conans/client/conf/config_installer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conans/client/conf/config_installer.py b/conans/client/conf/config_installer.py
--- a/conans/client/conf/config_installer.py
+++ b/conans/client/conf/config_installer.py
@@ -44,7 +44,7 @@
with tools.chdir(tmp_folder):
try:
- subprocess.check_output('git -c http.sslVerify=%s clone "%s" config' % (verify_ssl, repo_url),
+ subprocess.check_output('git -c http.sslVerify=%s -c init.templateDir= clone "%s" config' % (verify_ssl, repo_url),
shell=True)
output.info("Repo cloned")
except Exception as e:
|
{"golden_diff": "diff --git a/conans/client/conf/config_installer.py b/conans/client/conf/config_installer.py\n--- a/conans/client/conf/config_installer.py\n+++ b/conans/client/conf/config_installer.py\n@@ -44,7 +44,7 @@\n \n with tools.chdir(tmp_folder):\n try:\n- subprocess.check_output('git -c http.sslVerify=%s clone \"%s\" config' % (verify_ssl, repo_url),\n+ subprocess.check_output('git -c http.sslVerify=%s -c init.templateDir= clone \"%s\" config' % (verify_ssl, repo_url),\n shell=True)\n output.info(\"Repo cloned\")\n except Exception as e:\n", "issue": "Consider turning off template directories in Git for conan config install\nTo help us debug your issue please explain:\r\n\r\n- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [x] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\nThis is with Conan 1.6.0 on Windows 10.\r\n\r\nTo see this in progress, have a Git installation set up with automatic ctags generation akin to what's described in [Effortless Ctags with Git](https://tbaggery.com/2011/08/08/effortless-ctags-with-git.html).\r\n\r\nWhen doing `conan config install` with a Git URL, I get error messages like this:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"c:\\users\\kam\\.virtualenvs\\skel-hv6zqtfh\\lib\\shutil.py\", line 391, in _rmtree_unsafe\r\n os.rmdir(path)\r\nOSError: [WinError 145] The directory is not empty: 'C:\\\\Users\\\\kam\\\\.conan\\\\tmp_config_install\\\\config\\\\.git\\\\hooks'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"c:\\users\\kam\\.virtualenvs\\skel-hv6zqtfh\\lib\\site-packages\\conans\\client\\command.py\", line 1230, in run\r\n method(args[0][1:])\r\n File \"c:\\users\\kam\\.virtualenvs\\skel-hv6zqtfh\\lib\\site-packages\\conans\\client\\command.py\", line 379, in config\r\n return self._conan.config_install(args.item, verify_ssl, args.type)\r\n File \"c:\\users\\kam\\.virtualenvs\\skel-hv6zqtfh\\lib\\site-packages\\conans\\client\\conan_api.py\", line 79, in wrapper\r\n return f(*args, **kwargs)\r\n File \"c:\\users\\kam\\.virtualenvs\\skel-hv6zqtfh\\lib\\site-packages\\conans\\client\\conan_api.py\", line 510, in config_install\r\n return configuration_install(item, self._client_cache, self._user_io.out, verify_ssl, config_type)\r\n File \"c:\\users\\kam\\.virtualenvs\\skel-hv6zqtfh\\lib\\site-packages\\conans\\client\\conf\\config_installer.py\", line 135, in configuration_install\r\n rmdir(tmp_folder)\r\n File \"c:\\users\\kam\\.virtualenvs\\skel-hv6zqtfh\\lib\\site-packages\\conans\\util\\files.py\", line 202, in rmdir\r\n shutil.rmtree(path, onerror=_change_permissions)\r\n File \"c:\\users\\kam\\.virtualenvs\\skel-hv6zqtfh\\lib\\shutil.py\", line 494, in rmtree\r\n return _rmtree_unsafe(path, onerror)\r\n File \"c:\\users\\kam\\.virtualenvs\\skel-hv6zqtfh\\lib\\shutil.py\", line 384, in _rmtree_unsafe\r\n _rmtree_unsafe(fullname, onerror)\r\n File \"c:\\users\\kam\\.virtualenvs\\skel-hv6zqtfh\\lib\\shutil.py\", line 384, in _rmtree_unsafe\r\n _rmtree_unsafe(fullname, onerror)\r\n File \"c:\\users\\kam\\.virtualenvs\\skel-hv6zqtfh\\lib\\shutil.py\", line 384, in _rmtree_unsafe\r\n _rmtree_unsafe(fullname, onerror)\r\n File \"c:\\users\\kam\\.virtualenvs\\skel-hv6zqtfh\\lib\\shutil.py\", line 393, in 
_rmtree_unsafe\r\n onerror(os.rmdir, path, sys.exc_info())\r\n File \"c:\\users\\kam\\.virtualenvs\\skel-hv6zqtfh\\lib\\site-packages\\conans\\util\\files.py\", line 197, in _change_permissions\r\n raise OSError(\"Cannot change permissions for {}! Exception info: {}\".format(path, exc_info))\r\nOSError: Cannot change permissions for C:\\Users\\kam\\.conan\\tmp_config_install\\config\\.git\\hooks! Exception info: (<class 'OSError'>, OSError(41, 'The directory is not empty'), <traceback object at 0x0000016409078548>)\r\n\r\nERROR: Cannot change permissions for C:\\Users\\kam\\.conan\\tmp_config_install\\config\\.git\\hooks! Exception info: (<class 'OSError'>, OSError(41, 'The directory is not empty'), <traceback object at 0x0000016409078548>)\r\n```\r\n\r\nThe problem boils down to the way files are locked on Windows. It's not possible to delete open files. But the hook starts a background process which has the script files open, and the cleanup of the directory fails. Of course, a second later, the directory can be deleted as files are closed and unlocked.\r\n\r\nI've since started reworking my Git template to not start background processes by default, and only run `ctags` in checkouts that I'm actually developing on. This fixes my problem with `conan config install`.\r\n\r\nIt may make sense, though, to add `-c init.templateDir=` to the `git` command line when working on temporary downloads. It's part of sanitizing the process against user configuration: In a hook, _anything_ can happen, and the errors are obscure and difficult to understand.\r\n\r\n`conan config install` is mostly treating a Git repository as a sophisticated form of file archive, and probably doesn't want the user's hooks or other unusual setups from the template.\r\n\n", "before_files": [{"content": "import os\nimport shutil\nfrom six.moves.urllib.parse import urlparse\n\nfrom conans.tools import unzip\nfrom conans.util.files import rmdir, mkdir\nfrom conans.client.remote_registry import RemoteRegistry\nfrom conans import tools\nfrom conans.errors import ConanException\nimport subprocess\n\n\ndef _hide_password(resource):\n \"\"\"\n Hide password from url/file path\n\n :param resource: string with url or file path\n :return: resource with hidden password if present\n \"\"\"\n password = urlparse(resource).password\n return resource.replace(password, \"<hidden>\") if password else resource\n\n\ndef _handle_remotes(registry_path, remote_file, output):\n registry = RemoteRegistry(registry_path, output)\n new_registry = RemoteRegistry(remote_file, output)\n registry.define_remotes(new_registry.remotes)\n\n\ndef _handle_profiles(source_folder, target_folder, output):\n mkdir(target_folder)\n for root, _, files in os.walk(source_folder):\n relative_path = os.path.relpath(root, source_folder)\n if relative_path == \".\":\n relative_path = \"\"\n for f in files:\n profile = os.path.join(relative_path, f)\n output.info(\" Installing profile %s\" % profile)\n shutil.copy(os.path.join(root, f), os.path.join(target_folder, profile))\n\n\ndef _process_git_repo(repo_url, client_cache, output, tmp_folder, verify_ssl):\n output.info(\"Trying to clone repo %s\" % repo_url)\n\n with tools.chdir(tmp_folder):\n try:\n subprocess.check_output('git -c http.sslVerify=%s clone \"%s\" config' % (verify_ssl, repo_url),\n shell=True)\n output.info(\"Repo cloned\")\n except Exception as e:\n raise ConanException(\"config install error. 
Can't clone repo: %s\" % str(e))\n\n tmp_folder = os.path.join(tmp_folder, \"config\")\n _process_folder(tmp_folder, client_cache, output)\n\n\ndef _process_zip_file(zippath, client_cache, output, tmp_folder, remove=False):\n unzip(zippath, tmp_folder)\n if remove:\n os.unlink(zippath)\n _process_folder(tmp_folder, client_cache, output)\n\n\ndef _handle_conan_conf(current_conan_conf, new_conan_conf_path):\n current_conan_conf.read(new_conan_conf_path)\n with open(current_conan_conf.filename, \"w\") as f:\n current_conan_conf.write(f)\n\n\ndef _process_folder(folder, client_cache, output):\n for root, dirs, files in os.walk(folder):\n for f in files:\n if f == \"settings.yml\":\n output.info(\"Installing settings.yml\")\n settings_path = client_cache.settings_path\n shutil.copy(os.path.join(root, f), settings_path)\n elif f == \"conan.conf\":\n output.info(\"Processing conan.conf\")\n conan_conf = client_cache.conan_config\n _handle_conan_conf(conan_conf, os.path.join(root, f))\n elif f == \"remotes.txt\":\n output.info(\"Defining remotes\")\n registry_path = client_cache.registry\n _handle_remotes(registry_path, os.path.join(root, f), output)\n else:\n relpath = os.path.relpath(root, folder)\n target_folder = os.path.join(client_cache.conan_folder, relpath)\n mkdir(target_folder)\n output.info(\"Copying file %s to %s\" % (f, target_folder))\n shutil.copy(os.path.join(root, f), target_folder)\n for d in dirs:\n if d == \"profiles\":\n output.info(\"Installing profiles\")\n profiles_path = client_cache.profiles_path\n _handle_profiles(os.path.join(root, d), profiles_path, output)\n break\n dirs[:] = [d for d in dirs if d not in (\"profiles\", \".git\")]\n\n\ndef _process_download(item, client_cache, output, tmp_folder, verify_ssl):\n output.info(\"Trying to download %s\" % _hide_password(item))\n zippath = os.path.join(tmp_folder, \"config.zip\")\n try:\n tools.download(item, zippath, out=output, verify=verify_ssl)\n _process_zip_file(zippath, client_cache, output, tmp_folder, remove=True)\n except Exception as e:\n raise ConanException(\"Error while installing config from %s\\n%s\" % (item, str(e)))\n\n\ndef configuration_install(item, client_cache, output, verify_ssl, config_type=None):\n tmp_folder = os.path.join(client_cache.conan_folder, \"tmp_config_install\")\n # necessary for Mac OSX, where the temp folders in /var/ are symlinks to /private/var/\n tmp_folder = os.path.realpath(tmp_folder)\n mkdir(tmp_folder)\n try:\n if item is None:\n try:\n item = client_cache.conan_config.get_item(\"general.config_install\")\n except ConanException:\n raise ConanException(\"Called config install without arguments and \"\n \"'general.config_install' not defined in conan.conf\")\n\n if item.endswith(\".git\") or config_type == \"git\":\n _process_git_repo(item, client_cache, output, tmp_folder, verify_ssl)\n elif os.path.exists(item):\n # is a local file\n _process_zip_file(item, client_cache, output, tmp_folder)\n elif item.startswith(\"http\"):\n _process_download(item, client_cache, output, tmp_folder, verify_ssl)\n else:\n raise ConanException(\"I don't know how to process %s\" % item)\n finally:\n if item:\n client_cache.conan_config.set_item(\"general.config_install\", item)\n rmdir(tmp_folder)\n", "path": "conans/client/conf/config_installer.py"}]}
| 3,423 | 146 |
gh_patches_debug_6291
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-16484
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bitwise_and
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/logic.py]
1 # global
2 import ivy
3 import ivy.functional.frontends.paddle as paddle
4 from ivy.func_wrapper import (
5 with_unsupported_dtypes,
6 handle_out_argument,
7 with_supported_dtypes,
8 )
9 from ivy.functional.frontends.paddle.func_wrapper import (
10 to_ivy_arrays_and_back,
11 )
12
13
14 @with_unsupported_dtypes(
15 {"2.4.2 and below": ("uint8", "int8", "int16", "complex64", "complex128")}, "paddle"
16 )
17 @to_ivy_arrays_and_back
18 def equal(x, y, /, *, name=None):
19 return ivy.equal(x, y)
20
21
22 @with_unsupported_dtypes(
23 {"2.4.2 and below": ("uint8", "int8", "int16", "complex64", "complex128")}, "paddle"
24 )
25 @to_ivy_arrays_and_back
26 def not_equal(x, y, /, *, name=None):
27 return ivy.not_equal(x, y)
28
29
30 @with_unsupported_dtypes(
31 {
32 "2.4.2 and below": (
33 "uint8",
34 "int8",
35 "int16",
36 "float16",
37 "complex64",
38 "complex128",
39 )
40 },
41 "paddle",
42 )
43 @to_ivy_arrays_and_back
44 def equal_all(x, y, /, *, name=None):
45 return paddle.to_tensor([ivy.array_equal(x, y)])
46
47
48 @with_unsupported_dtypes(
49 {"2.4.2 and below": ("bool", "uint8", "int8", "int16", "complex64", "complex128")},
50 "paddle",
51 )
52 @to_ivy_arrays_and_back
53 def greater_than(x, y, /, *, name=None):
54 return ivy.greater(x, y)
55
56
57 @with_unsupported_dtypes(
58 {"2.4.2 and below": ("bool", "uint8", "int8", "int16", "complex64", "complex128")},
59 "paddle",
60 )
61 @to_ivy_arrays_and_back
62 def greater_equal(x, y, /, *, name=None):
63 return ivy.greater_equal(x, y)
64
65
66 @with_unsupported_dtypes(
67 {"2.4.2 and below": ("bool", "uint8", "int8", "int16", "complex64", "complex128")},
68 "paddle",
69 )
70 @to_ivy_arrays_and_back
71 def less_than(x, y, /, *, name=None):
72 return ivy.less(x, y)
73
74
75 @with_unsupported_dtypes(
76 {"2.4.2 and below": ("bool", "uint8", "int8", "int16", "complex64", "complex128")},
77 "paddle",
78 )
79 @to_ivy_arrays_and_back
80 def less_equal(x, y, /, *, name=None):
81 return ivy.less_equal(x, y)
82
83
84 @with_supported_dtypes(
85 {
86 "2.4.2 and below": (
87 "bool",
88 "int8",
89 "int16",
90 "int32",
91 "int64",
92 "float32",
93 "float64",
94 )
95 },
96 "paddle",
97 )
98 @to_ivy_arrays_and_back
99 @handle_out_argument
100 def logical_or(x, y, /, *, name=None, out=None):
101 return ivy.logical_or(x, y, out=out)
102
103
104 @with_supported_dtypes(
105 {
106 "2.4.2 and below": (
107 "bool",
108 "int8",
109 "int16",
110 "int32",
111 "int64",
112 "float32",
113 "float64",
114 )
115 },
116 "paddle",
117 )
118 @to_ivy_arrays_and_back
119 @handle_out_argument
120 def logical_xor(x, y, /, *, name=None, out=None):
121 return ivy.logical_xor(x, y, out=out)
122
123
124 @with_supported_dtypes(
125 {
126 "2.4.2 and below": (
127 "bool",
128 "int8",
129 "int16",
130 "int32",
131 "int64",
132 "float32",
133 "float64",
134 )
135 },
136 "paddle",
137 )
138 @to_ivy_arrays_and_back
139 @handle_out_argument
140 def logical_not(x, /, *, name=None, out=None):
141 return ivy.logical_not(x, out=out)
142
143
144 @with_supported_dtypes(
145 {
146 "2.4.2 and below": (
147 "bool",
148 "uint8",
149 "int8",
150 "int16",
151 "int32",
152 "int64",
153 )
154 },
155 "paddle",
156 )
157 @to_ivy_arrays_and_back
158 @handle_out_argument
159 def bitwise_or(x, y, name=None, out=None):
160 return ivy.bitwise_or(x, y, out=out)
161
[end of ivy/functional/frontends/paddle/tensor/logic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ivy/functional/frontends/paddle/tensor/logic.py b/ivy/functional/frontends/paddle/tensor/logic.py
--- a/ivy/functional/frontends/paddle/tensor/logic.py
+++ b/ivy/functional/frontends/paddle/tensor/logic.py
@@ -158,3 +158,22 @@
@handle_out_argument
def bitwise_or(x, y, name=None, out=None):
return ivy.bitwise_or(x, y, out=out)
+
+
+@with_supported_dtypes(
+ {
+ "2.4.2 and below": (
+ "bool",
+ "uint8",
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ )
+ },
+ "paddle",
+)
+@to_ivy_arrays_and_back
+@handle_out_argument
+def bitwise_and(x, y, /, *, name=None, out=None):
+ return ivy.bitwise_and(x, y, out=out)
|
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/logic.py b/ivy/functional/frontends/paddle/tensor/logic.py\n--- a/ivy/functional/frontends/paddle/tensor/logic.py\n+++ b/ivy/functional/frontends/paddle/tensor/logic.py\n@@ -158,3 +158,22 @@\n @handle_out_argument\n def bitwise_or(x, y, name=None, out=None):\n return ivy.bitwise_or(x, y, out=out)\n+\n+\n+@with_supported_dtypes(\n+ {\n+ \"2.4.2 and below\": (\n+ \"bool\",\n+ \"uint8\",\n+ \"int8\",\n+ \"int16\",\n+ \"int32\",\n+ \"int64\",\n+ )\n+ },\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+@handle_out_argument\n+def bitwise_and(x, y, /, *, name=None, out=None):\n+ return ivy.bitwise_and(x, y, out=out)\n", "issue": "bitwise_and\n\n", "before_files": [{"content": "# global\nimport ivy\nimport ivy.functional.frontends.paddle as paddle\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n handle_out_argument,\n with_supported_dtypes,\n)\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef equal(x, y, /, *, name=None):\n return ivy.equal(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef not_equal(x, y, /, *, name=None):\n return ivy.not_equal(x, y)\n\n\n@with_unsupported_dtypes(\n {\n \"2.4.2 and below\": (\n \"uint8\",\n \"int8\",\n \"int16\",\n \"float16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef equal_all(x, y, /, *, name=None):\n return paddle.to_tensor([ivy.array_equal(x, y)])\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef greater_than(x, y, /, *, name=None):\n return ivy.greater(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef greater_equal(x, y, /, *, name=None):\n return ivy.greater_equal(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef less_than(x, y, /, *, name=None):\n return ivy.less(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef less_equal(x, y, /, *, name=None):\n return ivy.less_equal(x, y)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\n@handle_out_argument\ndef logical_or(x, y, /, *, name=None, out=None):\n return ivy.logical_or(x, y, out=out)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\n@handle_out_argument\ndef logical_xor(x, y, /, *, name=None, out=None):\n return ivy.logical_xor(x, y, out=out)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n 
\"paddle\",\n)\n@to_ivy_arrays_and_back\n@handle_out_argument\ndef logical_not(x, /, *, name=None, out=None):\n return ivy.logical_not(x, out=out)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\n@handle_out_argument\ndef bitwise_or(x, y, name=None, out=None):\n return ivy.bitwise_or(x, y, out=out)\n", "path": "ivy/functional/frontends/paddle/tensor/logic.py"}]}
| 2,021 | 234 |
gh_patches_debug_4357
|
rasdani/github-patches
|
git_diff
|
OpenEnergyPlatform__oeplatform-1255
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Include the Open Energy Compendium in the navigation bar
## Description of the issue
A first version of the **Open Energy Compendium** is ready for integration.
## Ideas of solution
Link under `About` / `Compendium`
## Workflow checklist
- [x] I am aware of the workflow in [CONTRIBUTING.md](https://github.com/OpenEnergyPlatform/oeplatform/blob/develop/CONTRIBUTING.md)
</issue>
<code>
[start of oeplatform/settings.py]
1 """
2 Django settings for oeplatform project.
3
4 Generated by 'django-admin startproject' using Django 1.8.5.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.8/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/1.8/ref/settings/
11 """
12
13 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
14
15 try:
16 from .securitysettings import * # noqa
17 except ImportError:
18 import logging
19 import os
20
21 logging.error("No securitysettings found. Triggerd in oeplatform/settings.py")
22 SECRET_KEY = os.environ.get("SECRET_KEY", "0")
23 DEFAULT_FROM_EMAIL = os.environ.get("DEFAULT_FROM_EMAIL")
24 URL = os.environ.get("URL")
25
26 # Quick-start development settings - unsuitable for production
27 # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
28
29 # Application definition
30
31 INSTALLED_APPS = (
32 "django.contrib.sites",
33 "django.contrib.admin",
34 "django.contrib.auth",
35 "django.contrib.contenttypes",
36 "django.contrib.sessions",
37 "django.contrib.messages",
38 "django.contrib.staticfiles",
39 "django.contrib.sessions.backends.signed_cookies",
40 "django_bootstrap5",
41 "rest_framework",
42 "rest_framework.authtoken",
43 "modelview",
44 "modelview.templatetags.modelview_extras",
45 "login",
46 "base",
47 "base.templatetags.base_tags",
48 "widget_tweaks",
49 "dataedit",
50 "colorfield",
51 "api",
52 "ontology",
53 "axes",
54 "captcha",
55 "django.contrib.postgres",
56 "fontawesome_5",
57 "django_better_admin_arrayfield",
58 "oeo_viewer",
59 )
60
61 MIDDLEWARE = (
62 "django.contrib.sites.middleware.CurrentSiteMiddleware",
63 "django.contrib.sessions.middleware.SessionMiddleware",
64 "django.middleware.common.CommonMiddleware",
65 "django.middleware.csrf.CsrfViewMiddleware",
66 "django.contrib.auth.middleware.AuthenticationMiddleware",
67 "django.contrib.messages.middleware.MessageMiddleware",
68 "django.middleware.clickjacking.XFrameOptionsMiddleware",
69 "django.middleware.security.SecurityMiddleware",
70 "login.middleware.DetachMiddleware",
71 "axes.middleware.AxesMiddleware",
72 "django.middleware.common.CommonMiddleware",
73 )
74
75 ROOT_URLCONF = "oeplatform.urls"
76
77 EXTERNAL_URLS = {
78 "tutorials_index": "https://openenergyplatform.github.io/academy/",
79 "tutorials_faq": "https://openenergyplatform.github.io/academy/",
80 "tutorials_api1": "https://openenergyplatform.github.io/academy/tutorials/api/OEP_API_tutorial_part1/", # noqa E501
81 "tutorials_licenses": "https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/",
82 # noqa E501
83 "readthedocs": "https://oeplatform.readthedocs.io/en/latest/?badge=latest",
84 }
85
86
87 def external_urls_context_processor(request):
88 """Define hard coded external urls here.
89 Use in templates like this: {{ EXTERNAL_URLS.<name_of_url> }}
90 Also, you may want to add an icon indicating external links, e.g.
91 """
92 return {"EXTERNAL_URLS": EXTERNAL_URLS}
93
94
95 SITE_ID = 1
96
97 TEMPLATES = [
98 {
99 "BACKEND": "django.template.backends.django.DjangoTemplates",
100 "DIRS": [],
101 "APP_DIRS": True,
102 "OPTIONS": {
103 "context_processors": [
104 "django.template.context_processors.debug",
105 "django.template.context_processors.request",
106 "django.contrib.auth.context_processors.auth",
107 "django.contrib.messages.context_processors.messages",
108 "oeplatform.settings.external_urls_context_processor",
109 ]
110 },
111 }
112 ]
113
114 CORS_ORIGIN_WHITELIST = ["http://localhost:3000", "http://127.0.0.1:3000"]
115
116 GRAPHENE = {"SCHEMA": "factsheet.schema.schema"}
117
118 WSGI_APPLICATION = "oeplatform.wsgi.application"
119
120 try:
121 ONTOLOGY_FOLDER # noqa
122 except NameError:
123 ONTOLOGY_FOLDER = "/tmp"
124
125 # Internationalization
126 # https://docs.djangoproject.com/en/1.8/topics/i18n/
127
128 LANGUAGE_CODE = "en-us"
129
130 TIME_ZONE = "Europe/Berlin"
131
132 USE_I18N = True
133
134 USE_L10N = True
135
136 USE_TZ = True
137
138 # Static files (CSS, JavaScript, Images)
139 # https://docs.djangoproject.com/en/1.8/howto/static-files/
140
141 AUTH_USER_MODEL = "login.myuser"
142 LOGIN_URL = "/user/login"
143 LOGIN_REDIRECT_URL = "/"
144
145 REST_FRAMEWORK = {
146 "DEFAULT_AUTHENTICATION_CLASSES": (
147 "rest_framework.authentication.BasicAuthentication",
148 "rest_framework.authentication.SessionAuthentication",
149 "rest_framework.authentication.TokenAuthentication",
150 )
151 }
152
153 AUTHENTICATION_BACKENDS = [
154 # AxesBackend should be the first backend in the AUTHENTICATION_BACKENDS list.
155 "axes.backends.AxesBackend",
156 # custom class extenging Django ModelBackend for login with username OR email
157 "login.backends.ModelBackendWithEmail",
158 ]
159
160 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
161
[end of oeplatform/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/oeplatform/settings.py b/oeplatform/settings.py
--- a/oeplatform/settings.py
+++ b/oeplatform/settings.py
@@ -81,6 +81,7 @@
"tutorials_licenses": "https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/",
# noqa E501
"readthedocs": "https://oeplatform.readthedocs.io/en/latest/?badge=latest",
+ "compendium": "https://openenergyplatform.github.io/organisation/",
}
|
{"golden_diff": "diff --git a/oeplatform/settings.py b/oeplatform/settings.py\n--- a/oeplatform/settings.py\n+++ b/oeplatform/settings.py\n@@ -81,6 +81,7 @@\n \"tutorials_licenses\": \"https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/\",\n # noqa E501\n \"readthedocs\": \"https://oeplatform.readthedocs.io/en/latest/?badge=latest\",\n+ \"compendium\": \"https://openenergyplatform.github.io/organisation/\",\n }\n", "issue": "Include the Open Energy Compendium in the navigation bar\n## Description of the issue\r\n\r\nA first version of the **Open Energy Compendium** is ready for integration.\r\n\r\n## Ideas of solution\r\n\r\nLink under `About` / `Compendium`\r\n\r\n## Workflow checklist\r\n- [x] I am aware of the workflow in [CONTRIBUTING.md](https://github.com/OpenEnergyPlatform/oeplatform/blob/develop/CONTRIBUTING.md)\r\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for oeplatform project.\n\nGenerated by 'django-admin startproject' using Django 1.8.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n\ntry:\n from .securitysettings import * # noqa\nexcept ImportError:\n import logging\n import os\n\n logging.error(\"No securitysettings found. Triggerd in oeplatform/settings.py\")\n SECRET_KEY = os.environ.get(\"SECRET_KEY\", \"0\")\n DEFAULT_FROM_EMAIL = os.environ.get(\"DEFAULT_FROM_EMAIL\")\n URL = os.environ.get(\"URL\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/\n\n# Application definition\n\nINSTALLED_APPS = (\n \"django.contrib.sites\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.sessions.backends.signed_cookies\",\n \"django_bootstrap5\",\n \"rest_framework\",\n \"rest_framework.authtoken\",\n \"modelview\",\n \"modelview.templatetags.modelview_extras\",\n \"login\",\n \"base\",\n \"base.templatetags.base_tags\",\n \"widget_tweaks\",\n \"dataedit\",\n \"colorfield\",\n \"api\",\n \"ontology\",\n \"axes\",\n \"captcha\",\n \"django.contrib.postgres\",\n \"fontawesome_5\",\n \"django_better_admin_arrayfield\",\n \"oeo_viewer\",\n)\n\nMIDDLEWARE = (\n \"django.contrib.sites.middleware.CurrentSiteMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"login.middleware.DetachMiddleware\",\n \"axes.middleware.AxesMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n)\n\nROOT_URLCONF = \"oeplatform.urls\"\n\nEXTERNAL_URLS = {\n \"tutorials_index\": \"https://openenergyplatform.github.io/academy/\",\n \"tutorials_faq\": \"https://openenergyplatform.github.io/academy/\",\n \"tutorials_api1\": \"https://openenergyplatform.github.io/academy/tutorials/api/OEP_API_tutorial_part1/\", # noqa E501\n \"tutorials_licenses\": 
\"https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/\",\n # noqa E501\n \"readthedocs\": \"https://oeplatform.readthedocs.io/en/latest/?badge=latest\",\n}\n\n\ndef external_urls_context_processor(request):\n \"\"\"Define hard coded external urls here.\n Use in templates like this: {{ EXTERNAL_URLS.<name_of_url> }}\n Also, you may want to add an icon indicating external links, e.g.\n \"\"\"\n return {\"EXTERNAL_URLS\": EXTERNAL_URLS}\n\n\nSITE_ID = 1\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"oeplatform.settings.external_urls_context_processor\",\n ]\n },\n }\n]\n\nCORS_ORIGIN_WHITELIST = [\"http://localhost:3000\", \"http://127.0.0.1:3000\"]\n\nGRAPHENE = {\"SCHEMA\": \"factsheet.schema.schema\"}\n\nWSGI_APPLICATION = \"oeplatform.wsgi.application\"\n\ntry:\n ONTOLOGY_FOLDER # noqa\nexcept NameError:\n ONTOLOGY_FOLDER = \"/tmp\"\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"Europe/Berlin\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nAUTH_USER_MODEL = \"login.myuser\"\nLOGIN_URL = \"/user/login\"\nLOGIN_REDIRECT_URL = \"/\"\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n \"rest_framework.authentication.TokenAuthentication\",\n )\n}\n\nAUTHENTICATION_BACKENDS = [\n # AxesBackend should be the first backend in the AUTHENTICATION_BACKENDS list.\n \"axes.backends.AxesBackend\",\n # custom class extenging Django ModelBackend for login with username OR email\n \"login.backends.ModelBackendWithEmail\",\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n", "path": "oeplatform/settings.py"}]}
| 2,137 | 121 |
gh_patches_debug_4592
|
rasdani/github-patches
|
git_diff
|
python__mypy-14842
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong recommendation to install `types-backports`
When I import `backports.zoneinfo`, I get
```
src/test_checks.py:29:1: error: Library stubs not installed for "backports.zoneinfo" [import]
src/test_checks.py:29:1: note: Hint: "python3 -m pip install types-backports"
src/test_checks.py:29:1: note: (or run "mypy --install-types" to install all missing stub packages)
src/test_checks.py:29:1: note: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports
```
However, the `types-backports` package is obsolete since python/typeshed#5997; it was replaced with `types-backports.ssl_match_hostname`. It never provided stubs for `backports.zoneinfo` (which is a py.typed package and doesn't need stubs), only for `ssl_match_hostname`.
`backports.ssl_match_hostname` provides a 3.7+ feature (https://pypi.org/project/backports.ssl_match_hostname/), so it isn't relevant on any supported version of Python and I don't think mypy needs to recommend installing it. I will submit a PR to remove it from the list of recommended packages.
</issue>
<code>
[start of mypy/stubinfo.py]
1 from __future__ import annotations
2
3
4 def is_legacy_bundled_package(prefix: str) -> bool:
5 return prefix in legacy_bundled_packages
6
7
8 def approved_stub_package_exists(prefix: str) -> bool:
9 return is_legacy_bundled_package(prefix) or prefix in non_bundled_packages
10
11
12 def stub_package_name(prefix: str) -> str:
13 return legacy_bundled_packages.get(prefix) or non_bundled_packages[prefix]
14
15
16 # Stubs for these third-party packages used to be shipped with mypy.
17 #
18 # Map package name to PyPI stub distribution name.
19 #
20 # Package name can have one or two components ('a' or 'a.b').
21 legacy_bundled_packages = {
22 "aiofiles": "types-aiofiles",
23 "backports": "types-backports",
24 "backports_abc": "types-backports_abc",
25 "bleach": "types-bleach",
26 "boto": "types-boto",
27 "cachetools": "types-cachetools",
28 "click_spinner": "types-click-spinner",
29 "contextvars": "types-contextvars",
30 "croniter": "types-croniter",
31 "dataclasses": "types-dataclasses",
32 "dateparser": "types-dateparser",
33 "datetimerange": "types-DateTimeRange",
34 "dateutil": "types-python-dateutil",
35 "decorator": "types-decorator",
36 "deprecated": "types-Deprecated",
37 "docutils": "types-docutils",
38 "first": "types-first",
39 "geoip2": "types-geoip2",
40 "gflags": "types-python-gflags",
41 "google.protobuf": "types-protobuf",
42 "markdown": "types-Markdown",
43 "maxminddb": "types-maxminddb",
44 "mock": "types-mock",
45 "OpenSSL": "types-pyOpenSSL",
46 "paramiko": "types-paramiko",
47 "pkg_resources": "types-setuptools",
48 "polib": "types-polib",
49 "pycurl": "types-pycurl",
50 "pymysql": "types-PyMySQL",
51 "pyrfc3339": "types-pyRFC3339",
52 "python2": "types-six",
53 "pytz": "types-pytz",
54 "pyVmomi": "types-pyvmomi",
55 "redis": "types-redis",
56 "requests": "types-requests",
57 "retry": "types-retry",
58 "simplejson": "types-simplejson",
59 "singledispatch": "types-singledispatch",
60 "six": "types-six",
61 "slugify": "types-python-slugify",
62 "tabulate": "types-tabulate",
63 "toml": "types-toml",
64 "typed_ast": "types-typed-ast",
65 "tzlocal": "types-tzlocal",
66 "ujson": "types-ujson",
67 "waitress": "types-waitress",
68 "yaml": "types-PyYAML",
69 }
70
71 # Map package name to PyPI stub distribution name from typeshed.
72 # Stubs for these packages were never bundled with mypy. Don't
73 # include packages that have a release that includes PEP 561 type
74 # information.
75 #
76 # Package name can have one or two components ('a' or 'a.b').
77 #
78 # Note that these packages are omitted for now:
79 # pika: typeshed's stubs are on PyPI as types-pika-ts.
80 # types-pika already exists on PyPI, and is more complete in many ways,
81 # but is a non-typeshed stubs package.
82 non_bundled_packages = {
83 "MySQLdb": "types-mysqlclient",
84 "PIL": "types-Pillow",
85 "PyInstaller": "types-pyinstaller",
86 "Xlib": "types-python-xlib",
87 "annoy": "types-annoy",
88 "appdirs": "types-appdirs",
89 "aws_xray_sdk": "types-aws-xray-sdk",
90 "babel": "types-babel",
91 "backports.ssl_match_hostname": "types-backports.ssl_match_hostname",
92 "braintree": "types-braintree",
93 "bs4": "types-beautifulsoup4",
94 "bugbear": "types-flake8-bugbear",
95 "caldav": "types-caldav",
96 "cffi": "types-cffi",
97 "chevron": "types-chevron",
98 "colorama": "types-colorama",
99 "commonmark": "types-commonmark",
100 "consolemenu": "types-console-menu",
101 "crontab": "types-python-crontab",
102 "d3dshot": "types-D3DShot",
103 "dj_database_url": "types-dj-database-url",
104 "dockerfile_parse": "types-dockerfile-parse",
105 "docopt": "types-docopt",
106 "editdistance": "types-editdistance",
107 "entrypoints": "types-entrypoints",
108 "farmhash": "types-pyfarmhash",
109 "flake8_2020": "types-flake8-2020",
110 "flake8_builtins": "types-flake8-builtins",
111 "flake8_docstrings": "types-flake8-docstrings",
112 "flake8_plugin_utils": "types-flake8-plugin-utils",
113 "flake8_rst_docstrings": "types-flake8-rst-docstrings",
114 "flake8_simplify": "types-flake8-simplify",
115 "flake8_typing_imports": "types-flake8-typing-imports",
116 "flask_cors": "types-Flask-Cors",
117 "flask_migrate": "types-Flask-Migrate",
118 "flask_sqlalchemy": "types-Flask-SQLAlchemy",
119 "fpdf": "types-fpdf2",
120 "gdb": "types-gdb",
121 "google.cloud": "types-google-cloud-ndb",
122 "hdbcli": "types-hdbcli",
123 "html5lib": "types-html5lib",
124 "httplib2": "types-httplib2",
125 "humanfriendly": "types-humanfriendly",
126 "invoke": "types-invoke",
127 "jack": "types-JACK-Client",
128 "jmespath": "types-jmespath",
129 "jose": "types-python-jose",
130 "jsonschema": "types-jsonschema",
131 "keyboard": "types-keyboard",
132 "ldap3": "types-ldap3",
133 "nmap": "types-python-nmap",
134 "oauthlib": "types-oauthlib",
135 "openpyxl": "types-openpyxl",
136 "opentracing": "types-opentracing",
137 "paho.mqtt": "types-paho-mqtt",
138 "parsimonious": "types-parsimonious",
139 "passlib": "types-passlib",
140 "passpy": "types-passpy",
141 "peewee": "types-peewee",
142 "pep8ext_naming": "types-pep8-naming",
143 "playsound": "types-playsound",
144 "psutil": "types-psutil",
145 "psycopg2": "types-psycopg2",
146 "pyaudio": "types-pyaudio",
147 "pyautogui": "types-PyAutoGUI",
148 "pycocotools": "types-pycocotools",
149 "pyflakes": "types-pyflakes",
150 "pygments": "types-Pygments",
151 "pyi_splash": "types-pyinstaller",
152 "pynput": "types-pynput",
153 "pythoncom": "types-pywin32",
154 "pythonwin": "types-pywin32",
155 "pyscreeze": "types-PyScreeze",
156 "pysftp": "types-pysftp",
157 "pytest_lazyfixture": "types-pytest-lazy-fixture",
158 "pywintypes": "types-pywin32",
159 "regex": "types-regex",
160 "send2trash": "types-Send2Trash",
161 "slumber": "types-slumber",
162 "stdlib_list": "types-stdlib-list",
163 "stripe": "types-stripe",
164 "toposort": "types-toposort",
165 "tqdm": "types-tqdm",
166 "tree_sitter": "types-tree-sitter",
167 "tree_sitter_languages": "types-tree-sitter-languages",
168 "ttkthemes": "types-ttkthemes",
169 "urllib3": "types-urllib3",
170 "vobject": "types-vobject",
171 "whatthepatch": "types-whatthepatch",
172 "win32": "types-pywin32",
173 "win32api": "types-pywin32",
174 "win32con": "types-pywin32",
175 "win32com": "types-pywin32",
176 "win32comext": "types-pywin32",
177 "win32gui": "types-pywin32",
178 "xmltodict": "types-xmltodict",
179 "xxhash": "types-xxhash",
180 "zxcvbn": "types-zxcvbn",
181 # Stub packages that are not from typeshed
182 # Since these can be installed automatically via --install-types, we have a high trust bar
183 # for additions here
184 "pandas": "pandas-stubs", # https://github.com/pandas-dev/pandas-stubs
185 "lxml": "lxml-stubs", # https://github.com/lxml/lxml-stubs
186 }
187
[end of mypy/stubinfo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mypy/stubinfo.py b/mypy/stubinfo.py
--- a/mypy/stubinfo.py
+++ b/mypy/stubinfo.py
@@ -20,8 +20,6 @@
# Package name can have one or two components ('a' or 'a.b').
legacy_bundled_packages = {
"aiofiles": "types-aiofiles",
- "backports": "types-backports",
- "backports_abc": "types-backports_abc",
"bleach": "types-bleach",
"boto": "types-boto",
"cachetools": "types-cachetools",
|
{"golden_diff": "diff --git a/mypy/stubinfo.py b/mypy/stubinfo.py\n--- a/mypy/stubinfo.py\n+++ b/mypy/stubinfo.py\n@@ -20,8 +20,6 @@\n # Package name can have one or two components ('a' or 'a.b').\n legacy_bundled_packages = {\n \"aiofiles\": \"types-aiofiles\",\n- \"backports\": \"types-backports\",\n- \"backports_abc\": \"types-backports_abc\",\n \"bleach\": \"types-bleach\",\n \"boto\": \"types-boto\",\n \"cachetools\": \"types-cachetools\",\n", "issue": "Wrong recommendation to install `types-backports`\nWhen I import `backports.zoneinfo`, I get\r\n\r\n```\r\nsrc/test_checks.py:29:1: error: Library stubs not installed for \"backports.zoneinfo\" [import]\r\nsrc/test_checks.py:29:1: note: Hint: \"python3 -m pip install types-backports\"\r\nsrc/test_checks.py:29:1: note: (or run \"mypy --install-types\" to install all missing stub packages)\r\nsrc/test_checks.py:29:1: note: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports\r\n```\r\n\r\nHowever, the `types-backports` package is obsolete since python/typeshed#5997; it was replaced with `types-backports.ssl_match_hostname`. It never provided stubs for `backports.zoneinfo` (which is a py.typed package and doesn't need stubs), only for `ssl_match_hostname`.\r\n\r\n`backports.ssl_match_hostname` provides a 3.7+ feature (https://pypi.org/project/backports.ssl_match_hostname/), so it isn't relevant on any supported version of Python and I don't think mypy needs to recommend installing it. I will submit a PR to remove it from the list of recommended packages.\n", "before_files": [{"content": "from __future__ import annotations\n\n\ndef is_legacy_bundled_package(prefix: str) -> bool:\n return prefix in legacy_bundled_packages\n\n\ndef approved_stub_package_exists(prefix: str) -> bool:\n return is_legacy_bundled_package(prefix) or prefix in non_bundled_packages\n\n\ndef stub_package_name(prefix: str) -> str:\n return legacy_bundled_packages.get(prefix) or non_bundled_packages[prefix]\n\n\n# Stubs for these third-party packages used to be shipped with mypy.\n#\n# Map package name to PyPI stub distribution name.\n#\n# Package name can have one or two components ('a' or 'a.b').\nlegacy_bundled_packages = {\n \"aiofiles\": \"types-aiofiles\",\n \"backports\": \"types-backports\",\n \"backports_abc\": \"types-backports_abc\",\n \"bleach\": \"types-bleach\",\n \"boto\": \"types-boto\",\n \"cachetools\": \"types-cachetools\",\n \"click_spinner\": \"types-click-spinner\",\n \"contextvars\": \"types-contextvars\",\n \"croniter\": \"types-croniter\",\n \"dataclasses\": \"types-dataclasses\",\n \"dateparser\": \"types-dateparser\",\n \"datetimerange\": \"types-DateTimeRange\",\n \"dateutil\": \"types-python-dateutil\",\n \"decorator\": \"types-decorator\",\n \"deprecated\": \"types-Deprecated\",\n \"docutils\": \"types-docutils\",\n \"first\": \"types-first\",\n \"geoip2\": \"types-geoip2\",\n \"gflags\": \"types-python-gflags\",\n \"google.protobuf\": \"types-protobuf\",\n \"markdown\": \"types-Markdown\",\n \"maxminddb\": \"types-maxminddb\",\n \"mock\": \"types-mock\",\n \"OpenSSL\": \"types-pyOpenSSL\",\n \"paramiko\": \"types-paramiko\",\n \"pkg_resources\": \"types-setuptools\",\n \"polib\": \"types-polib\",\n \"pycurl\": \"types-pycurl\",\n \"pymysql\": \"types-PyMySQL\",\n \"pyrfc3339\": \"types-pyRFC3339\",\n \"python2\": \"types-six\",\n \"pytz\": \"types-pytz\",\n \"pyVmomi\": \"types-pyvmomi\",\n \"redis\": \"types-redis\",\n \"requests\": \"types-requests\",\n \"retry\": \"types-retry\",\n \"simplejson\": 
\"types-simplejson\",\n \"singledispatch\": \"types-singledispatch\",\n \"six\": \"types-six\",\n \"slugify\": \"types-python-slugify\",\n \"tabulate\": \"types-tabulate\",\n \"toml\": \"types-toml\",\n \"typed_ast\": \"types-typed-ast\",\n \"tzlocal\": \"types-tzlocal\",\n \"ujson\": \"types-ujson\",\n \"waitress\": \"types-waitress\",\n \"yaml\": \"types-PyYAML\",\n}\n\n# Map package name to PyPI stub distribution name from typeshed.\n# Stubs for these packages were never bundled with mypy. Don't\n# include packages that have a release that includes PEP 561 type\n# information.\n#\n# Package name can have one or two components ('a' or 'a.b').\n#\n# Note that these packages are omitted for now:\n# pika: typeshed's stubs are on PyPI as types-pika-ts.\n# types-pika already exists on PyPI, and is more complete in many ways,\n# but is a non-typeshed stubs package.\nnon_bundled_packages = {\n \"MySQLdb\": \"types-mysqlclient\",\n \"PIL\": \"types-Pillow\",\n \"PyInstaller\": \"types-pyinstaller\",\n \"Xlib\": \"types-python-xlib\",\n \"annoy\": \"types-annoy\",\n \"appdirs\": \"types-appdirs\",\n \"aws_xray_sdk\": \"types-aws-xray-sdk\",\n \"babel\": \"types-babel\",\n \"backports.ssl_match_hostname\": \"types-backports.ssl_match_hostname\",\n \"braintree\": \"types-braintree\",\n \"bs4\": \"types-beautifulsoup4\",\n \"bugbear\": \"types-flake8-bugbear\",\n \"caldav\": \"types-caldav\",\n \"cffi\": \"types-cffi\",\n \"chevron\": \"types-chevron\",\n \"colorama\": \"types-colorama\",\n \"commonmark\": \"types-commonmark\",\n \"consolemenu\": \"types-console-menu\",\n \"crontab\": \"types-python-crontab\",\n \"d3dshot\": \"types-D3DShot\",\n \"dj_database_url\": \"types-dj-database-url\",\n \"dockerfile_parse\": \"types-dockerfile-parse\",\n \"docopt\": \"types-docopt\",\n \"editdistance\": \"types-editdistance\",\n \"entrypoints\": \"types-entrypoints\",\n \"farmhash\": \"types-pyfarmhash\",\n \"flake8_2020\": \"types-flake8-2020\",\n \"flake8_builtins\": \"types-flake8-builtins\",\n \"flake8_docstrings\": \"types-flake8-docstrings\",\n \"flake8_plugin_utils\": \"types-flake8-plugin-utils\",\n \"flake8_rst_docstrings\": \"types-flake8-rst-docstrings\",\n \"flake8_simplify\": \"types-flake8-simplify\",\n \"flake8_typing_imports\": \"types-flake8-typing-imports\",\n \"flask_cors\": \"types-Flask-Cors\",\n \"flask_migrate\": \"types-Flask-Migrate\",\n \"flask_sqlalchemy\": \"types-Flask-SQLAlchemy\",\n \"fpdf\": \"types-fpdf2\",\n \"gdb\": \"types-gdb\",\n \"google.cloud\": \"types-google-cloud-ndb\",\n \"hdbcli\": \"types-hdbcli\",\n \"html5lib\": \"types-html5lib\",\n \"httplib2\": \"types-httplib2\",\n \"humanfriendly\": \"types-humanfriendly\",\n \"invoke\": \"types-invoke\",\n \"jack\": \"types-JACK-Client\",\n \"jmespath\": \"types-jmespath\",\n \"jose\": \"types-python-jose\",\n \"jsonschema\": \"types-jsonschema\",\n \"keyboard\": \"types-keyboard\",\n \"ldap3\": \"types-ldap3\",\n \"nmap\": \"types-python-nmap\",\n \"oauthlib\": \"types-oauthlib\",\n \"openpyxl\": \"types-openpyxl\",\n \"opentracing\": \"types-opentracing\",\n \"paho.mqtt\": \"types-paho-mqtt\",\n \"parsimonious\": \"types-parsimonious\",\n \"passlib\": \"types-passlib\",\n \"passpy\": \"types-passpy\",\n \"peewee\": \"types-peewee\",\n \"pep8ext_naming\": \"types-pep8-naming\",\n \"playsound\": \"types-playsound\",\n \"psutil\": \"types-psutil\",\n \"psycopg2\": \"types-psycopg2\",\n \"pyaudio\": \"types-pyaudio\",\n \"pyautogui\": \"types-PyAutoGUI\",\n \"pycocotools\": \"types-pycocotools\",\n \"pyflakes\": 
\"types-pyflakes\",\n \"pygments\": \"types-Pygments\",\n \"pyi_splash\": \"types-pyinstaller\",\n \"pynput\": \"types-pynput\",\n \"pythoncom\": \"types-pywin32\",\n \"pythonwin\": \"types-pywin32\",\n \"pyscreeze\": \"types-PyScreeze\",\n \"pysftp\": \"types-pysftp\",\n \"pytest_lazyfixture\": \"types-pytest-lazy-fixture\",\n \"pywintypes\": \"types-pywin32\",\n \"regex\": \"types-regex\",\n \"send2trash\": \"types-Send2Trash\",\n \"slumber\": \"types-slumber\",\n \"stdlib_list\": \"types-stdlib-list\",\n \"stripe\": \"types-stripe\",\n \"toposort\": \"types-toposort\",\n \"tqdm\": \"types-tqdm\",\n \"tree_sitter\": \"types-tree-sitter\",\n \"tree_sitter_languages\": \"types-tree-sitter-languages\",\n \"ttkthemes\": \"types-ttkthemes\",\n \"urllib3\": \"types-urllib3\",\n \"vobject\": \"types-vobject\",\n \"whatthepatch\": \"types-whatthepatch\",\n \"win32\": \"types-pywin32\",\n \"win32api\": \"types-pywin32\",\n \"win32con\": \"types-pywin32\",\n \"win32com\": \"types-pywin32\",\n \"win32comext\": \"types-pywin32\",\n \"win32gui\": \"types-pywin32\",\n \"xmltodict\": \"types-xmltodict\",\n \"xxhash\": \"types-xxhash\",\n \"zxcvbn\": \"types-zxcvbn\",\n # Stub packages that are not from typeshed\n # Since these can be installed automatically via --install-types, we have a high trust bar\n # for additions here\n \"pandas\": \"pandas-stubs\", # https://github.com/pandas-dev/pandas-stubs\n \"lxml\": \"lxml-stubs\", # https://github.com/lxml/lxml-stubs\n}\n", "path": "mypy/stubinfo.py"}]}
| 3,388 | 145 |
gh_patches_debug_3217
|
rasdani/github-patches
|
git_diff
|
huggingface__text-generation-inference-851
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Watermarking bug
### System Info
using singularity with the container `text-generation-inference:1.0.0` .
i get this error:
```
Traceback (most recent call last):
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_causal_lm.py", line 727, in warmup
_, batch = self.generate_token(batch)
File "/opt/conda/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_causal_lm.py", line 834, in generate_token
next_input_ids, next_token_logprobs = batch.next_token_chooser(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/tokens.py", line 224, in __call__
scores = self.watermark_processor(input_ids, scores)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/logits_process.py", line 398, in __call__
scores[i : i + 1] = processor(input_ids[i : i + 1], scores[i : i + 1])
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/watermark.py", line 88, in __call__
greenlist_ids = self._get_greenlist_ids(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/watermark.py", line 64, in _get_greenlist_ids
greenlist_size = int(max_value * self.gamma)
ValueError: invalid literal for int() with base 10: '0.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50
```
i think `greenlist_size = int(max_value * self.gamma)` should be `greenlist_size = max_value * int(self.gamma)`
I also tried setting only the env variables `export WATERMARK_GAMMA=0.5 && export WATERMARK_DELTA=2.0` and hit the exact same issue.
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
```bash
export USE_FLASH_ATTENTION=True && text-generation-launcher \
--model-id /scratch//models/huggyllama-llama-7b \
--port 61697 \
--max-best-of 5 \
--max-total-tokens 2048 \
--max-input-length 1024 \
--max-batch-prefill-tokens 1024 \
--max-concurrent-requests 20 \
--sharded false \
--num-shard 1 \
--dtype float16 \
--disable-custom-kernels \
--watermark-gamma 0.5 \
--watermark-delta 2
```
### Expected behavior
It should multiply the number not the string `0.5`
</issue>
<code>
[start of server/text_generation_server/utils/watermark.py]
1 # coding=utf-8
2 # Copyright 2023 Authors of "A Watermark for Large Language Models"
3 # available at https://arxiv.org/abs/2301.10226
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 import os
17
18 import torch
19 from transformers import LogitsProcessor
20 from typing import List, Union
21
22 GAMMA = os.getenv("WATERMARK_GAMMA", 0.5)
23 DELTA = os.getenv("WATERMARK_DELTA", 2.0)
24
25
26 class WatermarkLogitsProcessor(LogitsProcessor):
27 def __init__(
28 self,
29 gamma: float = GAMMA,
30 delta: float = DELTA,
31 hash_key: int = 15485863, # just a large prime number to create a rng seed with sufficient bit width
32 device: str = "cpu",
33 ):
34 # watermarking parameters
35 self.gamma = gamma
36 self.delta = delta
37 self.rng = torch.Generator(device=device)
38 self.hash_key = hash_key
39
40 def _seed_rng(self, input_ids: Union[List[int], torch.LongTensor]):
41 if isinstance(input_ids, list):
42 assert (
43 len(input_ids) >= 1
44 ), "requires at least a 1 token prefix sequence to seed rng"
45 prev_token = input_ids[-1]
46 else:
47 assert len(input_ids) == 1
48 input_ids = input_ids[0]
49 assert (
50 input_ids.shape[-1] >= 1
51 ), "requires at least a 1 token prefix sequence to seed rng"
52 prev_token = input_ids[-1].item()
53 self.rng.manual_seed(self.hash_key * prev_token)
54
55 def _get_greenlist_ids(
56 self,
57 input_ids: Union[List[int], torch.LongTensor],
58 max_value: int,
59 device: torch.device,
60 ) -> List[int]:
61 # seed the rng using the previous tokens/prefix
62 self._seed_rng(input_ids)
63
64 greenlist_size = int(max_value * self.gamma)
65 vocab_permutation = torch.randperm(max_value, device=device, generator=self.rng)
66 greenlist_ids = vocab_permutation[:greenlist_size]
67 return greenlist_ids
68
69 @staticmethod
70 def _calc_greenlist_mask(
71 scores: torch.FloatTensor, greenlist_token_ids
72 ) -> torch.BoolTensor:
73 green_tokens_mask = torch.zeros_like(scores)
74 green_tokens_mask[-1, greenlist_token_ids] = 1
75 final_mask = green_tokens_mask.bool()
76 return final_mask
77
78 @staticmethod
79 def _bias_greenlist_logits(
80 scores: torch.Tensor, greenlist_mask: torch.Tensor, greenlist_bias: float
81 ) -> torch.Tensor:
82 scores[greenlist_mask] = scores[greenlist_mask] + greenlist_bias
83 return scores
84
85 def __call__(
86 self, input_ids: Union[List[int], torch.LongTensor], scores: torch.FloatTensor
87 ) -> torch.FloatTensor:
88 greenlist_ids = self._get_greenlist_ids(
89 input_ids, scores.shape[-1], scores.device
90 )
91 green_tokens_mask = self._calc_greenlist_mask(
92 scores=scores, greenlist_token_ids=greenlist_ids
93 )
94
95 scores = self._bias_greenlist_logits(
96 scores=scores, greenlist_mask=green_tokens_mask, greenlist_bias=self.delta
97 )
98 return scores
99
[end of server/text_generation_server/utils/watermark.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/server/text_generation_server/utils/watermark.py b/server/text_generation_server/utils/watermark.py
--- a/server/text_generation_server/utils/watermark.py
+++ b/server/text_generation_server/utils/watermark.py
@@ -19,8 +19,8 @@
from transformers import LogitsProcessor
from typing import List, Union
-GAMMA = os.getenv("WATERMARK_GAMMA", 0.5)
-DELTA = os.getenv("WATERMARK_DELTA", 2.0)
+GAMMA = float(os.getenv("WATERMARK_GAMMA", 0.5))
+DELTA = float(os.getenv("WATERMARK_DELTA", 2.0))
class WatermarkLogitsProcessor(LogitsProcessor):
|
{"golden_diff": "diff --git a/server/text_generation_server/utils/watermark.py b/server/text_generation_server/utils/watermark.py\n--- a/server/text_generation_server/utils/watermark.py\n+++ b/server/text_generation_server/utils/watermark.py\n@@ -19,8 +19,8 @@\n from transformers import LogitsProcessor\n from typing import List, Union\n \n-GAMMA = os.getenv(\"WATERMARK_GAMMA\", 0.5)\n-DELTA = os.getenv(\"WATERMARK_DELTA\", 2.0)\n+GAMMA = float(os.getenv(\"WATERMARK_GAMMA\", 0.5))\n+DELTA = float(os.getenv(\"WATERMARK_DELTA\", 2.0))\n \n \n class WatermarkLogitsProcessor(LogitsProcessor):\n", "issue": "Watermarking bug\n### System Info\n\nusing singularity with the container `text-generation-inference:1.0.0` .\r\n\r\ni get this error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_causal_lm.py\", line 727, in warmup\r\n _, batch = self.generate_token(batch)\r\n File \"/opt/conda/lib/python3.9/contextlib.py\", line 79, in inner\r\n return func(*args, **kwds)\r\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_causal_lm.py\", line 834, in generate_token\r\n next_input_ids, next_token_logprobs = batch.next_token_chooser(\r\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/tokens.py\", line 224, in __call__\r\n scores = self.watermark_processor(input_ids, scores)\r\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/logits_process.py\", line 398, in __call__\r\n scores[i : i + 1] = processor(input_ids[i : i + 1], scores[i : i + 1])\r\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/watermark.py\", line 88, in __call__\r\n greenlist_ids = self._get_greenlist_ids(\r\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/watermark.py\", line 64, in _get_greenlist_ids\r\n greenlist_size = int(max_value * self.gamma)\r\nValueError: invalid literal for int() with base 10: '0.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50\r\n```\r\n\r\ni think `greenlist_size = int(max_value * self.gamma)` should be `greenlist_size = max_value * int(self.gamma)`\r\n\r\nI also tried setting the env variables only `export WATERMARK_GAMMA=0.5 && export WATERMARK_DELTA=2.0` and the exact issue.\r\n\n\n### Information\n\n- [X] Docker\n- [ ] The CLI directly\n\n### Tasks\n\n- [X] An officially supported command\n- [ ] My own modifications\n\n### Reproduction\n\n```bash\r\nexport USE_FLASH_ATTENTION=True && text-generation-launcher \\\r\n--model-id /scratch//models/huggyllama-llama-7b \\\r\n--port 61697 \\\r\n--max-best-of 5 \\\r\n--max-total-tokens 2048 \\\r\n--max-input-length 1024 \\\r\n--max-batch-prefill-tokens 1024 \\\r\n--max-concurrent-requests 20 \\\r\n--sharded false \\\r\n--num-shard 1 \\\r\n--dtype float16 \\\r\n --disable-custom-kernels \\\r\n--watermark-gamma 0.5 \\\r\n--watermark-delta 2\r\n```\n\n### Expected behavior\n\nIt should multiply the number not the string `0.5`\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2023 Authors of \"A Watermark for Large Language Models\"\n# available at https://arxiv.org/abs/2301.10226\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\nimport torch\nfrom transformers import LogitsProcessor\nfrom typing import List, Union\n\nGAMMA = os.getenv(\"WATERMARK_GAMMA\", 0.5)\nDELTA = os.getenv(\"WATERMARK_DELTA\", 2.0)\n\n\nclass WatermarkLogitsProcessor(LogitsProcessor):\n def __init__(\n self,\n gamma: float = GAMMA,\n delta: float = DELTA,\n hash_key: int = 15485863, # just a large prime number to create a rng seed with sufficient bit width\n device: str = \"cpu\",\n ):\n # watermarking parameters\n self.gamma = gamma\n self.delta = delta\n self.rng = torch.Generator(device=device)\n self.hash_key = hash_key\n\n def _seed_rng(self, input_ids: Union[List[int], torch.LongTensor]):\n if isinstance(input_ids, list):\n assert (\n len(input_ids) >= 1\n ), \"requires at least a 1 token prefix sequence to seed rng\"\n prev_token = input_ids[-1]\n else:\n assert len(input_ids) == 1\n input_ids = input_ids[0]\n assert (\n input_ids.shape[-1] >= 1\n ), \"requires at least a 1 token prefix sequence to seed rng\"\n prev_token = input_ids[-1].item()\n self.rng.manual_seed(self.hash_key * prev_token)\n\n def _get_greenlist_ids(\n self,\n input_ids: Union[List[int], torch.LongTensor],\n max_value: int,\n device: torch.device,\n ) -> List[int]:\n # seed the rng using the previous tokens/prefix\n self._seed_rng(input_ids)\n\n greenlist_size = int(max_value * self.gamma)\n vocab_permutation = torch.randperm(max_value, device=device, generator=self.rng)\n greenlist_ids = vocab_permutation[:greenlist_size]\n return greenlist_ids\n\n @staticmethod\n def _calc_greenlist_mask(\n scores: torch.FloatTensor, greenlist_token_ids\n ) -> torch.BoolTensor:\n green_tokens_mask = torch.zeros_like(scores)\n green_tokens_mask[-1, greenlist_token_ids] = 1\n final_mask = green_tokens_mask.bool()\n return final_mask\n\n @staticmethod\n def _bias_greenlist_logits(\n scores: torch.Tensor, greenlist_mask: torch.Tensor, greenlist_bias: float\n ) -> torch.Tensor:\n scores[greenlist_mask] = scores[greenlist_mask] + greenlist_bias\n return scores\n\n def __call__(\n self, input_ids: Union[List[int], torch.LongTensor], scores: torch.FloatTensor\n ) -> torch.FloatTensor:\n greenlist_ids = self._get_greenlist_ids(\n input_ids, scores.shape[-1], scores.device\n )\n green_tokens_mask = self._calc_greenlist_mask(\n scores=scores, greenlist_token_ids=greenlist_ids\n )\n\n scores = self._bias_greenlist_logits(\n scores=scores, greenlist_mask=green_tokens_mask, greenlist_bias=self.delta\n )\n return scores\n", "path": "server/text_generation_server/utils/watermark.py"}]}
| 2,418 | 160 |
gh_patches_debug_22253
|
rasdani/github-patches
|
git_diff
|
pypa__setuptools-2962
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] DistutilsMetaFinder breaks distutils imports when old setuptools is higher on path
### setuptools version
setuptools==60.0.4
### Python version
python3.8
### OS
RHEL8, FreeBSD12/13, Ubuntu 20.04
### Additional environment information
Any environment where an old setuptools (eg OS package) is higher in `sys.path`, but a newer setuptools is installed anywhere on `sys.path` (with its distutils shimming `.pth` file active)
### Description
Any import of `distutils` will bomb when a new setuptools is present, but an older setuptools is higher on the path (since the new setuptools' path shim blindly tries to import from `setuptools._distutils`, which doesn't exist in old versions of setuptools). This has probably been the case ever since the distutils shim was created, but now that it's active by default, it's a bigger problem.
The place we hit this was running `pip install` from a venv with `--system-site-packages`: pip's build isolation installs the latest `setuptools`, but then shuffles `sys.path` around such that the ancient OS-packaged `setuptools` is higher on the path. The first thing that tries to import `distutils` invokes the metapath finder, which has no validation or recovery and just lets the `ModuleNotFoundError` fly.
At first blush, it seems like the metapath shim's `spec_for_distutils` should try to at least verify the co-location of `setuptools` with the shim impl (ie, that they're under the same path prefix) and/or that the `_distutils` subpackage is available via `find_spec` before it actually imports it. Mixing and matching the shim version with random setuptools versions seems like a recipe for trouble down the road...
I could probably throw together a PR for this if there's a consensus that this approach is sane.
### Expected behavior
The presence of a new-but-not-loadable setuptools on sys.path doesn't break old setuptools/distutils.
### How to Reproduce
(NB: this is a much simpler repro than the OS installed setuptools that's actually failing, but illustrates the problem more easily on any OS)
```
python3 -m venv /tmp/bang && source /tmp/bang/bin/activate # give us a clean venv to work in
python -m pip install 'setuptools>60' # install new setuptools to venv site-packages
python -m pip install 'setuptools<45' -t . # install old setuptools to cwd- higher than site-packages on path for most systems
python -c 'import distutils'
```
### Output
```console
...
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/bang/lib64/python3.9/site-packages/_distutils_hack/__init__.py", line 92, in create_module
return importlib.import_module('setuptools._distutils')
File "/usr/lib64/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "/home/mdavis/setuptools/__init__.py", line 6, in <module>
import distutils.core
File "/tmp/bang/lib64/python3.9/site-packages/_distutils_hack/__init__.py", line 92, in create_module
return importlib.import_module('setuptools._distutils')
File "/usr/lib64/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
ModuleNotFoundError: No module named 'setuptools._distutils'
```
### Code of Conduct
- [X] I agree to follow the PSF Code of Conduct
</issue>
<code>
[start of _distutils_hack/__init__.py]
1 import sys
2 import os
3 import re
4 import importlib
5 import warnings
6 import contextlib
7
8
9 is_pypy = '__pypy__' in sys.builtin_module_names
10
11
12 warnings.filterwarnings('ignore',
13 r'.+ distutils\b.+ deprecated',
14 DeprecationWarning)
15
16
17 def warn_distutils_present():
18 if 'distutils' not in sys.modules:
19 return
20 if is_pypy and sys.version_info < (3, 7):
21 # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
22 # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
23 return
24 warnings.warn(
25 "Distutils was imported before Setuptools, but importing Setuptools "
26 "also replaces the `distutils` module in `sys.modules`. This may lead "
27 "to undesirable behaviors or errors. To avoid these issues, avoid "
28 "using distutils directly, ensure that setuptools is installed in the "
29 "traditional way (e.g. not an editable install), and/or make sure "
30 "that setuptools is always imported before distutils.")
31
32
33 def clear_distutils():
34 if 'distutils' not in sys.modules:
35 return
36 warnings.warn("Setuptools is replacing distutils.")
37 mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
38 for name in mods:
39 del sys.modules[name]
40
41
42 def enabled():
43 """
44 Allow selection of distutils by environment variable.
45 """
46 which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
47 return which == 'local'
48
49
50 def ensure_local_distutils():
51 clear_distutils()
52
53 # With the DistutilsMetaFinder in place,
54 # perform an import to cause distutils to be
55 # loaded from setuptools._distutils. Ref #2906.
56 with shim():
57 importlib.import_module('distutils')
58
59 # check that submodules load as expected
60 core = importlib.import_module('distutils.core')
61 assert '_distutils' in core.__file__, core.__file__
62
63
64 def do_override():
65 """
66 Ensure that the local copy of distutils is preferred over stdlib.
67
68 See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
69 for more motivation.
70 """
71 if enabled():
72 warn_distutils_present()
73 ensure_local_distutils()
74
75
76 class DistutilsMetaFinder:
77 def find_spec(self, fullname, path, target=None):
78 if path is not None:
79 return
80
81 method_name = 'spec_for_{fullname}'.format(**locals())
82 method = getattr(self, method_name, lambda: None)
83 return method()
84
85 def spec_for_distutils(self):
86 import importlib.abc
87 import importlib.util
88
89 # In cases of path manipulation during sitecustomize,
90 # Setuptools might actually not be present even though
91 # the hook has been loaded. Allow the caller to fall
92 # back to stdlib behavior. See #2980.
93 if not importlib.util.find_spec('setuptools'):
94 return
95
96 class DistutilsLoader(importlib.abc.Loader):
97
98 def create_module(self, spec):
99 return importlib.import_module('setuptools._distutils')
100
101 def exec_module(self, module):
102 pass
103
104 return importlib.util.spec_from_loader('distutils', DistutilsLoader())
105
106 def spec_for_pip(self):
107 """
108 Ensure stdlib distutils when running under pip.
109 See pypa/pip#8761 for rationale.
110 """
111 if self.pip_imported_during_build():
112 return
113 clear_distutils()
114 self.spec_for_distutils = lambda: None
115
116 @classmethod
117 def pip_imported_during_build(cls):
118 """
119 Detect if pip is being imported in a build script. Ref #2355.
120 """
121 import traceback
122 return any(
123 cls.frame_file_is_setup(frame)
124 for frame, line in traceback.walk_stack(None)
125 )
126
127 @staticmethod
128 def frame_file_is_setup(frame):
129 """
130 Return True if the indicated frame suggests a setup.py file.
131 """
132 # some frames may not have __file__ (#2940)
133 return frame.f_globals.get('__file__', '').endswith('setup.py')
134
135
136 DISTUTILS_FINDER = DistutilsMetaFinder()
137
138
139 def ensure_shim():
140 DISTUTILS_FINDER in sys.meta_path or add_shim()
141
142
143 @contextlib.contextmanager
144 def shim():
145 add_shim()
146 try:
147 yield
148 finally:
149 remove_shim()
150
151
152 def add_shim():
153 sys.meta_path.insert(0, DISTUTILS_FINDER)
154
155
156 def remove_shim():
157 try:
158 sys.meta_path.remove(DISTUTILS_FINDER)
159 except ValueError:
160 pass
161
[end of _distutils_hack/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py
--- a/_distutils_hack/__init__.py
+++ b/_distutils_hack/__init__.py
@@ -86,17 +86,23 @@
import importlib.abc
import importlib.util
- # In cases of path manipulation during sitecustomize,
- # Setuptools might actually not be present even though
- # the hook has been loaded. Allow the caller to fall
- # back to stdlib behavior. See #2980.
- if not importlib.util.find_spec('setuptools'):
+ try:
+ mod = importlib.import_module('setuptools._distutils')
+ except Exception:
+ # There are a couple of cases where setuptools._distutils
+ # may not be present:
+ # - An older Setuptools without a local distutils is
+ # taking precedence. Ref #2957.
+ # - Path manipulation during sitecustomize removes
+ # setuptools from the path but only after the hook
+ # has been loaded. Ref #2980.
+ # In either case, fall back to stdlib behavior.
return
class DistutilsLoader(importlib.abc.Loader):
def create_module(self, spec):
- return importlib.import_module('setuptools._distutils')
+ return mod
def exec_module(self, module):
pass
|
{"golden_diff": "diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py\n--- a/_distutils_hack/__init__.py\n+++ b/_distutils_hack/__init__.py\n@@ -86,17 +86,23 @@\n import importlib.abc\n import importlib.util\n \n- # In cases of path manipulation during sitecustomize,\n- # Setuptools might actually not be present even though\n- # the hook has been loaded. Allow the caller to fall\n- # back to stdlib behavior. See #2980.\n- if not importlib.util.find_spec('setuptools'):\n+ try:\n+ mod = importlib.import_module('setuptools._distutils')\n+ except Exception:\n+ # There are a couple of cases where setuptools._distutils\n+ # may not be present:\n+ # - An older Setuptools without a local distutils is\n+ # taking precedence. Ref #2957.\n+ # - Path manipulation during sitecustomize removes\n+ # setuptools from the path but only after the hook\n+ # has been loaded. Ref #2980.\n+ # In either case, fall back to stdlib behavior.\n return\n \n class DistutilsLoader(importlib.abc.Loader):\n \n def create_module(self, spec):\n- return importlib.import_module('setuptools._distutils')\n+ return mod\n \n def exec_module(self, module):\n pass\n", "issue": "[BUG] DistutilsMetaFinder breaks distutils imports when old setuptools is higher on path\n### setuptools version\r\n\r\nsetuptools==60.0.4\r\n\r\n### Python version\r\n\r\npython3.8\r\n\r\n### OS\r\n\r\nRHEL8, FreeBSD12/13, Ubuntu 20.04\r\n\r\n### Additional environment information\r\n\r\nAny environment where an old setuptools (eg OS package) is higher in `sys.path`, but a newer setuptools is installed anywhere on `sys.path` (with its distutils shimming `.pth` file active)\r\n\r\n### Description\r\n\r\nAny import of `distutils` will bomb when a new setuptools is present, but an older setuptools is higher on the path (since the new setuptools' path shim blindly tries to import from `setuptools._distutils`, which doesn't exist in old versions of setuptools). This has probably been the case ever since the distutils shim was created, but now that it's active by default, it's a bigger problem.\r\n\r\nThe place we hit this was running `pip install` from a venv with `--system-site-packages`- pip's build isolation installs latest `setuptools`, but then shuffles `sys.path` around such that the ancient OS-packaged `setuptools` is higher on the path. The first thing that tries to import `distutils` invokes the metapath finder, which has no validation or recovery and just lets the `ModuleNotFoundError` fly.\r\n\r\nAt first blush, it seems like the metapath shim's `spec_for_distutils` should try to at least verify the co-location of `setuptools` with the shim impl (ie, that they're under the same path prefix) and/or that the `_distutils` subpackage is available via `find_spec` before it actually imports it. 
Mixing and matching the shim version with random setuptools versions seems like a recipe for trouble down the road...\r\n\r\nI could probably throw together a PR for this if there's a consensus that this approach is sane.\r\n\r\n### Expected behavior\r\n\r\nThe presence of a new-but-not-loadable setuptools on sys.path doesn't break old setuptools/distutils.\r\n\r\n\r\n### How to Reproduce\r\n\r\n(NB: this is a much simpler repro than the OS installed setuptools that's actually failing, but illustrates the problem more easily on any OS)\r\n```\r\npython3 -m venv /tmp/bang && source /tmp/bang/bin/activate # give us a clean venv to work in\r\npython -m pip install 'setuptools>60' # install new setuptools to venv site-packages\r\npython -m pip install 'setuptools<45' -t . # install old setuptools to cwd- higher than site-packages on path for most systems\r\npython -c 'import distutils'\r\n```\r\n\r\n### Output\r\n\r\n```console\r\n...\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/tmp/bang/lib64/python3.9/site-packages/_distutils_hack/__init__.py\", line 92, in create_module\r\n return importlib.import_module('setuptools._distutils')\r\n File \"/usr/lib64/python3.9/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"/home/mdavis/setuptools/__init__.py\", line 6, in <module>\r\n import distutils.core\r\n File \"/tmp/bang/lib64/python3.9/site-packages/_distutils_hack/__init__.py\", line 92, in create_module\r\n return importlib.import_module('setuptools._distutils')\r\n File \"/usr/lib64/python3.9/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\nModuleNotFoundError: No module named 'setuptools._distutils'\r\n\r\n```\r\n\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the PSF Code of Conduct\n", "before_files": [{"content": "import sys\nimport os\nimport re\nimport importlib\nimport warnings\nimport contextlib\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\nwarnings.filterwarnings('ignore',\n r'.+ distutils\\b.+ deprecated',\n DeprecationWarning)\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. 
not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n\n # With the DistutilsMetaFinder in place,\n # perform an import to cause distutils to be\n # loaded from setuptools._distutils. Ref #2906.\n with shim():\n importlib.import_module('distutils')\n\n # check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib.abc\n import importlib.util\n\n # In cases of path manipulation during sitecustomize,\n # Setuptools might actually not be present even though\n # the hook has been loaded. Allow the caller to fall\n # back to stdlib behavior. See #2980.\n if not importlib.util.find_spec('setuptools'):\n return\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return importlib.import_module('setuptools._distutils')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n if self.pip_imported_during_build():\n return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n @classmethod\n def pip_imported_during_build(cls):\n \"\"\"\n Detect if pip is being imported in a build script. Ref #2355.\n \"\"\"\n import traceback\n return any(\n cls.frame_file_is_setup(frame)\n for frame, line in traceback.walk_stack(None)\n )\n\n @staticmethod\n def frame_file_is_setup(frame):\n \"\"\"\n Return True if the indicated frame suggests a setup.py file.\n \"\"\"\n # some frames may not have __file__ (#2940)\n return frame.f_globals.get('__file__', '').endswith('setup.py')\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef ensure_shim():\n DISTUTILS_FINDER in sys.meta_path or add_shim()\n\n\[email protected]\ndef shim():\n add_shim()\n try:\n yield\n finally:\n remove_shim()\n\n\ndef add_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}]}
| 2,860 | 327 |
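The setuptools record above centers on guarding the `setuptools._distutils` import so that a stale setuptools higher on `sys.path` falls back to stdlib distutils instead of raising `ModuleNotFoundError`. The sketch below isolates that guarded-import pattern; it follows the shape of the record's golden diff, but the helper name and the standalone `print` call are illustrative, not part of the record.

```python
import importlib
import importlib.abc
import importlib.util

def spec_for_distutils_sketch():
    """Guarded lookup: only shim distutils if setuptools._distutils actually imports."""
    try:
        mod = importlib.import_module("setuptools._distutils")
    except Exception:
        return None  # old setuptools (or none) on the path: let stdlib distutils win

    class _Loader(importlib.abc.Loader):
        def create_module(self, spec):
            return mod           # reuse the package imported during the guard

        def exec_module(self, module):
            pass

    return importlib.util.spec_from_loader("distutils", _Loader())

print(spec_for_distutils_sketch())
```

Doing the import once, up front, also avoids re-importing inside `create_module`, which is where the original shim failed when an older setuptools shadowed the new one.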
gh_patches_debug_9506
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-574
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
headline css is broken
our headlines don't use h1 h2 etc anymore and look completely broken on firefox mobile.
</issue>
<code>
[start of evap/staff/templatetags/staff_templatetags.py]
1 from django.template import Library
2
3 from evap.evaluation.models import Semester
4
5 register = Library()
6
7
8 @register.inclusion_tag("staff_semester_menu.html")
9 def include_staff_semester_menu():
10 return dict(semesters=Semester.objects.all()[:5])
11
12 @register.inclusion_tag('staff_course_selection_list.html')
13 def include_staff_course_selection_list(course_forms, btn_label, empty_msg):
14 return {
15 'course_forms': course_forms,
16 'btn_label': btn_label,
17 'empty_msg': empty_msg
18 }
19
20 @register.inclusion_tag('staff_user_labels.html')
21 def include_staff_user_labels(user):
22 return dict(user=user)
23
24 @register.inclusion_tag("staff_questionnaire_index_list.html")
25 def include_staff_questionnaire_index_list(questionnaires, headline):
26 return dict(questionnaires=questionnaires, headline=headline)
27
28 @register.inclusion_tag("staff_semester_view_course.html")
29 def include_staff_semester_view_course(semester, state, course, disable_if_archived, info_only=False):
30 return dict(semester=semester, state=state, course=course, disable_if_archived=disable_if_archived, info_only=info_only)
31
[end of evap/staff/templatetags/staff_templatetags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/evap/staff/templatetags/staff_templatetags.py b/evap/staff/templatetags/staff_templatetags.py
--- a/evap/staff/templatetags/staff_templatetags.py
+++ b/evap/staff/templatetags/staff_templatetags.py
@@ -9,14 +9,6 @@
def include_staff_semester_menu():
return dict(semesters=Semester.objects.all()[:5])
[email protected]_tag('staff_course_selection_list.html')
-def include_staff_course_selection_list(course_forms, btn_label, empty_msg):
- return {
- 'course_forms': course_forms,
- 'btn_label': btn_label,
- 'empty_msg': empty_msg
- }
-
@register.inclusion_tag('staff_user_labels.html')
def include_staff_user_labels(user):
return dict(user=user)
|
{"golden_diff": "diff --git a/evap/staff/templatetags/staff_templatetags.py b/evap/staff/templatetags/staff_templatetags.py\n--- a/evap/staff/templatetags/staff_templatetags.py\n+++ b/evap/staff/templatetags/staff_templatetags.py\n@@ -9,14 +9,6 @@\n def include_staff_semester_menu():\n return dict(semesters=Semester.objects.all()[:5])\n \[email protected]_tag('staff_course_selection_list.html')\n-def include_staff_course_selection_list(course_forms, btn_label, empty_msg):\n- return {\n- 'course_forms': course_forms,\n- 'btn_label': btn_label,\n- 'empty_msg': empty_msg\n- }\n-\n @register.inclusion_tag('staff_user_labels.html')\n def include_staff_user_labels(user):\n return dict(user=user)\n", "issue": "headline css is broken\nour headlines don't use h1 h2 etc anymore and look completely broken on firefox mobile.\n\n", "before_files": [{"content": "from django.template import Library\n\nfrom evap.evaluation.models import Semester\n\nregister = Library()\n\n\[email protected]_tag(\"staff_semester_menu.html\")\ndef include_staff_semester_menu():\n return dict(semesters=Semester.objects.all()[:5])\n\[email protected]_tag('staff_course_selection_list.html')\ndef include_staff_course_selection_list(course_forms, btn_label, empty_msg):\n return {\n 'course_forms': course_forms,\n 'btn_label': btn_label,\n 'empty_msg': empty_msg\n }\n\[email protected]_tag('staff_user_labels.html')\ndef include_staff_user_labels(user):\n return dict(user=user)\n\[email protected]_tag(\"staff_questionnaire_index_list.html\")\ndef include_staff_questionnaire_index_list(questionnaires, headline):\n return dict(questionnaires=questionnaires, headline=headline)\n\[email protected]_tag(\"staff_semester_view_course.html\")\ndef include_staff_semester_view_course(semester, state, course, disable_if_archived, info_only=False):\n return dict(semester=semester, state=state, course=course, disable_if_archived=disable_if_archived, info_only=info_only)\n", "path": "evap/staff/templatetags/staff_templatetags.py"}]}
| 884 | 206 |
gh_patches_debug_34651
|
rasdani/github-patches
|
git_diff
|
scoutapp__scout_apm_python-499
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hide scout message about event draining
When `log_level` is set to `ERROR` and logs are redirected to a log file through `log_file_path`, scout still logs a message like: `Scout draining 1 event for up to 2.0 seconds` on STDOUT, is it possible to remove this message from STDOUT?
Thank you!
</issue>
<code>
[start of src/scout_apm/core/__init__.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import atexit
5 import logging
6 import os
7 import sys
8
9 from scout_apm import instruments
10 from scout_apm.compat import kwargs_only
11 from scout_apm.core import objtrace
12 from scout_apm.core.config import scout_config
13 from scout_apm.core.core_agent_manager import CoreAgentManager
14 from scout_apm.core.metadata import report_app_metadata
15 from scout_apm.core.socket import CoreAgentSocketThread
16
17 logger = logging.getLogger(__name__)
18
19
20 @kwargs_only
21 def install(config=None):
22 global shutdown_registered
23 if config is not None:
24 scout_config.set(**config)
25 scout_config.log()
26
27 if os.name == "nt":
28 logger.info(
29 "APM Not Launching on PID: %s - Windows is not supported", os.getpid()
30 )
31 return False
32
33 if not scout_config.value("monitor"):
34 logger.info(
35 "APM Not Launching on PID: %s - Configuration 'monitor' is not true",
36 os.getpid(),
37 )
38 return False
39
40 instruments.ensure_all_installed()
41 objtrace.enable()
42
43 logger.debug("APM Launching on PID: %s", os.getpid())
44 launched = CoreAgentManager().launch()
45
46 report_app_metadata()
47 if launched:
48 # Stop the thread to avoid running threads pre-fork
49 CoreAgentSocketThread.ensure_stopped()
50
51 if scout_config.value("shutdown_timeout_seconds") > 0.0 and not shutdown_registered:
52 atexit.register(shutdown)
53 shutdown_registered = True
54
55 return True
56
57
58 shutdown_registered = False
59
60
61 def shutdown():
62 timeout_seconds = scout_config.value("shutdown_timeout_seconds")
63
64 def callback(queue_size):
65 print( # noqa: T001
66 (
67 "Scout draining {queue_size} event{s} for up to"
68 + " {timeout_seconds} seconds"
69 ).format(
70 queue_size=queue_size,
71 s=("" if queue_size == 1 else "s"),
72 timeout_seconds=timeout_seconds,
73 ),
74 file=sys.stderr,
75 )
76
77 CoreAgentSocketThread.wait_until_drained(
78 timeout_seconds=timeout_seconds, callback=callback
79 )
80
[end of src/scout_apm/core/__init__.py]
[start of src/scout_apm/core/config.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import logging
5 import os
6 import warnings
7
8 from scout_apm.compat import string_type
9 from scout_apm.core import platform_detection
10
11 logger = logging.getLogger(__name__)
12
13
14 class ScoutConfig(object):
15 """
16 Configuration object for the ScoutApm agent.
17
18 Contains a list of configuration "layers". When a configuration key is
19 looked up, each layer is asked in turn if it knows the value. The first one
20 to answer affirmatively returns the value.
21 """
22
23 def __init__(self):
24 self.layers = [
25 Env(),
26 Python(),
27 Derived(self),
28 Defaults(),
29 Null(),
30 ]
31
32 def value(self, key):
33 value = self.locate_layer_for_key(key).value(key)
34 if key in CONVERSIONS:
35 return CONVERSIONS[key](value)
36 return value
37
38 def locate_layer_for_key(self, key):
39 for layer in self.layers:
40 if layer.has_config(key):
41 return layer
42
43 # Should be unreachable because Null returns None for all keys.
44 raise ValueError("key {!r} not found in any layer".format(key))
45
46 def log(self):
47 logger.debug("Configuration Loaded:")
48 for key in self.known_keys():
49 layer = self.locate_layer_for_key(key)
50 logger.debug(
51 "%-9s: %s = %s", layer.__class__.__name__, key, layer.value(key)
52 )
53
54 def known_keys(self):
55 return [
56 "app_server",
57 "application_root",
58 "core_agent_dir",
59 "core_agent_download",
60 "core_agent_launch",
61 "core_agent_log_level",
62 "core_agent_permissions",
63 "core_agent_version",
64 "disabled_instruments",
65 "download_url",
66 "framework",
67 "framework_version",
68 "hostname",
69 "ignore",
70 "key",
71 "log_level",
72 "monitor",
73 "name",
74 "revision_sha",
75 "scm_subdirectory",
76 "shutdown_timeout_seconds",
77 "socket_path",
78 ]
79
80 def core_agent_permissions(self):
81 try:
82 return int(str(self.value("core_agent_permissions")), 8)
83 except ValueError:
84 logger.exception(
85 "Invalid core_agent_permissions value, using default of 0o700"
86 )
87 return 0o700
88
89 @classmethod
90 def set(cls, **kwargs):
91 """
92 Sets a configuration value for the Scout agent. Values set here will
93 not override values set in ENV.
94 """
95 for key, value in kwargs.items():
96 SCOUT_PYTHON_VALUES[key] = value
97
98 @classmethod
99 def unset(cls, *keys):
100 """
101 Removes a configuration value for the Scout agent.
102 """
103 for key in keys:
104 SCOUT_PYTHON_VALUES.pop(key, None)
105
106 @classmethod
107 def reset_all(cls):
108 """
109 Remove all configuration settings set via `ScoutConfig.set(...)`.
110
111 This is meant for use in testing.
112 """
113 SCOUT_PYTHON_VALUES.clear()
114
115
116 # Module-level data, the ScoutConfig.set(key="value") adds to this
117 SCOUT_PYTHON_VALUES = {}
118
119
120 class Python(object):
121 """
122 A configuration overlay that lets other parts of python set values.
123 """
124
125 def has_config(self, key):
126 return key in SCOUT_PYTHON_VALUES
127
128 def value(self, key):
129 return SCOUT_PYTHON_VALUES[key]
130
131
132 class Env(object):
133 """
134 Reads configuration from environment by prefixing the key
135 requested with "SCOUT_"
136
137 Example: the `key` config looks for SCOUT_KEY
138 environment variable
139 """
140
141 def has_config(self, key):
142 env_key = self.modify_key(key)
143 return env_key in os.environ
144
145 def value(self, key):
146 env_key = self.modify_key(key)
147 return os.environ[env_key]
148
149 def modify_key(self, key):
150 env_key = ("SCOUT_" + key).upper()
151 return env_key
152
153
154 class Derived(object):
155 """
156 A configuration overlay that calculates from other values.
157 """
158
159 def __init__(self, config):
160 """
161 config argument is the overall ScoutConfig var, so we can lookup the
162 components of the derived info.
163 """
164 self.config = config
165
166 def has_config(self, key):
167 return self.lookup_func(key) is not None
168
169 def value(self, key):
170 return self.lookup_func(key)()
171
172 def lookup_func(self, key):
173 """
174 Returns the derive_#{key} function, or None if it isn't defined
175 """
176 func_name = "derive_" + key
177 return getattr(self, func_name, None)
178
179 def derive_socket_path(self):
180 return "{}/{}/scout-agent.sock".format(
181 self.config.value("core_agent_dir"),
182 self.config.value("core_agent_full_name"),
183 )
184
185 def derive_core_agent_full_name(self):
186 triple = self.config.value("core_agent_triple")
187 if not platform_detection.is_valid_triple(triple):
188 warnings.warn("Invalid value for core_agent_triple: {}".format(triple))
189 return "{name}-{version}-{triple}".format(
190 name="scout_apm_core",
191 version=self.config.value("core_agent_version"),
192 triple=triple,
193 )
194
195 def derive_core_agent_triple(self):
196 return platform_detection.get_triple()
197
198
199 class Defaults(object):
200 """
201 Provides default values for important configurations
202 """
203
204 def __init__(self):
205 self.defaults = {
206 "app_server": "",
207 "application_root": "",
208 "core_agent_dir": "/tmp/scout_apm_core",
209 "core_agent_download": True,
210 "core_agent_launch": True,
211 "core_agent_log_level": "info",
212 "core_agent_permissions": 700,
213 "core_agent_version": "v1.2.6", # can be an exact tag name, or 'latest'
214 "disabled_instruments": [],
215 "download_url": "https://s3-us-west-1.amazonaws.com/scout-public-downloads/apm_core_agent/release", # noqa: E501
216 "framework": "",
217 "framework_version": "",
218 "hostname": None,
219 "key": "",
220 "monitor": False,
221 "name": "Python App",
222 "revision_sha": self._git_revision_sha(),
223 "scm_subdirectory": "",
224 "shutdown_timeout_seconds": 2.0,
225 "uri_reporting": "filtered_params",
226 }
227
228 def _git_revision_sha(self):
229 # N.B. The environment variable SCOUT_REVISION_SHA may also be used,
230 # but that will be picked up by Env
231 return os.environ.get("HEROKU_SLUG_COMMIT", "")
232
233 def has_config(self, key):
234 return key in self.defaults
235
236 def value(self, key):
237 return self.defaults[key]
238
239
240 class Null(object):
241 """
242 Always answers that a key is present, but the value is None
243
244 Used as the last step of the layered configuration.
245 """
246
247 def has_config(self, key):
248 return True
249
250 def value(self, key):
251 return None
252
253
254 def convert_to_bool(value):
255 if isinstance(value, bool):
256 return value
257 if isinstance(value, string_type):
258 return value.lower() in ("yes", "true", "t", "1")
259 # Unknown type - default to false?
260 return False
261
262
263 def convert_to_float(value):
264 try:
265 return float(value)
266 except ValueError:
267 return 0.0
268
269
270 def convert_to_list(value):
271 if isinstance(value, list):
272 return value
273 if isinstance(value, tuple):
274 return list(value)
275 if isinstance(value, string_type):
276 # Split on commas
277 return [item.strip() for item in value.split(",") if item]
278 # Unknown type - default to empty?
279 return []
280
281
282 CONVERSIONS = {
283 "core_agent_download": convert_to_bool,
284 "core_agent_launch": convert_to_bool,
285 "disabled_instruments": convert_to_list,
286 "ignore": convert_to_list,
287 "monitor": convert_to_bool,
288 "shutdown_timeout_seconds": convert_to_float,
289 }
290
291
292 scout_config = ScoutConfig()
293
[end of src/scout_apm/core/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/scout_apm/core/__init__.py b/src/scout_apm/core/__init__.py
--- a/src/scout_apm/core/__init__.py
+++ b/src/scout_apm/core/__init__.py
@@ -62,17 +62,18 @@
timeout_seconds = scout_config.value("shutdown_timeout_seconds")
def callback(queue_size):
- print( # noqa: T001
- (
- "Scout draining {queue_size} event{s} for up to"
- + " {timeout_seconds} seconds"
- ).format(
- queue_size=queue_size,
- s=("" if queue_size == 1 else "s"),
- timeout_seconds=timeout_seconds,
- ),
- file=sys.stderr,
- )
+ if scout_config.value("shutdown_message_enabled"):
+ print( # noqa: T001
+ (
+ "Scout draining {queue_size} event{s} for up to"
+ + " {timeout_seconds} seconds"
+ ).format(
+ queue_size=queue_size,
+ s=("" if queue_size == 1 else "s"),
+ timeout_seconds=timeout_seconds,
+ ),
+ file=sys.stderr,
+ )
CoreAgentSocketThread.wait_until_drained(
timeout_seconds=timeout_seconds, callback=callback
diff --git a/src/scout_apm/core/config.py b/src/scout_apm/core/config.py
--- a/src/scout_apm/core/config.py
+++ b/src/scout_apm/core/config.py
@@ -73,6 +73,7 @@
"name",
"revision_sha",
"scm_subdirectory",
+ "shutdown_message_enabled",
"shutdown_timeout_seconds",
"socket_path",
]
@@ -221,6 +222,7 @@
"name": "Python App",
"revision_sha": self._git_revision_sha(),
"scm_subdirectory": "",
+ "shutdown_message_enabled": True,
"shutdown_timeout_seconds": 2.0,
"uri_reporting": "filtered_params",
}
@@ -285,6 +287,7 @@
"disabled_instruments": convert_to_list,
"ignore": convert_to_list,
"monitor": convert_to_bool,
+ "shutdown_message_enabled": convert_to_bool,
"shutdown_timeout_seconds": convert_to_float,
}
|
{"golden_diff": "diff --git a/src/scout_apm/core/__init__.py b/src/scout_apm/core/__init__.py\n--- a/src/scout_apm/core/__init__.py\n+++ b/src/scout_apm/core/__init__.py\n@@ -62,17 +62,18 @@\n timeout_seconds = scout_config.value(\"shutdown_timeout_seconds\")\n \n def callback(queue_size):\n- print( # noqa: T001\n- (\n- \"Scout draining {queue_size} event{s} for up to\"\n- + \" {timeout_seconds} seconds\"\n- ).format(\n- queue_size=queue_size,\n- s=(\"\" if queue_size == 1 else \"s\"),\n- timeout_seconds=timeout_seconds,\n- ),\n- file=sys.stderr,\n- )\n+ if scout_config.value(\"shutdown_message_enabled\"):\n+ print( # noqa: T001\n+ (\n+ \"Scout draining {queue_size} event{s} for up to\"\n+ + \" {timeout_seconds} seconds\"\n+ ).format(\n+ queue_size=queue_size,\n+ s=(\"\" if queue_size == 1 else \"s\"),\n+ timeout_seconds=timeout_seconds,\n+ ),\n+ file=sys.stderr,\n+ )\n \n CoreAgentSocketThread.wait_until_drained(\n timeout_seconds=timeout_seconds, callback=callback\ndiff --git a/src/scout_apm/core/config.py b/src/scout_apm/core/config.py\n--- a/src/scout_apm/core/config.py\n+++ b/src/scout_apm/core/config.py\n@@ -73,6 +73,7 @@\n \"name\",\n \"revision_sha\",\n \"scm_subdirectory\",\n+ \"shutdown_message_enabled\",\n \"shutdown_timeout_seconds\",\n \"socket_path\",\n ]\n@@ -221,6 +222,7 @@\n \"name\": \"Python App\",\n \"revision_sha\": self._git_revision_sha(),\n \"scm_subdirectory\": \"\",\n+ \"shutdown_message_enabled\": True,\n \"shutdown_timeout_seconds\": 2.0,\n \"uri_reporting\": \"filtered_params\",\n }\n@@ -285,6 +287,7 @@\n \"disabled_instruments\": convert_to_list,\n \"ignore\": convert_to_list,\n \"monitor\": convert_to_bool,\n+ \"shutdown_message_enabled\": convert_to_bool,\n \"shutdown_timeout_seconds\": convert_to_float,\n }\n", "issue": "Hide scout message about event draining\nWhen `log_level` is set to `ERROR` and logs are redirected to a log file through `log_file_path`, scout still logs a message like: `Scout draining 1 event for up to 2.0 seconds` on STDOUT, is it possible to remove this message from STDOUT?\r\nThank you!\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport atexit\nimport logging\nimport os\nimport sys\n\nfrom scout_apm import instruments\nfrom scout_apm.compat import kwargs_only\nfrom scout_apm.core import objtrace\nfrom scout_apm.core.config import scout_config\nfrom scout_apm.core.core_agent_manager import CoreAgentManager\nfrom scout_apm.core.metadata import report_app_metadata\nfrom scout_apm.core.socket import CoreAgentSocketThread\n\nlogger = logging.getLogger(__name__)\n\n\n@kwargs_only\ndef install(config=None):\n global shutdown_registered\n if config is not None:\n scout_config.set(**config)\n scout_config.log()\n\n if os.name == \"nt\":\n logger.info(\n \"APM Not Launching on PID: %s - Windows is not supported\", os.getpid()\n )\n return False\n\n if not scout_config.value(\"monitor\"):\n logger.info(\n \"APM Not Launching on PID: %s - Configuration 'monitor' is not true\",\n os.getpid(),\n )\n return False\n\n instruments.ensure_all_installed()\n objtrace.enable()\n\n logger.debug(\"APM Launching on PID: %s\", os.getpid())\n launched = CoreAgentManager().launch()\n\n report_app_metadata()\n if launched:\n # Stop the thread to avoid running threads pre-fork\n CoreAgentSocketThread.ensure_stopped()\n\n if scout_config.value(\"shutdown_timeout_seconds\") > 0.0 and not shutdown_registered:\n atexit.register(shutdown)\n shutdown_registered = True\n\n 
return True\n\n\nshutdown_registered = False\n\n\ndef shutdown():\n timeout_seconds = scout_config.value(\"shutdown_timeout_seconds\")\n\n def callback(queue_size):\n print( # noqa: T001\n (\n \"Scout draining {queue_size} event{s} for up to\"\n + \" {timeout_seconds} seconds\"\n ).format(\n queue_size=queue_size,\n s=(\"\" if queue_size == 1 else \"s\"),\n timeout_seconds=timeout_seconds,\n ),\n file=sys.stderr,\n )\n\n CoreAgentSocketThread.wait_until_drained(\n timeout_seconds=timeout_seconds, callback=callback\n )\n", "path": "src/scout_apm/core/__init__.py"}, {"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport os\nimport warnings\n\nfrom scout_apm.compat import string_type\nfrom scout_apm.core import platform_detection\n\nlogger = logging.getLogger(__name__)\n\n\nclass ScoutConfig(object):\n \"\"\"\n Configuration object for the ScoutApm agent.\n\n Contains a list of configuration \"layers\". When a configuration key is\n looked up, each layer is asked in turn if it knows the value. The first one\n to answer affirmatively returns the value.\n \"\"\"\n\n def __init__(self):\n self.layers = [\n Env(),\n Python(),\n Derived(self),\n Defaults(),\n Null(),\n ]\n\n def value(self, key):\n value = self.locate_layer_for_key(key).value(key)\n if key in CONVERSIONS:\n return CONVERSIONS[key](value)\n return value\n\n def locate_layer_for_key(self, key):\n for layer in self.layers:\n if layer.has_config(key):\n return layer\n\n # Should be unreachable because Null returns None for all keys.\n raise ValueError(\"key {!r} not found in any layer\".format(key))\n\n def log(self):\n logger.debug(\"Configuration Loaded:\")\n for key in self.known_keys():\n layer = self.locate_layer_for_key(key)\n logger.debug(\n \"%-9s: %s = %s\", layer.__class__.__name__, key, layer.value(key)\n )\n\n def known_keys(self):\n return [\n \"app_server\",\n \"application_root\",\n \"core_agent_dir\",\n \"core_agent_download\",\n \"core_agent_launch\",\n \"core_agent_log_level\",\n \"core_agent_permissions\",\n \"core_agent_version\",\n \"disabled_instruments\",\n \"download_url\",\n \"framework\",\n \"framework_version\",\n \"hostname\",\n \"ignore\",\n \"key\",\n \"log_level\",\n \"monitor\",\n \"name\",\n \"revision_sha\",\n \"scm_subdirectory\",\n \"shutdown_timeout_seconds\",\n \"socket_path\",\n ]\n\n def core_agent_permissions(self):\n try:\n return int(str(self.value(\"core_agent_permissions\")), 8)\n except ValueError:\n logger.exception(\n \"Invalid core_agent_permissions value, using default of 0o700\"\n )\n return 0o700\n\n @classmethod\n def set(cls, **kwargs):\n \"\"\"\n Sets a configuration value for the Scout agent. 
Values set here will\n not override values set in ENV.\n \"\"\"\n for key, value in kwargs.items():\n SCOUT_PYTHON_VALUES[key] = value\n\n @classmethod\n def unset(cls, *keys):\n \"\"\"\n Removes a configuration value for the Scout agent.\n \"\"\"\n for key in keys:\n SCOUT_PYTHON_VALUES.pop(key, None)\n\n @classmethod\n def reset_all(cls):\n \"\"\"\n Remove all configuration settings set via `ScoutConfig.set(...)`.\n\n This is meant for use in testing.\n \"\"\"\n SCOUT_PYTHON_VALUES.clear()\n\n\n# Module-level data, the ScoutConfig.set(key=\"value\") adds to this\nSCOUT_PYTHON_VALUES = {}\n\n\nclass Python(object):\n \"\"\"\n A configuration overlay that lets other parts of python set values.\n \"\"\"\n\n def has_config(self, key):\n return key in SCOUT_PYTHON_VALUES\n\n def value(self, key):\n return SCOUT_PYTHON_VALUES[key]\n\n\nclass Env(object):\n \"\"\"\n Reads configuration from environment by prefixing the key\n requested with \"SCOUT_\"\n\n Example: the `key` config looks for SCOUT_KEY\n environment variable\n \"\"\"\n\n def has_config(self, key):\n env_key = self.modify_key(key)\n return env_key in os.environ\n\n def value(self, key):\n env_key = self.modify_key(key)\n return os.environ[env_key]\n\n def modify_key(self, key):\n env_key = (\"SCOUT_\" + key).upper()\n return env_key\n\n\nclass Derived(object):\n \"\"\"\n A configuration overlay that calculates from other values.\n \"\"\"\n\n def __init__(self, config):\n \"\"\"\n config argument is the overall ScoutConfig var, so we can lookup the\n components of the derived info.\n \"\"\"\n self.config = config\n\n def has_config(self, key):\n return self.lookup_func(key) is not None\n\n def value(self, key):\n return self.lookup_func(key)()\n\n def lookup_func(self, key):\n \"\"\"\n Returns the derive_#{key} function, or None if it isn't defined\n \"\"\"\n func_name = \"derive_\" + key\n return getattr(self, func_name, None)\n\n def derive_socket_path(self):\n return \"{}/{}/scout-agent.sock\".format(\n self.config.value(\"core_agent_dir\"),\n self.config.value(\"core_agent_full_name\"),\n )\n\n def derive_core_agent_full_name(self):\n triple = self.config.value(\"core_agent_triple\")\n if not platform_detection.is_valid_triple(triple):\n warnings.warn(\"Invalid value for core_agent_triple: {}\".format(triple))\n return \"{name}-{version}-{triple}\".format(\n name=\"scout_apm_core\",\n version=self.config.value(\"core_agent_version\"),\n triple=triple,\n )\n\n def derive_core_agent_triple(self):\n return platform_detection.get_triple()\n\n\nclass Defaults(object):\n \"\"\"\n Provides default values for important configurations\n \"\"\"\n\n def __init__(self):\n self.defaults = {\n \"app_server\": \"\",\n \"application_root\": \"\",\n \"core_agent_dir\": \"/tmp/scout_apm_core\",\n \"core_agent_download\": True,\n \"core_agent_launch\": True,\n \"core_agent_log_level\": \"info\",\n \"core_agent_permissions\": 700,\n \"core_agent_version\": \"v1.2.6\", # can be an exact tag name, or 'latest'\n \"disabled_instruments\": [],\n \"download_url\": \"https://s3-us-west-1.amazonaws.com/scout-public-downloads/apm_core_agent/release\", # noqa: E501\n \"framework\": \"\",\n \"framework_version\": \"\",\n \"hostname\": None,\n \"key\": \"\",\n \"monitor\": False,\n \"name\": \"Python App\",\n \"revision_sha\": self._git_revision_sha(),\n \"scm_subdirectory\": \"\",\n \"shutdown_timeout_seconds\": 2.0,\n \"uri_reporting\": \"filtered_params\",\n }\n\n def _git_revision_sha(self):\n # N.B. 
The environment variable SCOUT_REVISION_SHA may also be used,\n # but that will be picked up by Env\n return os.environ.get(\"HEROKU_SLUG_COMMIT\", \"\")\n\n def has_config(self, key):\n return key in self.defaults\n\n def value(self, key):\n return self.defaults[key]\n\n\nclass Null(object):\n \"\"\"\n Always answers that a key is present, but the value is None\n\n Used as the last step of the layered configuration.\n \"\"\"\n\n def has_config(self, key):\n return True\n\n def value(self, key):\n return None\n\n\ndef convert_to_bool(value):\n if isinstance(value, bool):\n return value\n if isinstance(value, string_type):\n return value.lower() in (\"yes\", \"true\", \"t\", \"1\")\n # Unknown type - default to false?\n return False\n\n\ndef convert_to_float(value):\n try:\n return float(value)\n except ValueError:\n return 0.0\n\n\ndef convert_to_list(value):\n if isinstance(value, list):\n return value\n if isinstance(value, tuple):\n return list(value)\n if isinstance(value, string_type):\n # Split on commas\n return [item.strip() for item in value.split(\",\") if item]\n # Unknown type - default to empty?\n return []\n\n\nCONVERSIONS = {\n \"core_agent_download\": convert_to_bool,\n \"core_agent_launch\": convert_to_bool,\n \"disabled_instruments\": convert_to_list,\n \"ignore\": convert_to_list,\n \"monitor\": convert_to_bool,\n \"shutdown_timeout_seconds\": convert_to_float,\n}\n\n\nscout_config = ScoutConfig()\n", "path": "src/scout_apm/core/config.py"}]}
| 3,831 | 526 |
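The scout_apm record above gates the shutdown drain message behind a new `shutdown_message_enabled` setting consulted before printing to stderr. The sketch below shows that gating idea on its own, with a plain dict standing in for `scout_config`; the dict and function names are assumptions for illustration only.

```python
import sys

# hypothetical stand-in for scout_config in the record above
config = {"shutdown_message_enabled": False, "shutdown_timeout_seconds": 2.0}

def drain_callback(queue_size):
    if not config["shutdown_message_enabled"]:
        return  # keep stderr quiet unless the flag is switched on
    plural = "" if queue_size == 1 else "s"
    print(
        "Scout draining {0} event{1} for up to {2} seconds".format(
            queue_size, plural, config["shutdown_timeout_seconds"]
        ),
        file=sys.stderr,
    )

drain_callback(1)  # prints nothing while the flag is off
```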
gh_patches_debug_22092
|
rasdani/github-patches
|
git_diff
|
Parsl__parsl-777
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make usage tracking opt-in
</issue>
<code>
[start of parsl/config.py]
1 import logging
2
3 from parsl.utils import RepresentationMixin
4 from parsl.executors.threads import ThreadPoolExecutor
5 from parsl.dataflow.error import ConfigurationError
6
7 logger = logging.getLogger(__name__)
8
9
10 class Config(RepresentationMixin):
11 """
12 Specification of Parsl configuration options.
13
14 Parameters
15 ----------
16 executors : list of ParslExecutor, optional
17 List of executor instances to use. Possible executors include :class:`~parsl.executors.threads.ThreadPoolExecutor`,
18 :class:`~parsl.executors.ipp.IPyParallelExecutor`, or :class:`~parsl.executors.swift_t.TurbineExecutor`. Default
19 is [:class:`~parsl.executors.threads.ThreadPoolExecutor()`].
20 app_cache : bool, optional
21 Enable app caching. Default is True.
22 checkpoint_files : list of str, optional
23 List of paths to checkpoint files. Default is None.
24 checkpoint_mode : str, optional
25 Checkpoint mode to use, can be 'dfk_exit', 'task_exit', or 'periodic'. If set to
26 `None`, checkpointing will be disabled. Default is None.
27 checkpoint_period : str, optional
28 Time interval (in "HH:MM:SS") at which to checkpoint completed tasks. Only has an effect if
29 `checkpoint_mode='periodic'`.
30 data_management_max_threads : int, optional
31 Maximum number of threads to allocate for the data manager to use for managing input and output transfers.
32 Default is 10.
33 monitoring_config : MonitoringConfig, optional
34 The config to use for database monitoring. Default is None which does not log to a database.
35 lazy_errors : bool, optional
36 If True, errors from task failures will not be raised until `future.result()` is called. Otherwise, they will
37 be raised as soon as the task returns. Default is True.
38 retries : int, optional
39 Set the number of retries in case of failure. Default is 0.
40 run_dir : str, optional
41 Path to run directory. Default is 'runinfo'.
42 strategy : str, optional
43 Strategy to use for scaling resources according to workflow needs. Can be 'simple' or `None`. If `None`, dynamic
44 scaling will be disabled. Default is 'simple'.
45 usage_tracking : bool, optional
46 Enable usage tracking. Default is True.
47 """
48 def __init__(self,
49 executors=None,
50 app_cache=True,
51 checkpoint_files=None,
52 checkpoint_mode=None,
53 checkpoint_period=None,
54 data_management_max_threads=10,
55 lazy_errors=True,
56 retries=0,
57 run_dir='runinfo',
58 strategy='simple',
59 monitoring_config=None,
60 usage_tracking=True):
61 if executors is None:
62 executors = [ThreadPoolExecutor()]
63 self.executors = executors
64 self.app_cache = app_cache
65 self.checkpoint_files = checkpoint_files
66 self.checkpoint_mode = checkpoint_mode
67 if checkpoint_period is not None:
68 if checkpoint_mode is None:
69 logger.debug('The requested `checkpoint_period={}` will have no effect because `checkpoint_mode=None`'.format(
70 checkpoint_period)
71 )
72 elif checkpoint_mode != 'periodic':
73 logger.debug("Requested checkpoint period of {} only has an effect with checkpoint_mode='periodic'".format(
74 checkpoint_period)
75 )
76 if checkpoint_mode == 'periodic' and checkpoint_period is None:
77 checkpoint_period = "00:30:00"
78 self.checkpoint_period = checkpoint_period
79 self.data_management_max_threads = data_management_max_threads
80 self.lazy_errors = lazy_errors
81 self.retries = retries
82 self.run_dir = run_dir
83 self.strategy = strategy
84 self.usage_tracking = usage_tracking
85 self.monitoring_config = monitoring_config
86
87 @property
88 def executors(self):
89 return self._executors
90
91 @executors.setter
92 def executors(self, executors):
93 labels = [e.label for e in executors]
94 duplicates = [e for n, e in enumerate(labels) if e in labels[:n]]
95 if len(duplicates) > 0:
96 raise ConfigurationError('Executors must have unique labels ({})'.format(
97 ', '.join(['label={}'.format(repr(d)) for d in duplicates])))
98 self._executors = executors
99
[end of parsl/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsl/config.py b/parsl/config.py
--- a/parsl/config.py
+++ b/parsl/config.py
@@ -43,7 +43,8 @@
Strategy to use for scaling resources according to workflow needs. Can be 'simple' or `None`. If `None`, dynamic
scaling will be disabled. Default is 'simple'.
usage_tracking : bool, optional
- Enable usage tracking. Default is True.
+ Set this field to True to Opt-in to Parsl's usage tracking system. Parsl only collects minimal, non personally-identifiable,
+ information used for reporting to our funding agencies. Default is False.
"""
def __init__(self,
executors=None,
@@ -57,7 +58,7 @@
run_dir='runinfo',
strategy='simple',
monitoring_config=None,
- usage_tracking=True):
+ usage_tracking=False):
if executors is None:
executors = [ThreadPoolExecutor()]
self.executors = executors
|
{"golden_diff": "diff --git a/parsl/config.py b/parsl/config.py\n--- a/parsl/config.py\n+++ b/parsl/config.py\n@@ -43,7 +43,8 @@\n Strategy to use for scaling resources according to workflow needs. Can be 'simple' or `None`. If `None`, dynamic\n scaling will be disabled. Default is 'simple'.\n usage_tracking : bool, optional\n- Enable usage tracking. Default is True.\n+ Set this field to True to Opt-in to Parsl's usage tracking system. Parsl only collects minimal, non personally-identifiable,\n+ information used for reporting to our funding agencies. Default is False.\n \"\"\"\n def __init__(self,\n executors=None,\n@@ -57,7 +58,7 @@\n run_dir='runinfo',\n strategy='simple',\n monitoring_config=None,\n- usage_tracking=True):\n+ usage_tracking=False):\n if executors is None:\n executors = [ThreadPoolExecutor()]\n self.executors = executors\n", "issue": "Make usage tracking opt-in\n\n", "before_files": [{"content": "import logging\n\nfrom parsl.utils import RepresentationMixin\nfrom parsl.executors.threads import ThreadPoolExecutor\nfrom parsl.dataflow.error import ConfigurationError\n\nlogger = logging.getLogger(__name__)\n\n\nclass Config(RepresentationMixin):\n \"\"\"\n Specification of Parsl configuration options.\n\n Parameters\n ----------\n executors : list of ParslExecutor, optional\n List of executor instances to use. Possible executors include :class:`~parsl.executors.threads.ThreadPoolExecutor`,\n :class:`~parsl.executors.ipp.IPyParallelExecutor`, or :class:`~parsl.executors.swift_t.TurbineExecutor`. Default\n is [:class:`~parsl.executors.threads.ThreadPoolExecutor()`].\n app_cache : bool, optional\n Enable app caching. Default is True.\n checkpoint_files : list of str, optional\n List of paths to checkpoint files. Default is None.\n checkpoint_mode : str, optional\n Checkpoint mode to use, can be 'dfk_exit', 'task_exit', or 'periodic'. If set to\n `None`, checkpointing will be disabled. Default is None.\n checkpoint_period : str, optional\n Time interval (in \"HH:MM:SS\") at which to checkpoint completed tasks. Only has an effect if\n `checkpoint_mode='periodic'`.\n data_management_max_threads : int, optional\n Maximum number of threads to allocate for the data manager to use for managing input and output transfers.\n Default is 10.\n monitoring_config : MonitoringConfig, optional\n The config to use for database monitoring. Default is None which does not log to a database.\n lazy_errors : bool, optional\n If True, errors from task failures will not be raised until `future.result()` is called. Otherwise, they will\n be raised as soon as the task returns. Default is True.\n retries : int, optional\n Set the number of retries in case of failure. Default is 0.\n run_dir : str, optional\n Path to run directory. Default is 'runinfo'.\n strategy : str, optional\n Strategy to use for scaling resources according to workflow needs. Can be 'simple' or `None`. If `None`, dynamic\n scaling will be disabled. Default is 'simple'.\n usage_tracking : bool, optional\n Enable usage tracking. 
Default is True.\n \"\"\"\n def __init__(self,\n executors=None,\n app_cache=True,\n checkpoint_files=None,\n checkpoint_mode=None,\n checkpoint_period=None,\n data_management_max_threads=10,\n lazy_errors=True,\n retries=0,\n run_dir='runinfo',\n strategy='simple',\n monitoring_config=None,\n usage_tracking=True):\n if executors is None:\n executors = [ThreadPoolExecutor()]\n self.executors = executors\n self.app_cache = app_cache\n self.checkpoint_files = checkpoint_files\n self.checkpoint_mode = checkpoint_mode\n if checkpoint_period is not None:\n if checkpoint_mode is None:\n logger.debug('The requested `checkpoint_period={}` will have no effect because `checkpoint_mode=None`'.format(\n checkpoint_period)\n )\n elif checkpoint_mode != 'periodic':\n logger.debug(\"Requested checkpoint period of {} only has an effect with checkpoint_mode='periodic'\".format(\n checkpoint_period)\n )\n if checkpoint_mode == 'periodic' and checkpoint_period is None:\n checkpoint_period = \"00:30:00\"\n self.checkpoint_period = checkpoint_period\n self.data_management_max_threads = data_management_max_threads\n self.lazy_errors = lazy_errors\n self.retries = retries\n self.run_dir = run_dir\n self.strategy = strategy\n self.usage_tracking = usage_tracking\n self.monitoring_config = monitoring_config\n\n @property\n def executors(self):\n return self._executors\n\n @executors.setter\n def executors(self, executors):\n labels = [e.label for e in executors]\n duplicates = [e for n, e in enumerate(labels) if e in labels[:n]]\n if len(duplicates) > 0:\n raise ConfigurationError('Executors must have unique labels ({})'.format(\n ', '.join(['label={}'.format(repr(d)) for d in duplicates])))\n self._executors = executors\n", "path": "parsl/config.py"}]}
| 1,648 | 223 |
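The parsl record above makes usage tracking opt-in by flipping the keyword default from True to False. A minimal sketch of that opt-in default, with a toy class standing in for the record's `Config`:

```python
class ConfigSketch:
    """Toy stand-in for the record's Config class; only the default flip is shown."""

    def __init__(self, usage_tracking=False):  # opt-in: disabled unless requested
        self.usage_tracking = usage_tracking

print(ConfigSketch().usage_tracking)                     # False
print(ConfigSketch(usage_tracking=True).usage_tracking)  # True
```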
gh_patches_debug_28892
|
rasdani/github-patches
|
git_diff
|
jupyterhub__jupyterhub-67
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Install problem
I am trying to install Jupyter Hub on an Ubuntu 14.04 server. I followed exact instructions in the README.
I get this error when running "pip install ."
```
moment#2.7.0 share/jupyter/static/components/moment
running css
error: [Errno 2] No such file or directory
----------------------------------------
Cleaning up...
Command /usr/bin/python -c "import setuptools, tokenize;__file__='/tmp/pip-3k3Avh-build/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-jqPTEE-record/install-record.txt --single-version-externally-managed --compile failed with error code 1 in /tmp/pip-3k3Avh-build
Traceback (most recent call last):
File "/usr/bin/pip", line 9, in <module>
load_entry_point('pip==1.5.4', 'console_scripts', 'pip')()
File "/usr/lib/python2.7/dist-packages/pip/__init__.py", line 185, in main
return command.main(cmd_args)
File "/usr/lib/python2.7/dist-packages/pip/basecommand.py", line 161, in main
text = '\n'.join(complete_log)
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 4: ordinal not in range(128)
```
My pip version is:
```
pip 1.5.4 from /usr/lib/python2.7/dist-packages (python 2.7)
```
and my bower version is
```
1.3.12
```
and my python version is
```
Python 2.7.6
```
Any idea what is going on? What should I try? Thanks in advance!
Matt
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # coding: utf-8
3
4 # Copyright (c) Juptyer Development Team.
5 # Distributed under the terms of the Modified BSD License.
6
7 #-----------------------------------------------------------------------------
8 # Minimal Python version sanity check (from IPython)
9 #-----------------------------------------------------------------------------
10
11 from __future__ import print_function
12
13 import os
14 import sys
15
16 v = sys.version_info
17 if v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):
18 error = "ERROR: IPython requires Python version 2.7 or 3.3 or above."
19 print(error, file=sys.stderr)
20 sys.exit(1)
21
22 PY3 = (sys.version_info[0] >= 3)
23
24 if os.name in ('nt', 'dos'):
25 error = "ERROR: Windows is not supported"
26 print(error, file=sys.stderr)
27
28 # At least we're on the python version we need, move on.
29
30 import os
31
32 from glob import glob
33
34 from distutils.core import setup
35 from subprocess import check_call
36
37 try:
38 execfile
39 except NameError:
40 # py3
41 def execfile(fname, globs, locs=None):
42 locs = locs or globs
43 exec(compile(open(fname).read(), fname, "exec"), globs, locs)
44
45 pjoin = os.path.join
46
47 here = os.path.abspath(os.path.dirname(__file__))
48 share_jupyter = pjoin(here, 'share', 'jupyter')
49 static = pjoin(share_jupyter, 'static')
50
51 #---------------------------------------------------------------------------
52 # Build basic package data, etc.
53 #---------------------------------------------------------------------------
54
55 def get_data_files():
56 """Get data files in share/jupyter"""
57
58 data_files = []
59 ntrim = len(here) + 1
60
61 for (d, dirs, filenames) in os.walk(share_jupyter):
62 data_files.append((
63 d[ntrim:],
64 [ pjoin(d, f) for f in filenames ]
65 ))
66 return data_files
67
68
69 ns = {}
70 execfile(pjoin(here, 'jupyterhub', 'version.py'), ns)
71
72 packages = []
73 for d, _, _ in os.walk('jupyterhub'):
74 if os.path.exists(pjoin(d, '__init__.py')):
75 packages.append(d.replace(os.path.sep, '.'))
76
77 setup_args = dict(
78 name = 'jupyterhub',
79 scripts = glob(pjoin('scripts', '*')),
80 packages = packages,
81 # dummy, so that install_data doesn't get skipped
82 # this will be overridden when bower is run anyway
83 data_files = get_data_files() or ['dummy'],
84 version = ns['__version__'],
85 description = """JupyterHub: A multi-user server for Jupyter notebooks""",
86 long_description = "",
87 author = "Jupyter Development Team",
88 author_email = "[email protected]",
89 url = "http://jupyter.org",
90 license = "BSD",
91 platforms = "Linux, Mac OS X",
92 keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],
93 classifiers = [
94 'Intended Audience :: Developers',
95 'Intended Audience :: Science/Research',
96 'License :: OSI Approved :: BSD License',
97 'Programming Language :: Python',
98 'Programming Language :: Python :: 2',
99 'Programming Language :: Python :: 2.7',
100 'Programming Language :: Python :: 3',
101 'Topic :: System :: Shells',
102 ],
103 )
104
105 #---------------------------------------------------------------------------
106 # custom distutils commands
107 #---------------------------------------------------------------------------
108
109 # imports here, so they are after setuptools import if there was one
110 from distutils.cmd import Command
111 from distutils.command.install import install
112
113 class BaseCommand(Command):
114 """Dumb empty command because Command needs subclasses to override too much"""
115 user_options = []
116
117 def initialize_options(self):
118 pass
119
120 def finalize_options(self):
121 pass
122
123 def get_inputs(self):
124 return []
125
126 def get_outputs(self):
127 return []
128
129
130 class Bower(BaseCommand):
131 description = "fetch static components with bower"
132
133 user_options = []
134
135 def run(self):
136 check_call(['bower', 'install', '--allow-root'])
137 # update data-files in case this created new files
138 self.distribution.data_files = get_data_files()
139
140 class CSS(BaseCommand):
141 description = "compile CSS from LESS"
142
143 user_options = []
144
145 def initialize_options(self):
146 pass
147
148 def finalize_options(self):
149 pass
150
151 def run(self):
152 style_less = pjoin(static, 'less', 'style.less')
153 style_css = pjoin(static, 'css', 'style.min.css')
154 sourcemap = style_css + '.map'
155 check_call([
156 'lessc', '-x', '--verbose',
157 '--source-map-basepath={}'.format(static),
158 '--source-map={}'.format(sourcemap),
159 '--source-map-rootpath=../',
160 style_less, style_css,
161 ])
162 # update data-files in case this created new files
163 self.distribution.data_files = get_data_files()
164
165 # ensure bower is run as part of install
166 install.sub_commands.insert(0, ('bower', None))
167 install.sub_commands.insert(1, ('css', None))
168
169 setup_args['cmdclass'] = {
170 'bower': Bower,
171 'css': CSS,
172 }
173
174
175 # setuptools requirements
176
177 if 'setuptools' in sys.modules:
178 setup_args['zip_safe'] = False
179 from setuptools.command.develop import develop
180 class develop_js_css(develop):
181 def run(self):
182 if not self.uninstall:
183 self.distribution.run_command('bower')
184 self.distribution.run_command('css')
185 develop.run(self)
186 setup_args['cmdclass']['develop'] = develop_js_css
187
188
189 with open('requirements.txt') as f:
190 install_requires = [ line.strip() for line in f.readlines() ]
191 setup_args['install_requires'] = install_requires
192
193 #---------------------------------------------------------------------------
194 # setup
195 #---------------------------------------------------------------------------
196
197 def main():
198 setup(**setup_args)
199
200 if __name__ == '__main__':
201 main()
202
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -133,7 +133,12 @@
user_options = []
def run(self):
- check_call(['bower', 'install', '--allow-root'])
+ try:
+ check_call(['bower', 'install', '--allow-root'])
+ except OSError as e:
+ print("Failed to run bower: %s" % e, file=sys.stderr)
+ print("You can install bower with `npm install -g bower`", file=sys.stderr)
+ raise
# update data-files in case this created new files
self.distribution.data_files = get_data_files()
@@ -152,13 +157,18 @@
style_less = pjoin(static, 'less', 'style.less')
style_css = pjoin(static, 'css', 'style.min.css')
sourcemap = style_css + '.map'
- check_call([
- 'lessc', '-x', '--verbose',
- '--source-map-basepath={}'.format(static),
- '--source-map={}'.format(sourcemap),
- '--source-map-rootpath=../',
- style_less, style_css,
- ])
+ try:
+ check_call([
+ 'lessc', '-x', '--verbose',
+ '--source-map-basepath={}'.format(static),
+ '--source-map={}'.format(sourcemap),
+ '--source-map-rootpath=../',
+ style_less, style_css,
+ ])
+ except OSError as e:
+ print("Failed to run lessc: %s" % e, file=sys.stderr)
+ print("You can install less with `npm install -g less`", file=sys.stderr)
+ raise
# update data-files in case this created new files
self.distribution.data_files = get_data_files()
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -133,7 +133,12 @@\n user_options = []\n \n def run(self):\n- check_call(['bower', 'install', '--allow-root'])\n+ try:\n+ check_call(['bower', 'install', '--allow-root'])\n+ except OSError as e:\n+ print(\"Failed to run bower: %s\" % e, file=sys.stderr)\n+ print(\"You can install bower with `npm install -g bower`\", file=sys.stderr)\n+ raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n \n@@ -152,13 +157,18 @@\n style_less = pjoin(static, 'less', 'style.less')\n style_css = pjoin(static, 'css', 'style.min.css')\n sourcemap = style_css + '.map'\n- check_call([\n- 'lessc', '-x', '--verbose',\n- '--source-map-basepath={}'.format(static),\n- '--source-map={}'.format(sourcemap),\n- '--source-map-rootpath=../',\n- style_less, style_css,\n- ])\n+ try:\n+ check_call([\n+ 'lessc', '-x', '--verbose',\n+ '--source-map-basepath={}'.format(static),\n+ '--source-map={}'.format(sourcemap),\n+ '--source-map-rootpath=../',\n+ style_less, style_css,\n+ ])\n+ except OSError as e:\n+ print(\"Failed to run lessc: %s\" % e, file=sys.stderr)\n+ print(\"You can install less with `npm install -g less`\", file=sys.stderr)\n+ raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n", "issue": "Install problem\nI am trying to install Jupyter Hub on an Ubuntu 14.04 server. I followed exact instructions in the README.\n\nI get this error when running \"pip install .\"\n\n```\nmoment#2.7.0 share/jupyter/static/components/moment\n\nrunning css\n\nerror: [Errno 2] No such file or directory\n\n----------------------------------------\nCleaning up...\nCommand /usr/bin/python -c \"import setuptools, tokenize;__file__='/tmp/pip-3k3Avh-build/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))\" install --record /tmp/pip-jqPTEE-record/install-record.txt --single-version-externally-managed --compile failed with error code 1 in /tmp/pip-3k3Avh-build\nTraceback (most recent call last):\n File \"/usr/bin/pip\", line 9, in <module>\n load_entry_point('pip==1.5.4', 'console_scripts', 'pip')()\n File \"/usr/lib/python2.7/dist-packages/pip/__init__.py\", line 185, in main\n return command.main(cmd_args)\n File \"/usr/lib/python2.7/dist-packages/pip/basecommand.py\", line 161, in main\n text = '\\n'.join(complete_log)\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 4: ordinal not in range(128)\n```\n\nMy pip version is:\n\n```\npip 1.5.4 from /usr/lib/python2.7/dist-packages (python 2.7)\n```\n\nand my bower version is\n\n```\n1.3.12\n```\n\nand my python version is\n\n```\nPython 2.7.6\n```\n\nAny idea what is going on? What should I try? 
Thanks in advance!\nMatt\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding: utf-8\n\n# Copyright (c) Juptyer Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check (from IPython)\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nv = sys.version_info\nif v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):\n error = \"ERROR: IPython requires Python version 2.7 or 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\nPY3 = (sys.version_info[0] >= 3)\n\nif os.name in ('nt', 'dos'):\n error = \"ERROR: Windows is not supported\"\n print(error, file=sys.stderr)\n\n# At least we're on the python version we need, move on.\n\nimport os\n\nfrom glob import glob\n\nfrom distutils.core import setup\nfrom subprocess import check_call\n\ntry:\n execfile\nexcept NameError:\n # py3\n def execfile(fname, globs, locs=None):\n locs = locs or globs\n exec(compile(open(fname).read(), fname, \"exec\"), globs, locs)\n\npjoin = os.path.join\n\nhere = os.path.abspath(os.path.dirname(__file__))\nshare_jupyter = pjoin(here, 'share', 'jupyter')\nstatic = pjoin(share_jupyter, 'static')\n\n#---------------------------------------------------------------------------\n# Build basic package data, etc.\n#---------------------------------------------------------------------------\n\ndef get_data_files():\n \"\"\"Get data files in share/jupyter\"\"\"\n \n data_files = []\n ntrim = len(here) + 1\n \n for (d, dirs, filenames) in os.walk(share_jupyter):\n data_files.append((\n d[ntrim:],\n [ pjoin(d, f) for f in filenames ]\n ))\n return data_files\n\n\nns = {}\nexecfile(pjoin(here, 'jupyterhub', 'version.py'), ns)\n\npackages = []\nfor d, _, _ in os.walk('jupyterhub'):\n if os.path.exists(pjoin(d, '__init__.py')):\n packages.append(d.replace(os.path.sep, '.'))\n\nsetup_args = dict(\n name = 'jupyterhub',\n scripts = glob(pjoin('scripts', '*')),\n packages = packages,\n # dummy, so that install_data doesn't get skipped\n # this will be overridden when bower is run anyway\n data_files = get_data_files() or ['dummy'],\n version = ns['__version__'],\n description = \"\"\"JupyterHub: A multi-user server for Jupyter notebooks\"\"\",\n long_description = \"\",\n author = \"Jupyter Development Team\",\n author_email = \"[email protected]\",\n url = \"http://jupyter.org\",\n license = \"BSD\",\n platforms = \"Linux, Mac OS X\",\n keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Topic :: System :: Shells',\n ],\n)\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n\n# imports here, so they are after setuptools import if there was one\nfrom distutils.cmd import Command\nfrom distutils.command.install import install\n\nclass BaseCommand(Command):\n \"\"\"Dumb empty command because Command needs subclasses to override too much\"\"\"\n user_options = []\n \n def initialize_options(self):\n pass\n \n def finalize_options(self):\n 
pass\n \n def get_inputs(self):\n return []\n \n def get_outputs(self):\n return []\n\n\nclass Bower(BaseCommand):\n description = \"fetch static components with bower\"\n \n user_options = []\n \n def run(self):\n check_call(['bower', 'install', '--allow-root'])\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\nclass CSS(BaseCommand):\n description = \"compile CSS from LESS\"\n \n user_options = []\n \n def initialize_options(self):\n pass\n \n def finalize_options(self):\n pass\n \n def run(self):\n style_less = pjoin(static, 'less', 'style.less')\n style_css = pjoin(static, 'css', 'style.min.css')\n sourcemap = style_css + '.map'\n check_call([\n 'lessc', '-x', '--verbose',\n '--source-map-basepath={}'.format(static),\n '--source-map={}'.format(sourcemap),\n '--source-map-rootpath=../',\n style_less, style_css,\n ])\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\n# ensure bower is run as part of install\ninstall.sub_commands.insert(0, ('bower', None))\ninstall.sub_commands.insert(1, ('css', None))\n\nsetup_args['cmdclass'] = {\n 'bower': Bower,\n 'css': CSS,\n}\n\n\n# setuptools requirements\n\nif 'setuptools' in sys.modules:\n setup_args['zip_safe'] = False\n from setuptools.command.develop import develop\n class develop_js_css(develop):\n def run(self):\n if not self.uninstall:\n self.distribution.run_command('bower')\n self.distribution.run_command('css')\n develop.run(self)\n setup_args['cmdclass']['develop'] = develop_js_css\n\n\n with open('requirements.txt') as f:\n install_requires = [ line.strip() for line in f.readlines() ]\n setup_args['install_requires'] = install_requires\n\n#---------------------------------------------------------------------------\n# setup\n#---------------------------------------------------------------------------\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}]}
| 2,775 | 419 |
gh_patches_debug_14485
|
rasdani/github-patches
|
git_diff
|
numpy__numpy-15938
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Building numpy on AIX (xlc and xlf)
<!-- Please describe the issue in detail here, and fill in the fields below -->
build for AIX is not working.
A couple of issues - maybe all simple to fix - still researching the last one...
First one: numpy/fft/_pocketfft.c does not compile -- resolution: `export CFLAGS=-D_LARGE_FILES`
Second issue: Does not find ld_so_aix - expects, it seems, a symbolic link to the correct directory. e.g., calls `/opt/lib/python3.6/config/ld_so_aix` when the actual path is `/opt/lib/python3.6/config-3.6m/ld_so_aix`
Third issue: same issue as issue #6609 - undefined symbols:
```
ld: 0711-317 ERROR: Undefined symbol: .zungqr_
ld: 0711-317 ERROR: Undefined symbol: .zgeqrf_
ld: 0711-317 ERROR: Undefined symbol: .zgelsd_
ld: 0711-317 ERROR: Undefined symbol: .dorgqr_
ld: 0711-317 ERROR: Undefined symbol: .dgeqrf_
ld: 0711-317 ERROR: Undefined symbol: .dgelsd_
```
The first issues are easy to get around - export CFLAGS and add a symbolic link for the generic path.
For the final issue I am going to guess that there is a macro involved - that is adding an underscore to the function being called - as they do exist in liblapack.a
### Reproducing code example:
`python setup.py build`
<!-- Remove these sections for a feature request -->
### Error message:
More detail:
```
ld: 0711-318 ERROR: Undefined symbols were found.
The following symbols are in error:
Symbol Inpndx TY CL Source-File(Object-File) OR Import-File{Shared-object}
RLD: Address Section Rld-type Referencing Symbol
----------------------------------------------------------------------------------------------
.zungqr_ [124] ER PR numpy/linalg/lapack_litemodule.c(build/temp.AIX.6106.1043.64-3.6/numpy/linalg/lapack_litemodule.o)
000002c0 .text R_RBR [10] <.lapack_lite_zungqr>
.zgeqrf_ [128] ER PR numpy/linalg/lapack_litemodule.c(build/temp.AIX.6106.1043.64-3.6/numpy/linalg/lapack_litemodule.o)
00000454 .text R_RBR [12] <.lapack_lite_zgeqrf>
.zgelsd_ [130] ER PR numpy/linalg/lapack_litemodule.c(build/temp.AIX.6106.1043.64-3.6/numpy/linalg/lapack_litemodule.o)
000006fc .text R_RBR [14] <.lapack_lite_zgelsd>
.dorgqr_ [132] ER PR numpy/linalg/lapack_litemodule.c(build/temp.AIX.6106.1043.64-3.6/numpy/linalg/lapack_litemodule.o)
00000920 .text R_RBR [16] <.lapack_lite_dorgqr>
.dgeqrf_ [134] ER PR numpy/linalg/lapack_litemodule.c(build/temp.AIX.6106.1043.64-3.6/numpy/linalg/lapack_litemodule.o)
00000a94 .text R_RBR [18] <.lapack_lite_dgeqrf>
.dgelsd_ [136] ER PR numpy/linalg/lapack_litemodule.c(build/temp.AIX.6106.1043.64-3.6/numpy/linalg/lapack_litemodule.o)
00000d3c .text R_RBR [20] <.lapack_lite_dgelsd>
ER: The return code is 8.
```
Note: They do exist - just without the trailing underscore (_)
```
(py36) root@x066:[/data/prj/python/numpy-1.17.4]nm -Ae /usr/lib/liblapack.a | egrep "zungqr|zgeqrf|zgelsd|dorgqr|dgeqrf|dgelsd"_ | grep -v " U " | egrep ": \.[a-z]"
/usr/lib/liblapack.a[dgeqrf.o]: .dgeqrf T 0
/usr/lib/liblapack.a[dgeqrfp.o]: .dgeqrfp T 0
/usr/lib/liblapack.a[dorgqr.o]: .dorgqr T 0
/usr/lib/liblapack.a[zgelsd.o]: .zgelsd T 0
/usr/lib/liblapack.a[zgeqrf.o]: .zgeqrf T 0
/usr/lib/liblapack.a[zgeqrfp.o]: .zgeqrfp T 0
/usr/lib/liblapack.a[zungqr.o]: .zungqr T 0
```
* UPDATE: this is resolved by adding -DNO_APPEND_FORTRAN to CFLAGS
* Now I am open to suggestions on where and how to best get these automated in setup.py process.
</issue>
<code>
[start of numpy/fft/setup.py]
1
2 def configuration(parent_package='',top_path=None):
3 from numpy.distutils.misc_util import Configuration
4 config = Configuration('fft', parent_package, top_path)
5
6 config.add_data_dir('tests')
7
8 # Configure pocketfft_internal
9 config.add_extension('_pocketfft_internal',
10 sources=['_pocketfft.c']
11 )
12
13 return config
14
15 if __name__ == '__main__':
16 from numpy.distutils.core import setup
17 setup(configuration=configuration)
18
[end of numpy/fft/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/numpy/fft/setup.py b/numpy/fft/setup.py
--- a/numpy/fft/setup.py
+++ b/numpy/fft/setup.py
@@ -1,3 +1,4 @@
+import sys
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
@@ -5,9 +6,12 @@
config.add_data_dir('tests')
+ # AIX needs to be told to use large file support - at all times
+ defs = [('_LARGE_FILES', None)] if sys.platform[:3] == "aix" else []
# Configure pocketfft_internal
config.add_extension('_pocketfft_internal',
- sources=['_pocketfft.c']
+ sources=['_pocketfft.c'],
+ define_macros=defs,
)
return config
|
{"golden_diff": "diff --git a/numpy/fft/setup.py b/numpy/fft/setup.py\n--- a/numpy/fft/setup.py\n+++ b/numpy/fft/setup.py\n@@ -1,3 +1,4 @@\n+import sys\n \n def configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n@@ -5,9 +6,12 @@\n \n config.add_data_dir('tests')\n \n+ # AIX needs to be told to use large file support - at all times\n+ defs = [('_LARGE_FILES', None)] if sys.platform[:3] == \"aix\" else []\n # Configure pocketfft_internal\n config.add_extension('_pocketfft_internal',\n- sources=['_pocketfft.c']\n+ sources=['_pocketfft.c'],\n+ define_macros=defs,\n )\n \n return config\n", "issue": "Building numpy on AIX (xlc and xlf)\n<!-- Please describe the issue in detail here, and fill in the fields below -->\r\nbuild for AIX is not working.\r\n\r\nA couple of issues - maybe all simple to fix - still researching the last one...\r\n\r\nFirst one: numpy/fft/_pocketfft.c does not compile -- resolution: `export CFLAGS=-D_LARGE_FILES`\r\n\r\nSecond issue: Does not find ld_so_aix - expects, it seems, a symbolic link to the correct directory. e.g., calls `/opt/lib/python3.6/config/ld_so_aix` when the actual path is `/opt/lib/python3.6/config-3.6m/ld_so_aix`\r\n\r\nThird issue: same issue as issue #6609 - undefined symbols:\r\n```\r\nld: 0711-317 ERROR: Undefined symbol: .zungqr_\r\nld: 0711-317 ERROR: Undefined symbol: .zgeqrf_\r\nld: 0711-317 ERROR: Undefined symbol: .zgelsd_\r\nld: 0711-317 ERROR: Undefined symbol: .dorgqr_\r\nld: 0711-317 ERROR: Undefined symbol: .dgeqrf_\r\nld: 0711-317 ERROR: Undefined symbol: .dgelsd_\r\n```\r\n\r\nThe first issues are easy to get around - export CFLAGS and add a symbolic link for the generic path.\r\n\r\nFor the final issue I am going to guess that there is a macro involved - that is adding an underscore to the function being called - as they do exist in liblapack.a\r\n\r\n### Reproducing code example:\r\n\r\n`python setup.py build`\r\n\r\n<!-- Remove these sections for a feature request -->\r\n\r\n### Error message:\r\nMore detail:\r\n```\r\nld: 0711-318 ERROR: Undefined symbols were found.\r\n The following symbols are in error:\r\n Symbol Inpndx TY CL Source-File(Object-File) OR Import-File{Shared-object}\r\n RLD: Address Section Rld-type Referencing Symbol\r\n ----------------------------------------------------------------------------------------------\r\n .zungqr_ [124] ER PR numpy/linalg/lapack_litemodule.c(build/temp.AIX.6106.1043.64-3.6/numpy/linalg/lapack_litemodule.o)\r\n 000002c0 .text R_RBR [10] <.lapack_lite_zungqr>\r\n .zgeqrf_ [128] ER PR numpy/linalg/lapack_litemodule.c(build/temp.AIX.6106.1043.64-3.6/numpy/linalg/lapack_litemodule.o)\r\n 00000454 .text R_RBR [12] <.lapack_lite_zgeqrf>\r\n .zgelsd_ [130] ER PR numpy/linalg/lapack_litemodule.c(build/temp.AIX.6106.1043.64-3.6/numpy/linalg/lapack_litemodule.o)\r\n 000006fc .text R_RBR [14] <.lapack_lite_zgelsd>\r\n .dorgqr_ [132] ER PR numpy/linalg/lapack_litemodule.c(build/temp.AIX.6106.1043.64-3.6/numpy/linalg/lapack_litemodule.o)\r\n 00000920 .text R_RBR [16] <.lapack_lite_dorgqr>\r\n .dgeqrf_ [134] ER PR numpy/linalg/lapack_litemodule.c(build/temp.AIX.6106.1043.64-3.6/numpy/linalg/lapack_litemodule.o)\r\n 00000a94 .text R_RBR [18] <.lapack_lite_dgeqrf>\r\n .dgelsd_ [136] ER PR numpy/linalg/lapack_litemodule.c(build/temp.AIX.6106.1043.64-3.6/numpy/linalg/lapack_litemodule.o)\r\n 00000d3c .text R_RBR [20] <.lapack_lite_dgelsd>\r\nER: The return code is 8.\r\n\r\n\r\n```\r\n\r\nNote: They do exist - just without the trailing underscore 
(_)\r\n\r\n```\r\n(py36) root@x066:[/data/prj/python/numpy-1.17.4]nm -Ae /usr/lib/liblapack.a | egrep \"zungqr|zgeqrf|zgelsd|dorgqr|dgeqrf|dgelsd\"_ | grep -v \" U \" | egrep \": \\.[a-z]\"\r\n/usr/lib/liblapack.a[dgeqrf.o]: .dgeqrf T 0\r\n/usr/lib/liblapack.a[dgeqrfp.o]: .dgeqrfp T 0\r\n/usr/lib/liblapack.a[dorgqr.o]: .dorgqr T 0\r\n/usr/lib/liblapack.a[zgelsd.o]: .zgelsd T 0\r\n/usr/lib/liblapack.a[zgeqrf.o]: .zgeqrf T 0\r\n/usr/lib/liblapack.a[zgeqrfp.o]: .zgeqrfp T 0\r\n/usr/lib/liblapack.a[zungqr.o]: .zungqr T 0\r\n\r\n```\r\n\r\n* UPDATE: this is resolved by adding -DNO_APPEND_FORTRAN to CFLAGS\r\n\r\n* Now I am open to suggestions on where and how to best get these automated in setup.py process.\n", "before_files": [{"content": "\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n config = Configuration('fft', parent_package, top_path)\n\n config.add_data_dir('tests')\n\n # Configure pocketfft_internal\n config.add_extension('_pocketfft_internal',\n sources=['_pocketfft.c']\n )\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(configuration=configuration)\n", "path": "numpy/fft/setup.py"}]}
| 1,955 | 185 |
gh_patches_debug_43207
|
rasdani/github-patches
|
git_diff
|
refuel-ai__autolabel-539
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: Send only needed columns in transforms
**Describe the bug**
Currently, in the webpage transform, even if a subset of columns are specified in the output columns, all columns are sent to the final dataset object.
**To Reproduce**
Run the example_webpage_transform and look at the ds.df
**Expected behavior**
We should just return the columns which are present in the output_columns dictionary.
</issue>
<code>
[start of src/autolabel/transforms/webpage_transform.py]
1 from autolabel.schema import TransformType
2 from autolabel.transforms import BaseTransform
3 from typing import Dict, Any
4 import asyncio
5 import logging
6 import pandas as pd
7 import ssl
8
9 from autolabel.cache import BaseCache
10
11 logger = logging.getLogger(__name__)
12
13 MAX_RETRIES = 3
14 MAX_KEEPALIVE_CONNECTIONS = 20
15 MAX_CONNECTIONS = 100
16 BACKOFF = 2
17 HEADERS = {}
18 HTML_PARSER = "html.parser"
19
20
21 class WebpageTransform(BaseTransform):
22 def __init__(
23 self,
24 cache: BaseCache,
25 output_columns: Dict[str, Any],
26 url_column: str,
27 timeout: int = 5,
28 headers: Dict[str, str] = HEADERS,
29 ) -> None:
30 super().__init__(cache, output_columns)
31 self.url_column = url_column
32 self.headers = headers
33 self.max_retries = MAX_RETRIES
34 try:
35 from bs4 import BeautifulSoup
36 import httpx
37
38 if not headers.get("User-Agent"):
39 from fake_useragent import UserAgent
40
41 headers["User-Agent"] = UserAgent().random
42
43 self.httpx = httpx
44 self.timeout_time = timeout
45 self.timeout = httpx.Timeout(timeout)
46 limits = httpx.Limits(
47 max_keepalive_connections=MAX_KEEPALIVE_CONNECTIONS,
48 max_connections=MAX_CONNECTIONS,
49 keepalive_expiry=timeout,
50 )
51 self.client = httpx.AsyncClient(
52 timeout=self.timeout, limits=limits, follow_redirects=True
53 )
54 self.client_with_no_verify = httpx.AsyncClient(
55 timeout=self.timeout, limits=limits, follow_redirects=True, verify=False
56 )
57 self.beautiful_soup = BeautifulSoup
58 except ImportError:
59 raise ImportError(
60 "BeautifulSoup, httpx and fake_useragent are required to use the webpage transform. Please install them with the following command: pip install beautifulsoup4 httpx fake_useragent"
61 )
62
63 def name(self) -> str:
64 return TransformType.WEBPAGE_TRANSFORM
65
66 @property
67 def output_columns(self) -> Dict[str, Any]:
68 COLUMN_NAMES = [
69 "content_column",
70 "content_in_bytes_column",
71 "soup_column",
72 "metadata_column",
73 ]
74 return {k: self._output_columns.get(k, k) for k in COLUMN_NAMES}
75
76 def _load_metadata(self, url, soup) -> Dict[str, Any]:
77 metadata = {"url": url}
78 if soup.find("title"):
79 metadata["title"] = soup.find("title").get_text()
80 for meta in soup.find_all("meta"):
81 if meta.get("name") and meta.get("content"):
82 metadata[meta.get("name")] = meta.get("content")
83 elif meta.get("property") and meta.get("content"):
84 metadata[meta.get("property")] = meta.get("content")
85 return metadata
86
87 async def _load_url(
88 self, url: str, verify=True, headers=HEADERS, retry_count=0
89 ) -> Dict[str, Any]:
90 if retry_count >= self.max_retries:
91 logger.warning(f"Max retries reached for URL: {url}")
92 return {}
93
94 try:
95 client = self.client
96 if not verify:
97 client = self.client_with_no_verify
98 response = await client.get(url, headers=headers)
99
100 # TODO: Add support for other parsers
101 content_bytes = response.content
102 soup = self.beautiful_soup(content_bytes, HTML_PARSER)
103 return {
104 "content": soup.get_text(),
105 "content_bytes": content_bytes,
106 "soup": soup,
107 "metadata": self._load_metadata(url, soup),
108 }
109 except self.httpx.ConnectTimeout as e:
110 logger.error(f"Timeout when fetching content from URL: {url}")
111 return {}
112 except ssl.SSLCertVerificationError as e:
113 logger.warning(
114 f"SSL verification error when fetching content from URL: {url}, retrying with verify=False"
115 )
116 await asyncio.sleep(BACKOFF**retry_count)
117 return await self._load_url(
118 url, verify=False, headers=headers, retry_count=retry_count + 1
119 )
120 except Exception as e:
121 logger.error(f"Error fetching content from URL: {url}. Exception: {e}")
122 return {}
123
124 async def _apply(self, row: Dict[str, Any]) -> Dict[str, Any]:
125 url = row[self.url_column]
126 url_response_data = {}
127 if pd.isna(url):
128 logger.warning(f"Empty url in row {row}")
129 else:
130 url_response_data = await self._load_url(url)
131
132 transformed_row = {
133 self.output_columns["content_column"]: url_response_data.get("content"),
134 self.output_columns["content_in_bytes_column"]: url_response_data.get(
135 "content_bytes"
136 ),
137 self.output_columns["soup_column"]: url_response_data.get("soup"),
138 self.output_columns["metadata_column"]: url_response_data.get("metadata"),
139 }
140
141 return transformed_row
142
143 def params(self):
144 return {
145 "url_column": self.url_column,
146 "output_columns": self.output_columns,
147 "timeout": self.timeout_time,
148 }
149
[end of src/autolabel/transforms/webpage_transform.py]
[start of src/autolabel/transforms/pdf.py]
1 from typing import List, Dict, Any
2
3 from autolabel.schema import TransformType
4 from autolabel.transforms import BaseTransform
5 from autolabel.cache import BaseCache
6
7
8 class PDFTransform(BaseTransform):
9 def __init__(
10 self,
11 cache: BaseCache,
12 output_columns: Dict[str, Any],
13 file_path_column: str,
14 ocr_enabled: bool = False,
15 page_header: str = "Page {page_num}: {page_content}",
16 page_sep: str = "\n\n",
17 ) -> None:
18 """The output columns for this class should be in the order: [content_column, num_pages_column]"""
19 super().__init__(cache, output_columns)
20 self.file_path_column = file_path_column
21 self.ocr_enabled = ocr_enabled
22 self.page_format = page_header
23 self.page_sep = page_sep
24
25 if self.ocr_enabled:
26 try:
27 from pdf2image import convert_from_path
28 import pytesseract
29
30 self.convert_from_path = convert_from_path
31 self.pytesseract = pytesseract
32 self.pytesseract.get_tesseract_version()
33 except ImportError:
34 raise ImportError(
35 "pdf2image and pytesseract are required to use the pdf transform with ocr. Please install pdf2image and pytesseract with the following command: pip install pdf2image pytesseract"
36 )
37 except EnvironmentError:
38 raise EnvironmentError(
39 "The tesseract engine is required to use the pdf transform with ocr. Please see https://tesseract-ocr.github.io/tessdoc/Installation.html for installation instructions."
40 )
41 else:
42 try:
43 from langchain.document_loaders import PDFPlumberLoader
44
45 self.PDFPlumberLoader = PDFPlumberLoader
46 except ImportError:
47 raise ImportError(
48 "pdfplumber is required to use the pdf transform. Please install pdfplumber with the following command: pip install pdfplumber"
49 )
50
51 @staticmethod
52 def name() -> str:
53 return TransformType.PDF
54
55 @property
56 def output_columns(self) -> Dict[str, Any]:
57 COLUMN_NAMES = [
58 "content_column",
59 "metadata_column",
60 ]
61 return {k: self._output_columns.get(k, k) for k in COLUMN_NAMES}
62
63 def get_page_texts(self, row: Dict[str, Any]) -> List[str]:
64 """This function gets the text from each page of a PDF file.
65 If OCR is enabled, it uses the pdf2image library to convert the PDF into images and then uses
66 pytesseract to convert the images into text. Otherwise, it uses pdfplumber to extract the text.
67
68 Args:
69 row (Dict[str, Any]): The row of data to be transformed.
70
71 Returns:
72 List[str]: A list of strings containing the text from each page of the PDF.
73 """
74 if self.ocr_enabled:
75 pages = self.convert_from_path(row[self.file_path_column])
76 return [self.pytesseract.image_to_string(page) for page in pages]
77 else:
78 loader = self.PDFPlumberLoader(row[self.file_path_column])
79 return [page.page_content for page in loader.load()]
80
81 async def _apply(self, row: Dict[str, Any]) -> Dict[str, Any]:
82 """This function transforms a PDF file into a string of text.
83 The text is formatted according to the page_format and
84 page_sep parameters and returned as a string.
85
86 Args:
87 row (Dict[str, Any]): The row of data to be transformed.
88
89 Returns:
90 Dict[str, Any]: The dict of output columns.
91 """
92 texts = []
93 for idx, text in enumerate(self.get_page_texts(row)):
94 texts.append(self.page_format.format(page_num=idx + 1, page_content=text))
95 output = self.page_sep.join(texts)
96 transformed_row = {
97 self.output_columns["content_column"]: output,
98 self.output_columns["metadata_column"]: {"num_pages": len(texts)},
99 }
100 return transformed_row
101
102 def params(self):
103 return {
104 "file_path_column": self.file_path_column,
105 "ocr_enabled": self.ocr_enabled,
106 "page_header": self.page_format,
107 "page_sep": self.page_sep,
108 "output_columns": self.output_columns,
109 }
110
[end of src/autolabel/transforms/pdf.py]
[start of src/autolabel/transforms/base.py]
1 from abc import ABC, abstractmethod
2 from typing import Dict, Any
3 from autolabel.cache import BaseCache
4 from autolabel.schema import TransformCacheEntry
5
6
7 class BaseTransform(ABC):
8 """Base class for all transforms."""
9
10 TTL_MS = 60 * 60 * 24 * 7 * 1000 # 1 week
11
12 def __init__(self, cache: BaseCache, output_columns: Dict[str, Any]) -> None:
13 """
14 Initialize a transform.
15 Args:
16 cache: A cache object to use for caching the results of this transform.
17 output_columns: A dictionary of output columns. The keys are the names of the output columns as expected by the transform. The values are the column names they should be mapped to in the dataset.
18 """
19 super().__init__()
20 self._output_columns = output_columns
21 self.cache = cache
22
23 @staticmethod
24 @abstractmethod
25 def name() -> str:
26 """
27 Returns the name of the transform.
28 """
29 pass
30
31 @property
32 @abstractmethod
33 def output_columns(self) -> Dict[str, Any]:
34 """
35 Returns a dictionary of output columns. The keys are the names of the output columns
36 as expected by the transform. The values are the column names they should be mapped to in
37 the dataset.
38 """
39 return self._output_columns
40
41 @abstractmethod
42 async def _apply(self, row: Dict[str, Any]) -> Dict[str, Any]:
43 """
44 Applies the transform to the given row.
45 Args:
46 row: A dictionary representing a row in the dataset. The keys are the column names and the values are the column values.
47 Returns:
48 A dictionary representing the transformed row. The keys are the column names and the values are the column values.
49 """
50 pass
51
52 @abstractmethod
53 def params(self) -> Dict[str, Any]:
54 """
55 Returns a dictionary of parameters that can be used to uniquely identify this transform.
56 Returns:
57 A dictionary of parameters that can be used to uniquely identify this transform.
58 """
59 return {}
60
61 async def apply(self, row: Dict[str, Any]) -> Dict[str, Any]:
62 if self.cache is not None:
63 cache_entry = TransformCacheEntry(
64 transform_name=self.name(),
65 transform_params=self.params(),
66 input=row,
67 ttl_ms=self.TTL_MS,
68 )
69 output = self.cache.lookup(cache_entry)
70
71 if output is not None:
72 # Cache hit
73 return output
74
75 output = await self._apply(row)
76
77 if self.cache is not None:
78 cache_entry.output = output
79 self.cache.update(cache_entry)
80
81 return output
82
[end of src/autolabel/transforms/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/autolabel/transforms/base.py b/src/autolabel/transforms/base.py
--- a/src/autolabel/transforms/base.py
+++ b/src/autolabel/transforms/base.py
@@ -29,14 +29,13 @@
pass
@property
- @abstractmethod
def output_columns(self) -> Dict[str, Any]:
"""
Returns a dictionary of output columns. The keys are the names of the output columns
as expected by the transform. The values are the column names they should be mapped to in
the dataset.
"""
- return self._output_columns
+ return {k: self._output_columns.get(k, None) for k in self.COLUMN_NAMES}
@abstractmethod
async def _apply(self, row: Dict[str, Any]) -> Dict[str, Any]:
@@ -79,3 +78,15 @@
self.cache.update(cache_entry)
return output
+
+ def _return_output_row(self, row: Dict[str, Any]) -> Dict[str, Any]:
+ """
+ Returns the output row with the correct column names.
+ Args:
+ row: The output row.
+ Returns:
+ The output row with the correct column names.
+ """
+ # remove null key
+ row.pop(None, None)
+ return row
diff --git a/src/autolabel/transforms/pdf.py b/src/autolabel/transforms/pdf.py
--- a/src/autolabel/transforms/pdf.py
+++ b/src/autolabel/transforms/pdf.py
@@ -6,6 +6,11 @@
class PDFTransform(BaseTransform):
+ COLUMN_NAMES = [
+ "content_column",
+ "metadata_column",
+ ]
+
def __init__(
self,
cache: BaseCache,
@@ -52,14 +57,6 @@
def name() -> str:
return TransformType.PDF
- @property
- def output_columns(self) -> Dict[str, Any]:
- COLUMN_NAMES = [
- "content_column",
- "metadata_column",
- ]
- return {k: self._output_columns.get(k, k) for k in COLUMN_NAMES}
-
def get_page_texts(self, row: Dict[str, Any]) -> List[str]:
"""This function gets the text from each page of a PDF file.
If OCR is enabled, it uses the pdf2image library to convert the PDF into images and then uses
@@ -97,7 +94,7 @@
self.output_columns["content_column"]: output,
self.output_columns["metadata_column"]: {"num_pages": len(texts)},
}
- return transformed_row
+ return self._return_output_row(transformed_row)
def params(self):
return {
diff --git a/src/autolabel/transforms/webpage_transform.py b/src/autolabel/transforms/webpage_transform.py
--- a/src/autolabel/transforms/webpage_transform.py
+++ b/src/autolabel/transforms/webpage_transform.py
@@ -19,6 +19,13 @@
class WebpageTransform(BaseTransform):
+ COLUMN_NAMES = [
+ "content_column",
+ "content_in_bytes_column",
+ "soup_column",
+ "metadata_column",
+ ]
+
def __init__(
self,
cache: BaseCache,
@@ -63,16 +70,6 @@
def name(self) -> str:
return TransformType.WEBPAGE_TRANSFORM
- @property
- def output_columns(self) -> Dict[str, Any]:
- COLUMN_NAMES = [
- "content_column",
- "content_in_bytes_column",
- "soup_column",
- "metadata_column",
- ]
- return {k: self._output_columns.get(k, k) for k in COLUMN_NAMES}
-
def _load_metadata(self, url, soup) -> Dict[str, Any]:
metadata = {"url": url}
if soup.find("title"):
@@ -138,7 +135,7 @@
self.output_columns["metadata_column"]: url_response_data.get("metadata"),
}
- return transformed_row
+ return self._return_output_row(transformed_row)
def params(self):
return {
|
{"golden_diff": "diff --git a/src/autolabel/transforms/base.py b/src/autolabel/transforms/base.py\n--- a/src/autolabel/transforms/base.py\n+++ b/src/autolabel/transforms/base.py\n@@ -29,14 +29,13 @@\n pass\n \n @property\n- @abstractmethod\n def output_columns(self) -> Dict[str, Any]:\n \"\"\"\n Returns a dictionary of output columns. The keys are the names of the output columns\n as expected by the transform. The values are the column names they should be mapped to in\n the dataset.\n \"\"\"\n- return self._output_columns\n+ return {k: self._output_columns.get(k, None) for k in self.COLUMN_NAMES}\n \n @abstractmethod\n async def _apply(self, row: Dict[str, Any]) -> Dict[str, Any]:\n@@ -79,3 +78,15 @@\n self.cache.update(cache_entry)\n \n return output\n+\n+ def _return_output_row(self, row: Dict[str, Any]) -> Dict[str, Any]:\n+ \"\"\"\n+ Returns the output row with the correct column names.\n+ Args:\n+ row: The output row.\n+ Returns:\n+ The output row with the correct column names.\n+ \"\"\"\n+ # remove null key\n+ row.pop(None, None)\n+ return row\ndiff --git a/src/autolabel/transforms/pdf.py b/src/autolabel/transforms/pdf.py\n--- a/src/autolabel/transforms/pdf.py\n+++ b/src/autolabel/transforms/pdf.py\n@@ -6,6 +6,11 @@\n \n \n class PDFTransform(BaseTransform):\n+ COLUMN_NAMES = [\n+ \"content_column\",\n+ \"metadata_column\",\n+ ]\n+\n def __init__(\n self,\n cache: BaseCache,\n@@ -52,14 +57,6 @@\n def name() -> str:\n return TransformType.PDF\n \n- @property\n- def output_columns(self) -> Dict[str, Any]:\n- COLUMN_NAMES = [\n- \"content_column\",\n- \"metadata_column\",\n- ]\n- return {k: self._output_columns.get(k, k) for k in COLUMN_NAMES}\n-\n def get_page_texts(self, row: Dict[str, Any]) -> List[str]:\n \"\"\"This function gets the text from each page of a PDF file.\n If OCR is enabled, it uses the pdf2image library to convert the PDF into images and then uses\n@@ -97,7 +94,7 @@\n self.output_columns[\"content_column\"]: output,\n self.output_columns[\"metadata_column\"]: {\"num_pages\": len(texts)},\n }\n- return transformed_row\n+ return self._return_output_row(transformed_row)\n \n def params(self):\n return {\ndiff --git a/src/autolabel/transforms/webpage_transform.py b/src/autolabel/transforms/webpage_transform.py\n--- a/src/autolabel/transforms/webpage_transform.py\n+++ b/src/autolabel/transforms/webpage_transform.py\n@@ -19,6 +19,13 @@\n \n \n class WebpageTransform(BaseTransform):\n+ COLUMN_NAMES = [\n+ \"content_column\",\n+ \"content_in_bytes_column\",\n+ \"soup_column\",\n+ \"metadata_column\",\n+ ]\n+\n def __init__(\n self,\n cache: BaseCache,\n@@ -63,16 +70,6 @@\n def name(self) -> str:\n return TransformType.WEBPAGE_TRANSFORM\n \n- @property\n- def output_columns(self) -> Dict[str, Any]:\n- COLUMN_NAMES = [\n- \"content_column\",\n- \"content_in_bytes_column\",\n- \"soup_column\",\n- \"metadata_column\",\n- ]\n- return {k: self._output_columns.get(k, k) for k in COLUMN_NAMES}\n-\n def _load_metadata(self, url, soup) -> Dict[str, Any]:\n metadata = {\"url\": url}\n if soup.find(\"title\"):\n@@ -138,7 +135,7 @@\n self.output_columns[\"metadata_column\"]: url_response_data.get(\"metadata\"),\n }\n \n- return transformed_row\n+ return self._return_output_row(transformed_row)\n \n def params(self):\n return {\n", "issue": "[Bug]: Send only needed columns in transforms\n**Describe the bug**\r\nCurrently, in the webpage transform, even if a subset of columns are specified in the output columns, all columns are sent to the final dataset object.\r\n\r\n**To 
Reproduce**\r\nRun the example_webpage_transform and look at the ds.df\r\n\r\n**Expected behavior**\r\nWe should just return the columns which are present in the output_columns dictionary.\r\n\n", "before_files": [{"content": "from autolabel.schema import TransformType\nfrom autolabel.transforms import BaseTransform\nfrom typing import Dict, Any\nimport asyncio\nimport logging\nimport pandas as pd\nimport ssl\n\nfrom autolabel.cache import BaseCache\n\nlogger = logging.getLogger(__name__)\n\nMAX_RETRIES = 3\nMAX_KEEPALIVE_CONNECTIONS = 20\nMAX_CONNECTIONS = 100\nBACKOFF = 2\nHEADERS = {}\nHTML_PARSER = \"html.parser\"\n\n\nclass WebpageTransform(BaseTransform):\n def __init__(\n self,\n cache: BaseCache,\n output_columns: Dict[str, Any],\n url_column: str,\n timeout: int = 5,\n headers: Dict[str, str] = HEADERS,\n ) -> None:\n super().__init__(cache, output_columns)\n self.url_column = url_column\n self.headers = headers\n self.max_retries = MAX_RETRIES\n try:\n from bs4 import BeautifulSoup\n import httpx\n\n if not headers.get(\"User-Agent\"):\n from fake_useragent import UserAgent\n\n headers[\"User-Agent\"] = UserAgent().random\n\n self.httpx = httpx\n self.timeout_time = timeout\n self.timeout = httpx.Timeout(timeout)\n limits = httpx.Limits(\n max_keepalive_connections=MAX_KEEPALIVE_CONNECTIONS,\n max_connections=MAX_CONNECTIONS,\n keepalive_expiry=timeout,\n )\n self.client = httpx.AsyncClient(\n timeout=self.timeout, limits=limits, follow_redirects=True\n )\n self.client_with_no_verify = httpx.AsyncClient(\n timeout=self.timeout, limits=limits, follow_redirects=True, verify=False\n )\n self.beautiful_soup = BeautifulSoup\n except ImportError:\n raise ImportError(\n \"BeautifulSoup, httpx and fake_useragent are required to use the webpage transform. 
Please install them with the following command: pip install beautifulsoup4 httpx fake_useragent\"\n )\n\n def name(self) -> str:\n return TransformType.WEBPAGE_TRANSFORM\n\n @property\n def output_columns(self) -> Dict[str, Any]:\n COLUMN_NAMES = [\n \"content_column\",\n \"content_in_bytes_column\",\n \"soup_column\",\n \"metadata_column\",\n ]\n return {k: self._output_columns.get(k, k) for k in COLUMN_NAMES}\n\n def _load_metadata(self, url, soup) -> Dict[str, Any]:\n metadata = {\"url\": url}\n if soup.find(\"title\"):\n metadata[\"title\"] = soup.find(\"title\").get_text()\n for meta in soup.find_all(\"meta\"):\n if meta.get(\"name\") and meta.get(\"content\"):\n metadata[meta.get(\"name\")] = meta.get(\"content\")\n elif meta.get(\"property\") and meta.get(\"content\"):\n metadata[meta.get(\"property\")] = meta.get(\"content\")\n return metadata\n\n async def _load_url(\n self, url: str, verify=True, headers=HEADERS, retry_count=0\n ) -> Dict[str, Any]:\n if retry_count >= self.max_retries:\n logger.warning(f\"Max retries reached for URL: {url}\")\n return {}\n\n try:\n client = self.client\n if not verify:\n client = self.client_with_no_verify\n response = await client.get(url, headers=headers)\n\n # TODO: Add support for other parsers\n content_bytes = response.content\n soup = self.beautiful_soup(content_bytes, HTML_PARSER)\n return {\n \"content\": soup.get_text(),\n \"content_bytes\": content_bytes,\n \"soup\": soup,\n \"metadata\": self._load_metadata(url, soup),\n }\n except self.httpx.ConnectTimeout as e:\n logger.error(f\"Timeout when fetching content from URL: {url}\")\n return {}\n except ssl.SSLCertVerificationError as e:\n logger.warning(\n f\"SSL verification error when fetching content from URL: {url}, retrying with verify=False\"\n )\n await asyncio.sleep(BACKOFF**retry_count)\n return await self._load_url(\n url, verify=False, headers=headers, retry_count=retry_count + 1\n )\n except Exception as e:\n logger.error(f\"Error fetching content from URL: {url}. 
Exception: {e}\")\n return {}\n\n async def _apply(self, row: Dict[str, Any]) -> Dict[str, Any]:\n url = row[self.url_column]\n url_response_data = {}\n if pd.isna(url):\n logger.warning(f\"Empty url in row {row}\")\n else:\n url_response_data = await self._load_url(url)\n\n transformed_row = {\n self.output_columns[\"content_column\"]: url_response_data.get(\"content\"),\n self.output_columns[\"content_in_bytes_column\"]: url_response_data.get(\n \"content_bytes\"\n ),\n self.output_columns[\"soup_column\"]: url_response_data.get(\"soup\"),\n self.output_columns[\"metadata_column\"]: url_response_data.get(\"metadata\"),\n }\n\n return transformed_row\n\n def params(self):\n return {\n \"url_column\": self.url_column,\n \"output_columns\": self.output_columns,\n \"timeout\": self.timeout_time,\n }\n", "path": "src/autolabel/transforms/webpage_transform.py"}, {"content": "from typing import List, Dict, Any\n\nfrom autolabel.schema import TransformType\nfrom autolabel.transforms import BaseTransform\nfrom autolabel.cache import BaseCache\n\n\nclass PDFTransform(BaseTransform):\n def __init__(\n self,\n cache: BaseCache,\n output_columns: Dict[str, Any],\n file_path_column: str,\n ocr_enabled: bool = False,\n page_header: str = \"Page {page_num}: {page_content}\",\n page_sep: str = \"\\n\\n\",\n ) -> None:\n \"\"\"The output columns for this class should be in the order: [content_column, num_pages_column]\"\"\"\n super().__init__(cache, output_columns)\n self.file_path_column = file_path_column\n self.ocr_enabled = ocr_enabled\n self.page_format = page_header\n self.page_sep = page_sep\n\n if self.ocr_enabled:\n try:\n from pdf2image import convert_from_path\n import pytesseract\n\n self.convert_from_path = convert_from_path\n self.pytesseract = pytesseract\n self.pytesseract.get_tesseract_version()\n except ImportError:\n raise ImportError(\n \"pdf2image and pytesseract are required to use the pdf transform with ocr. Please install pdf2image and pytesseract with the following command: pip install pdf2image pytesseract\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n \"The tesseract engine is required to use the pdf transform with ocr. Please see https://tesseract-ocr.github.io/tessdoc/Installation.html for installation instructions.\"\n )\n else:\n try:\n from langchain.document_loaders import PDFPlumberLoader\n\n self.PDFPlumberLoader = PDFPlumberLoader\n except ImportError:\n raise ImportError(\n \"pdfplumber is required to use the pdf transform. Please install pdfplumber with the following command: pip install pdfplumber\"\n )\n\n @staticmethod\n def name() -> str:\n return TransformType.PDF\n\n @property\n def output_columns(self) -> Dict[str, Any]:\n COLUMN_NAMES = [\n \"content_column\",\n \"metadata_column\",\n ]\n return {k: self._output_columns.get(k, k) for k in COLUMN_NAMES}\n\n def get_page_texts(self, row: Dict[str, Any]) -> List[str]:\n \"\"\"This function gets the text from each page of a PDF file.\n If OCR is enabled, it uses the pdf2image library to convert the PDF into images and then uses\n pytesseract to convert the images into text. 
Otherwise, it uses pdfplumber to extract the text.\n\n Args:\n row (Dict[str, Any]): The row of data to be transformed.\n\n Returns:\n List[str]: A list of strings containing the text from each page of the PDF.\n \"\"\"\n if self.ocr_enabled:\n pages = self.convert_from_path(row[self.file_path_column])\n return [self.pytesseract.image_to_string(page) for page in pages]\n else:\n loader = self.PDFPlumberLoader(row[self.file_path_column])\n return [page.page_content for page in loader.load()]\n\n async def _apply(self, row: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"This function transforms a PDF file into a string of text.\n The text is formatted according to the page_format and\n page_sep parameters and returned as a string.\n\n Args:\n row (Dict[str, Any]): The row of data to be transformed.\n\n Returns:\n Dict[str, Any]: The dict of output columns.\n \"\"\"\n texts = []\n for idx, text in enumerate(self.get_page_texts(row)):\n texts.append(self.page_format.format(page_num=idx + 1, page_content=text))\n output = self.page_sep.join(texts)\n transformed_row = {\n self.output_columns[\"content_column\"]: output,\n self.output_columns[\"metadata_column\"]: {\"num_pages\": len(texts)},\n }\n return transformed_row\n\n def params(self):\n return {\n \"file_path_column\": self.file_path_column,\n \"ocr_enabled\": self.ocr_enabled,\n \"page_header\": self.page_format,\n \"page_sep\": self.page_sep,\n \"output_columns\": self.output_columns,\n }\n", "path": "src/autolabel/transforms/pdf.py"}, {"content": "from abc import ABC, abstractmethod\nfrom typing import Dict, Any\nfrom autolabel.cache import BaseCache\nfrom autolabel.schema import TransformCacheEntry\n\n\nclass BaseTransform(ABC):\n \"\"\"Base class for all transforms.\"\"\"\n\n TTL_MS = 60 * 60 * 24 * 7 * 1000 # 1 week\n\n def __init__(self, cache: BaseCache, output_columns: Dict[str, Any]) -> None:\n \"\"\"\n Initialize a transform.\n Args:\n cache: A cache object to use for caching the results of this transform.\n output_columns: A dictionary of output columns. The keys are the names of the output columns as expected by the transform. The values are the column names they should be mapped to in the dataset.\n \"\"\"\n super().__init__()\n self._output_columns = output_columns\n self.cache = cache\n\n @staticmethod\n @abstractmethod\n def name() -> str:\n \"\"\"\n Returns the name of the transform.\n \"\"\"\n pass\n\n @property\n @abstractmethod\n def output_columns(self) -> Dict[str, Any]:\n \"\"\"\n Returns a dictionary of output columns. The keys are the names of the output columns\n as expected by the transform. The values are the column names they should be mapped to in\n the dataset.\n \"\"\"\n return self._output_columns\n\n @abstractmethod\n async def _apply(self, row: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Applies the transform to the given row.\n Args:\n row: A dictionary representing a row in the dataset. The keys are the column names and the values are the column values.\n Returns:\n A dictionary representing the transformed row. 
The keys are the column names and the values are the column values.\n \"\"\"\n pass\n\n @abstractmethod\n def params(self) -> Dict[str, Any]:\n \"\"\"\n Returns a dictionary of parameters that can be used to uniquely identify this transform.\n Returns:\n A dictionary of parameters that can be used to uniquely identify this transform.\n \"\"\"\n return {}\n\n async def apply(self, row: Dict[str, Any]) -> Dict[str, Any]:\n if self.cache is not None:\n cache_entry = TransformCacheEntry(\n transform_name=self.name(),\n transform_params=self.params(),\n input=row,\n ttl_ms=self.TTL_MS,\n )\n output = self.cache.lookup(cache_entry)\n\n if output is not None:\n # Cache hit\n return output\n\n output = await self._apply(row)\n\n if self.cache is not None:\n cache_entry.output = output\n self.cache.update(cache_entry)\n\n return output\n", "path": "src/autolabel/transforms/base.py"}]}
| 3,993 | 936 |
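The verification record above ends with the `PDFTransform` sources, which extract per-page text either with OCR (pdf2image + pytesseract) or with pdfplumber via LangChain. A minimal standalone sketch of that extraction path is shown below; the file path, the `ocr` flag, and the page-header format are illustrative assumptions rather than values taken from the dataset row.

```python
# Sketch of PDFTransform's page extraction (illustrative only; not the dataset code verbatim).
# Assumes: pip install pdf2image pytesseract pdfplumber langchain, plus a tesseract binary for OCR.
from typing import List


def get_page_texts(file_path: str, ocr: bool = False) -> List[str]:
    if ocr:
        from pdf2image import convert_from_path
        import pytesseract

        # One OCR pass per rendered page image.
        return [pytesseract.image_to_string(page) for page in convert_from_path(file_path)]
    from langchain.document_loaders import PDFPlumberLoader

    return [page.page_content for page in PDFPlumberLoader(file_path).load()]


def render_pdf(file_path: str, ocr: bool = False) -> str:
    # Mirrors the default "Page {page_num}: {page_content}" header and "\n\n" separator.
    pages = get_page_texts(file_path, ocr)
    return "\n\n".join(f"Page {i + 1}: {text}" for i, text in enumerate(pages))
```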
gh_patches_debug_7167
|
rasdani/github-patches
|
git_diff
|
ansible__awx-13071
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CyberArk Conjur Secret Lookup - Versioning Does Not Work
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.
### Bug Summary
When providing a version number on the "Metadata" section of the CyberArk Conjur Secret Lookup External Secret Management System pane when defining a secret to retrieve on a credential, the secret value associated with the version number is not returned during execution of the job. Regardless of the version number given, only the latest secret value is ever returned.
### AWX version
Ansible Automation Platform Controller 4.1.1
### Select the relevant components
- [ ] UI
- [ ] API
- [ ] Docs
- [ ] Collection
- [ ] CLI
- [X] Other
### Installation method
N/A
### Modifications
no
### Ansible version
ansible [core 2.12.2]
### Operating system
Red Hat Enterprise Linux release 8.6 (Ootpa)
### Web browser
_No response_
### Steps to reproduce
1. Configure CyberArk Conjur Secret Lookup.
2. Configure Credential to use External Secret Management System.
3. On Metadata, provide a version number not equal to `null` or `1`.
4. Run a job to execute a playbook that has `debug.msg` print the value to STDOUT.
### Expected results
The value returned should change based on the version being requested by the External Secret Management System metadata.
### Actual results
The latest version of the secret is always returned and versioning is ignored.
### Additional information

</issue>
<code>
[start of awx/main/credential_plugins/conjur.py]
1 from .plugin import CredentialPlugin, CertFiles, raise_for_status
2
3 import base64
4 from urllib.parse import urljoin, quote
5
6 from django.utils.translation import gettext_lazy as _
7 import requests
8
9
10 conjur_inputs = {
11 'fields': [
12 {
13 'id': 'url',
14 'label': _('Conjur URL'),
15 'type': 'string',
16 'format': 'url',
17 },
18 {
19 'id': 'api_key',
20 'label': _('API Key'),
21 'type': 'string',
22 'secret': True,
23 },
24 {
25 'id': 'account',
26 'label': _('Account'),
27 'type': 'string',
28 },
29 {
30 'id': 'username',
31 'label': _('Username'),
32 'type': 'string',
33 },
34 {'id': 'cacert', 'label': _('Public Key Certificate'), 'type': 'string', 'multiline': True},
35 ],
36 'metadata': [
37 {
38 'id': 'secret_path',
39 'label': _('Secret Identifier'),
40 'type': 'string',
41 'help_text': _('The identifier for the secret e.g., /some/identifier'),
42 },
43 {
44 'id': 'secret_version',
45 'label': _('Secret Version'),
46 'type': 'string',
47 'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),
48 },
49 ],
50 'required': ['url', 'api_key', 'account', 'username'],
51 }
52
53
54 def conjur_backend(**kwargs):
55 url = kwargs['url']
56 api_key = kwargs['api_key']
57 account = quote(kwargs['account'], safe='')
58 username = quote(kwargs['username'], safe='')
59 secret_path = quote(kwargs['secret_path'], safe='')
60 version = kwargs.get('secret_version')
61 cacert = kwargs.get('cacert', None)
62
63 auth_kwargs = {
64 'headers': {'Content-Type': 'text/plain'},
65 'data': api_key,
66 'allow_redirects': False,
67 }
68
69 with CertFiles(cacert) as cert:
70 # https://www.conjur.org/api.html#authentication-authenticate-post
71 auth_kwargs['verify'] = cert
72 resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)
73 raise_for_status(resp)
74 token = base64.b64encode(resp.content).decode('utf-8')
75
76 lookup_kwargs = {
77 'headers': {'Authorization': 'Token token="{}"'.format(token)},
78 'allow_redirects': False,
79 }
80
81 # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
82 path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))
83 if version:
84 path = '?'.join([path, version])
85
86 with CertFiles(cacert) as cert:
87 lookup_kwargs['verify'] = cert
88 resp = requests.get(path, timeout=30, **lookup_kwargs)
89 raise_for_status(resp)
90 return resp.text
91
92
93 conjur_plugin = CredentialPlugin('CyberArk Conjur Secret Lookup', inputs=conjur_inputs, backend=conjur_backend)
94
[end of awx/main/credential_plugins/conjur.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/awx/main/credential_plugins/conjur.py b/awx/main/credential_plugins/conjur.py
--- a/awx/main/credential_plugins/conjur.py
+++ b/awx/main/credential_plugins/conjur.py
@@ -81,7 +81,8 @@
# https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))
if version:
- path = '?'.join([path, version])
+ ver = "version={}".format(version)
+ path = '?'.join([path, ver])
with CertFiles(cacert) as cert:
lookup_kwargs['verify'] = cert
|
{"golden_diff": "diff --git a/awx/main/credential_plugins/conjur.py b/awx/main/credential_plugins/conjur.py\n--- a/awx/main/credential_plugins/conjur.py\n+++ b/awx/main/credential_plugins/conjur.py\n@@ -81,7 +81,8 @@\n # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get\n path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))\n if version:\n- path = '?'.join([path, version])\n+ ver = \"version={}\".format(version)\n+ path = '?'.join([path, ver])\n \n with CertFiles(cacert) as cert:\n lookup_kwargs['verify'] = cert\n", "issue": "CyberArk Conjur Secret Lookup - Versioning Does Not Work\n### Please confirm the following\n\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\n\n### Bug Summary\n\nWhen providing a version number on the \"Metadata\" section of the CyberArk Conjur Secret Lookup External Secret Management System pane when defining a secret to retrieve on a credential, the secret value associated with the version number is not returned during execution of the job. Regardless of the version number given, only the latest secret value is ever returned.\n\n### AWX version\n\nAnsible Automation Platform Controller 4.1.1\n\n### Select the relevant components\n\n- [ ] UI\n- [ ] API\n- [ ] Docs\n- [ ] Collection\n- [ ] CLI\n- [X] Other\n\n### Installation method\n\nN/A\n\n### Modifications\n\nno\n\n### Ansible version\n\nansible [core 2.12.2]\n\n### Operating system\n\nRed Hat Enterprise Linux release 8.6 (Ootpa)\n\n### Web browser\n\n_No response_\n\n### Steps to reproduce\n\n1. Configure CyberArk Conjur Secret Lookup.\r\n2. Configure Credential to use External Secret Management System.\r\n3. On Metadata, provide a version number not equal to `null` or `1`.\r\n4. 
Run a job to execute a playbook that has `debug.msg` print the value to STDOUT.\n\n### Expected results\n\nThe value returned should change based on the version being requested by the External Secret Management System metadata.\n\n### Actual results\n\nThe latest version of the secret is always returned and versioning is ignored.\n\n### Additional information\n\n\r\n\n", "before_files": [{"content": "from .plugin import CredentialPlugin, CertFiles, raise_for_status\n\nimport base64\nfrom urllib.parse import urljoin, quote\n\nfrom django.utils.translation import gettext_lazy as _\nimport requests\n\n\nconjur_inputs = {\n 'fields': [\n {\n 'id': 'url',\n 'label': _('Conjur URL'),\n 'type': 'string',\n 'format': 'url',\n },\n {\n 'id': 'api_key',\n 'label': _('API Key'),\n 'type': 'string',\n 'secret': True,\n },\n {\n 'id': 'account',\n 'label': _('Account'),\n 'type': 'string',\n },\n {\n 'id': 'username',\n 'label': _('Username'),\n 'type': 'string',\n },\n {'id': 'cacert', 'label': _('Public Key Certificate'), 'type': 'string', 'multiline': True},\n ],\n 'metadata': [\n {\n 'id': 'secret_path',\n 'label': _('Secret Identifier'),\n 'type': 'string',\n 'help_text': _('The identifier for the secret e.g., /some/identifier'),\n },\n {\n 'id': 'secret_version',\n 'label': _('Secret Version'),\n 'type': 'string',\n 'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),\n },\n ],\n 'required': ['url', 'api_key', 'account', 'username'],\n}\n\n\ndef conjur_backend(**kwargs):\n url = kwargs['url']\n api_key = kwargs['api_key']\n account = quote(kwargs['account'], safe='')\n username = quote(kwargs['username'], safe='')\n secret_path = quote(kwargs['secret_path'], safe='')\n version = kwargs.get('secret_version')\n cacert = kwargs.get('cacert', None)\n\n auth_kwargs = {\n 'headers': {'Content-Type': 'text/plain'},\n 'data': api_key,\n 'allow_redirects': False,\n }\n\n with CertFiles(cacert) as cert:\n # https://www.conjur.org/api.html#authentication-authenticate-post\n auth_kwargs['verify'] = cert\n resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)\n raise_for_status(resp)\n token = base64.b64encode(resp.content).decode('utf-8')\n\n lookup_kwargs = {\n 'headers': {'Authorization': 'Token token=\"{}\"'.format(token)},\n 'allow_redirects': False,\n }\n\n # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get\n path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))\n if version:\n path = '?'.join([path, version])\n\n with CertFiles(cacert) as cert:\n lookup_kwargs['verify'] = cert\n resp = requests.get(path, timeout=30, **lookup_kwargs)\n raise_for_status(resp)\n return resp.text\n\n\nconjur_plugin = CredentialPlugin('CyberArk Conjur Secret Lookup', inputs=conjur_inputs, backend=conjur_backend)\n", "path": "awx/main/credential_plugins/conjur.py"}]}
| 1,876 | 165 |
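The golden diff for this record only changes how the optional secret version is appended to the Conjur lookup URL. A short sketch of the before/after behaviour, using made-up host, account and secret values:

```python
# Illustrative comparison of the URL built before and after the patch above (values are made up).
from urllib.parse import urljoin, quote

url = "https://conjur.example"
account = quote("myorg", safe="")
secret_path = quote("prod/db-password", safe="")
version = "2"

path = urljoin(url, "/".join(["secrets", account, "variable", secret_path]))

before = "?".join([path, version])                      # ...?2 -> Conjur ignores it, latest value returned
after = "?".join([path, "version={}".format(version)])  # ...?version=2 -> requested version returned

print(before)
print(after)
```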
gh_patches_debug_6661
|
rasdani/github-patches
|
git_diff
|
cocotb__cocotb-1928
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Start testing in Python 3.9
Python 3.9.0b1 is released already, and soon there'll be a full release. We should start running CI against it, to catch things like #1910
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 ###############################################################################
3 # Copyright (c) 2013 Potential Ventures Ltd
4 # Copyright (c) 2013 SolarFlare Communications Inc
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 # * Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer.
11 # * Redistributions in binary form must reproduce the above copyright
12 # notice, this list of conditions and the following disclaimer in the
13 # documentation and/or other materials provided with the distribution.
14 # * Neither the name of Potential Ventures Ltd,
15 # SolarFlare Communications Inc nor the
16 # names of its contributors may be used to endorse or promote products
17 # derived from this software without specific prior written permission.
18 #
19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
20 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
23 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 ###############################################################################
30
31 import sys
32 if sys.version_info[:2] < (3, 5):
33 msg = [
34 "This version of cocotb requires at least Python 3.5,",
35 "you are running Python %d.%d.%d." % (
36 sys.version_info[0], sys.version_info[1], sys.version_info[2])
37 ]
38 if sys.version_info[0] == 2:
39 msg += [
40 "If you have Python 3 installed on your machine try ",
41 "using 'python3 -m pip' instead of 'pip' to install cocotb."
42 ]
43 msg += [
44 "For more information please refer to the documentation at ",
45 "https://cocotb.readthedocs.io."
46 ]
47
48 raise SystemExit("\n".join(msg))
49
50 import logging
51 from setuptools import setup
52 from setuptools import find_packages
53 from os import path, walk
54 from io import StringIO
55
56 # Note: cocotb is not installed properly yet and is missing dependencies and binaries
57 # We can still import other files next to setup.py, as long as they're in MANIFEST.in
58 from cocotb_build_libs import get_ext, build_ext
59
60
61 def read_file(fname):
62 with open(path.join(path.dirname(__file__), fname), encoding='utf8') as f:
63 return f.read()
64
65
66 def package_files(directory):
67 paths = []
68 for (fpath, directories, filenames) in walk(directory):
69 for filename in filenames:
70 paths.append(path.join('..', fpath, filename))
71 return paths
72
73
74 # this sets the __version__ variable
75 exec(read_file(path.join('cocotb', '_version.py')))
76
77 # store log from build_libs and display at the end in verbose mode
78 # see https://github.com/pypa/pip/issues/6634
79 log_stream = StringIO()
80 handler = logging.StreamHandler(log_stream)
81 log = logging.getLogger("cocotb._build_libs")
82 log.setLevel(logging.INFO)
83 log.addHandler(handler)
84
85 setup(
86 name='cocotb',
87 cmdclass={'build_ext': build_ext},
88 version=__version__, # noqa: F821
89 description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',
90 url='https://docs.cocotb.org',
91 license='BSD',
92 long_description=read_file('README.md'),
93 long_description_content_type='text/markdown',
94 author='Chris Higgs, Stuart Hodgson',
95 maintainer='cocotb contributors',
96 maintainer_email='[email protected]',
97 install_requires=[],
98 python_requires='>=3.5',
99 packages=find_packages(),
100 include_package_data=True,
101 package_data={'cocotb': package_files('cocotb/share')},
102 ext_modules=get_ext(),
103 entry_points={
104 'console_scripts': [
105 'cocotb-config=cocotb.config:main',
106 ]
107 },
108 platforms='any',
109 classifiers=[
110 "Programming Language :: Python :: 3",
111 "Programming Language :: Python :: 3.5",
112 "Programming Language :: Python :: 3.6",
113 "Programming Language :: Python :: 3.7",
114 "Programming Language :: Python :: 3.8",
115 "License :: OSI Approved :: BSD License",
116 "Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
117 ],
118
119 # these appear in the sidebar on PyPI
120 project_urls={
121 "Bug Tracker": "https://github.com/cocotb/cocotb/issues",
122 "Source Code": "https://github.com/cocotb/cocotb",
123 "Documentation": "https://docs.cocotb.org",
124 },
125 )
126
127 print(log_stream.getvalue())
128
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -112,6 +112,7 @@
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
"License :: OSI Approved :: BSD License",
"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -112,6 +112,7 @@\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: 3.9\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)\",\n ],\n", "issue": "Start testing in Python 3.9\nPython 3.9.0b1 is release already, and soon there'll be a full release. We should start running CI against it, to catch things like #1910\n", "before_files": [{"content": "#!/usr/bin/env python\n###############################################################################\n# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nimport sys\nif sys.version_info[:2] < (3, 5):\n msg = [\n \"This version of cocotb requires at least Python 3.5,\",\n \"you are running Python %d.%d.%d.\" % (\n sys.version_info[0], sys.version_info[1], sys.version_info[2])\n ]\n if sys.version_info[0] == 2:\n msg += [\n \"If you have Python 3 installed on your machine try \",\n \"using 'python3 -m pip' instead of 'pip' to install cocotb.\"\n ]\n msg += [\n \"For more information please refer to the documentation at \",\n \"https://cocotb.readthedocs.io.\"\n ]\n\n raise SystemExit(\"\\n\".join(msg))\n\nimport logging\nfrom setuptools import setup\nfrom setuptools import find_packages\nfrom os import path, walk\nfrom io import StringIO\n\n# Note: cocotb is not installed properly yet and is missing dependencies and binaries\n# We can still import other files next to setup.py, as long as they're in MANIFEST.in\nfrom cocotb_build_libs import get_ext, build_ext\n\n\ndef read_file(fname):\n with open(path.join(path.dirname(__file__), fname), encoding='utf8') as f:\n return f.read()\n\n\ndef package_files(directory):\n paths = []\n for (fpath, directories, filenames) in walk(directory):\n for filename in filenames:\n paths.append(path.join('..', fpath, filename))\n return paths\n\n\n# this sets the __version__ variable\nexec(read_file(path.join('cocotb', '_version.py')))\n\n# store log from build_libs and display at the end in verbose mode\n# see https://github.com/pypa/pip/issues/6634\nlog_stream = StringIO()\nhandler = logging.StreamHandler(log_stream)\nlog = logging.getLogger(\"cocotb._build_libs\")\nlog.setLevel(logging.INFO)\nlog.addHandler(handler)\n\nsetup(\n name='cocotb',\n cmdclass={'build_ext': build_ext},\n version=__version__, # noqa: F821\n description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',\n url='https://docs.cocotb.org',\n license='BSD',\n long_description=read_file('README.md'),\n long_description_content_type='text/markdown',\n author='Chris Higgs, Stuart Hodgson',\n maintainer='cocotb contributors',\n maintainer_email='[email protected]',\n install_requires=[],\n python_requires='>=3.5',\n packages=find_packages(),\n include_package_data=True,\n package_data={'cocotb': package_files('cocotb/share')},\n ext_modules=get_ext(),\n entry_points={\n 'console_scripts': [\n 'cocotb-config=cocotb.config:main',\n ]\n },\n platforms='any',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)\",\n ],\n\n # these appear in the sidebar on PyPI\n project_urls={\n \"Bug Tracker\": \"https://github.com/cocotb/cocotb/issues\",\n \"Source Code\": \"https://github.com/cocotb/cocotb\",\n 
\"Documentation\": \"https://docs.cocotb.org\",\n },\n)\n\nprint(log_stream.getvalue())\n", "path": "setup.py"}]}
| 2,008 | 109 |
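The patch for this record is a one-line trove-classifier addition; the CI matrix itself lives outside setup.py. A hypothetical runtime check that mirrors the advertised version range (not something cocotb ships — its setup.py only enforces the >= 3.5 lower bound) could look like:

```python
# Hypothetical check mirroring the classifiers after the patch; not part of cocotb itself.
import sys

ADVERTISED = {(3, 5), (3, 6), (3, 7), (3, 8), (3, 9)}

if sys.version_info[:2] not in ADVERTISED:
    print(
        "Warning: Python %d.%d is outside the range advertised by cocotb's classifiers."
        % sys.version_info[:2]
    )
```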
gh_patches_debug_31665
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-5879
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Adopt Dependabot
I've only just learned that requires.io has been shut down. A big thank you to @omansion and @atabary for providing such a fantastic service over the years! ❤️
For mitmproxy this probably means we should migrate to Dependabot. This will likely mean a whole lot more PRs; let's see.
</issue>
<code>
[start of setup.py]
1 import os
2 import re
3 from codecs import open
4
5 from setuptools import find_packages
6 from setuptools import setup
7
8 # Based on https://github.com/pypa/sampleproject/blob/main/setup.py
9 # and https://python-packaging-user-guide.readthedocs.org/
10
11 here = os.path.abspath(os.path.dirname(__file__))
12
13 with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
14 long_description = f.read()
15 long_description_content_type = "text/markdown"
16
17 with open(os.path.join(here, "mitmproxy/version.py")) as f:
18 match = re.search(r'VERSION = "(.+?)"', f.read())
19 assert match
20 VERSION = match.group(1)
21
22 setup(
23 name="mitmproxy",
24 version=VERSION,
25 description="An interactive, SSL/TLS-capable intercepting proxy for HTTP/1, HTTP/2, and WebSockets.",
26 long_description=long_description,
27 long_description_content_type=long_description_content_type,
28 url="http://mitmproxy.org",
29 author="Aldo Cortesi",
30 author_email="[email protected]",
31 license="MIT",
32 classifiers=[
33 "License :: OSI Approved :: MIT License",
34 "Development Status :: 5 - Production/Stable",
35 "Environment :: Console :: Curses",
36 "Operating System :: MacOS",
37 "Operating System :: POSIX",
38 "Operating System :: Microsoft :: Windows",
39 "Programming Language :: Python :: 3 :: Only",
40 "Programming Language :: Python :: 3.9",
41 "Programming Language :: Python :: 3.10",
42 "Programming Language :: Python :: 3.11",
43 "Programming Language :: Python :: Implementation :: CPython",
44 "Topic :: Security",
45 "Topic :: Internet :: WWW/HTTP",
46 "Topic :: Internet :: Proxy Servers",
47 "Topic :: System :: Networking :: Monitoring",
48 "Topic :: Software Development :: Testing",
49 "Typing :: Typed",
50 ],
51 project_urls={
52 "Documentation": "https://docs.mitmproxy.org/stable/",
53 "Source": "https://github.com/mitmproxy/mitmproxy/",
54 "Tracker": "https://github.com/mitmproxy/mitmproxy/issues",
55 },
56 packages=find_packages(
57 include=[
58 "mitmproxy",
59 "mitmproxy.*",
60 ]
61 ),
62 include_package_data=True,
63 entry_points={
64 "console_scripts": [
65 "mitmproxy = mitmproxy.tools.main:mitmproxy",
66 "mitmdump = mitmproxy.tools.main:mitmdump",
67 "mitmweb = mitmproxy.tools.main:mitmweb",
68 ],
69 "pyinstaller40": [
70 "hook-dirs = mitmproxy.utils.pyinstaller:hook_dirs",
71 ],
72 },
73 python_requires=">=3.9",
74 # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#install-requires
75 # It is not considered best practice to use install_requires to pin dependencies to specific versions.
76 install_requires=[
77 "aioquic_mitmproxy>=0.9.20,<0.10",
78 "asgiref>=3.2.10,<3.6",
79 "Brotli>=1.0,<1.1",
80 "certifi>=2019.9.11", # no semver here - this should always be on the last release!
81 "cryptography>=38.0,<38.1",
82 "flask>=1.1.1,<2.3",
83 "h11>=0.11,<0.15",
84 "h2>=4.1,<5",
85 "hyperframe>=6.0,<7",
86 "kaitaistruct>=0.10,<0.11",
87 "ldap3>=2.8,<2.10",
88 "mitmproxy_wireguard>=0.1.6,<0.2",
89 "msgpack>=1.0.0, <1.1.0",
90 "passlib>=1.6.5, <1.8",
91 "protobuf>=3.14,<5",
92 "pyOpenSSL>=22.1,<22.2",
93 "pyparsing>=2.4.2,<3.1",
94 "pyperclip>=1.6.0,<1.9",
95 "ruamel.yaml>=0.16,<0.18",
96 "sortedcontainers>=2.3,<2.5",
97 "tornado>=6.2,<7",
98 "urwid-mitmproxy>=2.1.1,<2.2",
99 "wsproto>=1.0,<1.3",
100 "publicsuffix2>=2.20190812,<3",
101 "zstandard>=0.11,<0.20",
102 "typing-extensions>=4.3,<4.5; python_version<'3.10'",
103 ],
104 extras_require={
105 ':sys_platform == "win32"': [
106 "pydivert>=2.0.3,<2.2",
107 ],
108 "dev": [
109 "click>=7.0,<8.2",
110 "hypothesis>=5.8,<7",
111 "parver>=0.1,<2.0",
112 "pdoc>=4.0.0",
113 "pyinstaller==5.6.2",
114 "pytest-asyncio>=0.17,<0.21",
115 "pytest-cov>=2.7.1,<4.1",
116 "pytest-timeout>=1.3.3,<2.2",
117 "pytest-xdist>=2.1.0,<3.1",
118 "pytest>=6.1.0,<8",
119 "requests>=2.9.1,<3",
120 "tox>=3.5,<4",
121 "wheel>=0.36.2,<0.39",
122 ],
123 },
124 )
125
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -75,10 +75,10 @@
# It is not considered best practice to use install_requires to pin dependencies to specific versions.
install_requires=[
"aioquic_mitmproxy>=0.9.20,<0.10",
- "asgiref>=3.2.10,<3.6",
+ "asgiref>=3.2.10,<3.7",
"Brotli>=1.0,<1.1",
"certifi>=2019.9.11", # no semver here - this should always be on the last release!
- "cryptography>=38.0,<38.1",
+ "cryptography>=38.0,<39.1",
"flask>=1.1.1,<2.3",
"h11>=0.11,<0.15",
"h2>=4.1,<5",
@@ -89,7 +89,7 @@
"msgpack>=1.0.0, <1.1.0",
"passlib>=1.6.5, <1.8",
"protobuf>=3.14,<5",
- "pyOpenSSL>=22.1,<22.2",
+ "pyOpenSSL>=22.1,<23.1",
"pyparsing>=2.4.2,<3.1",
"pyperclip>=1.6.0,<1.9",
"ruamel.yaml>=0.16,<0.18",
@@ -110,14 +110,14 @@
"hypothesis>=5.8,<7",
"parver>=0.1,<2.0",
"pdoc>=4.0.0",
- "pyinstaller==5.6.2",
+ "pyinstaller==5.7.0",
"pytest-asyncio>=0.17,<0.21",
"pytest-cov>=2.7.1,<4.1",
"pytest-timeout>=1.3.3,<2.2",
- "pytest-xdist>=2.1.0,<3.1",
+ "pytest-xdist>=2.1.0,<3.2",
"pytest>=6.1.0,<8",
"requests>=2.9.1,<3",
- "tox>=3.5,<4",
+ "tox>=3.5,<5",
"wheel>=0.36.2,<0.39",
],
},
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -75,10 +75,10 @@\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"aioquic_mitmproxy>=0.9.20,<0.10\",\n- \"asgiref>=3.2.10,<3.6\",\n+ \"asgiref>=3.2.10,<3.7\",\n \"Brotli>=1.0,<1.1\",\n \"certifi>=2019.9.11\", # no semver here - this should always be on the last release!\n- \"cryptography>=38.0,<38.1\",\n+ \"cryptography>=38.0,<39.1\",\n \"flask>=1.1.1,<2.3\",\n \"h11>=0.11,<0.15\",\n \"h2>=4.1,<5\",\n@@ -89,7 +89,7 @@\n \"msgpack>=1.0.0, <1.1.0\",\n \"passlib>=1.6.5, <1.8\",\n \"protobuf>=3.14,<5\",\n- \"pyOpenSSL>=22.1,<22.2\",\n+ \"pyOpenSSL>=22.1,<23.1\",\n \"pyparsing>=2.4.2,<3.1\",\n \"pyperclip>=1.6.0,<1.9\",\n \"ruamel.yaml>=0.16,<0.18\",\n@@ -110,14 +110,14 @@\n \"hypothesis>=5.8,<7\",\n \"parver>=0.1,<2.0\",\n \"pdoc>=4.0.0\",\n- \"pyinstaller==5.6.2\",\n+ \"pyinstaller==5.7.0\",\n \"pytest-asyncio>=0.17,<0.21\",\n \"pytest-cov>=2.7.1,<4.1\",\n \"pytest-timeout>=1.3.3,<2.2\",\n- \"pytest-xdist>=2.1.0,<3.1\",\n+ \"pytest-xdist>=2.1.0,<3.2\",\n \"pytest>=6.1.0,<8\",\n \"requests>=2.9.1,<3\",\n- \"tox>=3.5,<4\",\n+ \"tox>=3.5,<5\",\n \"wheel>=0.36.2,<0.39\",\n ],\n },\n", "issue": "Adopt Dependabot\nI've only just learned that requires.io has been shut down. A big thank you to @omansion and @atabary for providing such a fantastic service over the years! \u2764\ufe0f\r\n\r\nFor mitmproxy this probably means we should migrate to Dependabot. This will probably mean a whole lot more PRs, let's see.\n", "before_files": [{"content": "import os\nimport re\nfrom codecs import open\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Based on https://github.com/pypa/sampleproject/blob/main/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\nlong_description_content_type = \"text/markdown\"\n\nwith open(os.path.join(here, \"mitmproxy/version.py\")) as f:\n match = re.search(r'VERSION = \"(.+?)\"', f.read())\n assert match\n VERSION = match.group(1)\n\nsetup(\n name=\"mitmproxy\",\n version=VERSION,\n description=\"An interactive, SSL/TLS-capable intercepting proxy for HTTP/1, HTTP/2, and WebSockets.\",\n long_description=long_description,\n long_description_content_type=long_description_content_type,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Security\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: System :: Networking :: Monitoring\",\n \"Topic :: Software Development :: Testing\",\n \"Typing :: Typed\",\n ],\n project_urls={\n \"Documentation\": \"https://docs.mitmproxy.org/stable/\",\n \"Source\": \"https://github.com/mitmproxy/mitmproxy/\",\n \"Tracker\": 
\"https://github.com/mitmproxy/mitmproxy/issues\",\n },\n packages=find_packages(\n include=[\n \"mitmproxy\",\n \"mitmproxy.*\",\n ]\n ),\n include_package_data=True,\n entry_points={\n \"console_scripts\": [\n \"mitmproxy = mitmproxy.tools.main:mitmproxy\",\n \"mitmdump = mitmproxy.tools.main:mitmdump\",\n \"mitmweb = mitmproxy.tools.main:mitmweb\",\n ],\n \"pyinstaller40\": [\n \"hook-dirs = mitmproxy.utils.pyinstaller:hook_dirs\",\n ],\n },\n python_requires=\">=3.9\",\n # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"aioquic_mitmproxy>=0.9.20,<0.10\",\n \"asgiref>=3.2.10,<3.6\",\n \"Brotli>=1.0,<1.1\",\n \"certifi>=2019.9.11\", # no semver here - this should always be on the last release!\n \"cryptography>=38.0,<38.1\",\n \"flask>=1.1.1,<2.3\",\n \"h11>=0.11,<0.15\",\n \"h2>=4.1,<5\",\n \"hyperframe>=6.0,<7\",\n \"kaitaistruct>=0.10,<0.11\",\n \"ldap3>=2.8,<2.10\",\n \"mitmproxy_wireguard>=0.1.6,<0.2\",\n \"msgpack>=1.0.0, <1.1.0\",\n \"passlib>=1.6.5, <1.8\",\n \"protobuf>=3.14,<5\",\n \"pyOpenSSL>=22.1,<22.2\",\n \"pyparsing>=2.4.2,<3.1\",\n \"pyperclip>=1.6.0,<1.9\",\n \"ruamel.yaml>=0.16,<0.18\",\n \"sortedcontainers>=2.3,<2.5\",\n \"tornado>=6.2,<7\",\n \"urwid-mitmproxy>=2.1.1,<2.2\",\n \"wsproto>=1.0,<1.3\",\n \"publicsuffix2>=2.20190812,<3\",\n \"zstandard>=0.11,<0.20\",\n \"typing-extensions>=4.3,<4.5; python_version<'3.10'\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=2.0.3,<2.2\",\n ],\n \"dev\": [\n \"click>=7.0,<8.2\",\n \"hypothesis>=5.8,<7\",\n \"parver>=0.1,<2.0\",\n \"pdoc>=4.0.0\",\n \"pyinstaller==5.6.2\",\n \"pytest-asyncio>=0.17,<0.21\",\n \"pytest-cov>=2.7.1,<4.1\",\n \"pytest-timeout>=1.3.3,<2.2\",\n \"pytest-xdist>=2.1.0,<3.1\",\n \"pytest>=6.1.0,<8\",\n \"requests>=2.9.1,<3\",\n \"tox>=3.5,<4\",\n \"wheel>=0.36.2,<0.39\",\n ],\n },\n)\n", "path": "setup.py"}]}
| 2,164 | 602 |
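The patch for this record only relaxes or bumps a handful of dependency pins in `install_requires`. A small sketch of how the new specifiers could be checked against an installed environment is shown below; it relies on the third-party `packaging` library, which is an assumption of this example rather than something the patch adds.

```python
# Illustrative only: verify a few of the relaxed pins from the patched setup.py
# against the currently installed packages. Requires: pip install packaging
from importlib.metadata import PackageNotFoundError, version

from packaging.specifiers import SpecifierSet

PINS = {
    "cryptography": ">=38.0,<39.1",
    "pyOpenSSL": ">=22.1,<23.1",
    "asgiref": ">=3.2.10,<3.7",
}

for name, spec in PINS.items():
    try:
        installed = version(name)
    except PackageNotFoundError:
        print(f"{name}: not installed")
        continue
    print(f"{name} {installed} satisfies '{spec}': {installed in SpecifierSet(spec)}")
```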
gh_patches_debug_51251
|
rasdani/github-patches
|
git_diff
|
conda__conda-6470
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
conda 4.4 rc2: failure in conda list when editable install has been moved
`conda list` for my root environment is working nicely using conda 4.3, but when upgrading with canary to conda 4.4.0 rc2, the `conda list` command fails with the following error:
```
joris@joris-XPS-13-9350:~/scipy$ conda list
# packages in environment at /home/joris/miniconda3:
#
`$ /home/joris/miniconda3/bin/conda list`
Traceback (most recent call last):
File "/home/joris/miniconda3/lib/python3.5/site-packages/conda/exceptions.py", line 683, in __call__
return func(*args, **kwargs)
File "/home/joris/miniconda3/lib/python3.5/site-packages/conda/cli/main.py", line 78, in _main
exit_code = do_call(args, p)
File "/home/joris/miniconda3/lib/python3.5/site-packages/conda/cli/conda_argparse.py", line 75, in do_call
exit_code = getattr(module, func_name)(args, parser)
File "/home/joris/miniconda3/lib/python3.5/site-packages/conda/cli/main_list.py", line 150, in execute
show_channel_urls=context.show_channel_urls)
File "/home/joris/miniconda3/lib/python3.5/site-packages/conda/cli/main_list.py", line 85, in print_packages
other_python = get_egg_info(prefix)
File "/home/joris/miniconda3/lib/python3.5/site-packages/conda/egg_info.py", line 86, in get_egg_info
for path in get_egg_info_files(join(prefix, sp_dir)):
File "/home/joris/miniconda3/lib/python3.5/site-packages/conda/egg_info.py", line 35, in get_egg_info_files
for egg in get_egg_info_files(reader.readline().strip()):
File "/home/joris/miniconda3/lib/python3.5/site-packages/conda/egg_info.py", line 32, in get_egg_info_files
for fn in os.listdir(sp_dir):
FileNotFoundError: [Errno 2] No such file or directory: '/home/joris/scipy/dateutil'
```
The reason for this is that I once did `pip install -e .` in the '/home/joris/scipy/dateutil' directory to test out a dev install of dateutil. But later I removed it. Until now it was not a problem, but now it is starting to give problems with conda 4.4.
I know I can fix this by removing it manually from easy-install.pth, but IMO it could be handled more gracefully by conda.
</issue>
<code>
[start of conda/egg_info.py]
1 """
2 Functions related to core conda functionality that relates to manually
3 installed Python packages, e.g. using "python setup.py install", or "pip".
4 """
5 from __future__ import absolute_import, division, print_function, unicode_literals
6
7 from io import open
8 import os
9 from os.path import isdir, isfile, join
10 import re
11 import sys
12
13 from .common.compat import itervalues, on_win
14 from .core.linked_data import linked_data
15 from .misc import rel_path
16 from .models.dist import Dist
17
18
19 def get_site_packages_dir(installed_pkgs):
20 for info in itervalues(installed_pkgs):
21 if info['name'] == 'python':
22 if on_win:
23 stdlib_dir = 'Lib'
24 else:
25 py_ver = info['version'][:3]
26 stdlib_dir = 'lib/python%s' % py_ver
27 return join(stdlib_dir, 'site-packages')
28 return None
29
30
31 def get_egg_info_files(sp_dir):
32 for fn in os.listdir(sp_dir):
33 if fn.endswith('.egg-link'):
34 with open(join(sp_dir, fn), 'r') as reader:
35 for egg in get_egg_info_files(reader.readline().strip()):
36 yield egg
37 if not fn.endswith(('.egg', '.egg-info', '.dist-info')):
38 continue
39 path = join(sp_dir, fn)
40 if isfile(path):
41 yield path
42 elif isdir(path):
43 for path2 in [join(path, 'PKG-INFO'),
44 join(path, 'EGG-INFO', 'PKG-INFO'),
45 join(path, 'METADATA')]:
46 if isfile(path2):
47 yield path2
48
49
50 pat = re.compile(r'(\w+):\s*(\S+)', re.I)
51 def parse_egg_info(path):
52 """
53 Parse an .egg-info file and return its canonical distribution name
54 """
55 info = {}
56 for line in open(path, encoding='utf-8'):
57 line = line.strip()
58 m = pat.match(line)
59 if m:
60 key = m.group(1).lower()
61 info[key] = m.group(2)
62 try:
63 return '%(name)s-%(version)s-<pip>' % info
64 except KeyError:
65 pass
66 return None
67
68
69 def get_egg_info(prefix, all_pkgs=False):
70 """
71 Return a set of canonical names of all Python packages (in `prefix`),
72 by inspecting the .egg-info files inside site-packages.
73 By default, only untracked (not conda installed) .egg-info files are
74 considered. Setting `all_pkgs` to True changes this.
75 """
76 installed_pkgs = linked_data(prefix)
77 sp_dir = get_site_packages_dir(installed_pkgs)
78 if sp_dir is None:
79 return set()
80
81 conda_files = set()
82 for info in itervalues(installed_pkgs):
83 conda_files.update(info.get('files', []))
84
85 res = set()
86 for path in get_egg_info_files(join(prefix, sp_dir)):
87 f = rel_path(prefix, path)
88 if all_pkgs or f not in conda_files:
89 try:
90 dist = parse_egg_info(path)
91 except UnicodeDecodeError:
92 dist = None
93 if dist:
94 res.add(Dist(dist))
95 return res
96
97
98 if __name__ == '__main__':
99 from pprint import pprint
100 pprint(get_egg_info(sys.prefix))
101
[end of conda/egg_info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conda/egg_info.py b/conda/egg_info.py
--- a/conda/egg_info.py
+++ b/conda/egg_info.py
@@ -75,7 +75,7 @@
"""
installed_pkgs = linked_data(prefix)
sp_dir = get_site_packages_dir(installed_pkgs)
- if sp_dir is None:
+ if sp_dir is None or not isdir(join(prefix, sp_dir)):
return set()
conda_files = set()
|
{"golden_diff": "diff --git a/conda/egg_info.py b/conda/egg_info.py\n--- a/conda/egg_info.py\n+++ b/conda/egg_info.py\n@@ -75,7 +75,7 @@\n \"\"\"\n installed_pkgs = linked_data(prefix)\n sp_dir = get_site_packages_dir(installed_pkgs)\n- if sp_dir is None:\n+ if sp_dir is None or not isdir(join(prefix, sp_dir)):\n return set()\n \n conda_files = set()\n", "issue": "conda 4.4 rc2: failure in conda list when editable install has been moved\n`conda list` for my root environment is working nicely using conda 4.3, but when upgrading with canary to conda 4.4.0 rc2, the `conda list` command fails with the following error:\r\n\r\n```\r\njoris@joris-XPS-13-9350:~/scipy$ conda list\r\n# packages in environment at /home/joris/miniconda3:\r\n#\r\n\r\n`$ /home/joris/miniconda3/bin/conda list`\r\n\r\n Traceback (most recent call last):\r\n File \"/home/joris/miniconda3/lib/python3.5/site-packages/conda/exceptions.py\", line 683, in __call__\r\n return func(*args, **kwargs)\r\n File \"/home/joris/miniconda3/lib/python3.5/site-packages/conda/cli/main.py\", line 78, in _main\r\n exit_code = do_call(args, p)\r\n File \"/home/joris/miniconda3/lib/python3.5/site-packages/conda/cli/conda_argparse.py\", line 75, in do_call\r\n exit_code = getattr(module, func_name)(args, parser)\r\n File \"/home/joris/miniconda3/lib/python3.5/site-packages/conda/cli/main_list.py\", line 150, in execute\r\n show_channel_urls=context.show_channel_urls)\r\n File \"/home/joris/miniconda3/lib/python3.5/site-packages/conda/cli/main_list.py\", line 85, in print_packages\r\n other_python = get_egg_info(prefix)\r\n File \"/home/joris/miniconda3/lib/python3.5/site-packages/conda/egg_info.py\", line 86, in get_egg_info\r\n for path in get_egg_info_files(join(prefix, sp_dir)):\r\n File \"/home/joris/miniconda3/lib/python3.5/site-packages/conda/egg_info.py\", line 35, in get_egg_info_files\r\n for egg in get_egg_info_files(reader.readline().strip()):\r\n File \"/home/joris/miniconda3/lib/python3.5/site-packages/conda/egg_info.py\", line 32, in get_egg_info_files\r\n for fn in os.listdir(sp_dir):\r\n FileNotFoundError: [Errno 2] No such file or directory: '/home/joris/scipy/dateutil'\r\n```\r\n\r\nThe reason for this is that I once did `pip install -e .` in the '/home/joris/scipy/dateutil' directory to test out a dev install of dateutil. But later I removed it. Until now it was not a problem, but so now it is starting to give problems with conda 4.4. \r\nI know I can fix this by removing it manually from easy-install.pth, but IMO it could be handles more gracefully by conda.\n", "before_files": [{"content": "\"\"\"\nFunctions related to core conda functionality that relates to manually\ninstalled Python packages, e.g. 
using \"python setup.py install\", or \"pip\".\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom io import open\nimport os\nfrom os.path import isdir, isfile, join\nimport re\nimport sys\n\nfrom .common.compat import itervalues, on_win\nfrom .core.linked_data import linked_data\nfrom .misc import rel_path\nfrom .models.dist import Dist\n\n\ndef get_site_packages_dir(installed_pkgs):\n for info in itervalues(installed_pkgs):\n if info['name'] == 'python':\n if on_win:\n stdlib_dir = 'Lib'\n else:\n py_ver = info['version'][:3]\n stdlib_dir = 'lib/python%s' % py_ver\n return join(stdlib_dir, 'site-packages')\n return None\n\n\ndef get_egg_info_files(sp_dir):\n for fn in os.listdir(sp_dir):\n if fn.endswith('.egg-link'):\n with open(join(sp_dir, fn), 'r') as reader:\n for egg in get_egg_info_files(reader.readline().strip()):\n yield egg\n if not fn.endswith(('.egg', '.egg-info', '.dist-info')):\n continue\n path = join(sp_dir, fn)\n if isfile(path):\n yield path\n elif isdir(path):\n for path2 in [join(path, 'PKG-INFO'),\n join(path, 'EGG-INFO', 'PKG-INFO'),\n join(path, 'METADATA')]:\n if isfile(path2):\n yield path2\n\n\npat = re.compile(r'(\\w+):\\s*(\\S+)', re.I)\ndef parse_egg_info(path):\n \"\"\"\n Parse an .egg-info file and return its canonical distribution name\n \"\"\"\n info = {}\n for line in open(path, encoding='utf-8'):\n line = line.strip()\n m = pat.match(line)\n if m:\n key = m.group(1).lower()\n info[key] = m.group(2)\n try:\n return '%(name)s-%(version)s-<pip>' % info\n except KeyError:\n pass\n return None\n\n\ndef get_egg_info(prefix, all_pkgs=False):\n \"\"\"\n Return a set of canonical names of all Python packages (in `prefix`),\n by inspecting the .egg-info files inside site-packages.\n By default, only untracked (not conda installed) .egg-info files are\n considered. Setting `all_pkgs` to True changes this.\n \"\"\"\n installed_pkgs = linked_data(prefix)\n sp_dir = get_site_packages_dir(installed_pkgs)\n if sp_dir is None:\n return set()\n\n conda_files = set()\n for info in itervalues(installed_pkgs):\n conda_files.update(info.get('files', []))\n\n res = set()\n for path in get_egg_info_files(join(prefix, sp_dir)):\n f = rel_path(prefix, path)\n if all_pkgs or f not in conda_files:\n try:\n dist = parse_egg_info(path)\n except UnicodeDecodeError:\n dist = None\n if dist:\n res.add(Dist(dist))\n return res\n\n\nif __name__ == '__main__':\n from pprint import pprint\n pprint(get_egg_info(sys.prefix))\n", "path": "conda/egg_info.py"}]}
| 2,097 | 109 |
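The shipped fix above guards the site-packages directory itself, while the traceback in the issue comes from an `.egg-link` whose target (an editable install) was later deleted. A defensive variant of the walker that also tolerates stale editable installs, written as a sketch rather than the actual conda code:

```python
# Sketch only: like conda's get_egg_info_files, but a missing directory (for example a
# deleted editable install referenced by an .egg-link) is silently skipped.
import os
from os.path import isdir, isfile, join


def get_egg_info_files(sp_dir):
    if not isdir(sp_dir):  # same spirit as the one-line patch above
        return
    for fn in os.listdir(sp_dir):
        if fn.endswith('.egg-link'):
            with open(join(sp_dir, fn)) as reader:
                target = reader.readline().strip()
            yield from get_egg_info_files(target)  # now a no-op for stale targets
            continue
        if not fn.endswith(('.egg', '.egg-info', '.dist-info')):
            continue
        path = join(sp_dir, fn)
        if isfile(path):
            yield path
        elif isdir(path):
            for candidate in ('PKG-INFO', join('EGG-INFO', 'PKG-INFO'), 'METADATA'):
                if isfile(join(path, candidate)):
                    yield join(path, candidate)
```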
gh_patches_debug_12347
|
rasdani/github-patches
|
git_diff
|
GeotrekCE__Geotrek-admin-3405
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
API V2 cache problem with custom configuration in nginx.conf.in
API v2 responses don't take the [```X-FORWARDED-PROTO```](https://github.com/GeotrekCE/Geotrek-admin/blob/master/conf/nginx.conf.in#L55) value defined in the nginx configuration into account.
This header is used by Django to know whether a generated URL (pictogram, attachment) is http or https.
In some cases, such as a custom configuration in nginx.conf.in to work behind haproxy, the API response does not match the headers.
For the moment, any change to this header in the nginx template has to be followed by clearing the api_v2 cache in the admin panel.
</issue>
<code>
[start of geotrek/api/v2/viewsets.py]
1 from hashlib import md5
2
3 from django.conf import settings
4 from django_filters.rest_framework.backends import DjangoFilterBackend
5 from mapentity.renderers import GeoJSONRenderer
6 from rest_framework import viewsets, renderers
7 from rest_framework.authentication import BasicAuthentication, SessionAuthentication
8 from rest_framework.generics import get_object_or_404
9 from rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAuthenticated
10
11 from geotrek.api.v2 import pagination as api_pagination, filters as api_filters
12 from geotrek.api.v2.cache import RetrieveCacheResponseMixin
13 from geotrek.api.v2.serializers import override_serializer
14
15
16 class GeotrekViewSet(RetrieveCacheResponseMixin, viewsets.ReadOnlyModelViewSet):
17 filter_backends = (
18 DjangoFilterBackend,
19 api_filters.GeotrekQueryParamsFilter,
20 api_filters.GeotrekPublishedFilter,
21 )
22 pagination_class = api_pagination.StandardResultsSetPagination
23 permission_classes = [IsAuthenticatedOrReadOnly, ] if settings.API_IS_PUBLIC else [IsAuthenticated, ]
24 authentication_classes = [BasicAuthentication, SessionAuthentication]
25 renderer_classes = [renderers.JSONRenderer, renderers.BrowsableAPIRenderer, ] if settings.DEBUG else [renderers.JSONRenderer, ]
26 lookup_value_regex = r'\d+'
27
28 def get_ordered_query_params(self):
29 """ Get multi value query params sorted by key """
30 parameters = self.request.query_params
31 sorted_keys = sorted(parameters.keys())
32 return {k: sorted(parameters.getlist(k)) for k in sorted_keys}
33
34 def get_base_cache_string(self):
35 """ return cache string as url path + ordered query params """
36 return f"{self.request.path}:{self.get_ordered_query_params()}:{self.request.accepted_renderer.format}"
37
38 def get_object_cache_key(self, pk):
39 """ return specific object cache key based on object date_update column"""
40 # don't directly use get_object or get_queryset to avoid select / prefetch and annotation sql queries
41 # insure object exists and doesn't raise exception
42 instance = get_object_or_404(self.get_queryset().model, pk=pk)
43 date_update = instance.date_update
44 return f"{self.get_base_cache_string()}:{date_update.isoformat()}"
45
46 def object_cache_key_func(self, **kwargs):
47 """ cache key md5 for retrieve viewset action """
48 return md5(self.get_object_cache_key(kwargs.get('kwargs').get('pk')).encode("utf-8")).hexdigest()
49
50 def get_serializer_context(self):
51 return {
52 'request': self.request,
53 'kwargs': self.kwargs
54 }
55
56
57 class GeotrekGeometricViewset(GeotrekViewSet):
58 filter_backends = GeotrekViewSet.filter_backends + (
59 api_filters.GeotrekQueryParamsDimensionFilter,
60 api_filters.GeotrekInBBoxFilter,
61 api_filters.GeotrekDistanceToPointFilter,
62 )
63 distance_filter_field = 'geom'
64 bbox_filter_field = 'geom'
65 bbox_filter_include_overlapping = True
66 renderer_classes = GeotrekViewSet.renderer_classes + [GeoJSONRenderer, ]
67
68 def get_serializer_class(self):
69 base_serializer_class = super().get_serializer_class()
70 format_output = self.request.query_params.get('format', 'json')
71 return override_serializer(format_output, base_serializer_class)
72
[end of geotrek/api/v2/viewsets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/geotrek/api/v2/viewsets.py b/geotrek/api/v2/viewsets.py
--- a/geotrek/api/v2/viewsets.py
+++ b/geotrek/api/v2/viewsets.py
@@ -33,7 +33,8 @@
def get_base_cache_string(self):
""" return cache string as url path + ordered query params """
- return f"{self.request.path}:{self.get_ordered_query_params()}:{self.request.accepted_renderer.format}"
+ proto_scheme = self.request.headers.get('X-Forwarded-Proto', self.request.scheme) # take care about scheme defined in nginx.conf
+ return f"{self.request.path}:{self.get_ordered_query_params()}:{self.request.accepted_renderer.format}:{proto_scheme}"
def get_object_cache_key(self, pk):
""" return specific object cache key based on object date_update column"""
|
{"golden_diff": "diff --git a/geotrek/api/v2/viewsets.py b/geotrek/api/v2/viewsets.py\n--- a/geotrek/api/v2/viewsets.py\n+++ b/geotrek/api/v2/viewsets.py\n@@ -33,7 +33,8 @@\n \n def get_base_cache_string(self):\n \"\"\" return cache string as url path + ordered query params \"\"\"\n- return f\"{self.request.path}:{self.get_ordered_query_params()}:{self.request.accepted_renderer.format}\"\n+ proto_scheme = self.request.headers.get('X-Forwarded-Proto', self.request.scheme) # take care about scheme defined in nginx.conf\n+ return f\"{self.request.path}:{self.get_ordered_query_params()}:{self.request.accepted_renderer.format}:{proto_scheme}\"\n \n def get_object_cache_key(self, pk):\n \"\"\" return specific object cache key based on object date_update column\"\"\"\n", "issue": "API V2 cache problem with custom configuration in nginx.conf.in\nAPI v2 responses doesn't take care about [```X-FORWARDED-PROTO```](https://github.com/GeotrekCE/Geotrek-admin/blob/master/conf/nginx.conf.in#L55) value defined in nginx configuration.\r\nThis header is used by django to know if generated url (pictogram, attachment) is http or https.\r\nIn some cases, custom configuration in nginx.conf.in to work behind haproxy, API response does not match with headers.\r\n\r\nFor the moment, any change about it in nginx template should be followed by clearing api_v2 cache in admin panel\n", "before_files": [{"content": "from hashlib import md5\n\nfrom django.conf import settings\nfrom django_filters.rest_framework.backends import DjangoFilterBackend\nfrom mapentity.renderers import GeoJSONRenderer\nfrom rest_framework import viewsets, renderers\nfrom rest_framework.authentication import BasicAuthentication, SessionAuthentication\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAuthenticated\n\nfrom geotrek.api.v2 import pagination as api_pagination, filters as api_filters\nfrom geotrek.api.v2.cache import RetrieveCacheResponseMixin\nfrom geotrek.api.v2.serializers import override_serializer\n\n\nclass GeotrekViewSet(RetrieveCacheResponseMixin, viewsets.ReadOnlyModelViewSet):\n filter_backends = (\n DjangoFilterBackend,\n api_filters.GeotrekQueryParamsFilter,\n api_filters.GeotrekPublishedFilter,\n )\n pagination_class = api_pagination.StandardResultsSetPagination\n permission_classes = [IsAuthenticatedOrReadOnly, ] if settings.API_IS_PUBLIC else [IsAuthenticated, ]\n authentication_classes = [BasicAuthentication, SessionAuthentication]\n renderer_classes = [renderers.JSONRenderer, renderers.BrowsableAPIRenderer, ] if settings.DEBUG else [renderers.JSONRenderer, ]\n lookup_value_regex = r'\\d+'\n\n def get_ordered_query_params(self):\n \"\"\" Get multi value query params sorted by key \"\"\"\n parameters = self.request.query_params\n sorted_keys = sorted(parameters.keys())\n return {k: sorted(parameters.getlist(k)) for k in sorted_keys}\n\n def get_base_cache_string(self):\n \"\"\" return cache string as url path + ordered query params \"\"\"\n return f\"{self.request.path}:{self.get_ordered_query_params()}:{self.request.accepted_renderer.format}\"\n\n def get_object_cache_key(self, pk):\n \"\"\" return specific object cache key based on object date_update column\"\"\"\n # don't directly use get_object or get_queryset to avoid select / prefetch and annotation sql queries\n # insure object exists and doesn't raise exception\n instance = get_object_or_404(self.get_queryset().model, pk=pk)\n date_update = instance.date_update\n return 
f\"{self.get_base_cache_string()}:{date_update.isoformat()}\"\n\n def object_cache_key_func(self, **kwargs):\n \"\"\" cache key md5 for retrieve viewset action \"\"\"\n return md5(self.get_object_cache_key(kwargs.get('kwargs').get('pk')).encode(\"utf-8\")).hexdigest()\n\n def get_serializer_context(self):\n return {\n 'request': self.request,\n 'kwargs': self.kwargs\n }\n\n\nclass GeotrekGeometricViewset(GeotrekViewSet):\n filter_backends = GeotrekViewSet.filter_backends + (\n api_filters.GeotrekQueryParamsDimensionFilter,\n api_filters.GeotrekInBBoxFilter,\n api_filters.GeotrekDistanceToPointFilter,\n )\n distance_filter_field = 'geom'\n bbox_filter_field = 'geom'\n bbox_filter_include_overlapping = True\n renderer_classes = GeotrekViewSet.renderer_classes + [GeoJSONRenderer, ]\n\n def get_serializer_class(self):\n base_serializer_class = super().get_serializer_class()\n format_output = self.request.query_params.get('format', 'json')\n return override_serializer(format_output, base_serializer_class)\n", "path": "geotrek/api/v2/viewsets.py"}]}
| 1,502 | 188 |
gh_patches_debug_3105
|
rasdani/github-patches
|
git_diff
|
Parsl__parsl-414
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
File creation fails if no executor has been loaded
```python
In [53]: file = File('first.txt')
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-53-b5dc63ca042a> in <module>()
----> 1 file = File('first.txt')
~/ci/parsl/parsl/data_provider/files.py in __init__(self, url, dman, cache, caching_dir)
40 self.path = parsed_url.path
41 self.filename = os.path.basename(self.path)
---> 42 self.dman = dman if dman else DataManager.get_data_manager()
43 self.data_future = {}
44 if self.scheme == 'globus':
~/ci/parsl/parsl/data_provider/data_manager.py in get_data_manager(cls, max_threads, executors)
24 def get_data_manager(cls, max_threads=None, executors=None):
25 if cls.default_data_manager is None:
---> 26 cls.default_data_manager = DataManager(max_threads=max_threads, executors=executors)
27 return cls.default_data_manager
28
~/ci/parsl/parsl/data_provider/data_manager.py in __init__(self, max_threads, executors)
37
38 self.label = 'data_manager'
---> 39 self.executors = {e.label: e for e in executors}
40 self.max_threads = max_threads
41 self.files = []
TypeError: 'NoneType' object is not iterable
```
</issue>
<code>
[start of parsl/data_provider/data_manager.py]
1 import os
2 import logging
3 import requests
4 import ftplib
5 import concurrent.futures as cf
6 from parsl.data_provider.scheme import GlobusScheme
7 from parsl.executors.base import ParslExecutor
8 from parsl.data_provider.globus import get_globus
9 from parsl.app.app import App
10
11 logger = logging.getLogger(__name__)
12
13
14 class DataManager(ParslExecutor):
15 """The DataManager is responsible for transferring input and output data.
16
17 It uses the Executor interface, where staging tasks are submitted
18 to it, and DataFutures are returned.
19 """
20
21 default_data_manager = None
22
23 @classmethod
24 def get_data_manager(cls, max_threads=None, executors=None):
25 if cls.default_data_manager is None:
26 cls.default_data_manager = DataManager(max_threads=max_threads, executors=executors)
27 return cls.default_data_manager
28
29 def __init__(self, max_threads=10, executors=None):
30 """Initialize the DataManager.
31
32 Kwargs:
33 - max_threads (int): Number of threads. Default is 10.
34 - executors (list of Executors): Executors for which data transfer will be managed.
35 """
36 self._scaling_enabled = False
37
38 self.label = 'data_manager'
39 self.executors = {e.label: e for e in executors}
40 self.max_threads = max_threads
41 self.files = []
42 self.globus = None
43 self.managed = True
44
45 def start(self):
46 self.executor = cf.ThreadPoolExecutor(max_workers=self.max_threads)
47
48 def submit(self, *args, **kwargs):
49 """Submit a staging app. All optimization should be here."""
50 return self.executor.submit(*args, **kwargs)
51
52 def scale_in(self, blocks, *args, **kwargs):
53 pass
54
55 def scale_out(self, *args, **kwargs):
56 pass
57
58 def shutdown(self, block=False):
59 """Shutdown the ThreadPool.
60
61 Kwargs:
62 - block (bool): To block for confirmations or not
63
64 """
65 x = self.executor.shutdown(wait=block)
66 logger.debug("Done with executor shutdown")
67 return x
68
69 @property
70 def scaling_enabled(self):
71 return self._scaling_enabled
72
73 def add_file(self, file):
74 if file.scheme == 'globus':
75 if not self.globus:
76 self.globus = get_globus()
77 # keep a list of all remote files for optimization purposes (TODO)
78 self.files.append(file)
79 self._set_local_path(file)
80
81 def _set_local_path(self, file):
82 globus_ep = self._get_globus_endpoint()
83 file.local_path = os.path.join(globus_ep['working_dir'], file.filename)
84
85 def _get_globus_endpoint(self, executor_label=None):
86 for executor in self.executors.values():
87 if executor_label is None or executor.label == executor_label:
88 for scheme in executor.storage_access:
89 if isinstance(scheme, GlobusScheme):
90 working_dir = os.path.normpath(executor.working_dir)
91 if scheme.endpoint_path and scheme.local_path:
92 endpoint_path = os.path.normpath(scheme.endpoint_path)
93 local_path = os.path.normpath(scheme.local_path)
94 common_path = os.path.commonpath((local_path, working_dir))
95 if local_path != common_path:
96 raise Exception('"local_path" must be equal or an absolute subpath of "working_dir"')
97 relative_path = os.path.relpath(working_dir, common_path)
98 endpoint_path = os.path.join(endpoint_path, relative_path)
99 else:
100 endpoint_path = working_dir
101 return {'endpoint_uuid': scheme.endpoint_uuid,
102 'endpoint_path': endpoint_path,
103 'working_dir': working_dir}
104 raise Exception('No executor with a Globus endpoint and working_dir defined')
105
106 def stage_in(self, file, executor):
107 """Transport the file from the input source to the executor.
108
109 This function returns a DataFuture.
110
111 Args:
112 - self
113 - file (File) : file to stage in
114 - executor (str) : an executor the file is going to be staged in to.
115 If the executor argument is not specified for a file
116 with 'globus' scheme, the file will be staged in to
117 the first executor with the "globus" key in a config.
118 """
119
120 if file.scheme == 'file':
121 stage_in_app = self._file_stage_in_app()
122 app_fut = stage_in_app(outputs=[file])
123 return app_fut._outputs[0]
124 elif file.scheme == 'ftp':
125 working_dir = self.executors[executor].working_dir
126 stage_in_app = self._ftp_stage_in_app(executor=executor)
127 app_fut = stage_in_app(working_dir, outputs=[file])
128 return app_fut._outputs[0]
129 elif file.scheme == 'http' or file.scheme == 'https':
130 working_dir = self.executors[executor].working_dir
131 stage_in_app = self._http_stage_in_app(executor=executor)
132 app_fut = stage_in_app(working_dir, outputs=[file])
133 return app_fut._outputs[0]
134 elif file.scheme == 'globus':
135 globus_ep = self._get_globus_endpoint(executor)
136 stage_in_app = self._globus_stage_in_app()
137 app_fut = stage_in_app(globus_ep, outputs=[file])
138 return app_fut._outputs[0]
139 else:
140 raise Exception('Staging in with unknown file scheme {} is not supported'.format(file.scheme))
141
142 def _file_stage_in_app(self):
143 return App("python", executors=['data_manager'])(self._file_stage_in)
144
145 def _file_stage_in(self, outputs=[]):
146 pass
147
148 def _ftp_stage_in_app(self, executor):
149 return App("python", executors=[executor])(self._ftp_stage_in)
150
151 def _ftp_stage_in(self, working_dir, outputs=[]):
152 file = outputs[0]
153 if working_dir:
154 os.makedirs(working_dir, exist_ok=True)
155 file.local_path = os.path.join(working_dir, file.filename)
156 else:
157 file.local_path = file.filename
158 with open(file.local_path, 'wb') as f:
159 ftp = ftplib.FTP(file.netloc)
160 ftp.login()
161 ftp.cwd(os.path.dirname(file.path))
162 ftp.retrbinary('RETR {}'.format(file.filename), f.write)
163 ftp.quit()
164
165 def _http_stage_in_app(self, executor):
166 return App("python", executors=[executor])(self._http_stage_in)
167
168 def _http_stage_in(self, working_dir, outputs=[]):
169 file = outputs[0]
170 if working_dir:
171 os.makedirs(working_dir, exist_ok=True)
172 file.local_path = os.path.join(working_dir, file.filename)
173 else:
174 file.local_path = file.filename
175 resp = requests.get(file.url, stream=True)
176 with open(file.local_path, 'wb') as f:
177 for chunk in resp.iter_content(chunk_size=1024):
178 if chunk:
179 f.write(chunk)
180
181 def _globus_stage_in_app(self):
182 return App("python", executors=['data_manager'])(self._globus_stage_in)
183
184 def _globus_stage_in(self, globus_ep, outputs=[]):
185 file = outputs[0]
186 file.local_path = os.path.join(
187 globus_ep['working_dir'], file.filename)
188 dst_path = os.path.join(
189 globus_ep['endpoint_path'], file.filename)
190 self.globus.transfer_file(
191 file.netloc, globus_ep['endpoint_uuid'],
192 file.path, dst_path)
193
194 def stage_out(self, file, executor):
195 """Transport the file from the local filesystem to the remote Globus endpoint.
196
197 This function returns a DataFuture.
198
199 Args:
200 - self
201 - file (File) - file to stage out
202 - executor (str) - Which executor the file is going to be staged out from.
203 If the executor argument is not specified for a file
204 with the 'globus' scheme, the file will be staged in to
205 the first executor with the "globus" key in a config.
206 """
207
208 if file.scheme == 'file':
209 stage_out_app = self._file_stage_out_app()
210 return stage_out_app()
211 elif file.scheme == 'http' or file.scheme == 'https':
212 raise Exception('HTTP/HTTPS file staging out is not supported')
213 elif file.scheme == 'ftp':
214 raise Exception('FTP file staging out is not supported')
215 elif file.scheme == 'globus':
216 globus_ep = self._get_globus_endpoint(executor)
217 stage_out_app = self._globus_stage_out_app()
218 return stage_out_app(globus_ep, inputs=[file])
219 else:
220 raise Exception('Staging out with unknown file scheme {} is not supported'.format(file.scheme))
221
222 def _file_stage_out_app(self):
223 return App("python", executors=['data_manager'])(self._file_stage_out)
224
225 def _file_stage_out(self):
226 pass
227
228 def _globus_stage_out_app(self):
229 return App("python", executors=['data_manager'])(self._globus_stage_out)
230
231 def _globus_stage_out(self, globus_ep, inputs=[]):
232 file = inputs[0]
233 src_path = os.path.join(globus_ep['endpoint_path'], file.filename)
234 self.globus.transfer_file(
235 globus_ep['endpoint_uuid'], file.netloc,
236 src_path, file.path
237 )
238
[end of parsl/data_provider/data_manager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsl/data_provider/data_manager.py b/parsl/data_provider/data_manager.py
--- a/parsl/data_provider/data_manager.py
+++ b/parsl/data_provider/data_manager.py
@@ -36,6 +36,8 @@
self._scaling_enabled = False
self.label = 'data_manager'
+ if executors is None:
+ executors = []
self.executors = {e.label: e for e in executors}
self.max_threads = max_threads
self.files = []
|
{"golden_diff": "diff --git a/parsl/data_provider/data_manager.py b/parsl/data_provider/data_manager.py\n--- a/parsl/data_provider/data_manager.py\n+++ b/parsl/data_provider/data_manager.py\n@@ -36,6 +36,8 @@\n self._scaling_enabled = False\n \n self.label = 'data_manager'\n+ if executors is None:\n+ executors = []\n self.executors = {e.label: e for e in executors}\n self.max_threads = max_threads\n self.files = []\n", "issue": "File creation fails if no executor has been loaded\n```python\r\nIn [53]: file = File('first.txt')\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-53-b5dc63ca042a> in <module>()\r\n----> 1 file = File('first.txt')\r\n\r\n~/ci/parsl/parsl/data_provider/files.py in __init__(self, url, dman, cache, caching_dir)\r\n 40 self.path = parsed_url.path\r\n 41 self.filename = os.path.basename(self.path)\r\n---> 42 self.dman = dman if dman else DataManager.get_data_manager()\r\n 43 self.data_future = {}\r\n 44 if self.scheme == 'globus':\r\n\r\n~/ci/parsl/parsl/data_provider/data_manager.py in get_data_manager(cls, max_threads, executors)\r\n 24 def get_data_manager(cls, max_threads=None, executors=None):\r\n 25 if cls.default_data_manager is None:\r\n---> 26 cls.default_data_manager = DataManager(max_threads=max_threads, executors=executors)\r\n 27 return cls.default_data_manager\r\n 28\r\n\r\n~/ci/parsl/parsl/data_provider/data_manager.py in __init__(self, max_threads, executors)\r\n 37\r\n 38 self.label = 'data_manager'\r\n---> 39 self.executors = {e.label: e for e in executors}\r\n 40 self.max_threads = max_threads\r\n 41 self.files = []\r\n\r\nTypeError: 'NoneType' object is not iterable\r\n```\n", "before_files": [{"content": "import os\nimport logging\nimport requests\nimport ftplib\nimport concurrent.futures as cf\nfrom parsl.data_provider.scheme import GlobusScheme\nfrom parsl.executors.base import ParslExecutor\nfrom parsl.data_provider.globus import get_globus\nfrom parsl.app.app import App\n\nlogger = logging.getLogger(__name__)\n\n\nclass DataManager(ParslExecutor):\n \"\"\"The DataManager is responsible for transferring input and output data.\n\n It uses the Executor interface, where staging tasks are submitted\n to it, and DataFutures are returned.\n \"\"\"\n\n default_data_manager = None\n\n @classmethod\n def get_data_manager(cls, max_threads=None, executors=None):\n if cls.default_data_manager is None:\n cls.default_data_manager = DataManager(max_threads=max_threads, executors=executors)\n return cls.default_data_manager\n\n def __init__(self, max_threads=10, executors=None):\n \"\"\"Initialize the DataManager.\n\n Kwargs:\n - max_threads (int): Number of threads. Default is 10.\n - executors (list of Executors): Executors for which data transfer will be managed.\n \"\"\"\n self._scaling_enabled = False\n\n self.label = 'data_manager'\n self.executors = {e.label: e for e in executors}\n self.max_threads = max_threads\n self.files = []\n self.globus = None\n self.managed = True\n\n def start(self):\n self.executor = cf.ThreadPoolExecutor(max_workers=self.max_threads)\n\n def submit(self, *args, **kwargs):\n \"\"\"Submit a staging app. 
All optimization should be here.\"\"\"\n return self.executor.submit(*args, **kwargs)\n\n def scale_in(self, blocks, *args, **kwargs):\n pass\n\n def scale_out(self, *args, **kwargs):\n pass\n\n def shutdown(self, block=False):\n \"\"\"Shutdown the ThreadPool.\n\n Kwargs:\n - block (bool): To block for confirmations or not\n\n \"\"\"\n x = self.executor.shutdown(wait=block)\n logger.debug(\"Done with executor shutdown\")\n return x\n\n @property\n def scaling_enabled(self):\n return self._scaling_enabled\n\n def add_file(self, file):\n if file.scheme == 'globus':\n if not self.globus:\n self.globus = get_globus()\n # keep a list of all remote files for optimization purposes (TODO)\n self.files.append(file)\n self._set_local_path(file)\n\n def _set_local_path(self, file):\n globus_ep = self._get_globus_endpoint()\n file.local_path = os.path.join(globus_ep['working_dir'], file.filename)\n\n def _get_globus_endpoint(self, executor_label=None):\n for executor in self.executors.values():\n if executor_label is None or executor.label == executor_label:\n for scheme in executor.storage_access:\n if isinstance(scheme, GlobusScheme):\n working_dir = os.path.normpath(executor.working_dir)\n if scheme.endpoint_path and scheme.local_path:\n endpoint_path = os.path.normpath(scheme.endpoint_path)\n local_path = os.path.normpath(scheme.local_path)\n common_path = os.path.commonpath((local_path, working_dir))\n if local_path != common_path:\n raise Exception('\"local_path\" must be equal or an absolute subpath of \"working_dir\"')\n relative_path = os.path.relpath(working_dir, common_path)\n endpoint_path = os.path.join(endpoint_path, relative_path)\n else:\n endpoint_path = working_dir\n return {'endpoint_uuid': scheme.endpoint_uuid,\n 'endpoint_path': endpoint_path,\n 'working_dir': working_dir}\n raise Exception('No executor with a Globus endpoint and working_dir defined')\n\n def stage_in(self, file, executor):\n \"\"\"Transport the file from the input source to the executor.\n\n This function returns a DataFuture.\n\n Args:\n - self\n - file (File) : file to stage in\n - executor (str) : an executor the file is going to be staged in to.\n If the executor argument is not specified for a file\n with 'globus' scheme, the file will be staged in to\n the first executor with the \"globus\" key in a config.\n \"\"\"\n\n if file.scheme == 'file':\n stage_in_app = self._file_stage_in_app()\n app_fut = stage_in_app(outputs=[file])\n return app_fut._outputs[0]\n elif file.scheme == 'ftp':\n working_dir = self.executors[executor].working_dir\n stage_in_app = self._ftp_stage_in_app(executor=executor)\n app_fut = stage_in_app(working_dir, outputs=[file])\n return app_fut._outputs[0]\n elif file.scheme == 'http' or file.scheme == 'https':\n working_dir = self.executors[executor].working_dir\n stage_in_app = self._http_stage_in_app(executor=executor)\n app_fut = stage_in_app(working_dir, outputs=[file])\n return app_fut._outputs[0]\n elif file.scheme == 'globus':\n globus_ep = self._get_globus_endpoint(executor)\n stage_in_app = self._globus_stage_in_app()\n app_fut = stage_in_app(globus_ep, outputs=[file])\n return app_fut._outputs[0]\n else:\n raise Exception('Staging in with unknown file scheme {} is not supported'.format(file.scheme))\n\n def _file_stage_in_app(self):\n return App(\"python\", executors=['data_manager'])(self._file_stage_in)\n\n def _file_stage_in(self, outputs=[]):\n pass\n\n def _ftp_stage_in_app(self, executor):\n return App(\"python\", executors=[executor])(self._ftp_stage_in)\n\n def 
_ftp_stage_in(self, working_dir, outputs=[]):\n file = outputs[0]\n if working_dir:\n os.makedirs(working_dir, exist_ok=True)\n file.local_path = os.path.join(working_dir, file.filename)\n else:\n file.local_path = file.filename\n with open(file.local_path, 'wb') as f:\n ftp = ftplib.FTP(file.netloc)\n ftp.login()\n ftp.cwd(os.path.dirname(file.path))\n ftp.retrbinary('RETR {}'.format(file.filename), f.write)\n ftp.quit()\n\n def _http_stage_in_app(self, executor):\n return App(\"python\", executors=[executor])(self._http_stage_in)\n\n def _http_stage_in(self, working_dir, outputs=[]):\n file = outputs[0]\n if working_dir:\n os.makedirs(working_dir, exist_ok=True)\n file.local_path = os.path.join(working_dir, file.filename)\n else:\n file.local_path = file.filename\n resp = requests.get(file.url, stream=True)\n with open(file.local_path, 'wb') as f:\n for chunk in resp.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n\n def _globus_stage_in_app(self):\n return App(\"python\", executors=['data_manager'])(self._globus_stage_in)\n\n def _globus_stage_in(self, globus_ep, outputs=[]):\n file = outputs[0]\n file.local_path = os.path.join(\n globus_ep['working_dir'], file.filename)\n dst_path = os.path.join(\n globus_ep['endpoint_path'], file.filename)\n self.globus.transfer_file(\n file.netloc, globus_ep['endpoint_uuid'],\n file.path, dst_path)\n\n def stage_out(self, file, executor):\n \"\"\"Transport the file from the local filesystem to the remote Globus endpoint.\n\n This function returns a DataFuture.\n\n Args:\n - self\n - file (File) - file to stage out\n - executor (str) - Which executor the file is going to be staged out from.\n If the executor argument is not specified for a file\n with the 'globus' scheme, the file will be staged in to\n the first executor with the \"globus\" key in a config.\n \"\"\"\n\n if file.scheme == 'file':\n stage_out_app = self._file_stage_out_app()\n return stage_out_app()\n elif file.scheme == 'http' or file.scheme == 'https':\n raise Exception('HTTP/HTTPS file staging out is not supported')\n elif file.scheme == 'ftp':\n raise Exception('FTP file staging out is not supported')\n elif file.scheme == 'globus':\n globus_ep = self._get_globus_endpoint(executor)\n stage_out_app = self._globus_stage_out_app()\n return stage_out_app(globus_ep, inputs=[file])\n else:\n raise Exception('Staging out with unknown file scheme {} is not supported'.format(file.scheme))\n\n def _file_stage_out_app(self):\n return App(\"python\", executors=['data_manager'])(self._file_stage_out)\n\n def _file_stage_out(self):\n pass\n\n def _globus_stage_out_app(self):\n return App(\"python\", executors=['data_manager'])(self._globus_stage_out)\n\n def _globus_stage_out(self, globus_ep, inputs=[]):\n file = inputs[0]\n src_path = os.path.join(globus_ep['endpoint_path'], file.filename)\n self.globus.transfer_file(\n globus_ep['endpoint_uuid'], file.netloc,\n src_path, file.path\n )\n", "path": "parsl/data_provider/data_manager.py"}]}
| 3,561 | 115 |
gh_patches_debug_34634
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-598
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
shipping address shouldn't be editable while order is processed?
Order has a foreign key to address for shipping address.
With the current model, I think a user is able to change the address while the order is being processed (before shipped without realizing his past order might be affected)
</issue>
<code>
[start of saleor/checkout/core.py]
1 from __future__ import unicode_literals
2 from functools import wraps
3
4 from django.conf import settings
5 from django.db import transaction
6 from django.forms.models import model_to_dict
7 from django.utils.encoding import smart_text
8 from prices import Price, FixedDiscount
9
10 from ..cart.views import get_or_empty_db_cart
11 from ..core import analytics
12 from ..discount.models import Voucher, NotApplicable
13 from ..order.models import Order
14 from ..shipping.models import ShippingMethodCountry, ANY_COUNTRY
15 from ..userprofile.models import Address, User
16
17 STORAGE_SESSION_KEY = 'checkout_storage'
18
19
20 class Checkout(object):
21
22 VERSION = '1.0.0'
23 modified = False
24
25 def __init__(self, cart, user, tracking_code):
26 self.cart = cart
27 self.storage = {'version': self.VERSION}
28 self.tracking_code = tracking_code
29 self.user = user
30 self.discounts = cart.discounts
31
32 @classmethod
33 def from_storage(cls, storage_data, cart, user, tracking_code):
34 checkout = cls(cart, user, tracking_code)
35 checkout.storage = storage_data
36 try:
37 version = checkout.storage['version']
38 except (TypeError, KeyError):
39 version = None
40 if version != cls.VERSION:
41 checkout.storage = {'version': cls.VERSION}
42 return checkout
43
44 def for_storage(self):
45 return self.storage
46
47 def clear_storage(self):
48 self.storage = None
49 self.modified = True
50
51 def _get_address_from_storage(self, key):
52 address_data = self.storage.get(key)
53 if address_data is not None and address_data.get('id'):
54 try:
55 return Address.objects.get(id=address_data['id'])
56 except Address.DoesNotExist:
57 return None
58 elif address_data:
59 return Address(**address_data)
60 return None
61
62 @property
63 def is_shipping_required(self):
64 return self.cart.is_shipping_required()
65
66 @property
67 def deliveries(self):
68 for partition in self.cart.partition():
69 if self.shipping_method and partition.is_shipping_required():
70 shipping_cost = self.shipping_method.get_total()
71 else:
72 shipping_cost = Price(0, currency=settings.DEFAULT_CURRENCY)
73 total_with_shipping = partition.get_total(
74 discounts=self.cart.discounts) + shipping_cost
75
76 partition = [
77 (item,
78 item.get_price_per_item(discounts=self.cart.discounts),
79 item.get_total(discounts=self.cart.discounts))
80 for item in partition]
81
82 yield partition, shipping_cost, total_with_shipping
83
84 @property
85 def shipping_address(self):
86 address = self._get_address_from_storage('shipping_address')
87 if address is None and self.user.is_authenticated():
88 return self.user.default_shipping_address
89 return address
90
91 @shipping_address.setter
92 def shipping_address(self, address):
93 address_data = model_to_dict(address)
94 address_data['country'] = smart_text(address_data['country'])
95 self.storage['shipping_address'] = address_data
96 self.modified = True
97
98 @property
99 def shipping_method(self):
100 shipping_address = self.shipping_address
101 if shipping_address is not None:
102 shipping_method_country_id = self.storage.get(
103 'shipping_method_country_id')
104 if shipping_method_country_id is not None:
105 try:
106 shipping_method_country = ShippingMethodCountry.objects.get(
107 id=shipping_method_country_id)
108 except ShippingMethodCountry.DoesNotExist:
109 return None
110 shipping_country_code = shipping_address.country.code
111 if (shipping_method_country.country_code == ANY_COUNTRY or
112 shipping_method_country.country_code == shipping_country_code):
113 return shipping_method_country
114
115 @shipping_method.setter
116 def shipping_method(self, shipping_method_country):
117 self.storage['shipping_method_country_id'] = shipping_method_country.id
118 self.modified = True
119
120 @property
121 def email(self):
122 return self.storage.get('email')
123
124 @email.setter
125 def email(self, email):
126 self.storage['email'] = email
127 self.modified = True
128
129 @property
130 def billing_address(self):
131 address = self._get_address_from_storage('billing_address')
132 if address is not None:
133 return address
134 elif self.user.is_authenticated() and self.user.default_billing_address:
135 return self.user.default_billing_address
136 elif self.shipping_address:
137 return self.shipping_address
138
139 @billing_address.setter
140 def billing_address(self, address):
141 address_data = model_to_dict(address)
142 address_data['country'] = smart_text(address_data['country'])
143 self.storage['billing_address'] = address_data
144 self.modified = True
145
146 @property
147 def discount(self):
148 value = self.storage.get('discount_value')
149 currency = self.storage.get('discount_currency')
150 name = self.storage.get('discount_name')
151 if value is not None and name is not None and currency is not None:
152 amount = Price(value, currency=currency)
153 return FixedDiscount(amount, name)
154
155 @discount.setter
156 def discount(self, discount):
157 amount = discount.amount
158 self.storage['discount_value'] = smart_text(amount.net)
159 self.storage['discount_currency'] = amount.currency
160 self.storage['discount_name'] = discount.name
161 self.modified = True
162
163 @discount.deleter
164 def discount(self):
165 if 'discount_value' in self.storage:
166 del self.storage['discount_value']
167 self.modified = True
168 if 'discount_currency' in self.storage:
169 del self.storage['discount_currency']
170 self.modified = True
171 if 'discount_name' in self.storage:
172 del self.storage['discount_name']
173 self.modified = True
174
175 @property
176 def voucher_code(self):
177 return self.storage.get('voucher_code')
178
179 @voucher_code.setter
180 def voucher_code(self, voucher_code):
181 self.storage['voucher_code'] = voucher_code
182 self.modified = True
183
184 @voucher_code.deleter
185 def voucher_code(self):
186 if 'voucher_code' in self.storage:
187 del self.storage['voucher_code']
188 self.modified = True
189
190 @property
191 def is_shipping_same_as_billing(self):
192 return Address.objects.are_identical(
193 self.shipping_address, self.billing_address)
194
195 def _save_address(self, address, is_billing=False, is_shipping=False):
196 if self.user.is_authenticated() and address.id is None:
197 address = User.objects.store_address(
198 self.user, address, shipping=is_shipping, billing=is_billing)
199 elif address.id is None:
200 address.save()
201 return address
202
203 @transaction.atomic
204 def create_order(self):
205 if self.is_shipping_required:
206 shipping_address = self._save_address(
207 self.shipping_address, is_shipping=True)
208 else:
209 shipping_address = None
210 billing_address = self._save_address(
211 self.billing_address, is_billing=True)
212
213 order_data = {
214 'billing_address': billing_address,
215 'shipping_address': shipping_address,
216 'tracking_client_id': self.tracking_code,
217 'total': self.get_total()}
218
219 if self.user.is_authenticated():
220 order_data['user'] = self.user
221 else:
222 # TODO: we should always save email in order not only
223 # for anonymous
224 order_data['anonymous_user_email'] = self.email
225
226 voucher = self._get_voucher()
227 if voucher is not None:
228 discount = self.discount
229 order_data['voucher'] = voucher
230 order_data['discount_amount'] = discount.amount
231 order_data['discount_name'] = discount.name
232
233 order = Order.objects.create(**order_data)
234
235 for partition in self.cart.partition():
236 shipping_required = partition.is_shipping_required()
237 if shipping_required:
238 shipping_price = self.shipping_method.get_total()
239 shipping_method_name = smart_text(self.shipping_method)
240 else:
241 shipping_price = 0
242 shipping_method_name = None
243 group = order.groups.create(
244 shipping_price=shipping_price,
245 shipping_method_name=shipping_method_name)
246 group.add_items_from_partition(
247 partition, discounts=self.cart.discounts)
248
249 if voucher is not None:
250 Voucher.objects.increase_usage(voucher)
251
252 return order
253
254 def _get_voucher(self):
255 voucher_code = self.voucher_code
256 if voucher_code is not None:
257 vouchers = Voucher.objects.active().select_for_update()
258 try:
259 return vouchers.get(code=self.voucher_code)
260 except Voucher.DoesNotExist:
261 return None
262
263 def recalculate_discount(self):
264 voucher = self._get_voucher()
265 if voucher is not None:
266 try:
267 self.discount = voucher.get_discount_for_checkout(self)
268 except NotApplicable:
269 del self.discount
270 del self.voucher_code
271 else:
272 del self.discount
273 del self.voucher_code
274
275 def get_subtotal(self):
276 zero = Price(0, currency=settings.DEFAULT_CURRENCY)
277 cost_iterator = (
278 total - shipping_cost
279 for shipment, shipping_cost, total in self.deliveries)
280 total = sum(cost_iterator, zero)
281 return total
282
283 def get_total(self):
284 zero = Price(0, currency=settings.DEFAULT_CURRENCY)
285 cost_iterator = (
286 total
287 for shipment, shipping_cost, total in self.deliveries)
288 total = sum(cost_iterator, zero)
289 return total if self.discount is None else self.discount.apply(total)
290
291 def get_total_shipping(self):
292 zero = Price(0, currency=settings.DEFAULT_CURRENCY)
293 cost_iterator = (
294 shipping_cost
295 for shipment, shipping_cost, total in self.deliveries)
296 total = sum(cost_iterator, zero)
297 return total
298
299
300 def load_checkout(view):
301 @wraps(view)
302 @get_or_empty_db_cart
303 def func(request, cart):
304 try:
305 session_data = request.session[STORAGE_SESSION_KEY]
306 except KeyError:
307 session_data = ''
308 tracking_code = analytics.get_client_id(request)
309
310 checkout = Checkout.from_storage(
311 session_data, cart, request.user, tracking_code)
312 response = view(request, checkout, cart)
313 if checkout.modified:
314 request.session[STORAGE_SESSION_KEY] = checkout.for_storage()
315 return response
316
317 return func
318
[end of saleor/checkout/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/saleor/checkout/core.py b/saleor/checkout/core.py
--- a/saleor/checkout/core.py
+++ b/saleor/checkout/core.py
@@ -192,23 +192,36 @@
return Address.objects.are_identical(
self.shipping_address, self.billing_address)
- def _save_address(self, address, is_billing=False, is_shipping=False):
- if self.user.is_authenticated() and address.id is None:
- address = User.objects.store_address(
- self.user, address, shipping=is_shipping, billing=is_billing)
- elif address.id is None:
- address.save()
+ def _add_to_user_address_book(self, address, is_billing=False,
+ is_shipping=False):
+ if self.user.is_authenticated():
+ User.objects.store_address(
+ self.user, address, shipping=is_shipping,
+ billing=is_billing)
+
+ def _get_address_copy(self, address):
+ address.user = None
+ address.pk = None
+ address.save()
return address
+ def _save_order_billing_address(self):
+ return self._get_address_copy(self.billing_address)
+
+ def _save_order_shipping_address(self):
+ return self._get_address_copy(self.shipping_address)
+
@transaction.atomic
def create_order(self):
if self.is_shipping_required:
- shipping_address = self._save_address(
+ shipping_address = self._save_order_shipping_address()
+ self._add_to_user_address_book(
self.shipping_address, is_shipping=True)
else:
shipping_address = None
- billing_address = self._save_address(
- self.billing_address, is_billing=True)
+ billing_address = self._save_order_billing_address()
+ self._add_to_user_address_book(
+ self.shipping_address, is_billing=True)
order_data = {
'billing_address': billing_address,
@@ -306,7 +319,7 @@
except KeyError:
session_data = ''
tracking_code = analytics.get_client_id(request)
-
+
checkout = Checkout.from_storage(
session_data, cart, request.user, tracking_code)
response = view(request, checkout, cart)
|
{"golden_diff": "diff --git a/saleor/checkout/core.py b/saleor/checkout/core.py\n--- a/saleor/checkout/core.py\n+++ b/saleor/checkout/core.py\n@@ -192,23 +192,36 @@\n return Address.objects.are_identical(\n self.shipping_address, self.billing_address)\n \n- def _save_address(self, address, is_billing=False, is_shipping=False):\n- if self.user.is_authenticated() and address.id is None:\n- address = User.objects.store_address(\n- self.user, address, shipping=is_shipping, billing=is_billing)\n- elif address.id is None:\n- address.save()\n+ def _add_to_user_address_book(self, address, is_billing=False,\n+ is_shipping=False):\n+ if self.user.is_authenticated():\n+ User.objects.store_address(\n+ self.user, address, shipping=is_shipping,\n+ billing=is_billing)\n+\n+ def _get_address_copy(self, address):\n+ address.user = None\n+ address.pk = None\n+ address.save()\n return address\n \n+ def _save_order_billing_address(self):\n+ return self._get_address_copy(self.billing_address)\n+\n+ def _save_order_shipping_address(self):\n+ return self._get_address_copy(self.shipping_address)\n+\n @transaction.atomic\n def create_order(self):\n if self.is_shipping_required:\n- shipping_address = self._save_address(\n+ shipping_address = self._save_order_shipping_address()\n+ self._add_to_user_address_book(\n self.shipping_address, is_shipping=True)\n else:\n shipping_address = None\n- billing_address = self._save_address(\n- self.billing_address, is_billing=True)\n+ billing_address = self._save_order_billing_address()\n+ self._add_to_user_address_book(\n+ self.shipping_address, is_billing=True)\n \n order_data = {\n 'billing_address': billing_address,\n@@ -306,7 +319,7 @@\n except KeyError:\n session_data = ''\n tracking_code = analytics.get_client_id(request)\n- \n+\n checkout = Checkout.from_storage(\n session_data, cart, request.user, tracking_code)\n response = view(request, checkout, cart)\n", "issue": "shipping address shouldn't be editable while order is processed?\nOrder has a foreign key to address for shipping address. 
\nWith the current model, I think a user is able to change the address while the order is being processed (before shipped without realizing his past order might be affected)\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom functools import wraps\n\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.forms.models import model_to_dict\nfrom django.utils.encoding import smart_text\nfrom prices import Price, FixedDiscount\n\nfrom ..cart.views import get_or_empty_db_cart\nfrom ..core import analytics\nfrom ..discount.models import Voucher, NotApplicable\nfrom ..order.models import Order\nfrom ..shipping.models import ShippingMethodCountry, ANY_COUNTRY\nfrom ..userprofile.models import Address, User\n\nSTORAGE_SESSION_KEY = 'checkout_storage'\n\n\nclass Checkout(object):\n\n VERSION = '1.0.0'\n modified = False\n\n def __init__(self, cart, user, tracking_code):\n self.cart = cart\n self.storage = {'version': self.VERSION}\n self.tracking_code = tracking_code\n self.user = user\n self.discounts = cart.discounts\n\n @classmethod\n def from_storage(cls, storage_data, cart, user, tracking_code):\n checkout = cls(cart, user, tracking_code)\n checkout.storage = storage_data\n try:\n version = checkout.storage['version']\n except (TypeError, KeyError):\n version = None\n if version != cls.VERSION:\n checkout.storage = {'version': cls.VERSION}\n return checkout\n\n def for_storage(self):\n return self.storage\n\n def clear_storage(self):\n self.storage = None\n self.modified = True\n\n def _get_address_from_storage(self, key):\n address_data = self.storage.get(key)\n if address_data is not None and address_data.get('id'):\n try:\n return Address.objects.get(id=address_data['id'])\n except Address.DoesNotExist:\n return None\n elif address_data:\n return Address(**address_data)\n return None\n\n @property\n def is_shipping_required(self):\n return self.cart.is_shipping_required()\n\n @property\n def deliveries(self):\n for partition in self.cart.partition():\n if self.shipping_method and partition.is_shipping_required():\n shipping_cost = self.shipping_method.get_total()\n else:\n shipping_cost = Price(0, currency=settings.DEFAULT_CURRENCY)\n total_with_shipping = partition.get_total(\n discounts=self.cart.discounts) + shipping_cost\n\n partition = [\n (item,\n item.get_price_per_item(discounts=self.cart.discounts),\n item.get_total(discounts=self.cart.discounts))\n for item in partition]\n\n yield partition, shipping_cost, total_with_shipping\n\n @property\n def shipping_address(self):\n address = self._get_address_from_storage('shipping_address')\n if address is None and self.user.is_authenticated():\n return self.user.default_shipping_address\n return address\n\n @shipping_address.setter\n def shipping_address(self, address):\n address_data = model_to_dict(address)\n address_data['country'] = smart_text(address_data['country'])\n self.storage['shipping_address'] = address_data\n self.modified = True\n\n @property\n def shipping_method(self):\n shipping_address = self.shipping_address\n if shipping_address is not None:\n shipping_method_country_id = self.storage.get(\n 'shipping_method_country_id')\n if shipping_method_country_id is not None:\n try:\n shipping_method_country = ShippingMethodCountry.objects.get(\n id=shipping_method_country_id)\n except ShippingMethodCountry.DoesNotExist:\n return None\n shipping_country_code = shipping_address.country.code\n if (shipping_method_country.country_code == ANY_COUNTRY or\n 
shipping_method_country.country_code == shipping_country_code):\n return shipping_method_country\n\n @shipping_method.setter\n def shipping_method(self, shipping_method_country):\n self.storage['shipping_method_country_id'] = shipping_method_country.id\n self.modified = True\n\n @property\n def email(self):\n return self.storage.get('email')\n\n @email.setter\n def email(self, email):\n self.storage['email'] = email\n self.modified = True\n\n @property\n def billing_address(self):\n address = self._get_address_from_storage('billing_address')\n if address is not None:\n return address\n elif self.user.is_authenticated() and self.user.default_billing_address:\n return self.user.default_billing_address\n elif self.shipping_address:\n return self.shipping_address\n\n @billing_address.setter\n def billing_address(self, address):\n address_data = model_to_dict(address)\n address_data['country'] = smart_text(address_data['country'])\n self.storage['billing_address'] = address_data\n self.modified = True\n\n @property\n def discount(self):\n value = self.storage.get('discount_value')\n currency = self.storage.get('discount_currency')\n name = self.storage.get('discount_name')\n if value is not None and name is not None and currency is not None:\n amount = Price(value, currency=currency)\n return FixedDiscount(amount, name)\n\n @discount.setter\n def discount(self, discount):\n amount = discount.amount\n self.storage['discount_value'] = smart_text(amount.net)\n self.storage['discount_currency'] = amount.currency\n self.storage['discount_name'] = discount.name\n self.modified = True\n\n @discount.deleter\n def discount(self):\n if 'discount_value' in self.storage:\n del self.storage['discount_value']\n self.modified = True\n if 'discount_currency' in self.storage:\n del self.storage['discount_currency']\n self.modified = True\n if 'discount_name' in self.storage:\n del self.storage['discount_name']\n self.modified = True\n\n @property\n def voucher_code(self):\n return self.storage.get('voucher_code')\n\n @voucher_code.setter\n def voucher_code(self, voucher_code):\n self.storage['voucher_code'] = voucher_code\n self.modified = True\n\n @voucher_code.deleter\n def voucher_code(self):\n if 'voucher_code' in self.storage:\n del self.storage['voucher_code']\n self.modified = True\n\n @property\n def is_shipping_same_as_billing(self):\n return Address.objects.are_identical(\n self.shipping_address, self.billing_address)\n\n def _save_address(self, address, is_billing=False, is_shipping=False):\n if self.user.is_authenticated() and address.id is None:\n address = User.objects.store_address(\n self.user, address, shipping=is_shipping, billing=is_billing)\n elif address.id is None:\n address.save()\n return address\n\n @transaction.atomic\n def create_order(self):\n if self.is_shipping_required:\n shipping_address = self._save_address(\n self.shipping_address, is_shipping=True)\n else:\n shipping_address = None\n billing_address = self._save_address(\n self.billing_address, is_billing=True)\n\n order_data = {\n 'billing_address': billing_address,\n 'shipping_address': shipping_address,\n 'tracking_client_id': self.tracking_code,\n 'total': self.get_total()}\n\n if self.user.is_authenticated():\n order_data['user'] = self.user\n else:\n # TODO: we should always save email in order not only\n # for anonymous\n order_data['anonymous_user_email'] = self.email\n\n voucher = self._get_voucher()\n if voucher is not None:\n discount = self.discount\n order_data['voucher'] = voucher\n order_data['discount_amount'] 
= discount.amount\n order_data['discount_name'] = discount.name\n\n order = Order.objects.create(**order_data)\n\n for partition in self.cart.partition():\n shipping_required = partition.is_shipping_required()\n if shipping_required:\n shipping_price = self.shipping_method.get_total()\n shipping_method_name = smart_text(self.shipping_method)\n else:\n shipping_price = 0\n shipping_method_name = None\n group = order.groups.create(\n shipping_price=shipping_price,\n shipping_method_name=shipping_method_name)\n group.add_items_from_partition(\n partition, discounts=self.cart.discounts)\n\n if voucher is not None:\n Voucher.objects.increase_usage(voucher)\n\n return order\n\n def _get_voucher(self):\n voucher_code = self.voucher_code\n if voucher_code is not None:\n vouchers = Voucher.objects.active().select_for_update()\n try:\n return vouchers.get(code=self.voucher_code)\n except Voucher.DoesNotExist:\n return None\n\n def recalculate_discount(self):\n voucher = self._get_voucher()\n if voucher is not None:\n try:\n self.discount = voucher.get_discount_for_checkout(self)\n except NotApplicable:\n del self.discount\n del self.voucher_code\n else:\n del self.discount\n del self.voucher_code\n\n def get_subtotal(self):\n zero = Price(0, currency=settings.DEFAULT_CURRENCY)\n cost_iterator = (\n total - shipping_cost\n for shipment, shipping_cost, total in self.deliveries)\n total = sum(cost_iterator, zero)\n return total\n\n def get_total(self):\n zero = Price(0, currency=settings.DEFAULT_CURRENCY)\n cost_iterator = (\n total\n for shipment, shipping_cost, total in self.deliveries)\n total = sum(cost_iterator, zero)\n return total if self.discount is None else self.discount.apply(total)\n\n def get_total_shipping(self):\n zero = Price(0, currency=settings.DEFAULT_CURRENCY)\n cost_iterator = (\n shipping_cost\n for shipment, shipping_cost, total in self.deliveries)\n total = sum(cost_iterator, zero)\n return total\n\n\ndef load_checkout(view):\n @wraps(view)\n @get_or_empty_db_cart\n def func(request, cart):\n try:\n session_data = request.session[STORAGE_SESSION_KEY]\n except KeyError:\n session_data = ''\n tracking_code = analytics.get_client_id(request)\n \n checkout = Checkout.from_storage(\n session_data, cart, request.user, tracking_code)\n response = view(request, checkout, cart)\n if checkout.modified:\n request.session[STORAGE_SESSION_KEY] = checkout.for_storage()\n return response\n\n return func\n", "path": "saleor/checkout/core.py"}]}
| 3,608 | 485 |
gh_patches_debug_35702
|
rasdani/github-patches
|
git_diff
|
keras-team__keras-nlp-980
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Investigate top-p performance
Currently our top-p (nucleus) sampler has the worst performance of our entire sampling offering, likely because of the sort operation we need to do for our softmax probabilities. See [these rough numbers](https://github.com/keras-team/keras-nlp/commit/41af01200c5d9898f381477b6a67a87b6951f844#diff-576c27794bae284efc278336a16d0649aaf599973a64d4b65dc67bc99bb7f000) for example, where top-p is slower than beam, despite needing much less computation on the model itself.
We should see if there is anything we can do to speed up top-p sampling with XLA (e.g. a more XLA friendly sort op).
</issue>
<code>
[start of keras_nlp/samplers/top_p_sampler.py]
1 # Copyright 2023 The KerasNLP Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Top-p Sampler."""
15
16 import tensorflow as tf
17
18 from keras_nlp.api_export import keras_nlp_export
19 from keras_nlp.samplers.sampler import Sampler
20 from keras_nlp.samplers.sampler import call_args_docstring
21 from keras_nlp.utils.python_utils import format_docstring
22
23
24 @format_docstring(call_args=call_args_docstring)
25 @keras_nlp_export("keras_nlp.samplers.TopPSampler")
26 class TopPSampler(Sampler):
27 """Top-P Sampler class.
28
29 This sampler implements top-p search algorithm. Top-p search selects tokens
30 from the smallest subset of output probabilities that sum to greater than
31 `p`. Put in another way, top-p will first order token predictions by
32 likelihood, and ignore all tokens after the cumulative probability of
33 selected tokens exceeds `p`, then select a token from the remaining tokens.
34
35 Args:
36 p: float, the `p` value of top-p.
37 seed: int, defaults to None. The random seed.
38
39 Call Args:
40 {{call_args}}
41
42 Examples:
43 ```python
44 # Use a simple alphabet of lowercase characters with ids in range [0, 25].
45 int_lookup = {i: chr(i + ord('a')) for i in range(26)}
46 char_lookup = {v: k for k, v in int_lookup.items()}
47 batch_size, length, vocab_size = 1, 12, len(int_lookup)
48
49 def next(prompt, cache, index):
50 hidden_states = tf.ones((batch_size, 10))
51 # A uniform distribution over our alphabet.
52 logits = tf.ones((batch_size, vocab_size))
53 return logits, hidden_states, cache
54
55 output = keras_nlp.samplers.TopPSampler(p=0.1)(
56 next=next,
57 prompt=tf.fill((batch_size, length,), char_lookup['z']),
58 index=5,
59 )
60 print(["".join([int_lookup[i] for i in s]) for s in output.numpy()])
61 # >>> ['zzzzzbabcccb']
62 ```
63 """
64
65 def __init__(
66 self,
67 p=0.1,
68 seed=None,
69 ):
70 super().__init__()
71 self.p = p
72 self.seed = seed
73
74 def get_next_token(self, probabilities):
75 # Sort preds in descending order.
76 sorted_preds, sorted_indices = tf.math.top_k(
77 probabilities, k=tf.shape(probabilities)[1], sorted=True
78 )
79 # Calculate cumulative probability distribution.
80 cumulative_probabilities = tf.math.cumsum(sorted_preds, axis=-1)
81 # Create a mask for the tokens to keep.
82 keep_mask = cumulative_probabilities <= self.p
83 # Shift to include the last token that exceed p.
84 shifted_keep_mask = tf.concat(
85 [tf.ones_like(keep_mask[:, :1]), keep_mask[:, :-1]], axis=-1
86 )
87 # Filter out unmasked tokens and sample from filtered distribution.
88 probabilities = tf.where(
89 shifted_keep_mask,
90 sorted_preds,
91 tf.zeros(tf.shape(probabilities), dtype=sorted_preds.dtype),
92 )
93 sorted_next_token = tf.random.categorical(
94 tf.math.log(probabilities), 1, seed=self.seed
95 )
96 return tf.gather_nd(sorted_indices, sorted_next_token, batch_dims=1)
97
98 def get_config(self):
99 config = super().get_config()
100 config.update(
101 {
102 "p": self.p,
103 "seed": self.seed,
104 }
105 )
106 return config
107
[end of keras_nlp/samplers/top_p_sampler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/keras_nlp/samplers/top_p_sampler.py b/keras_nlp/samplers/top_p_sampler.py
--- a/keras_nlp/samplers/top_p_sampler.py
+++ b/keras_nlp/samplers/top_p_sampler.py
@@ -34,6 +34,12 @@
Args:
p: float, the `p` value of top-p.
+ k: int, defaults to None. If set, this argument defines a
+ heuristic "top-k" cutoff applied before the "top-p" sampling. All
+ logits not in the top `k` will be discarded, and the remaining
+ logits will be sorted to find a cutoff point for `p`. Setting this
+ arg can significantly speed sampling up by reducing the number
+ of tokens to sort.
seed: int, defaults to None. The random seed.
Call Args:
@@ -65,16 +71,21 @@
def __init__(
self,
p=0.1,
+ k=None,
seed=None,
):
super().__init__()
self.p = p
+ self.k = k
self.seed = seed
def get_next_token(self, probabilities):
- # Sort preds in descending order.
+ cutoff = tf.shape(probabilities)[1]
+ if self.k is not None:
+ # If `k` is set, only sample from top `k` tokens.
+ cutoff = tf.math.minimum(cutoff, self.k)
sorted_preds, sorted_indices = tf.math.top_k(
- probabilities, k=tf.shape(probabilities)[1], sorted=True
+ probabilities, k=cutoff, sorted=True
)
# Calculate cumulative probability distribution.
cumulative_probabilities = tf.math.cumsum(sorted_preds, axis=-1)
@@ -88,7 +99,7 @@
probabilities = tf.where(
shifted_keep_mask,
sorted_preds,
- tf.zeros(tf.shape(probabilities), dtype=sorted_preds.dtype),
+ tf.zeros(tf.shape(sorted_preds), dtype=sorted_preds.dtype),
)
sorted_next_token = tf.random.categorical(
tf.math.log(probabilities), 1, seed=self.seed
@@ -100,6 +111,7 @@
config.update(
{
"p": self.p,
+ "k": self.k,
"seed": self.seed,
}
)
|
{"golden_diff": "diff --git a/keras_nlp/samplers/top_p_sampler.py b/keras_nlp/samplers/top_p_sampler.py\n--- a/keras_nlp/samplers/top_p_sampler.py\n+++ b/keras_nlp/samplers/top_p_sampler.py\n@@ -34,6 +34,12 @@\n \n Args:\n p: float, the `p` value of top-p.\n+ k: int, defaults to None. If set, this argument defines a\n+ heuristic \"top-k\" cutoff applied before the \"top-p\" sampling. All\n+ logits not in the top `k` will be discarded, and the remaining\n+ logits will be sorted to find a cutoff point for `p`. Setting this\n+ arg can significantly speed sampling up by reducing the number\n+ of tokens to sort.\n seed: int, defaults to None. The random seed.\n \n Call Args:\n@@ -65,16 +71,21 @@\n def __init__(\n self,\n p=0.1,\n+ k=None,\n seed=None,\n ):\n super().__init__()\n self.p = p\n+ self.k = k\n self.seed = seed\n \n def get_next_token(self, probabilities):\n- # Sort preds in descending order.\n+ cutoff = tf.shape(probabilities)[1]\n+ if self.k is not None:\n+ # If `k` is set, only sample from top `k` tokens.\n+ cutoff = tf.math.minimum(cutoff, self.k)\n sorted_preds, sorted_indices = tf.math.top_k(\n- probabilities, k=tf.shape(probabilities)[1], sorted=True\n+ probabilities, k=cutoff, sorted=True\n )\n # Calculate cumulative probability distribution.\n cumulative_probabilities = tf.math.cumsum(sorted_preds, axis=-1)\n@@ -88,7 +99,7 @@\n probabilities = tf.where(\n shifted_keep_mask,\n sorted_preds,\n- tf.zeros(tf.shape(probabilities), dtype=sorted_preds.dtype),\n+ tf.zeros(tf.shape(sorted_preds), dtype=sorted_preds.dtype),\n )\n sorted_next_token = tf.random.categorical(\n tf.math.log(probabilities), 1, seed=self.seed\n@@ -100,6 +111,7 @@\n config.update(\n {\n \"p\": self.p,\n+ \"k\": self.k,\n \"seed\": self.seed,\n }\n )\n", "issue": "Investigate top-p performance\nCurrently our top-p (nucleus) sampler has the worst performance of our entire sampling offering, likely because of the sort operation we need to do for our softmax probabilities. See [these rough numbers](https://github.com/keras-team/keras-nlp/commit/41af01200c5d9898f381477b6a67a87b6951f844#diff-576c27794bae284efc278336a16d0649aaf599973a64d4b65dc67bc99bb7f000) for example, where top-p is slower than beam, despite needing much less computation on the model itself.\r\n\r\nWe should see if there is anything we can do to speed up top-p sampling with XLA (e.g. a more XLA friendly sort op).\n", "before_files": [{"content": "# Copyright 2023 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Top-p Sampler.\"\"\"\n\nimport tensorflow as tf\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.samplers.sampler import Sampler\nfrom keras_nlp.samplers.sampler import call_args_docstring\nfrom keras_nlp.utils.python_utils import format_docstring\n\n\n@format_docstring(call_args=call_args_docstring)\n@keras_nlp_export(\"keras_nlp.samplers.TopPSampler\")\nclass TopPSampler(Sampler):\n \"\"\"Top-P Sampler class.\n\n This sampler implements top-p search algorithm. 
Top-p search selects tokens\n from the smallest subset of output probabilities that sum to greater than\n `p`. Put in another way, top-p will first order token predictions by\n likelihood, and ignore all tokens after the cumulative probability of\n selected tokens exceeds `p`, then select a token from the remaining tokens.\n\n Args:\n p: float, the `p` value of top-p.\n seed: int, defaults to None. The random seed.\n\n Call Args:\n {{call_args}}\n\n Examples:\n ```python\n # Use a simple alphabet of lowercase characters with ids in range [0, 25].\n int_lookup = {i: chr(i + ord('a')) for i in range(26)}\n char_lookup = {v: k for k, v in int_lookup.items()}\n batch_size, length, vocab_size = 1, 12, len(int_lookup)\n\n def next(prompt, cache, index):\n hidden_states = tf.ones((batch_size, 10))\n # A uniform distribution over our alphabet.\n logits = tf.ones((batch_size, vocab_size))\n return logits, hidden_states, cache\n\n output = keras_nlp.samplers.TopPSampler(p=0.1)(\n next=next,\n prompt=tf.fill((batch_size, length,), char_lookup['z']),\n index=5,\n )\n print([\"\".join([int_lookup[i] for i in s]) for s in output.numpy()])\n # >>> ['zzzzzbabcccb']\n ```\n \"\"\"\n\n def __init__(\n self,\n p=0.1,\n seed=None,\n ):\n super().__init__()\n self.p = p\n self.seed = seed\n\n def get_next_token(self, probabilities):\n # Sort preds in descending order.\n sorted_preds, sorted_indices = tf.math.top_k(\n probabilities, k=tf.shape(probabilities)[1], sorted=True\n )\n # Calculate cumulative probability distribution.\n cumulative_probabilities = tf.math.cumsum(sorted_preds, axis=-1)\n # Create a mask for the tokens to keep.\n keep_mask = cumulative_probabilities <= self.p\n # Shift to include the last token that exceed p.\n shifted_keep_mask = tf.concat(\n [tf.ones_like(keep_mask[:, :1]), keep_mask[:, :-1]], axis=-1\n )\n # Filter out unmasked tokens and sample from filtered distribution.\n probabilities = tf.where(\n shifted_keep_mask,\n sorted_preds,\n tf.zeros(tf.shape(probabilities), dtype=sorted_preds.dtype),\n )\n sorted_next_token = tf.random.categorical(\n tf.math.log(probabilities), 1, seed=self.seed\n )\n return tf.gather_nd(sorted_indices, sorted_next_token, batch_dims=1)\n\n def get_config(self):\n config = super().get_config()\n config.update(\n {\n \"p\": self.p,\n \"seed\": self.seed,\n }\n )\n return config\n", "path": "keras_nlp/samplers/top_p_sampler.py"}]}
| 1,859 | 528 |
gh_patches_debug_10598
|
rasdani/github-patches
|
git_diff
|
privacyidea__privacyidea-2479
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow PIN change at WebUI
### Top-level intent
Use the pin change during validate when logging in to the webui
### Steps to reproduce
1. configure ``change-pin-on-first-use``
2. configure ``login_mode=privacyIDEA``
3. Login to webui with 2FA
4. The webui will not display any challenge
### Expected outcome
The user should be able to change his PIN
### Actual outcome
Nothing is displayed
I think the problem is that the input field is "ng-hidden" by some tokentype in the response. The PIN-change challenge is probably missing some fields in the JSON response, so the dialog is hidden.
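A minimal sketch of what a complete `multi_challenge` entry could look like (the `attributes` key is taken from the patch shown at the end of this entry; the serial, type and transaction-id values are made-up examples):

```python
# Hypothetical reply for a PIN-reset challenge; the WebUI hides the input
# dialog when expected keys such as "attributes" are missing from the entry.
reply_dict = {
    "multi_challenge": [
        {
            "transaction_id": "0123456789",       # example value
            "message": "Please enter a new PIN",
            "attributes": None,                   # explicitly present, even if empty
            "serial": "TOTP0001",                 # example value
            "type": "totp",                       # example value
        }
    ],
    "message": "Please enter a new PIN",
    "transaction_id": "0123456789",
}
```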
</issue>
<code>
[start of privacyidea/lib/challengeresponsedecorators.py]
1 # -*- coding: utf-8 -*-
2 #
3 # 2020-08-03 Cornelius Kölbel <[email protected]>
4 # Initial writeup
5 #
6 # License: AGPLv3
7 # contact: http://www.privacyidea.org
8 #
9 # This code is free software; you can redistribute it and/or
10 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
11 # License as published by the Free Software Foundation; either
12 # version 3 of the License, or any later version.
13 #
14 # This code is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNE7SS FOR A PARTICULAR PURPOSE. See the
17 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
18 #
19 # You should have received a copy of the GNU Affero General Public
20 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #
22 """
23 These are the decorator functions for generic challenge response mechanisms:
24
25 * PIN change
26
27 Currently the decorator is only tested in tests/test_lib_token.py
28 """
29 import logging
30
31 from privacyidea.lib.policy import Match
32 from privacyidea.lib.policy import ACTION, SCOPE, check_pin, SCOPE
33 from privacyidea.lib.config import get_from_config
34 from privacyidea.lib.crypto import pass_hash, verify_pass_hash, get_rand_digit_str
35 from privacyidea.models import Challenge
36 from privacyidea.lib.challenge import get_challenges
37 from privacyidea.lib import _
38
39
40 log = logging.getLogger(__name__)
41
42
43 SEED_LENGTH = 16
44
45
46 class CHALLENGE_TYPE(object):
47 PIN_RESET = "generic_pin_reset"
48
49
50 def _create_pin_reset_challenge(token_obj, message, challenge_data=None):
51 validity = int(get_from_config('DefaultChallengeValidityTime', 120))
52 validity = int(get_from_config('PinResetChallengeValidityTime', validity))
53 db_challenge = Challenge(token_obj.token.serial,
54 challenge=CHALLENGE_TYPE.PIN_RESET,
55 data=challenge_data,
56 validitytime=validity)
57 db_challenge.save()
58 token_obj.challenge_janitor()
59 reply_dict = {}
60 reply_dict["multi_challenge"] = [{"transaction_id": db_challenge.transaction_id,
61 "message": message,
62 "serial": token_obj.token.serial,
63 "type": token_obj.token.tokentype}]
64 reply_dict["message"] = message
65 reply_dict["messages"] = [message]
66 reply_dict["transaction_id"] = db_challenge.transaction_id
67 # TODO: This line is deprecated: Add the information for the old administrative triggerchallenge
68 reply_dict["transaction_ids"] = [db_challenge.transaction_id]
69
70 return reply_dict
71
72
73 def generic_challenge_response_reset_pin(wrapped_function, *args, **kwds):
74 """
75 Check if the authentication was successful, but if the token needs to reset
76 its PIN.
77
78 Conditions: To do so we check for "next_pin_change" in the tokeninfo data. This
79 is however easily done using token.is_pin_change().
80
81 Policies: A policy defines, if this PIN reset functionality should be active
82 at all. scope=AUTH, action=CHANGE_PIN_VIA_VALIDATE
83
84 args are:
85 :param tokenobject_list: The list of all the tokens of the user, that will be checked
86 :param passw: The password presented in the authentication. We need this for the PIN reset.
87
88 kwds are:
89 :param options: options dictionary containing g
90 :param user: The user_obj
91 """
92
93 # Before we call the wrapped function, we need to check, if we have a generic challenge
94 # for the given transaction_id and if the token serial matches a given token
95 options = kwds.get("options") or {}
96 user_obj = kwds.get("user")
97 transaction_id = options.get("transaction_id") or options.get("state")
98 if transaction_id:
99 challenges = get_challenges(transaction_id=transaction_id, challenge=CHALLENGE_TYPE.PIN_RESET)
100 if len(challenges) == 1:
101 challenge = challenges[0]
102 # check if challenge matches a token and if it is valid
103 token_obj = next(t for t in args[0] if t.token.serial == challenge.serial)
104 if token_obj:
105 # Then either verify the PIN or set the PIN the first time. The
106 # PIN from the 1st response is stored in challenge.data
107 if challenge.data:
108 # Verify the password
109 if verify_pass_hash(args[1], challenge.data):
110 g = options.get("g")
111 challenge.set_otp_status(True)
112 token_obj.challenge_janitor()
113 # Success, set new PIN and return success
114 token_obj.set_pin(args[1])
115 pinpol = Match.token(g, scope=SCOPE.ENROLL, action=ACTION.CHANGE_PIN_EVERY,
116 token_obj=token_obj).action_values(unique=True)
117 # Set a new next_pin_change
118 if pinpol:
119 # Set a new next pin change
120 token_obj.set_next_pin_change(diff=list(pinpol)[0])
121 else:
122 # Obviously the admin removed the policy for changing pins,
123 # so we will not require to change the PIN again
124 token_obj.del_tokeninfo("next_pin_change")
125 return True, {"message": "PIN successfully set.",
126 "serial": token_obj.token.serial}
127 else:
128 return False, {"serial": token_obj.token.serial,
129 "message": "PINs do not match"}
130 else:
131 # The PIN is presented the first time.
132 # Verify if the PIN adheres to the PIN policies. This is always in the normal user context
133 g = options.get("g")
134 g.logged_in_user = {"role": SCOPE.USER}
135 if user_obj:
136 # check_pin below originally works for logged in users, since only logged in users
137 # are allowed to change the pin. So we need to construct a logged_in_user object, otherwise
138 # check_pin would fail.
139 g.logged_in_user["username"] = user_obj.login
140 g.logged_in_user["realm"] = user_obj.realm
141 check_pin(g, args[1], token_obj.token.tokentype, user_obj)
142 # We need to ask for a 2nd time
143 challenge.set_otp_status(True)
144 seed = get_rand_digit_str(SEED_LENGTH)
145 reply_dict = _create_pin_reset_challenge(token_obj, _("Please enter the new PIN again"),
146 pass_hash(args[1]))
147 return False, reply_dict
148
149 success, reply_dict = wrapped_function(*args, **kwds)
150
151 # After a successful authentication, we might start the PIN change process
152 if success and reply_dict.get("pin_change"):
153 g = options.get("g")
154 # Determine the realm by the serial
155 serial = reply_dict.get("serial")
156 # The tokenlist can contain more than one token. So we get the matching token object
157 token_obj = next(t for t in args[0] if t.token.serial == serial)
158 if g and Match.token(g, scope=SCOPE.AUTH, action=ACTION.CHANGE_PIN_VIA_VALIDATE, token_obj=token_obj).any():
159 reply_dict = _create_pin_reset_challenge(token_obj, _("Please enter a new PIN"))
160 return False, reply_dict
161
162 return success, reply_dict
163
[end of privacyidea/lib/challengeresponsedecorators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/privacyidea/lib/challengeresponsedecorators.py b/privacyidea/lib/challengeresponsedecorators.py
--- a/privacyidea/lib/challengeresponsedecorators.py
+++ b/privacyidea/lib/challengeresponsedecorators.py
@@ -59,6 +59,7 @@
reply_dict = {}
reply_dict["multi_challenge"] = [{"transaction_id": db_challenge.transaction_id,
"message": message,
+ "attributes": None,
"serial": token_obj.token.serial,
"type": token_obj.token.tokentype}]
reply_dict["message"] = message
|
{"golden_diff": "diff --git a/privacyidea/lib/challengeresponsedecorators.py b/privacyidea/lib/challengeresponsedecorators.py\n--- a/privacyidea/lib/challengeresponsedecorators.py\n+++ b/privacyidea/lib/challengeresponsedecorators.py\n@@ -59,6 +59,7 @@\n reply_dict = {}\n reply_dict[\"multi_challenge\"] = [{\"transaction_id\": db_challenge.transaction_id,\n \"message\": message,\n+ \"attributes\": None,\n \"serial\": token_obj.token.serial,\n \"type\": token_obj.token.tokentype}]\n reply_dict[\"message\"] = message\n", "issue": "Allow PIN change at WebUI\n\r\n### Top-level intent\r\n\r\nUse the pin change during validate when logging in to the webui\r\n\r\n### Steps to reproduce\r\n\r\n1. configure ``change-pin-on-first-use``\r\n2. configure ``login_mode=privacyIDEA``\r\n3. Login to webui with 2FA\r\n4. The webui will not display any challenge\r\n\r\n### Expected outcome\r\n\r\nThe user should be able to change his PIN\r\n\r\n### Actual outcome\r\n\r\nNothing is displayed\r\n\r\nI think the problem is, that the input field is \"ng-hidden\" by some tokentype in the response. The PIN-change-challenge probably is missing some fields in the JSON response, so that dialog is hidden.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# 2020-08-03 Cornelius K\u00f6lbel <[email protected]>\n# Initial writeup\n#\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNE7SS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nThese are the decorator functions for generic challenge response mechanisms:\n\n* PIN change\n\nCurrently the decorator is only tested in tests/test_lib_token.py\n\"\"\"\nimport logging\n\nfrom privacyidea.lib.policy import Match\nfrom privacyidea.lib.policy import ACTION, SCOPE, check_pin, SCOPE\nfrom privacyidea.lib.config import get_from_config\nfrom privacyidea.lib.crypto import pass_hash, verify_pass_hash, get_rand_digit_str\nfrom privacyidea.models import Challenge\nfrom privacyidea.lib.challenge import get_challenges\nfrom privacyidea.lib import _\n\n\nlog = logging.getLogger(__name__)\n\n\nSEED_LENGTH = 16\n\n\nclass CHALLENGE_TYPE(object):\n PIN_RESET = \"generic_pin_reset\"\n\n\ndef _create_pin_reset_challenge(token_obj, message, challenge_data=None):\n validity = int(get_from_config('DefaultChallengeValidityTime', 120))\n validity = int(get_from_config('PinResetChallengeValidityTime', validity))\n db_challenge = Challenge(token_obj.token.serial,\n challenge=CHALLENGE_TYPE.PIN_RESET,\n data=challenge_data,\n validitytime=validity)\n db_challenge.save()\n token_obj.challenge_janitor()\n reply_dict = {}\n reply_dict[\"multi_challenge\"] = [{\"transaction_id\": db_challenge.transaction_id,\n \"message\": message,\n \"serial\": token_obj.token.serial,\n \"type\": token_obj.token.tokentype}]\n reply_dict[\"message\"] = message\n reply_dict[\"messages\"] = [message]\n reply_dict[\"transaction_id\"] = db_challenge.transaction_id\n # TODO: This line is deprecated: Add the information for the old administrative triggerchallenge\n reply_dict[\"transaction_ids\"] = [db_challenge.transaction_id]\n\n return reply_dict\n\n\ndef generic_challenge_response_reset_pin(wrapped_function, *args, **kwds):\n \"\"\"\n Check if the authentication was successful, but if the token needs to reset\n its PIN.\n\n Conditions: To do so we check for \"next_pin_change\" in the tokeninfo data. This\n is however easily done using token.is_pin_change().\n\n Policies: A policy defines, if this PIN reset functionality should be active\n at all. scope=AUTH, action=CHANGE_PIN_VIA_VALIDATE\n\n args are:\n :param tokenobject_list: The list of all the tokens of the user, that will be checked\n :param passw: The password presented in the authentication. We need this for the PIN reset.\n\n kwds are:\n :param options: options dictionary containing g\n :param user: The user_obj\n \"\"\"\n\n # Before we call the wrapped function, we need to check, if we have a generic challenge\n # for the given transaction_id and if the token serial matches a given token\n options = kwds.get(\"options\") or {}\n user_obj = kwds.get(\"user\")\n transaction_id = options.get(\"transaction_id\") or options.get(\"state\")\n if transaction_id:\n challenges = get_challenges(transaction_id=transaction_id, challenge=CHALLENGE_TYPE.PIN_RESET)\n if len(challenges) == 1:\n challenge = challenges[0]\n # check if challenge matches a token and if it is valid\n token_obj = next(t for t in args[0] if t.token.serial == challenge.serial)\n if token_obj:\n # Then either verify the PIN or set the PIN the first time. 
The\n # PIN from the 1st response is stored in challenge.data\n if challenge.data:\n # Verify the password\n if verify_pass_hash(args[1], challenge.data):\n g = options.get(\"g\")\n challenge.set_otp_status(True)\n token_obj.challenge_janitor()\n # Success, set new PIN and return success\n token_obj.set_pin(args[1])\n pinpol = Match.token(g, scope=SCOPE.ENROLL, action=ACTION.CHANGE_PIN_EVERY,\n token_obj=token_obj).action_values(unique=True)\n # Set a new next_pin_change\n if pinpol:\n # Set a new next pin change\n token_obj.set_next_pin_change(diff=list(pinpol)[0])\n else:\n # Obviously the admin removed the policy for changing pins,\n # so we will not require to change the PIN again\n token_obj.del_tokeninfo(\"next_pin_change\")\n return True, {\"message\": \"PIN successfully set.\",\n \"serial\": token_obj.token.serial}\n else:\n return False, {\"serial\": token_obj.token.serial,\n \"message\": \"PINs do not match\"}\n else:\n # The PIN is presented the first time.\n # Verify if the PIN adheres to the PIN policies. This is always in the normal user context\n g = options.get(\"g\")\n g.logged_in_user = {\"role\": SCOPE.USER}\n if user_obj:\n # check_pin below originally works for logged in users, since only logged in users\n # are allowed to change the pin. So we need to construct a logged_in_user object, otherwise\n # check_pin would fail.\n g.logged_in_user[\"username\"] = user_obj.login\n g.logged_in_user[\"realm\"] = user_obj.realm\n check_pin(g, args[1], token_obj.token.tokentype, user_obj)\n # We need to ask for a 2nd time\n challenge.set_otp_status(True)\n seed = get_rand_digit_str(SEED_LENGTH)\n reply_dict = _create_pin_reset_challenge(token_obj, _(\"Please enter the new PIN again\"),\n pass_hash(args[1]))\n return False, reply_dict\n\n success, reply_dict = wrapped_function(*args, **kwds)\n\n # After a successful authentication, we might start the PIN change process\n if success and reply_dict.get(\"pin_change\"):\n g = options.get(\"g\")\n # Determine the realm by the serial\n serial = reply_dict.get(\"serial\")\n # The tokenlist can contain more than one token. So we get the matching token object\n token_obj = next(t for t in args[0] if t.token.serial == serial)\n if g and Match.token(g, scope=SCOPE.AUTH, action=ACTION.CHANGE_PIN_VIA_VALIDATE, token_obj=token_obj).any():\n reply_dict = _create_pin_reset_challenge(token_obj, _(\"Please enter a new PIN\"))\n return False, reply_dict\n\n return success, reply_dict\n", "path": "privacyidea/lib/challengeresponsedecorators.py"}]}
| 2,659 | 145 |
gh_patches_debug_5590
|
rasdani/github-patches
|
git_diff
|
google__timesketch-1910
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sigma analyzer creates saved search and story even with 0 results
The Sigma analyzer will create a story and saved search even if there are no results from the analyzer.
Fix: Only create the saved search and story if any events were tagged.
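A minimal sketch of that guard, written as a standalone helper (the name `should_create_sigma_artifacts` is made up for illustration; the analyzer itself tracks per-rule match counts in a dict like the one used below):

```python
def should_create_sigma_artifacts(tags_applied: dict) -> bool:
    """Return True only if at least one event was tagged by any Sigma rule."""
    return sum(tags_applied.values()) > 0


# No rule matched anything: skip the saved search and the story.
assert should_create_sigma_artifacts({"rule_a.yml": 0, "rule_b.yml": 0}) is False
# At least one event tagged: create them.
assert should_create_sigma_artifacts({"rule_a.yml": 3, "rule_b.yml": 0}) is True
```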
</issue>
<code>
[start of timesketch/lib/analyzers/sigma_tagger.py]
1 """Index analyzer plugin for sigma."""
2 from __future__ import unicode_literals
3
4 import logging
5 import time
6 import elasticsearch
7
8 from flask import current_app
9
10 from timesketch.lib.analyzers import utils
11
12 from timesketch.lib.analyzers import interface
13 from timesketch.lib.analyzers import manager
14 import timesketch.lib.sigma_util as ts_sigma_lib
15
16
17 logger = logging.getLogger('timesketch.analyzers.sigma_tagger')
18
19
20 class SigmaPlugin(interface.BaseAnalyzer):
21 """Analyzer for Sigma."""
22
23 NAME = 'sigma'
24 DISPLAY_NAME = 'Sigma'
25 DESCRIPTION = 'Run pre-defined Sigma rules and tag matching events'
26
27 def run_sigma_rule(self, query, rule_name, tag_list = None):
28 """Runs a sigma rule and applies the appropriate tags.
29
30 Args:
31 query: elastic search query for events to tag.
32 rule_name: rule_name to apply to matching events.
33 tag_list: a list of additional tags to be added to the event(s)
34
35 Returns:
36 int: number of events tagged.
37 """
38 return_fields = []
39 tagged_events_counter = 0
40 events = self.event_stream(
41 query_string=query, return_fields=return_fields)
42 for event in events:
43 ts_sigma_rules = event.source.get('ts_sigma_rule', [])
44 ts_sigma_rules.append(rule_name)
45 event.add_attributes({'ts_sigma_rule': list(set(ts_sigma_rules))})
46 ts_ttp = event.source.get('ts_ttp', [])
47 for tag in tag_list:
48 # special handling for sigma tags that TS considers TTPS
49 # https://car.mitre.org and https://attack.mitre.org
50 if tag.startswith(('attack.', 'car.')):
51 ts_ttp.append(tag)
52 tag_list.remove(tag)
53 event.add_tags(tag_list)
54 if len(ts_ttp) > 0:
55 event.add_attributes({'ts_ttp': list(set(ts_ttp))})
56 event.commit()
57 tagged_events_counter += 1
58 return tagged_events_counter
59
60 def run(self):
61 """Entry point for the analyzer.
62
63 Returns:
64 String with summary of the analyzer result.
65 """
66
67 tags_applied = {}
68 sigma_rule_counter = 0
69 sigma_rules = ts_sigma_lib.get_all_sigma_rules()
70 if sigma_rules is None:
71 logger.error('No Sigma rules found. Check SIGMA_RULES_FOLDERS')
72 problem_strings = []
73 output_strings = []
74
75 for rule in sigma_rules:
76 tags_applied[rule.get('file_name')] = 0
77 try:
78 sigma_rule_counter += 1
79 tagged_events_counter = self.run_sigma_rule(
80 rule.get('es_query'), rule.get('file_name'),
81 tag_list=rule.get('tags'))
82 tags_applied[rule.get('file_name')] += tagged_events_counter
83 if sigma_rule_counter % 10 == 0:
84 logger.debug('Rule {0:d}/{1:d}'.format(
85 sigma_rule_counter, len(sigma_rules)))
86 except elasticsearch.TransportError as e:
87 logger.error(
88 'Timeout executing search for {0:s}: '
89 '{1!s} waiting for 10 seconds'.format(
90 rule.get('file_name'), e), exc_info=True)
91 # this is caused by too many ES queries in short time range
92 # TODO: https://github.com/google/timesketch/issues/1782
93 sleep_time = current_app.config.get(
94 'SIGMA_TAG_DELAY', 15)
95 time.sleep(sleep_time)
96 tagged_events_counter = self.run_sigma_rule(
97 rule.get('es_query'), rule.get('file_name'),
98 tag_list=rule.get('tags'))
99 tags_applied[rule.get('file_name')] += tagged_events_counter
100 # Wide exception handling since there are multiple exceptions that
101 # can be raised by the underlying sigma library.
102 except: # pylint: disable=bare-except
103 logger.error(
104 'Problem with rule in file {0:s}: '.format(
105 rule.get('file_name')), exc_info=True)
106 problem_strings.append('* {0:s}'.format(
107 rule.get('file_name')))
108 continue
109
110 total_tagged_events = sum(tags_applied.values())
111 output_strings.append('Applied {0:d} tags'.format(total_tagged_events))
112
113 if sigma_rule_counter > 0:
114 self.add_sigma_match_view(sigma_rule_counter)
115
116 if len(problem_strings) > 0:
117 output_strings.append('Problematic rules:')
118 output_strings.extend(problem_strings)
119
120 return '\n'.join(output_strings)
121
122 def add_sigma_match_view(self, sigma_rule_counter):
123 """Adds a view with the top 20 matching rules.
124
125 Args:
126 sigma_rule_counter number of matching rules
127
128 """
129 view = self.sketch.add_view(
130 view_name='Sigma Rule matches', analyzer_name=self.NAME,
131 query_string='tag:"sigma*"')
132 agg_params = {
133 'field': 'tag',
134 'limit': 20,
135 'index': [self.timeline_id],
136 }
137 agg_obj = self.sketch.add_aggregation(
138 name='Top 20 Sigma tags', agg_name='field_bucket',
139 agg_params=agg_params, view_id=view.id, chart_type='hbarchart',
140 description='Created by the Sigma analyzer')
141
142 story = self.sketch.add_story('Sigma Rule hits')
143 story.add_text(
144 utils.SIGMA_STORY_HEADER, skip_if_exists=True)
145
146 story.add_text(
147 '## Sigma Analyzer.\n\nThe Sigma '
148 'analyzer takes Events and matches them with Sigma rules.'
149 'In this timeline the analyzer discovered {0:d} '
150 'Sigma tags.\n\nThis is a summary of '
151 'it\'s findings.'.format(sigma_rule_counter))
152 story.add_text(
153 'The top 20 most commonly discovered tags were:')
154 story.add_aggregation(agg_obj)
155 story.add_text(
156 'And an overview of all the discovered search terms:')
157 story.add_view(view)
158
159
160 class RulesSigmaPlugin(SigmaPlugin):
161 """Sigma plugin to run rules."""
162
163 NAME = 'sigma'
164
165 manager.AnalysisManager.register_analyzer(RulesSigmaPlugin)
166
[end of timesketch/lib/analyzers/sigma_tagger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/timesketch/lib/analyzers/sigma_tagger.py b/timesketch/lib/analyzers/sigma_tagger.py
--- a/timesketch/lib/analyzers/sigma_tagger.py
+++ b/timesketch/lib/analyzers/sigma_tagger.py
@@ -110,7 +110,7 @@
total_tagged_events = sum(tags_applied.values())
output_strings.append('Applied {0:d} tags'.format(total_tagged_events))
- if sigma_rule_counter > 0:
+ if total_tagged_events > 0:
self.add_sigma_match_view(sigma_rule_counter)
if len(problem_strings) > 0:
|
{"golden_diff": "diff --git a/timesketch/lib/analyzers/sigma_tagger.py b/timesketch/lib/analyzers/sigma_tagger.py\n--- a/timesketch/lib/analyzers/sigma_tagger.py\n+++ b/timesketch/lib/analyzers/sigma_tagger.py\n@@ -110,7 +110,7 @@\n total_tagged_events = sum(tags_applied.values())\n output_strings.append('Applied {0:d} tags'.format(total_tagged_events))\n \n- if sigma_rule_counter > 0:\n+ if total_tagged_events > 0:\n self.add_sigma_match_view(sigma_rule_counter)\n \n if len(problem_strings) > 0:\n", "issue": "Sigma analyzer creates saved search and story even with 0 results\nThe Sigma analyzer will create a story and saved search even if there are no results from the analyzer.\r\n\r\nFix: Only create saved search and story if there was any events tagged.\n", "before_files": [{"content": "\"\"\"Index analyzer plugin for sigma.\"\"\"\nfrom __future__ import unicode_literals\n\nimport logging\nimport time\nimport elasticsearch\n\nfrom flask import current_app\n\nfrom timesketch.lib.analyzers import utils\n\nfrom timesketch.lib.analyzers import interface\nfrom timesketch.lib.analyzers import manager\nimport timesketch.lib.sigma_util as ts_sigma_lib\n\n\nlogger = logging.getLogger('timesketch.analyzers.sigma_tagger')\n\n\nclass SigmaPlugin(interface.BaseAnalyzer):\n \"\"\"Analyzer for Sigma.\"\"\"\n\n NAME = 'sigma'\n DISPLAY_NAME = 'Sigma'\n DESCRIPTION = 'Run pre-defined Sigma rules and tag matching events'\n\n def run_sigma_rule(self, query, rule_name, tag_list = None):\n \"\"\"Runs a sigma rule and applies the appropriate tags.\n\n Args:\n query: elastic search query for events to tag.\n rule_name: rule_name to apply to matching events.\n tag_list: a list of additional tags to be added to the event(s)\n\n Returns:\n int: number of events tagged.\n \"\"\"\n return_fields = []\n tagged_events_counter = 0\n events = self.event_stream(\n query_string=query, return_fields=return_fields)\n for event in events:\n ts_sigma_rules = event.source.get('ts_sigma_rule', [])\n ts_sigma_rules.append(rule_name)\n event.add_attributes({'ts_sigma_rule': list(set(ts_sigma_rules))})\n ts_ttp = event.source.get('ts_ttp', [])\n for tag in tag_list:\n # special handling for sigma tags that TS considers TTPS\n # https://car.mitre.org and https://attack.mitre.org\n if tag.startswith(('attack.', 'car.')):\n ts_ttp.append(tag)\n tag_list.remove(tag)\n event.add_tags(tag_list)\n if len(ts_ttp) > 0:\n event.add_attributes({'ts_ttp': list(set(ts_ttp))})\n event.commit()\n tagged_events_counter += 1\n return tagged_events_counter\n\n def run(self):\n \"\"\"Entry point for the analyzer.\n\n Returns:\n String with summary of the analyzer result.\n \"\"\"\n\n tags_applied = {}\n sigma_rule_counter = 0\n sigma_rules = ts_sigma_lib.get_all_sigma_rules()\n if sigma_rules is None:\n logger.error('No Sigma rules found. 
Check SIGMA_RULES_FOLDERS')\n problem_strings = []\n output_strings = []\n\n for rule in sigma_rules:\n tags_applied[rule.get('file_name')] = 0\n try:\n sigma_rule_counter += 1\n tagged_events_counter = self.run_sigma_rule(\n rule.get('es_query'), rule.get('file_name'),\n tag_list=rule.get('tags'))\n tags_applied[rule.get('file_name')] += tagged_events_counter\n if sigma_rule_counter % 10 == 0:\n logger.debug('Rule {0:d}/{1:d}'.format(\n sigma_rule_counter, len(sigma_rules)))\n except elasticsearch.TransportError as e:\n logger.error(\n 'Timeout executing search for {0:s}: '\n '{1!s} waiting for 10 seconds'.format(\n rule.get('file_name'), e), exc_info=True)\n # this is caused by too many ES queries in short time range\n # TODO: https://github.com/google/timesketch/issues/1782\n sleep_time = current_app.config.get(\n 'SIGMA_TAG_DELAY', 15)\n time.sleep(sleep_time)\n tagged_events_counter = self.run_sigma_rule(\n rule.get('es_query'), rule.get('file_name'),\n tag_list=rule.get('tags'))\n tags_applied[rule.get('file_name')] += tagged_events_counter\n # Wide exception handling since there are multiple exceptions that\n # can be raised by the underlying sigma library.\n except: # pylint: disable=bare-except\n logger.error(\n 'Problem with rule in file {0:s}: '.format(\n rule.get('file_name')), exc_info=True)\n problem_strings.append('* {0:s}'.format(\n rule.get('file_name')))\n continue\n\n total_tagged_events = sum(tags_applied.values())\n output_strings.append('Applied {0:d} tags'.format(total_tagged_events))\n\n if sigma_rule_counter > 0:\n self.add_sigma_match_view(sigma_rule_counter)\n\n if len(problem_strings) > 0:\n output_strings.append('Problematic rules:')\n output_strings.extend(problem_strings)\n\n return '\\n'.join(output_strings)\n\n def add_sigma_match_view(self, sigma_rule_counter):\n \"\"\"Adds a view with the top 20 matching rules.\n\n Args:\n sigma_rule_counter number of matching rules\n\n \"\"\"\n view = self.sketch.add_view(\n view_name='Sigma Rule matches', analyzer_name=self.NAME,\n query_string='tag:\"sigma*\"')\n agg_params = {\n 'field': 'tag',\n 'limit': 20,\n 'index': [self.timeline_id],\n }\n agg_obj = self.sketch.add_aggregation(\n name='Top 20 Sigma tags', agg_name='field_bucket',\n agg_params=agg_params, view_id=view.id, chart_type='hbarchart',\n description='Created by the Sigma analyzer')\n\n story = self.sketch.add_story('Sigma Rule hits')\n story.add_text(\n utils.SIGMA_STORY_HEADER, skip_if_exists=True)\n\n story.add_text(\n '## Sigma Analyzer.\\n\\nThe Sigma '\n 'analyzer takes Events and matches them with Sigma rules.'\n 'In this timeline the analyzer discovered {0:d} '\n 'Sigma tags.\\n\\nThis is a summary of '\n 'it\\'s findings.'.format(sigma_rule_counter))\n story.add_text(\n 'The top 20 most commonly discovered tags were:')\n story.add_aggregation(agg_obj)\n story.add_text(\n 'And an overview of all the discovered search terms:')\n story.add_view(view)\n\n\nclass RulesSigmaPlugin(SigmaPlugin):\n \"\"\"Sigma plugin to run rules.\"\"\"\n\n NAME = 'sigma'\n\nmanager.AnalysisManager.register_analyzer(RulesSigmaPlugin)\n", "path": "timesketch/lib/analyzers/sigma_tagger.py"}]}
| 2,310 | 151 |
gh_patches_debug_1796
|
rasdani/github-patches
|
git_diff
|
xonsh__xonsh-3121
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Syntax errors when using bashisms xontrib
At least making assignment statements and entering function definitions at the prompt are
broken when using the included `bashisms` xontrib and prompt toolkit.
## xonfig
```
$ xonfig
+------------------+---------------------+
| xonsh | 0.8.12 |
| Git SHA | 3edd2df7 |
| Commit Date | May 9 07:50:20 2019 |
| Python | 3.7.3 |
| PLY | 3.11 |
| have readline | True |
| prompt toolkit | 2.0.9 |
| shell type | prompt_toolkit2 |
| pygments | 2.3.1 |
| on posix | True |
| on linux | True |
| distro | ubuntu |
| on darwin | False |
| on windows | False |
| on cygwin | False |
| on msys2 | False |
| is superuser | False |
| default encoding | utf-8 |
| xonsh encoding | utf-8 |
| encoding errors | surrogateescape |
+------------------+---------------------+
```
## Expected Behavior
I should be able to create new variables and function definitions, no sweat.
## Current Behavior
Things work fine until I start the shell with the xontrib `bashisms` loaded, or after I start the shell without it and load it later.
Assignment statements appear to go into multiline mode after hitting the return
key, and after a second return key, a stacktrace appears. The assignment
statement does not succeed.
Function definitions at the prompt result in an IndexError and the function is
not defined afterwards.
### Traceback (if applicable)
#### Assignment
```
$ a = 1
$ a
1
$ xontrib load bashisms
$ b = 1
..................................................
Traceback (most recent call last):
File "/home/eddie/source/xonsh/xonsh/ptk2/shell.py", line 173, in _push
code = self.execer.compile(src, mode="single", glbs=self.ctx, locs=None)
File "/home/eddie/source/xonsh/xonsh/execer.py", line 135, in compile
tree = self.parse(input, ctx, mode=mode, filename=filename, transform=transform)
File "/home/eddie/source/xonsh/xonsh/execer.py", line 96, in parse
tree, input = self._parse_ctx_free(input, mode=mode, filename=filename)
File "/home/eddie/source/xonsh/xonsh/execer.py", line 235, in _parse_ctx_free
raise original_error from None
File "/home/eddie/source/xonsh/xonsh/execer.py", line 220, in _parse_ctx_free
debug_level=(self.debug_level > 2),
File "/home/eddie/source/xonsh/xonsh/parsers/base.py", line 543, in parse
tree = self.parser.parse(input=s, lexer=self.lexer, debug=debug_level)
File "/home/eddie/source/xonsh/xonsh/ply/ply/yacc.py", line 335, in parse
return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
File "/home/eddie/source/xonsh/xonsh/ply/ply/yacc.py", line 1203, in parseopt_notrack
tok = call_errorfunc(self.errorfunc, errtoken, self)
File "/home/eddie/source/xonsh/xonsh/ply/ply/yacc.py", line 194, in call_errorfunc
r = errorfunc(token)
File "/home/eddie/source/xonsh/xonsh/parsers/base.py", line 3285, in p_error
self._parse_error("no further code", None)
File "/home/eddie/source/xonsh/xonsh/parsers/base.py", line 676, in _parse_error
raise err
File "<string>", line None
SyntaxError: None: no further code
$ b
Traceback (most recent call last):
File "/home/eddie/source/xonsh/xonsh/built_ins.py", line 560, in _run_binary
p = self.cls(self.cmd, bufsize=bufsize, **kwargs)
File "/home/eddie/source/xonsh/xonsh/proc.py", line 583, in __init__
*args, stdin=stdin, stdout=stdout, stderr=stderr, **kwargs
File "/home/eddie/.pyenv/versions/3.7.3/lib/python3.7/subprocess.py", line 775, in __init__
restore_signals, start_new_session)
File "/home/eddie/.pyenv/versions/3.7.3/lib/python3.7/subprocess.py", line 1522, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: 'b': 'b'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/eddie/source/xonsh/xonsh/proc.py", line 1856, in __init__
proc = spec.run(pipeline_group=pipeline_group)
File "/home/eddie/source/xonsh/xonsh/built_ins.py", line 549, in run
p = self._run_binary(kwargs)
File "/home/eddie/source/xonsh/xonsh/built_ins.py", line 571, in _run_binary
raise XonshError(e)
xonsh.tools.XonshError: xonsh: subprocess mode: command not found: b
Did you mean one of the following?
bg: Alias
e: Alias
z: Alias
bc: Command (/usr/bin/bc)
ab: Command (/usr/bin/ab)
b: command not found
```
#### Function definition
```
$ def f():
.................................................. pass
..................................................
$ f
<function __main__.f>
$ xontrib load bashisms
$ def g():
.................................................. pass
..................................................
Traceback (most recent call last):
File "/home/eddie/source/xonsh/xonsh/execer.py", line 220, in _parse_ctx_free
debug_level=(self.debug_level > 2),
File "/home/eddie/source/xonsh/xonsh/parsers/base.py", line 543, in parse
tree = self.parser.parse(input=s, lexer=self.lexer, debug=debug_level)
File "/home/eddie/source/xonsh/xonsh/ply/ply/yacc.py", line 335, in parse
return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
File "/home/eddie/source/xonsh/xonsh/ply/ply/yacc.py", line 1203, in parseopt_notrack
tok = call_errorfunc(self.errorfunc, errtoken, self)
File "/home/eddie/source/xonsh/xonsh/ply/ply/yacc.py", line 194, in call_errorfunc
r = errorfunc(token)
File "/home/eddie/source/xonsh/xonsh/parsers/base.py", line 3295, in p_error
self._parse_error(msg, self.currloc(lineno=p.lineno, column=p.lexpos))
File "/home/eddie/source/xonsh/xonsh/parsers/base.py", line 676, in _parse_error
raise err
File "<string>", line None
SyntaxError: /home/eddie/source/xonsh/xontrib/prompt_ret_code.xsh:3:0: ('code: ',)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/eddie/source/xonsh/xonsh/ptk2/shell.py", line 173, in _push
code = self.execer.compile(src, mode="single", glbs=self.ctx, locs=None)
File "/home/eddie/source/xonsh/xonsh/execer.py", line 135, in compile
tree = self.parse(input, ctx, mode=mode, filename=filename, transform=transform)
File "/home/eddie/source/xonsh/xonsh/execer.py", line 96, in parse
tree, input = self._parse_ctx_free(input, mode=mode, filename=filename)
File "/home/eddie/source/xonsh/xonsh/execer.py", line 242, in _parse_ctx_free
line, nlogical, idx = get_logical_line(lines, idx)
File "/home/eddie/source/xonsh/xonsh/tools.py", line 544, in get_logical_line
line = lines[idx]
IndexError: list index out of range
$ g
Traceback (most recent call last):
File "/home/eddie/source/xonsh/xonsh/built_ins.py", line 560, in _run_binary
p = self.cls(self.cmd, bufsize=bufsize, **kwargs)
File "/home/eddie/source/xonsh/xonsh/proc.py", line 583, in __init__
*args, stdin=stdin, stdout=stdout, stderr=stderr, **kwargs
File "/home/eddie/.pyenv/versions/3.7.3/lib/python3.7/subprocess.py", line 775, in __init__
restore_signals, start_new_session)
File "/home/eddie/.pyenv/versions/3.7.3/lib/python3.7/subprocess.py", line 1522, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: 'g': 'g'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/eddie/source/xonsh/xonsh/proc.py", line 1856, in __init__
proc = spec.run(pipeline_group=pipeline_group)
File "/home/eddie/source/xonsh/xonsh/built_ins.py", line 549, in run
p = self._run_binary(kwargs)
File "/home/eddie/source/xonsh/xonsh/built_ins.py", line 571, in _run_binary
raise XonshError(e)
xonsh.tools.XonshError: xonsh: subprocess mode: command not found: g
Did you mean one of the following?
fg: Alias
bg: Alias
e: Alias
z: Alias
ag: Command (/usr/local/bin/ag)
g: command not found
```
## Steps to Reproduce
```
# start w/o bashisms loaded
$ a = 1
$ xontrib load bashisms
$ a = 1
```
or
```
# start w/o bashisms loaded
$ xontrib load bashisms
$ def f():
.................................................. pass
..................................................
```
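For context, the `on_transform_command` hook in the xontrib (see `xontrib/bashisms.py` below) returns `cmd.strip()`. A small, self-contained sketch of what that does to a multiline function definition typed at the prompt (this only illustrates the transform itself, not the xonsh parser; the helper name is made up):

```python
import re


def bash_preproc_stripped(cmd: str) -> str:
    # History expansion is irrelevant here; the point is the trailing .strip().
    return re.sub(r"!([!$^*]|[\w]+)", lambda m: m.group(0), cmd.strip())


print(repr(bash_preproc_stripped("def g():\n    pass\n\n")))
# 'def g():\n    pass'  -- the blank line that normally ends the block is gone
```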
</issue>
<code>
[start of xontrib/bashisms.py]
1 """Bash-like interface extensions for xonsh."""
2 import shlex
3 import sys
4 import re
5
6 from prompt_toolkit.keys import Keys
7 from prompt_toolkit.filters import Condition, EmacsInsertMode, ViInsertMode
8
9 from xonsh.platform import ptk_shell_type
10
11 __all__ = ()
12
13
14 @events.on_transform_command
15 def bash_preproc(cmd, **kw):
16 bang_previous = {
17 "!": lambda x: x,
18 "$": lambda x: shlex.split(x)[-1],
19 "^": lambda x: shlex.split(x)[0],
20 "*": lambda x: " ".join(shlex.split(x)[1:]),
21 }
22
23 def replace_bang(m):
24 arg = m.group(1)
25 inputs = __xonsh__.history.inps
26
27 # Dissect the previous command.
28 if arg in bang_previous:
29 try:
30 return bang_previous[arg](inputs[-1])
31 except IndexError:
32 print("xonsh: no history for '!{}'".format(arg))
33 return ""
34
35 # Look back in history for a matching command.
36 else:
37 try:
38 return next((x for x in reversed(inputs) if x.startswith(arg)))
39 except StopIteration:
40 print("xonsh: no previous commands match '!{}'".format(arg))
41 return ""
42
43 return re.sub(r"!([!$^*]|[\w]+)", replace_bang, cmd.strip())
44
45
46 @events.on_ptk_create
47 def custom_keybindings(bindings, **kw):
48 if ptk_shell_type() == "prompt_toolkit2":
49 handler = bindings.add
50
51 @Condition
52 def last_command_exists():
53 return len(__xonsh__.history) > 0
54
55 else:
56 handler = bindings.registry.add_binding
57
58 @Condition
59 def last_command_exists(cli):
60 return len(__xonsh__.history) > 0
61
62 insert_mode = ViInsertMode() | EmacsInsertMode()
63
64 @handler(Keys.Escape, ".", filter=last_command_exists & insert_mode)
65 def recall_last_arg(event):
66 arg = __xonsh__.history[-1].cmd.split()[-1]
67 event.current_buffer.insert_text(arg)
68
69
70 def alias(args, stdin=None):
71 ret = 0
72
73 if args:
74 for arg in args:
75 if "=" in arg:
76 # shlex.split to remove quotes, e.g. "foo='echo hey'" into
77 # "foo=echo hey"
78 name, cmd = shlex.split(arg)[0].split("=", 1)
79 aliases[name] = shlex.split(cmd)
80 elif arg in aliases:
81 print("{}={}".format(arg, aliases[arg]))
82 else:
83 print("alias: {}: not found".format(arg), file=sys.stderr)
84 ret = 1
85 else:
86 for alias, cmd in aliases.items():
87 print("{}={}".format(alias, cmd))
88
89 return ret
90
91
92 aliases["alias"] = alias
93
[end of xontrib/bashisms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/xontrib/bashisms.py b/xontrib/bashisms.py
--- a/xontrib/bashisms.py
+++ b/xontrib/bashisms.py
@@ -40,7 +40,7 @@
print("xonsh: no previous commands match '!{}'".format(arg))
return ""
- return re.sub(r"!([!$^*]|[\w]+)", replace_bang, cmd.strip())
+ return re.sub(r"!([!$^*]|[\w]+)", replace_bang, cmd)
@events.on_ptk_create
|
{"golden_diff": "diff --git a/xontrib/bashisms.py b/xontrib/bashisms.py\n--- a/xontrib/bashisms.py\n+++ b/xontrib/bashisms.py\n@@ -40,7 +40,7 @@\n print(\"xonsh: no previous commands match '!{}'\".format(arg))\n return \"\"\n \n- return re.sub(r\"!([!$^*]|[\\w]+)\", replace_bang, cmd.strip())\n+ return re.sub(r\"!([!$^*]|[\\w]+)\", replace_bang, cmd)\n \n \n @events.on_ptk_create\n", "issue": "Syntax errors when using bashisms xontrib\nAt least making assignment statements and entering function definitions at the prompt is \r\nbroken when using the included `bashisms` xontrib and prompt toolkit.\r\n\r\n## xonfig\r\n\r\n```\r\n$ xonfig \r\n+------------------+---------------------+\r\n| xonsh | 0.8.12 |\r\n| Git SHA | 3edd2df7 |\r\n| Commit Date | May 9 07:50:20 2019 |\r\n| Python | 3.7.3 |\r\n| PLY | 3.11 |\r\n| have readline | True |\r\n| prompt toolkit | 2.0.9 |\r\n| shell type | prompt_toolkit2 |\r\n| pygments | 2.3.1 |\r\n| on posix | True |\r\n| on linux | True |\r\n| distro | ubuntu |\r\n| on darwin | False |\r\n| on windows | False |\r\n| on cygwin | False |\r\n| on msys2 | False |\r\n| is superuser | False |\r\n| default encoding | utf-8 |\r\n| xonsh encoding | utf-8 |\r\n| encoding errors | surrogateescape |\r\n+------------------+---------------------+\r\n\r\n```\r\n\r\n## Expected Behavior\r\nI should be able to create new variables and function definitions, no sweat.\r\n\r\n## Current Behavior\r\n\r\nThings work fine until I start the shell with the xontrib `bashisms` loaded, or after I start the shell without it and load it later.\r\n\r\nAssignment statements appear to go into multiline mode after hitting the return\r\nkey, and after a second return key, a stacktrace appears. The assignment\r\nstatement does not succeed.\r\n\r\nFunction definitions at the prompt result in an IndexError and the function is\r\nnot defined afterwards.\r\n\r\n### Traceback (if applicable)\r\n\r\n#### Assignment\r\n\r\n```\r\n$ a = 1 \r\n$ a \r\n1\r\n$ xontrib load bashisms \r\n$ b = 1 \r\n.................................................. 
\r\nTraceback (most recent call last):\r\n File \"/home/eddie/source/xonsh/xonsh/ptk2/shell.py\", line 173, in _push\r\n code = self.execer.compile(src, mode=\"single\", glbs=self.ctx, locs=None)\r\n File \"/home/eddie/source/xonsh/xonsh/execer.py\", line 135, in compile\r\n tree = self.parse(input, ctx, mode=mode, filename=filename, transform=transform)\r\n File \"/home/eddie/source/xonsh/xonsh/execer.py\", line 96, in parse\r\n tree, input = self._parse_ctx_free(input, mode=mode, filename=filename)\r\n File \"/home/eddie/source/xonsh/xonsh/execer.py\", line 235, in _parse_ctx_free\r\n raise original_error from None\r\n File \"/home/eddie/source/xonsh/xonsh/execer.py\", line 220, in _parse_ctx_free\r\n debug_level=(self.debug_level > 2),\r\n File \"/home/eddie/source/xonsh/xonsh/parsers/base.py\", line 543, in parse\r\n tree = self.parser.parse(input=s, lexer=self.lexer, debug=debug_level)\r\n File \"/home/eddie/source/xonsh/xonsh/ply/ply/yacc.py\", line 335, in parse\r\n return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)\r\n File \"/home/eddie/source/xonsh/xonsh/ply/ply/yacc.py\", line 1203, in parseopt_notrack\r\n tok = call_errorfunc(self.errorfunc, errtoken, self)\r\n File \"/home/eddie/source/xonsh/xonsh/ply/ply/yacc.py\", line 194, in call_errorfunc\r\n r = errorfunc(token)\r\n File \"/home/eddie/source/xonsh/xonsh/parsers/base.py\", line 3285, in p_error\r\n self._parse_error(\"no further code\", None)\r\n File \"/home/eddie/source/xonsh/xonsh/parsers/base.py\", line 676, in _parse_error\r\n raise err\r\n File \"<string>\", line None\r\nSyntaxError: None: no further code\r\n$ b \r\nTraceback (most recent call last):\r\n File \"/home/eddie/source/xonsh/xonsh/built_ins.py\", line 560, in _run_binary\r\n p = self.cls(self.cmd, bufsize=bufsize, **kwargs)\r\n File \"/home/eddie/source/xonsh/xonsh/proc.py\", line 583, in __init__\r\n *args, stdin=stdin, stdout=stdout, stderr=stderr, **kwargs\r\n File \"/home/eddie/.pyenv/versions/3.7.3/lib/python3.7/subprocess.py\", line 775, in __init__\r\n restore_signals, start_new_session)\r\n File \"/home/eddie/.pyenv/versions/3.7.3/lib/python3.7/subprocess.py\", line 1522, in _execute_child\r\n raise child_exception_type(errno_num, err_msg, err_filename)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'b': 'b'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/eddie/source/xonsh/xonsh/proc.py\", line 1856, in __init__\r\n proc = spec.run(pipeline_group=pipeline_group)\r\n File \"/home/eddie/source/xonsh/xonsh/built_ins.py\", line 549, in run\r\n p = self._run_binary(kwargs)\r\n File \"/home/eddie/source/xonsh/xonsh/built_ins.py\", line 571, in _run_binary\r\n raise XonshError(e)\r\nxonsh.tools.XonshError: xonsh: subprocess mode: command not found: b\r\nDid you mean one of the following?\r\n bg: Alias\r\n e: Alias\r\n z: Alias\r\n bc: Command (/usr/bin/bc)\r\n ab: Command (/usr/bin/ab)\r\n\r\nb: command not found\r\n```\r\n\r\n#### Function definition\r\n\r\n```\r\n$ def f(): \r\n.................................................. pass \r\n.................................................. \r\n$ f \r\n<function __main__.f>\r\n$ xontrib load bashisms \r\n$ def g(): \r\n.................................................. pass \r\n.................................................. 
\r\nTraceback (most recent call last):\r\n File \"/home/eddie/source/xonsh/xonsh/execer.py\", line 220, in _parse_ctx_free\r\n debug_level=(self.debug_level > 2),\r\n File \"/home/eddie/source/xonsh/xonsh/parsers/base.py\", line 543, in parse\r\n tree = self.parser.parse(input=s, lexer=self.lexer, debug=debug_level)\r\n File \"/home/eddie/source/xonsh/xonsh/ply/ply/yacc.py\", line 335, in parse\r\n return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)\r\n File \"/home/eddie/source/xonsh/xonsh/ply/ply/yacc.py\", line 1203, in parseopt_notrack\r\n tok = call_errorfunc(self.errorfunc, errtoken, self)\r\n File \"/home/eddie/source/xonsh/xonsh/ply/ply/yacc.py\", line 194, in call_errorfunc\r\n r = errorfunc(token)\r\n File \"/home/eddie/source/xonsh/xonsh/parsers/base.py\", line 3295, in p_error\r\n self._parse_error(msg, self.currloc(lineno=p.lineno, column=p.lexpos))\r\n File \"/home/eddie/source/xonsh/xonsh/parsers/base.py\", line 676, in _parse_error\r\n raise err\r\n File \"<string>\", line None\r\nSyntaxError: /home/eddie/source/xonsh/xontrib/prompt_ret_code.xsh:3:0: ('code: ',)\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/eddie/source/xonsh/xonsh/ptk2/shell.py\", line 173, in _push\r\n code = self.execer.compile(src, mode=\"single\", glbs=self.ctx, locs=None)\r\n File \"/home/eddie/source/xonsh/xonsh/execer.py\", line 135, in compile\r\n tree = self.parse(input, ctx, mode=mode, filename=filename, transform=transform)\r\n File \"/home/eddie/source/xonsh/xonsh/execer.py\", line 96, in parse\r\n tree, input = self._parse_ctx_free(input, mode=mode, filename=filename)\r\n File \"/home/eddie/source/xonsh/xonsh/execer.py\", line 242, in _parse_ctx_free\r\n line, nlogical, idx = get_logical_line(lines, idx)\r\n File \"/home/eddie/source/xonsh/xonsh/tools.py\", line 544, in get_logical_line\r\n line = lines[idx]\r\nIndexError: list index out of range\r\n$ g \r\nTraceback (most recent call last):\r\n File \"/home/eddie/source/xonsh/xonsh/built_ins.py\", line 560, in _run_binary\r\n p = self.cls(self.cmd, bufsize=bufsize, **kwargs)\r\n File \"/home/eddie/source/xonsh/xonsh/proc.py\", line 583, in __init__\r\n *args, stdin=stdin, stdout=stdout, stderr=stderr, **kwargs\r\n File \"/home/eddie/.pyenv/versions/3.7.3/lib/python3.7/subprocess.py\", line 775, in __init__\r\n restore_signals, start_new_session)\r\n File \"/home/eddie/.pyenv/versions/3.7.3/lib/python3.7/subprocess.py\", line 1522, in _execute_child\r\n raise child_exception_type(errno_num, err_msg, err_filename)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'g': 'g'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/eddie/source/xonsh/xonsh/proc.py\", line 1856, in __init__\r\n proc = spec.run(pipeline_group=pipeline_group)\r\n File \"/home/eddie/source/xonsh/xonsh/built_ins.py\", line 549, in run\r\n p = self._run_binary(kwargs)\r\n File \"/home/eddie/source/xonsh/xonsh/built_ins.py\", line 571, in _run_binary\r\n raise XonshError(e)\r\nxonsh.tools.XonshError: xonsh: subprocess mode: command not found: g\r\nDid you mean one of the following?\r\n fg: Alias\r\n bg: Alias\r\n e: Alias\r\n z: Alias\r\n ag: Command (/usr/local/bin/ag)\r\n\r\ng: command not found\r\n```\r\n\r\n## Steps to Reproduce\r\n\r\n```\r\n# start w/o bashisms loaded\r\n$ a = 1 \r\n$ xontrib load bashisms \r\n$ a = 1 \r\n```\r\n\r\nor\r\n\r\n```\r\n# start w/o bashisms 
loaded\r\n$ xontrib load bashisms \r\n$ def f(): \r\n.................................................. pass \r\n.................................................. \r\n```\n", "before_files": [{"content": "\"\"\"Bash-like interface extensions for xonsh.\"\"\"\nimport shlex\nimport sys\nimport re\n\nfrom prompt_toolkit.keys import Keys\nfrom prompt_toolkit.filters import Condition, EmacsInsertMode, ViInsertMode\n\nfrom xonsh.platform import ptk_shell_type\n\n__all__ = ()\n\n\[email protected]_transform_command\ndef bash_preproc(cmd, **kw):\n bang_previous = {\n \"!\": lambda x: x,\n \"$\": lambda x: shlex.split(x)[-1],\n \"^\": lambda x: shlex.split(x)[0],\n \"*\": lambda x: \" \".join(shlex.split(x)[1:]),\n }\n\n def replace_bang(m):\n arg = m.group(1)\n inputs = __xonsh__.history.inps\n\n # Dissect the previous command.\n if arg in bang_previous:\n try:\n return bang_previous[arg](inputs[-1])\n except IndexError:\n print(\"xonsh: no history for '!{}'\".format(arg))\n return \"\"\n\n # Look back in history for a matching command.\n else:\n try:\n return next((x for x in reversed(inputs) if x.startswith(arg)))\n except StopIteration:\n print(\"xonsh: no previous commands match '!{}'\".format(arg))\n return \"\"\n\n return re.sub(r\"!([!$^*]|[\\w]+)\", replace_bang, cmd.strip())\n\n\[email protected]_ptk_create\ndef custom_keybindings(bindings, **kw):\n if ptk_shell_type() == \"prompt_toolkit2\":\n handler = bindings.add\n\n @Condition\n def last_command_exists():\n return len(__xonsh__.history) > 0\n\n else:\n handler = bindings.registry.add_binding\n\n @Condition\n def last_command_exists(cli):\n return len(__xonsh__.history) > 0\n\n insert_mode = ViInsertMode() | EmacsInsertMode()\n\n @handler(Keys.Escape, \".\", filter=last_command_exists & insert_mode)\n def recall_last_arg(event):\n arg = __xonsh__.history[-1].cmd.split()[-1]\n event.current_buffer.insert_text(arg)\n\n\ndef alias(args, stdin=None):\n ret = 0\n\n if args:\n for arg in args:\n if \"=\" in arg:\n # shlex.split to remove quotes, e.g. \"foo='echo hey'\" into\n # \"foo=echo hey\"\n name, cmd = shlex.split(arg)[0].split(\"=\", 1)\n aliases[name] = shlex.split(cmd)\n elif arg in aliases:\n print(\"{}={}\".format(arg, aliases[arg]))\n else:\n print(\"alias: {}: not found\".format(arg), file=sys.stderr)\n ret = 1\n else:\n for alias, cmd in aliases.items():\n print(\"{}={}\".format(alias, cmd))\n\n return ret\n\n\naliases[\"alias\"] = alias\n", "path": "xontrib/bashisms.py"}]}
| 3,928 | 126 |
gh_patches_debug_20309
|
rasdani/github-patches
|
git_diff
|
cleanlab__cleanlab-397
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
color_sentence matches subtokens
`color_sentence` technically does subtoken matching instead of checking for equality between the tokenized sentence and the given token.
Is this intended?
# Stack trace
```bash
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AssertionError: ('\x1b[31mI\x1b[0mf you and I were to meet', 'If you and \x1b[31mI\x1b[0m were to meet')
```
# Steps to reproduce
Here's a MWE
```python
from cleanlab.internal.token_classification_utils import color_sentence
sentence = "If you and I were to meet"
word = "I"
output = color_sentence(sentence, word)
expected_output = "If you and \x1b[31mI\x1b[0m were to meet"
assert output == expected_output, (output, expected_output)
```
<!-- Be as detailed as possible here. If possible, include a self-contained
runnable example that demonstrates the issue. Remember to supply any data
necessary to run your example, or construct your example with synthetic data.
This is not strictly required, but the more detailed your bug report, the more
quickly we can help you and fix the bug. -->
# Additional information
- **Cleanlab version**: 31d43707014cb5c217ee1e9b014a206d554f2fd3
- **Operating system**: Linux 4.19.128-microsoft-standard
- **Python version**: Python 3.9.12
<!-- Please include any other information that could be helpful for debugging. -->
## Definition of `color_sentence`
https://github.com/cleanlab/cleanlab/blob/31d43707014cb5c217ee1e9b014a206d554f2fd3/cleanlab/internal/token_classification_utils.py#L144-L164
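For reference, one way to avoid coloring subtoken matches is to anchor the lookup on word boundaries. The snippet below is only an illustrative sketch of that idea (function name is made up; it assumes Python's `re` module and `termcolor` as already imported by the file above):

```python
import re
from termcolor import colored


def color_whole_word(sentence: str, word: str) -> str:
    # Color only whole-word occurrences of `word`; fall back to a plain
    # substring replacement if no whole-word match exists.
    colored_word = colored(word, "red")
    recolored, n = re.subn(r"\b{}\b".format(re.escape(word)), colored_word, sentence)
    if n == 0:
        recolored = sentence.replace(word, colored_word, 1)
    return recolored


# With the MWE above, color_whole_word("If you and I were to meet", "I")
# yields the expected "If you and \x1b[31mI\x1b[0m were to meet".
```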
Give better error messages when a class has no examples in `find_label_issues`
Currently, we produce various internal errors when a class is not represented. We should either support missing classes, or we should produce something like a `ValueError("find_label_issues requires that each class has at least one example")`.
Right now, we produce a variety of internal errors (data-dependent).
This can be seen as a dupe of #41 and #89, though it suggests at least fixing the confusing error messages as a stop-gap solution until we decide whether or not to support classes with no examples. Potentially related to #266.
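As a sketch of that stop-gap, an up-front guard could reject inputs with unrepresented classes before any internal computation runs (hypothetical helper name; `find_label_issues` does not currently expose this):

```python
import numpy as np


def _check_all_classes_present(labels, pred_probs):
    # Hypothetical validation: every column of pred_probs must appear in labels.
    num_classes = pred_probs.shape[1]
    missing = sorted(set(range(num_classes)) - set(np.unique(labels).tolist()))
    if missing:
        raise ValueError(
            "find_label_issues requires that each class has at least one example; "
            "classes with no examples: {}".format(missing)
        )
```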
# Steps to reproduce
## Example 1
```python
import cleanlab
import numpy as np
labels = np.array([0, 0, 1, 1])
pred_probs = np.array([[0.1, 0.7, 0.2], [0.1, 0.8, 0.1], [0.7, 0.2, 0.1], [0.8, 0.1, 0.1]])
issues = cleanlab.filter.find_label_issues(labels, pred_probs, n_jobs=1)
```
Produces the result:
```
Traceback (most recent call last):
File ".../error.py", line 6, in <module>
issues = cleanlab.filter.find_label_issues(labels, pred_probs)
File ".../src/cleanlab/env/lib/python3.9/site-packages/cleanlab/filter.py", line 457, in find_label_issues
confident_joint, cl_error_indices = compute_confident_joint(
File ".../src/cleanlab/env/lib/python3.9/site-packages/cleanlab/count.py", line 435, in compute_confident_joint
pred_probs_bool = pred_probs >= thresholds - 1e-6
ValueError: operands could not be broadcast together with shapes (4,3) (2,)
```
## Example 2
```python
import cleanlab
import numpy as np
labels = np.array([0, 0])
pred_probs = np.array([[0.3, 0.7], [0.2, 0.8]])
issues = cleanlab.filter.find_label_issues(labels, pred_probs, n_jobs=1)
```
Produces the result:
```
Traceback (most recent call last):
File ".../error.py", line 6, in <module>
issues = cleanlab.filter.find_label_issues(labels, pred_probs, n_jobs=1)
File ".../src/cleanlab/env/lib/python3.9/site-packages/cleanlab/filter.py", line 561, in find_label_issues
label_issues_masks_per_class = [_prune_by_count(k, args) for k in range(K)]
File ".../src/cleanlab/env/lib/python3.9/site-packages/cleanlab/filter.py", line 561, in <listcomp>
label_issues_masks_per_class = [_prune_by_count(k, args) for k in range(K)]
File ".../src/cleanlab/env/lib/python3.9/site-packages/cleanlab/filter.py", line 204, in _prune_by_count
if label_counts[k] <= min_examples_per_class: # No prune if not at least min_examples_per_class
IndexError: index 1 is out of bounds for axis 0 with size 1
```
# Additional information
- **Cleanlab version**: 2.0.0 (and also reproduced on master = 5e0b62d79790028c6c69a76c6af28c813a27ace5, though the stack traces and exceptions are slightly different there)
- **Operating system**: macOS 12.1, shouldn't be relevant here
- **Python version**: Python 3.9.12
</issue>
<code>
[start of cleanlab/internal/token_classification_utils.py]
1 import re
2 import string
3 import numpy as np
4 from termcolor import colored
5 from typing import List, Optional, Callable, Tuple
6
7
8 def get_sentence(words: List[str]) -> str:
9 """
10 Get sentence formed by a list of words with minor processing for readability
11
12 Parameters
13 ----------
14 words: List[str]
15 list of word-level tokens
16
17 Returns
18 ----------
19 sentence: string
20 sentence formed by list of word-level tokens
21
22 """
23 sentence = ""
24 for word in words:
25 if word not in string.punctuation or word in ["-", "("]:
26 word = " " + word
27 sentence += word
28 sentence = sentence.replace(" '", "'").replace("( ", "(").strip()
29 return sentence
30
31
32 def filter_sentence(
33 sentences: List[str],
34 condition: Optional[Callable[[str], bool]] = None,
35 ) -> Tuple[List[str], List[bool]]:
36 """
37 Filter sentence based on some condition, and returns filter mask
38
39 Parameters
40 ----------
41 sentences: List[str]
42 list of sentences
43
44 condition: Optional[Callable[[str], bool]]
45 sentence filtering condition
46
47 Returns
48 ---------
49 sentences: List[str]
50 list of sentences filtered
51
52 mask: List[bool]
53 boolean mask such that `mask[i] == True` if the i'th sentence is included in the
54 filtered sentence, otherwise `mask[i] == False`
55
56 """
57 if not condition:
58 condition = lambda sentence: len(sentence) > 1 and "#" not in sentence
59 mask = list(map(condition, sentences))
60 sentences = [sentence for m, sentence in zip(mask, sentences) if m]
61 return sentences, mask
62
63
64 def process_token(token: str, replace: List[Tuple[str, str]] = [("#", "")]) -> str:
65 """
66 Replaces special characters in the tokens
67
68 Parameters
69 ----------
70 token: str
71 token which potentially contains special characters
72
73 replace: List[Tuple[str, str]]
74 list of tuples `(s1, s2)`, where all occurances of s1 are replaced by s2
75
76 Returns
77 ---------
78 processed_token: str
79 processed token whose special character has been replaced
80
81 Note
82 ----
83 Only applies to characters in the original input token.
84 """
85 replace_dict = {re.escape(k): v for (k, v) in replace}
86 pattern = "|".join(replace_dict.keys())
87 compiled_pattern = re.compile(pattern)
88 replacement = lambda match: replace_dict[re.escape(match.group(0))]
89 processed_token = compiled_pattern.sub(replacement, token)
90 return processed_token
91
92
93 def mapping(entities: List[int], maps: List[int]) -> List[int]:
94 """
95 Map a list of entities to its corresponding entities
96
97 Parameters
98 ----------
99 entities: List[int]
100 a list of given entities
101
102 maps: List[int]
103 a list of mapped entities, such that the i'th indexed token should be mapped to `maps[i]`
104
105 Returns
106 ---------
107 mapped_entities: List[int]
108 a list of mapped entities
109
110 Examples
111 --------
112 >>> unique_identities = [0, 1, 2, 3, 4] # ["O", "B-PER", "I-PER", "B-LOC", "I-LOC"]
113 >>> maps = [0, 1, 1, 2, 2] # ["O", "PER", "PER", "LOC", "LOC"]
114 >>> mapping(unique_identities, maps)
115 [0, 1, 1, 2, 2] # ["O", "PER", "PER", "LOC", "LOC"]
116 >>> mapping([0, 0, 4, 4, 3, 4, 0, 2], maps)
117 [0, 0, 2, 2, 2, 2, 0, 1] # ["O", "O", "LOC", "LOC", "LOC", "LOC", "O", "PER"]
118 """
119 f = lambda x: maps[x]
120 return list(map(f, entities))
121
122
123 def merge_probs(probs: np.ndarray, maps: List[int]) -> np.ndarray:
124 """
125 Merges model-predictive probabilities with desired mapping
126
127 Parameters
128 ----------
129 probs:
130 np.array of shape `(N, K)`, where N is the number of tokens, and K is the number of classes for the model
131
132 maps: List[int]
133 a list of mapped index, such that the probability of the token being in the i'th class is mapped to the
134 `maps[i]` index. If `maps[i] == -1`, the i'th column of `probs` is ignored. If `np.any(maps == -1)`, the
135 returned probability is re-normalized.
136
137 Returns
138 ---------
139 probs_merged:
140 np.array of shape `(N, K')`, where K' is the number of new classes. Probablities are merged and
141 re-normalized if necessary.
142
143 """
144 old_classes = probs.shape[1]
145 map_size = np.max(maps) + 1
146 probs_merged = np.zeros([len(probs), map_size], dtype=probs.dtype.type)
147
148 for i in range(old_classes):
149 if maps[i] >= 0:
150 probs_merged[:, maps[i]] += probs[:, i]
151 if -1 in maps:
152 row_sums = probs_merged.sum(axis=1)
153 probs_merged /= row_sums[:, np.newaxis]
154 return probs_merged
155
156
157 def color_sentence(sentence: str, word: str) -> str:
158 """
159 Searches for a given token in the sentence and returns the sentence where the given token is colored red
160
161 Parameters
162 ----------
163 sentence: str
164 a sentence where the word is searched
165
166 word: str
167 keyword to find in `sentence`. Assumes the word exists in token
168
169 Returns
170 ---------
171 colored_sentence: str
172 `sentence` where the first occurance of the word is colored red, using `termcolor.colored`
173
174 """
175 start_idx = sentence.index(word)
176 before, after = sentence[:start_idx], sentence[start_idx + len(word) :]
177 return "%s%s%s" % (before, colored(word, "red"), after)
178
[end of cleanlab/internal/token_classification_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cleanlab/internal/token_classification_utils.py b/cleanlab/internal/token_classification_utils.py
--- a/cleanlab/internal/token_classification_utils.py
+++ b/cleanlab/internal/token_classification_utils.py
@@ -160,18 +160,22 @@
Parameters
----------
- sentence: str
+ sentence:
a sentence where the word is searched
- word: str
- keyword to find in `sentence`. Assumes the word exists in token
-
+ word:
+ keyword to find in `sentence`. Assumes the word exists in the sentence.
Returns
---------
- colored_sentence: str
- `sentence` where the first occurance of the word is colored red, using `termcolor.colored`
+ colored_sentence:
+ `sentence` where the every occurance of the word is colored red, using `termcolor.colored`
"""
- start_idx = sentence.index(word)
- before, after = sentence[:start_idx], sentence[start_idx + len(word) :]
- return "%s%s%s" % (before, colored(word, "red"), after)
+ colored_word = colored(word, "red")
+ colored_sentence, number_of_substitions = re.subn(
+ r"\b{}\b".format(word), colored_word, sentence
+ )
+ if number_of_substitions == 0:
+ # Use basic string manipulation if regex fails
+ colored_sentence = sentence.replace(word, colored_word)
+ return colored_sentence
|
{"golden_diff": "diff --git a/cleanlab/internal/token_classification_utils.py b/cleanlab/internal/token_classification_utils.py\n--- a/cleanlab/internal/token_classification_utils.py\n+++ b/cleanlab/internal/token_classification_utils.py\n@@ -160,18 +160,22 @@\n \n Parameters\n ----------\n- sentence: str\n+ sentence:\n a sentence where the word is searched\n \n- word: str\n- keyword to find in `sentence`. Assumes the word exists in token\n-\n+ word:\n+ keyword to find in `sentence`. Assumes the word exists in the sentence.\n Returns\n ---------\n- colored_sentence: str\n- `sentence` where the first occurance of the word is colored red, using `termcolor.colored`\n+ colored_sentence:\n+ `sentence` where the every occurance of the word is colored red, using `termcolor.colored`\n \n \"\"\"\n- start_idx = sentence.index(word)\n- before, after = sentence[:start_idx], sentence[start_idx + len(word) :]\n- return \"%s%s%s\" % (before, colored(word, \"red\"), after)\n+ colored_word = colored(word, \"red\")\n+ colored_sentence, number_of_substitions = re.subn(\n+ r\"\\b{}\\b\".format(word), colored_word, sentence\n+ )\n+ if number_of_substitions == 0:\n+ # Use basic string manipulation if regex fails\n+ colored_sentence = sentence.replace(word, colored_word)\n+ return colored_sentence\n", "issue": "color_sentence matches subtokens \n<!-- Briefly summarize the issue. -->\r\n\r\n`color_sentence` technically does subtoken matching instead of checking for equality between the tokenized sentence and the given token.\r\n\r\nIs this intended?\r\n\r\n# Stack trace\r\n\r\n```bash\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\nAssertionError: ('\\x1b[31mI\\x1b[0mf you and I were to meet', 'If you and \\x1b[31mI\\x1b[0m were to meet')\r\n```\r\n\r\n# Steps to reproduce\r\n\r\nHere's a MWE\r\n\r\n```python\r\nfrom cleanlab.internal.token_classification_utils import color_sentence\r\n\r\nsentence = \"If you and I were to meet\"\r\nword = \"I\"\r\noutput = color_sentence(sentence, word)\r\nexpected_output = \"If you and \\x1b[31mI\\x1b[0m were to meet\"\r\n\r\nassert output == expected_output, (output, expected_output)\r\n```\r\n\r\n<!-- Be as detailed as possible here. If possible, include a self-contained\r\nrunnable example that demonstrates the issue. Remember to supply any data\r\nnecessary to run your example, or construct your example with synthetic data.\r\nThis is not strictly required, but the more detailed your bug report, the more\r\nquickly we can help you and fix the bug. -->\r\n\r\n# Additional information\r\n\r\n- **Cleanlab version**: 31d43707014cb5c217ee1e9b014a206d554f2fd3\r\n- **Operating system**: Linux 4.19.128-microsoft-standard\r\n- **Python version**: Python 3.9.12\r\n\r\n<!-- Please include any other information that could be helpful for debugging. -->\r\n\r\n\r\n## Definition of `color_sentence`\r\n\r\nhttps://github.com/cleanlab/cleanlab/blob/31d43707014cb5c217ee1e9b014a206d554f2fd3/cleanlab/internal/token_classification_utils.py#L144-L164\nGive better error messages when a class has no examples in `find_label_issues`\nCurrently, we produce various internal errors when a class is not represented. 
We should either support missing classes, or we should produce something like a `ValueError(\"find_label_issues requires that each class has at least one example\")`.\r\n\r\nRight now, we produce a variety of internal errors (data-dependent).\r\n\r\nThis can be seen as a dupe of #41 and #89, though it suggests at least fixing the confusing error messages as a stop-gap solution until we decide whether or not to support classes with no examples. Potentially related to #266.\r\n\r\n# Steps to reproduce\r\n\r\n## Example 1\r\n\r\n```python\r\nimport cleanlab\r\nimport numpy as np\r\n\r\nlabels = np.array([0, 0, 1, 1])\r\npred_probs = np.array([[0.1, 0.7, 0.2], [0.1, 0.8, 0.1], [0.7, 0.2, 0.1], [0.8, 0.1, 0.1]])\r\nissues = cleanlab.filter.find_label_issues(labels, pred_probs, n_jobs=1)\r\n```\r\n\r\nProduces the result:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \".../error.py\", line 6, in <module>\r\n issues = cleanlab.filter.find_label_issues(labels, pred_probs)\r\n File \".../src/cleanlab/env/lib/python3.9/site-packages/cleanlab/filter.py\", line 457, in find_label_issues\r\n confident_joint, cl_error_indices = compute_confident_joint(\r\n File \".../src/cleanlab/env/lib/python3.9/site-packages/cleanlab/count.py\", line 435, in compute_confident_joint\r\n pred_probs_bool = pred_probs >= thresholds - 1e-6\r\nValueError: operands could not be broadcast together with shapes (4,3) (2,)\r\n```\r\n\r\n## Example 2\r\n\r\n```python\r\nimport cleanlab\r\nimport numpy as np\r\n\r\nlabels = np.array([0, 0])\r\npred_probs = np.array([[0.3, 0.7], [0.2, 0.8]])\r\nissues = cleanlab.filter.find_label_issues(labels, pred_probs, n_jobs=1)\r\n```\r\n\r\nProduces the result:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \".../error.py\", line 6, in <module>\r\n issues = cleanlab.filter.find_label_issues(labels, pred_probs, n_jobs=1)\r\n File \".../src/cleanlab/env/lib/python3.9/site-packages/cleanlab/filter.py\", line 561, in find_label_issues\r\n label_issues_masks_per_class = [_prune_by_count(k, args) for k in range(K)]\r\n File \".../src/cleanlab/env/lib/python3.9/site-packages/cleanlab/filter.py\", line 561, in <listcomp>\r\n label_issues_masks_per_class = [_prune_by_count(k, args) for k in range(K)]\r\n File \".../src/cleanlab/env/lib/python3.9/site-packages/cleanlab/filter.py\", line 204, in _prune_by_count\r\n if label_counts[k] <= min_examples_per_class: # No prune if not at least min_examples_per_class\r\nIndexError: index 1 is out of bounds for axis 0 with size 1\r\n```\r\n\r\n# Additional information\r\n\r\n- **Cleanlab version**: 2.0.0 (and also reproduced on master = 5e0b62d79790028c6c69a76c6af28c813a27ace5, though the stack traces and exceptions are slightly different there)\r\n- **Operating system**: macOS 12.1, shouldn't be relevant here\r\n- **Python version**: Python 3.9.12\r\n\n", "before_files": [{"content": "import re\nimport string\nimport numpy as np\nfrom termcolor import colored\nfrom typing import List, Optional, Callable, Tuple\n\n\ndef get_sentence(words: List[str]) -> str:\n \"\"\"\n Get sentence formed by a list of words with minor processing for readability\n\n Parameters\n ----------\n words: List[str]\n list of word-level tokens\n\n Returns\n ----------\n sentence: string\n sentence formed by list of word-level tokens\n\n \"\"\"\n sentence = \"\"\n for word in words:\n if word not in string.punctuation or word in [\"-\", \"(\"]:\n word = \" \" + word\n sentence += word\n sentence = sentence.replace(\" '\", \"'\").replace(\"( \", \"(\").strip()\n 
return sentence\n\n\ndef filter_sentence(\n sentences: List[str],\n condition: Optional[Callable[[str], bool]] = None,\n) -> Tuple[List[str], List[bool]]:\n \"\"\"\n Filter sentence based on some condition, and returns filter mask\n\n Parameters\n ----------\n sentences: List[str]\n list of sentences\n\n condition: Optional[Callable[[str], bool]]\n sentence filtering condition\n\n Returns\n ---------\n sentences: List[str]\n list of sentences filtered\n\n mask: List[bool]\n boolean mask such that `mask[i] == True` if the i'th sentence is included in the\n filtered sentence, otherwise `mask[i] == False`\n\n \"\"\"\n if not condition:\n condition = lambda sentence: len(sentence) > 1 and \"#\" not in sentence\n mask = list(map(condition, sentences))\n sentences = [sentence for m, sentence in zip(mask, sentences) if m]\n return sentences, mask\n\n\ndef process_token(token: str, replace: List[Tuple[str, str]] = [(\"#\", \"\")]) -> str:\n \"\"\"\n Replaces special characters in the tokens\n\n Parameters\n ----------\n token: str\n token which potentially contains special characters\n\n replace: List[Tuple[str, str]]\n list of tuples `(s1, s2)`, where all occurances of s1 are replaced by s2\n\n Returns\n ---------\n processed_token: str\n processed token whose special character has been replaced\n\n Note\n ----\n Only applies to characters in the original input token.\n \"\"\"\n replace_dict = {re.escape(k): v for (k, v) in replace}\n pattern = \"|\".join(replace_dict.keys())\n compiled_pattern = re.compile(pattern)\n replacement = lambda match: replace_dict[re.escape(match.group(0))]\n processed_token = compiled_pattern.sub(replacement, token)\n return processed_token\n\n\ndef mapping(entities: List[int], maps: List[int]) -> List[int]:\n \"\"\"\n Map a list of entities to its corresponding entities\n\n Parameters\n ----------\n entities: List[int]\n a list of given entities\n\n maps: List[int]\n a list of mapped entities, such that the i'th indexed token should be mapped to `maps[i]`\n\n Returns\n ---------\n mapped_entities: List[int]\n a list of mapped entities\n\n Examples\n --------\n >>> unique_identities = [0, 1, 2, 3, 4] # [\"O\", \"B-PER\", \"I-PER\", \"B-LOC\", \"I-LOC\"]\n >>> maps = [0, 1, 1, 2, 2] # [\"O\", \"PER\", \"PER\", \"LOC\", \"LOC\"]\n >>> mapping(unique_identities, maps)\n [0, 1, 1, 2, 2] # [\"O\", \"PER\", \"PER\", \"LOC\", \"LOC\"]\n >>> mapping([0, 0, 4, 4, 3, 4, 0, 2], maps)\n [0, 0, 2, 2, 2, 2, 0, 1] # [\"O\", \"O\", \"LOC\", \"LOC\", \"LOC\", \"LOC\", \"O\", \"PER\"]\n \"\"\"\n f = lambda x: maps[x]\n return list(map(f, entities))\n\n\ndef merge_probs(probs: np.ndarray, maps: List[int]) -> np.ndarray:\n \"\"\"\n Merges model-predictive probabilities with desired mapping\n\n Parameters\n ----------\n probs:\n np.array of shape `(N, K)`, where N is the number of tokens, and K is the number of classes for the model\n\n maps: List[int]\n a list of mapped index, such that the probability of the token being in the i'th class is mapped to the\n `maps[i]` index. If `maps[i] == -1`, the i'th column of `probs` is ignored. If `np.any(maps == -1)`, the\n returned probability is re-normalized.\n\n Returns\n ---------\n probs_merged:\n np.array of shape `(N, K')`, where K' is the number of new classes. 
Probablities are merged and\n re-normalized if necessary.\n\n \"\"\"\n old_classes = probs.shape[1]\n map_size = np.max(maps) + 1\n probs_merged = np.zeros([len(probs), map_size], dtype=probs.dtype.type)\n\n for i in range(old_classes):\n if maps[i] >= 0:\n probs_merged[:, maps[i]] += probs[:, i]\n if -1 in maps:\n row_sums = probs_merged.sum(axis=1)\n probs_merged /= row_sums[:, np.newaxis]\n return probs_merged\n\n\ndef color_sentence(sentence: str, word: str) -> str:\n \"\"\"\n Searches for a given token in the sentence and returns the sentence where the given token is colored red\n\n Parameters\n ----------\n sentence: str\n a sentence where the word is searched\n\n word: str\n keyword to find in `sentence`. Assumes the word exists in token\n\n Returns\n ---------\n colored_sentence: str\n `sentence` where the first occurance of the word is colored red, using `termcolor.colored`\n\n \"\"\"\n start_idx = sentence.index(word)\n before, after = sentence[:start_idx], sentence[start_idx + len(word) :]\n return \"%s%s%s\" % (before, colored(word, \"red\"), after)\n", "path": "cleanlab/internal/token_classification_utils.py"}]}
| 3,638 | 332 |
gh_patches_debug_42723
|
rasdani/github-patches
|
git_diff
|
privacyidea__privacyidea-1796
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hanging at contacting firebase service
When testing the iOS Push token App, the communication to the firebase service sometimes hangs. It is not clear why.
We could, however, improve the communication here:
https://github.com/privacyidea/privacyidea/blob/master/privacyidea/lib/smsprovider/FirebaseProvider.py#L76
And reuse an existing access_token as long as it has not expired.
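A cached-token approach could look roughly like the sketch below. Names here are illustrative and the real change would live inside the provider; the `oauth2client` calls mirror the ones already used in `FirebaseProvider.submit_message`:

```python
import time
from oauth2client.service_account import ServiceAccountCredentials

_access_tokens = {}  # JSON config file name -> (token, expires_at)


def get_cached_access_token(config_file_name, scopes):
    # Reuse a previously fetched token until shortly before it expires.
    token, expires_at = _access_tokens.get(config_file_name, (None, 0))
    if token is None or time.time() > expires_at - 10:
        credentials = ServiceAccountCredentials.from_json_keyfile_name(config_file_name, scopes)
        info = credentials.get_access_token()
        token, expires_at = info.access_token, time.time() + info.expires_in
        _access_tokens[config_file_name] = (token, expires_at)
    return token
```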
</issue>
<code>
[start of privacyidea/lib/smsprovider/FirebaseProvider.py]
1 # -*- coding: utf-8 -*-
2 #
3 # 2019-02-12 Cornelius Kölbel <[email protected]>
4 #
5 #
6 # This program is free software: you can redistribute it and/or
7 # modify it under the terms of the GNU Affero General Public
8 # License, version 3, as published by the Free Software Foundation.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU Affero General Public License for more details.
14 #
15 # You should have received a copy of the
16 # GNU Affero General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 #
20
21 __doc__ = """This is the provider class that communicates with Googles
22 Firebase Cloud Messaging Service.
23 This provider is used for the push token and can be used for SMS tokens.
24 """
25
26 from privacyidea.lib.smsprovider.SMSProvider import (ISMSProvider)
27 from privacyidea.lib.error import ConfigAdminError
28 from privacyidea.lib import _
29 import logging
30 from oauth2client.service_account import ServiceAccountCredentials
31 import requests
32 import json
33
34 FIREBASE_URL_SEND = 'https://fcm.googleapis.com/v1/projects/{0!s}/messages:send'
35 SCOPES = ['https://www.googleapis.com/auth/cloud-platform',
36 'https://www.googleapis.com/auth/datastore',
37 'https://www.googleapis.com/auth/devstorage.read_write',
38 'https://www.googleapis.com/auth/firebase',
39 'https://www.googleapis.com/auth/identitytoolkit',
40 'https://www.googleapis.com/auth/userinfo.email']
41
42 log = logging.getLogger(__name__)
43
44
45 class FIREBASE_CONFIG:
46 REGISTRATION_URL = "registration URL"
47 TTL = "time to live"
48 JSON_CONFIG = "JSON config file"
49 PROJECT_ID = "projectid"
50 PROJECT_NUMBER = "projectnumber"
51 APP_ID = "appid"
52 API_KEY = "apikey"
53 APP_ID_IOS = "appidios"
54 API_KEY_IOS = "apikeyios"
55
56
57 class FirebaseProvider(ISMSProvider):
58
59 def submit_message(self, firebase_token, data):
60 """
61 send a message to a registered Firebase client
62 This can be a simple OTP value or a cryptographic challenge response.
63
64 :param firebase_token: The firebase token of the smartphone
65 :type firebase_token: str
66 :param data: the data dictionary part of the message to submit to the phone
67 :type data: dict
68 :return: bool
69 """
70 res = False
71
72 credentials = ServiceAccountCredentials.\
73 from_json_keyfile_name(self.smsgateway.option_dict.get(FIREBASE_CONFIG.JSON_CONFIG),
74 SCOPES)
75
76 access_token_info = credentials.get_access_token()
77
78 # Should we do something with expires in?
79 # expires_in = access_token_info.expires_in
80
81 bearer_token = access_token_info.access_token
82 headers = {
83 'Authorization': u'Bearer {0!s}'.format(bearer_token),
84 'Content-Type': 'application/json; UTF-8',
85 }
86 fcm_message = {
87 "message": {
88 "data": data,
89 "token": firebase_token
90 }
91 }
92
93 url = FIREBASE_URL_SEND.format(self.smsgateway.option_dict.get(FIREBASE_CONFIG.PROJECT_ID))
94 resp = requests.post(url, data=json.dumps(fcm_message), headers=headers)
95
96 if resp.status_code == 200:
97 log.debug("Message sent successfully to Firebase service.")
98 res = True
99 else:
100 log.warning(u"Failed to send message to firebase service: {0!s}".format(resp.text))
101
102 return res
103
104 def check_configuration(self):
105 """
106 This method checks the sanity of the configuration of this provider.
107 If there is a configuration error, than an exception is raised.
108 :return:
109 """
110 json_file = self.smsgateway.option_dict.get(FIREBASE_CONFIG.JSON_CONFIG)
111 server_config = None
112 with open(json_file) as f:
113 server_config = json.load(f)
114 if server_config:
115 if server_config.get("type") != "service_account":
116 raise ConfigAdminError(description="The JSON file is not a valid firebase credentials file.")
117 project_id = self.smsgateway.option_dict.get(FIREBASE_CONFIG.PROJECT_ID)
118 if server_config.get("project_id") != project_id:
119 raise ConfigAdminError(description="The project_id you entered does not match the project_id from the JSON file.")
120
121 else:
122 raise ConfigAdminError(description="Please check your configuration. Can not load JSON file.")
123
124 # We need at least
125 # FIREBASE_CONFIG.API_KEY_IOS and FIREBASE_CONFIG.APP_ID_IOS
126 # or
127 # FIREBASE_CONFIG.API_KEY and FIREBASE_CONFIG.APP_ID
128 android_configured = bool(self.smsgateway.option_dict.get(FIREBASE_CONFIG.APP_ID)) and \
129 bool(self.smsgateway.option_dict.get(FIREBASE_CONFIG.API_KEY))
130 ios_configured = bool(self.smsgateway.option_dict.get(FIREBASE_CONFIG.APP_ID_IOS)) and \
131 bool(self.smsgateway.option_dict.get(FIREBASE_CONFIG.API_KEY_IOS))
132 if not android_configured and not ios_configured:
133 raise ConfigAdminError(description="You need to at least configure either app_id and api_key or"
134 " app_id_ios and api_key_ios.")
135
136 @classmethod
137 def parameters(cls):
138 """
139 Return a dictionary, that describes the parameters and options for the
140 Firebase Provider
141 Parameters are required keys to values.
142
143 :return: dict
144 """
145 params = {"options_allowed": False,
146 "parameters": {
147 FIREBASE_CONFIG.REGISTRATION_URL: {
148 "required": True,
149 "description": _('The URL the Push App should contact in the second enrollment step.'
150 ' Usually it is the endpoint /ttype/push of the privacyIDEA server.')},
151 FIREBASE_CONFIG.TTL: {
152 "required": True,
153 "description": _('The second enrollment step must be completed within this time (in minutes).')
154 },
155 FIREBASE_CONFIG.PROJECT_ID: {
156 "required": True,
157 "description": _("The project ID, that the client should use. Get it from your Firebase console.")
158 },
159 FIREBASE_CONFIG.PROJECT_NUMBER: {
160 "required": True,
161 "description": _(
162 "The project number, that the client should use. Get it from your Firebase console.")
163 },
164 FIREBASE_CONFIG.APP_ID: {
165 "required": False,
166 "description": _(
167 "The App ID, that the Android client should use. Get it from your Firebase console.")
168 },
169 FIREBASE_CONFIG.API_KEY: {
170 "required": False,
171 "description": _(
172 "The API Key, that the Android client should use. Get it from your Firebase console.")
173 },
174 FIREBASE_CONFIG.APP_ID_IOS:{
175 "required": False,
176 "description": _(
177 "The App ID, that the iOS client should use. Get it from your Firebase console.")
178 },
179 FIREBASE_CONFIG.API_KEY_IOS: {
180 "required": False,
181 "description": _(
182 "The API Key, that the iOS client should use. Get it from your Firebase console.")
183 },
184 FIREBASE_CONFIG.JSON_CONFIG: {
185 "required": True,
186 "description": _("The filename of the JSON config file, that allows privacyIDEA to talk"
187 " to the Firebase REST API.")
188 }
189 }
190 }
191 return params
192
[end of privacyidea/lib/smsprovider/FirebaseProvider.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/privacyidea/lib/smsprovider/FirebaseProvider.py b/privacyidea/lib/smsprovider/FirebaseProvider.py
--- a/privacyidea/lib/smsprovider/FirebaseProvider.py
+++ b/privacyidea/lib/smsprovider/FirebaseProvider.py
@@ -25,11 +25,14 @@
from privacyidea.lib.smsprovider.SMSProvider import (ISMSProvider)
from privacyidea.lib.error import ConfigAdminError
+from privacyidea.lib.framework import get_app_local_store
from privacyidea.lib import _
import logging
from oauth2client.service_account import ServiceAccountCredentials
import requests
import json
+import time
+import datetime
FIREBASE_URL_SEND = 'https://fcm.googleapis.com/v1/projects/{0!s}/messages:send'
SCOPES = ['https://www.googleapis.com/auth/cloud-platform',
@@ -42,6 +45,46 @@
log = logging.getLogger(__name__)
+class AccessToken(object):
+
+ def __init__(self, access_token, validity):
+ self.access_token = access_token
+ self.expires_at = time.time() + validity - 10
+
+
+def get_firebase_access_token(config_file_name):
+ """
+ This returns the access token for a given JSON config file name
+
+ :param config_file_name:
+ :return:
+ """
+ fbt = "firebase_token"
+ now = time.time()
+ app_store = get_app_local_store()
+
+ if fbt not in app_store or not isinstance(app_store[fbt], dict):
+ # initialize the firebase_token in the app_store as dict
+ app_store[fbt] = {}
+
+ if not isinstance(app_store[fbt].get(config_file_name), AccessToken) or \
+ now > app_store[fbt].get(config_file_name).expires_at:
+ # If the type of the config is not class AccessToken or
+ # if the token has expired
+ credentials = ServiceAccountCredentials.from_json_keyfile_name(config_file_name, SCOPES)
+ log.debug("Fetching a new access_token for {!r} from firebase...".format(config_file_name))
+ access_token_info = credentials.get_access_token()
+ # Now we set the expiration date for the new access_token with a margin of 10 seconds
+ At = AccessToken(access_token_info.access_token, access_token_info.expires_in)
+ # We do not use a lock here: The worst that could happen is that two threads
+ # fetch new auth tokens concurrently. In this case, one of them wins and is written to the dictionary.
+ app_store[fbt][config_file_name] = At
+ readable_time = datetime.datetime.fromtimestamp(At.expires_at).isoformat()
+ log.debug(u"Setting the expiration for {!r} of the new access_token to {!s}.".format(config_file_name, readable_time))
+
+ return app_store[fbt][config_file_name].access_token
+
+
class FIREBASE_CONFIG:
REGISTRATION_URL = "registration URL"
TTL = "time to live"
@@ -56,6 +99,11 @@
class FirebaseProvider(ISMSProvider):
+ def __init__(self, db_smsprovider_object=None, smsgateway=None):
+ ISMSProvider.__init__(self, db_smsprovider_object, smsgateway)
+ self.access_token_info = None
+ self.access_token_expires_at = 0
+
def submit_message(self, firebase_token, data):
"""
send a message to a registered Firebase client
@@ -69,16 +117,9 @@
"""
res = False
- credentials = ServiceAccountCredentials.\
- from_json_keyfile_name(self.smsgateway.option_dict.get(FIREBASE_CONFIG.JSON_CONFIG),
- SCOPES)
-
- access_token_info = credentials.get_access_token()
-
- # Should we do something with expires in?
- # expires_in = access_token_info.expires_in
+ bearer_token = get_firebase_access_token(self.smsgateway.option_dict.get(
+ FIREBASE_CONFIG.JSON_CONFIG))
- bearer_token = access_token_info.access_token
headers = {
'Authorization': u'Bearer {0!s}'.format(bearer_token),
'Content-Type': 'application/json; UTF-8',
@@ -189,4 +230,4 @@
}
}
return params
-
\ No newline at end of file
+
|
{"golden_diff": "diff --git a/privacyidea/lib/smsprovider/FirebaseProvider.py b/privacyidea/lib/smsprovider/FirebaseProvider.py\n--- a/privacyidea/lib/smsprovider/FirebaseProvider.py\n+++ b/privacyidea/lib/smsprovider/FirebaseProvider.py\n@@ -25,11 +25,14 @@\n \n from privacyidea.lib.smsprovider.SMSProvider import (ISMSProvider)\n from privacyidea.lib.error import ConfigAdminError\n+from privacyidea.lib.framework import get_app_local_store\n from privacyidea.lib import _\n import logging\n from oauth2client.service_account import ServiceAccountCredentials\n import requests\n import json\n+import time\n+import datetime\n \n FIREBASE_URL_SEND = 'https://fcm.googleapis.com/v1/projects/{0!s}/messages:send'\n SCOPES = ['https://www.googleapis.com/auth/cloud-platform',\n@@ -42,6 +45,46 @@\n log = logging.getLogger(__name__)\n \n \n+class AccessToken(object):\n+\n+ def __init__(self, access_token, validity):\n+ self.access_token = access_token\n+ self.expires_at = time.time() + validity - 10\n+\n+\n+def get_firebase_access_token(config_file_name):\n+ \"\"\"\n+ This returns the access token for a given JSON config file name\n+\n+ :param config_file_name:\n+ :return:\n+ \"\"\"\n+ fbt = \"firebase_token\"\n+ now = time.time()\n+ app_store = get_app_local_store()\n+\n+ if fbt not in app_store or not isinstance(app_store[fbt], dict):\n+ # initialize the firebase_token in the app_store as dict\n+ app_store[fbt] = {}\n+\n+ if not isinstance(app_store[fbt].get(config_file_name), AccessToken) or \\\n+ now > app_store[fbt].get(config_file_name).expires_at:\n+ # If the type of the config is not class AccessToken or\n+ # if the token has expired\n+ credentials = ServiceAccountCredentials.from_json_keyfile_name(config_file_name, SCOPES)\n+ log.debug(\"Fetching a new access_token for {!r} from firebase...\".format(config_file_name))\n+ access_token_info = credentials.get_access_token()\n+ # Now we set the expiration date for the new access_token with a margin of 10 seconds\n+ At = AccessToken(access_token_info.access_token, access_token_info.expires_in)\n+ # We do not use a lock here: The worst that could happen is that two threads\n+ # fetch new auth tokens concurrently. 
In this case, one of them wins and is written to the dictionary.\n+ app_store[fbt][config_file_name] = At\n+ readable_time = datetime.datetime.fromtimestamp(At.expires_at).isoformat()\n+ log.debug(u\"Setting the expiration for {!r} of the new access_token to {!s}.\".format(config_file_name, readable_time))\n+\n+ return app_store[fbt][config_file_name].access_token\n+\n+\n class FIREBASE_CONFIG:\n REGISTRATION_URL = \"registration URL\"\n TTL = \"time to live\"\n@@ -56,6 +99,11 @@\n \n class FirebaseProvider(ISMSProvider):\n \n+ def __init__(self, db_smsprovider_object=None, smsgateway=None):\n+ ISMSProvider.__init__(self, db_smsprovider_object, smsgateway)\n+ self.access_token_info = None\n+ self.access_token_expires_at = 0\n+\n def submit_message(self, firebase_token, data):\n \"\"\"\n send a message to a registered Firebase client\n@@ -69,16 +117,9 @@\n \"\"\"\n res = False\n \n- credentials = ServiceAccountCredentials.\\\n- from_json_keyfile_name(self.smsgateway.option_dict.get(FIREBASE_CONFIG.JSON_CONFIG),\n- SCOPES)\n-\n- access_token_info = credentials.get_access_token()\n-\n- # Should we do something with expires in?\n- # expires_in = access_token_info.expires_in\n+ bearer_token = get_firebase_access_token(self.smsgateway.option_dict.get(\n+ FIREBASE_CONFIG.JSON_CONFIG))\n \n- bearer_token = access_token_info.access_token\n headers = {\n 'Authorization': u'Bearer {0!s}'.format(bearer_token),\n 'Content-Type': 'application/json; UTF-8',\n@@ -189,4 +230,4 @@\n }\n }\n return params\n- \n\\ No newline at end of file\n+\n", "issue": "Hanging at contacting firebase service\nWhen testing the iOS Push token App the communication to the firebase service hangs some times. It is not clear why.\r\n\r\nWe could however improve the communication here:\r\nhttps://github.com/privacyidea/privacyidea/blob/master/privacyidea/lib/smsprovider/FirebaseProvider.py#L76\r\n\r\nAnd reuse an existing access_token as long as it has not expired.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# 2019-02-12 Cornelius K\u00f6lbel <[email protected]>\n#\n#\n# This program is free software: you can redistribute it and/or\n# modify it under the terms of the GNU Affero General Public\n# License, version 3, as published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the\n# GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n#\n\n__doc__ = \"\"\"This is the provider class that communicates with Googles\nFirebase Cloud Messaging Service.\nThis provider is used for the push token and can be used for SMS tokens.\n\"\"\"\n\nfrom privacyidea.lib.smsprovider.SMSProvider import (ISMSProvider)\nfrom privacyidea.lib.error import ConfigAdminError\nfrom privacyidea.lib import _\nimport logging\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport requests\nimport json\n\nFIREBASE_URL_SEND = 'https://fcm.googleapis.com/v1/projects/{0!s}/messages:send'\nSCOPES = ['https://www.googleapis.com/auth/cloud-platform',\n 'https://www.googleapis.com/auth/datastore',\n 'https://www.googleapis.com/auth/devstorage.read_write',\n 'https://www.googleapis.com/auth/firebase',\n 'https://www.googleapis.com/auth/identitytoolkit',\n 'https://www.googleapis.com/auth/userinfo.email']\n\nlog = logging.getLogger(__name__)\n\n\nclass FIREBASE_CONFIG:\n REGISTRATION_URL = \"registration URL\"\n TTL = \"time to live\"\n JSON_CONFIG = \"JSON config file\"\n PROJECT_ID = \"projectid\"\n PROJECT_NUMBER = \"projectnumber\"\n APP_ID = \"appid\"\n API_KEY = \"apikey\"\n APP_ID_IOS = \"appidios\"\n API_KEY_IOS = \"apikeyios\"\n\n\nclass FirebaseProvider(ISMSProvider):\n\n def submit_message(self, firebase_token, data):\n \"\"\"\n send a message to a registered Firebase client\n This can be a simple OTP value or a cryptographic challenge response.\n\n :param firebase_token: The firebase token of the smartphone\n :type firebase_token: str\n :param data: the data dictionary part of the message to submit to the phone\n :type data: dict\n :return: bool\n \"\"\"\n res = False\n\n credentials = ServiceAccountCredentials.\\\n from_json_keyfile_name(self.smsgateway.option_dict.get(FIREBASE_CONFIG.JSON_CONFIG),\n SCOPES)\n\n access_token_info = credentials.get_access_token()\n\n # Should we do something with expires in?\n # expires_in = access_token_info.expires_in\n\n bearer_token = access_token_info.access_token\n headers = {\n 'Authorization': u'Bearer {0!s}'.format(bearer_token),\n 'Content-Type': 'application/json; UTF-8',\n }\n fcm_message = {\n \"message\": {\n \"data\": data,\n \"token\": firebase_token\n }\n }\n\n url = FIREBASE_URL_SEND.format(self.smsgateway.option_dict.get(FIREBASE_CONFIG.PROJECT_ID))\n resp = requests.post(url, data=json.dumps(fcm_message), headers=headers)\n\n if resp.status_code == 200:\n log.debug(\"Message sent successfully to Firebase service.\")\n res = True\n else:\n log.warning(u\"Failed to send message to firebase service: {0!s}\".format(resp.text))\n\n return res\n\n def check_configuration(self):\n \"\"\"\n This method checks the sanity of the configuration of this provider.\n If there is a configuration error, than an exception is raised.\n :return:\n \"\"\"\n json_file = self.smsgateway.option_dict.get(FIREBASE_CONFIG.JSON_CONFIG)\n server_config = None\n with open(json_file) as f:\n server_config = json.load(f)\n if server_config:\n if server_config.get(\"type\") != \"service_account\":\n raise ConfigAdminError(description=\"The JSON file is not a valid firebase credentials file.\")\n project_id = self.smsgateway.option_dict.get(FIREBASE_CONFIG.PROJECT_ID)\n if server_config.get(\"project_id\") != project_id:\n raise ConfigAdminError(description=\"The project_id you entered does not match the project_id from the JSON file.\")\n\n else:\n raise ConfigAdminError(description=\"Please check your configuration. 
Can not load JSON file.\")\n\n # We need at least\n # FIREBASE_CONFIG.API_KEY_IOS and FIREBASE_CONFIG.APP_ID_IOS\n # or\n # FIREBASE_CONFIG.API_KEY and FIREBASE_CONFIG.APP_ID\n android_configured = bool(self.smsgateway.option_dict.get(FIREBASE_CONFIG.APP_ID)) and \\\n bool(self.smsgateway.option_dict.get(FIREBASE_CONFIG.API_KEY))\n ios_configured = bool(self.smsgateway.option_dict.get(FIREBASE_CONFIG.APP_ID_IOS)) and \\\n bool(self.smsgateway.option_dict.get(FIREBASE_CONFIG.API_KEY_IOS))\n if not android_configured and not ios_configured:\n raise ConfigAdminError(description=\"You need to at least configure either app_id and api_key or\"\n \" app_id_ios and api_key_ios.\")\n\n @classmethod\n def parameters(cls):\n \"\"\"\n Return a dictionary, that describes the parameters and options for the\n Firebase Provider\n Parameters are required keys to values.\n\n :return: dict\n \"\"\"\n params = {\"options_allowed\": False,\n \"parameters\": {\n FIREBASE_CONFIG.REGISTRATION_URL: {\n \"required\": True,\n \"description\": _('The URL the Push App should contact in the second enrollment step.'\n ' Usually it is the endpoint /ttype/push of the privacyIDEA server.')},\n FIREBASE_CONFIG.TTL: {\n \"required\": True,\n \"description\": _('The second enrollment step must be completed within this time (in minutes).')\n },\n FIREBASE_CONFIG.PROJECT_ID: {\n \"required\": True,\n \"description\": _(\"The project ID, that the client should use. Get it from your Firebase console.\")\n },\n FIREBASE_CONFIG.PROJECT_NUMBER: {\n \"required\": True,\n \"description\": _(\n \"The project number, that the client should use. Get it from your Firebase console.\")\n },\n FIREBASE_CONFIG.APP_ID: {\n \"required\": False,\n \"description\": _(\n \"The App ID, that the Android client should use. Get it from your Firebase console.\")\n },\n FIREBASE_CONFIG.API_KEY: {\n \"required\": False,\n \"description\": _(\n \"The API Key, that the Android client should use. Get it from your Firebase console.\")\n },\n FIREBASE_CONFIG.APP_ID_IOS:{\n \"required\": False,\n \"description\": _(\n \"The App ID, that the iOS client should use. Get it from your Firebase console.\")\n },\n FIREBASE_CONFIG.API_KEY_IOS: {\n \"required\": False,\n \"description\": _(\n \"The API Key, that the iOS client should use. Get it from your Firebase console.\")\n },\n FIREBASE_CONFIG.JSON_CONFIG: {\n \"required\": True,\n \"description\": _(\"The filename of the JSON config file, that allows privacyIDEA to talk\"\n \" to the Firebase REST API.\")\n }\n }\n }\n return params\n ", "path": "privacyidea/lib/smsprovider/FirebaseProvider.py"}]}
| 2,721 | 973 |
gh_patches_debug_4809
|
rasdani/github-patches
|
git_diff
|
getredash__redash-3563
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issues with Query Based Dropdown when the value is numeric
When the values the query returns for the dropdown are numeric, we have two issues:
1. We don’t load the correct value because the value from the URL is passed as a string, while the actual value is a number.
2. We don’t load results at all, because they fail parameter schema validation (the string doesn't match the values the dropdown has, which are numbers).
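One possible direction (a sketch against the Python 2 code shown below, not necessarily the only fix) is to normalise dropdown values to text when they are plucked from the query result, so they compare equal to the string arriving from the URL and pass the `"query"` validator:

```python
def _pluck_name_and_value(default_column, row):
    row = {k.lower(): v for k, v in row.items()}
    name_column = "name" if "name" in row.keys() else default_column.lower()
    value_column = "value" if "value" in row.keys() else default_column.lower()
    # Coerce the value to text so numeric results match the string URL parameter.
    return {"name": row[name_column], "value": unicode(row[value_column])}  # Python 2 `unicode`
```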
</issue>
<code>
[start of redash/utils/parameterized_query.py]
1 import pystache
2 from functools import partial
3 from flask_login import current_user
4 from numbers import Number
5 from redash.utils import mustache_render, json_loads
6 from redash.permissions import require_access, view_only
7 from funcy import distinct
8 from dateutil.parser import parse
9
10
11 def _pluck_name_and_value(default_column, row):
12 row = {k.lower(): v for k, v in row.items()}
13 name_column = "name" if "name" in row.keys() else default_column.lower()
14 value_column = "value" if "value" in row.keys() else default_column.lower()
15
16 return {"name": row[name_column], "value": row[value_column]}
17
18
19 def _load_result(query_id):
20 from redash.authentication.org_resolving import current_org
21 from redash import models
22
23 query = models.Query.get_by_id_and_org(query_id, current_org)
24 require_access(query.data_source.groups, current_user, view_only)
25 query_result = models.QueryResult.get_by_id_and_org(query.latest_query_data_id, current_org)
26
27 return json_loads(query_result.data)
28
29
30 def dropdown_values(query_id):
31 data = _load_result(query_id)
32 first_column = data["columns"][0]["name"]
33 pluck = partial(_pluck_name_and_value, first_column)
34 return map(pluck, data["rows"])
35
36
37 def _collect_key_names(nodes):
38 keys = []
39 for node in nodes._parse_tree:
40 if isinstance(node, pystache.parser._EscapeNode):
41 keys.append(node.key)
42 elif isinstance(node, pystache.parser._SectionNode):
43 keys.append(node.key)
44 keys.extend(_collect_key_names(node.parsed))
45
46 return distinct(keys)
47
48
49 def _collect_query_parameters(query):
50 nodes = pystache.parse(query)
51 keys = _collect_key_names(nodes)
52 return keys
53
54
55 def _parameter_names(parameter_values):
56 names = []
57 for key, value in parameter_values.iteritems():
58 if isinstance(value, dict):
59 for inner_key in value.keys():
60 names.append(u'{}.{}'.format(key, inner_key))
61 else:
62 names.append(key)
63
64 return names
65
66
67 def _is_date(string):
68 try:
69 parse(string)
70 return True
71 except ValueError:
72 return False
73
74
75 def _is_date_range(obj):
76 try:
77 return _is_date(obj["start"]) and _is_date(obj["end"])
78 except (KeyError, TypeError):
79 return False
80
81
82 class ParameterizedQuery(object):
83 def __init__(self, template, schema=None):
84 self.schema = schema or []
85 self.template = template
86 self.query = template
87 self.parameters = {}
88
89 def apply(self, parameters):
90 invalid_parameter_names = [key for (key, value) in parameters.iteritems() if not self._valid(key, value)]
91 if invalid_parameter_names:
92 raise InvalidParameterError(invalid_parameter_names)
93 else:
94 self.parameters.update(parameters)
95 self.query = mustache_render(self.template, self.parameters)
96
97 return self
98
99 def _valid(self, name, value):
100 if not self.schema:
101 return True
102
103 definition = next((definition for definition in self.schema if definition["name"] == name), None)
104
105 if not definition:
106 return False
107
108 validators = {
109 "text": lambda value: isinstance(value, basestring),
110 "number": lambda value: isinstance(value, Number),
111 "enum": lambda value: value in definition["enumOptions"],
112 "query": lambda value: value in [v["value"] for v in dropdown_values(definition["queryId"])],
113 "date": _is_date,
114 "datetime-local": _is_date,
115 "datetime-with-seconds": _is_date,
116 "date-range": _is_date_range,
117 "datetime-range": _is_date_range,
118 "datetime-range-with-seconds": _is_date_range,
119 }
120
121 validate = validators.get(definition["type"], lambda x: False)
122
123 return validate(value)
124
125 @property
126 def is_safe(self):
127 text_parameters = filter(lambda p: p["type"] == "text", self.schema)
128 return not any(text_parameters)
129
130 @property
131 def missing_params(self):
132 query_parameters = set(_collect_query_parameters(self.template))
133 return set(query_parameters) - set(_parameter_names(self.parameters))
134
135 @property
136 def text(self):
137 return self.query
138
139
140 class InvalidParameterError(Exception):
141 def __init__(self, parameters):
142 message = u"The following parameter values are incompatible with their definitions: {}".format(", ".join(parameters))
143 super(InvalidParameterError, self).__init__(message)
144
[end of redash/utils/parameterized_query.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/redash/utils/parameterized_query.py b/redash/utils/parameterized_query.py
--- a/redash/utils/parameterized_query.py
+++ b/redash/utils/parameterized_query.py
@@ -13,7 +13,7 @@
name_column = "name" if "name" in row.keys() else default_column.lower()
value_column = "value" if "value" in row.keys() else default_column.lower()
- return {"name": row[name_column], "value": row[value_column]}
+ return {"name": row[name_column], "value": unicode(row[value_column])}
def _load_result(query_id):
|
{"golden_diff": "diff --git a/redash/utils/parameterized_query.py b/redash/utils/parameterized_query.py\n--- a/redash/utils/parameterized_query.py\n+++ b/redash/utils/parameterized_query.py\n@@ -13,7 +13,7 @@\n name_column = \"name\" if \"name\" in row.keys() else default_column.lower()\n value_column = \"value\" if \"value\" in row.keys() else default_column.lower()\n \n- return {\"name\": row[name_column], \"value\": row[value_column]}\n+ return {\"name\": row[name_column], \"value\": unicode(row[value_column])}\n \n \n def _load_result(query_id):\n", "issue": "Issues with Query Based Dropdown when the value is numeric\nWhen the values the query returns for the dropdown are numeric, we have two issues:\r\n\r\n1. We don\u2019t load the correct value because the value from the URL passed as string, while the actual value is a number.\r\n2. We don\u2019t load results at all, because they fail parameter schema validation (the string doesn't match the values the dropdown has, which are numbers).\n", "before_files": [{"content": "import pystache\nfrom functools import partial\nfrom flask_login import current_user\nfrom numbers import Number\nfrom redash.utils import mustache_render, json_loads\nfrom redash.permissions import require_access, view_only\nfrom funcy import distinct\nfrom dateutil.parser import parse\n\n\ndef _pluck_name_and_value(default_column, row):\n row = {k.lower(): v for k, v in row.items()}\n name_column = \"name\" if \"name\" in row.keys() else default_column.lower()\n value_column = \"value\" if \"value\" in row.keys() else default_column.lower()\n\n return {\"name\": row[name_column], \"value\": row[value_column]}\n\n\ndef _load_result(query_id):\n from redash.authentication.org_resolving import current_org\n from redash import models\n\n query = models.Query.get_by_id_and_org(query_id, current_org)\n require_access(query.data_source.groups, current_user, view_only)\n query_result = models.QueryResult.get_by_id_and_org(query.latest_query_data_id, current_org)\n\n return json_loads(query_result.data)\n\n\ndef dropdown_values(query_id):\n data = _load_result(query_id)\n first_column = data[\"columns\"][0][\"name\"]\n pluck = partial(_pluck_name_and_value, first_column)\n return map(pluck, data[\"rows\"])\n\n\ndef _collect_key_names(nodes):\n keys = []\n for node in nodes._parse_tree:\n if isinstance(node, pystache.parser._EscapeNode):\n keys.append(node.key)\n elif isinstance(node, pystache.parser._SectionNode):\n keys.append(node.key)\n keys.extend(_collect_key_names(node.parsed))\n\n return distinct(keys)\n\n\ndef _collect_query_parameters(query):\n nodes = pystache.parse(query)\n keys = _collect_key_names(nodes)\n return keys\n\n\ndef _parameter_names(parameter_values):\n names = []\n for key, value in parameter_values.iteritems():\n if isinstance(value, dict):\n for inner_key in value.keys():\n names.append(u'{}.{}'.format(key, inner_key))\n else:\n names.append(key)\n\n return names\n\n\ndef _is_date(string):\n try:\n parse(string)\n return True\n except ValueError:\n return False\n\n\ndef _is_date_range(obj):\n try:\n return _is_date(obj[\"start\"]) and _is_date(obj[\"end\"])\n except (KeyError, TypeError):\n return False\n\n\nclass ParameterizedQuery(object):\n def __init__(self, template, schema=None):\n self.schema = schema or []\n self.template = template\n self.query = template\n self.parameters = {}\n\n def apply(self, parameters):\n invalid_parameter_names = [key for (key, value) in parameters.iteritems() if not self._valid(key, value)]\n if 
invalid_parameter_names:\n raise InvalidParameterError(invalid_parameter_names)\n else:\n self.parameters.update(parameters)\n self.query = mustache_render(self.template, self.parameters)\n\n return self\n\n def _valid(self, name, value):\n if not self.schema:\n return True\n\n definition = next((definition for definition in self.schema if definition[\"name\"] == name), None)\n\n if not definition:\n return False\n\n validators = {\n \"text\": lambda value: isinstance(value, basestring),\n \"number\": lambda value: isinstance(value, Number),\n \"enum\": lambda value: value in definition[\"enumOptions\"],\n \"query\": lambda value: value in [v[\"value\"] for v in dropdown_values(definition[\"queryId\"])],\n \"date\": _is_date,\n \"datetime-local\": _is_date,\n \"datetime-with-seconds\": _is_date,\n \"date-range\": _is_date_range,\n \"datetime-range\": _is_date_range,\n \"datetime-range-with-seconds\": _is_date_range,\n }\n\n validate = validators.get(definition[\"type\"], lambda x: False)\n\n return validate(value)\n\n @property\n def is_safe(self):\n text_parameters = filter(lambda p: p[\"type\"] == \"text\", self.schema)\n return not any(text_parameters)\n\n @property\n def missing_params(self):\n query_parameters = set(_collect_query_parameters(self.template))\n return set(query_parameters) - set(_parameter_names(self.parameters))\n\n @property\n def text(self):\n return self.query\n\n\nclass InvalidParameterError(Exception):\n def __init__(self, parameters):\n message = u\"The following parameter values are incompatible with their definitions: {}\".format(\", \".join(parameters))\n super(InvalidParameterError, self).__init__(message)\n", "path": "redash/utils/parameterized_query.py"}]}
| 1,949 | 141 |
| gh_patches_debug_12489 | rasdani/github-patches | git_diff | dask__distributed-270 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
WindowsError on dscheduler
So it appears that Bokeh is not found, but I have Bokeh and everything is up to date.
Interesting...
here lieth the traceback:
```
distributed.scheduler - INFO - Start Scheduler at: 192.168.0.7:8786
distributed.scheduler - INFO - http at: 192.168.0.7:9786
distributed.scheduler - WARNING - Could not start Bokeh web UI
Traceback (most recent call last):
File "C:\Anaconda\lib\site-packages\distributed\cli\dscheduler.py", line 88, in main
bokeh_proc = subprocess.Popen(args)
File "C:\Anaconda\lib\subprocess.py", line 710, in __init__
errread, errwrite)
File "C:\Anaconda\lib\subprocess.py", line 958, in _execute_child
startupinfo)
WindowsError: [Error 2] The system cannot find the file specified
Traceback (most recent call last):
File "C:\Anaconda\Scripts\dscheduler-script.py", line 5, in <module>
sys.exit(distributed.cli.dscheduler.go())
File "C:\Anaconda\lib\site-packages\distributed\cli\dscheduler.py", line 107, in go
main()
File "C:\Anaconda\lib\site-packages\click\core.py", line 716, in __call__
return self.main(*args, **kwargs)
File "C:\Anaconda\lib\site-packages\click\core.py", line 696, in main
rv = self.invoke(ctx)
File "C:\Anaconda\lib\site-packages\click\core.py", line 889, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Anaconda\lib\site-packages\click\core.py", line 534, in invoke
return callback(*args, **kwargs)
File "C:\Anaconda\lib\site-packages\distributed\cli\dscheduler.py", line 100, in main
bokeh_proc.terminate()
UnboundLocalError: local variable 'bokeh_proc' referenced before assignment
```
</issue>
<code>
[start of distributed/cli/dscheduler.py]
1 from __future__ import print_function, division, absolute_import
2
3 import json
4 import logging
5 import multiprocessing
6 import os
7 import socket
8 import subprocess
9 import sys
10 from time import sleep
11
12 import click
13
14 import distributed
15 from distributed import Scheduler
16 from distributed.utils import get_ip
17 from distributed.http import HTTPScheduler
18 from distributed.cli.utils import check_python_3
19 from tornado.ioloop import IOLoop
20
21 logger = logging.getLogger('distributed.scheduler')
22
23 import signal
24
25 def handle_signal(sig, frame):
26 IOLoop.instance().add_callback(IOLoop.instance().stop)
27
28 signal.signal(signal.SIGINT, handle_signal)
29 signal.signal(signal.SIGTERM, handle_signal)
30
31
32 @click.command()
33 @click.argument('center', type=str, default='')
34 @click.option('--port', type=int, default=8786, help="Serving port")
35 @click.option('--http-port', type=int, default=9786, help="HTTP port")
36 @click.option('--bokeh-port', type=int, default=8787, help="HTTP port")
37 @click.option('--bokeh/--no-bokeh', '_bokeh', default=True, show_default=True,
38 required=False, help="Launch Bokeh Web UI")
39 @click.option('--host', type=str, default=None,
40 help="IP or hostname of this server")
41 @click.option('--show/--no-show', default=False, help="Show web UI")
42 @click.option('--bokeh-whitelist', default=None, multiple=True,
43 help="IP addresses to whitelist for bokeh.")
44 def main(center, host, port, http_port, bokeh_port, show, _bokeh, bokeh_whitelist):
45 given_host = host
46 host = host or get_ip()
47 ip = socket.gethostbyname(host)
48 loop = IOLoop.current()
49 scheduler = Scheduler(center, ip=ip,
50 services={('http', http_port): HTTPScheduler})
51 if center:
52 loop.run_sync(scheduler.sync_center)
53 scheduler.start(port)
54
55 if _bokeh:
56 try:
57 import bokeh
58 import distributed.bokeh
59 hosts = ['%s:%d' % (h, bokeh_port) for h in
60 ['localhost', '127.0.0.1', ip, socket.gethostname(),
61 host] + list(bokeh_whitelist)]
62 dirname = os.path.dirname(distributed.__file__)
63 paths = [os.path.join(dirname, 'bokeh', name)
64 for name in ['status', 'tasks']]
65 binname = sys.argv[0][:-len('dscheduler')] + 'bokeh'
66 args = ([binname, 'serve'] + paths +
67 ['--log-level', 'warning',
68 '--check-unused-sessions=50',
69 '--unused-session-lifetime=1',
70 '--port', str(bokeh_port)] +
71 sum([['--host', host] for host in hosts], []))
72 if show:
73 args.append('--show')
74
75 bokeh_options = {'host': host if given_host else '127.0.0.1',
76 'http-port': http_port,
77 'tcp-port': port,
78 'bokeh-port': bokeh_port}
79 with open('.dask-web-ui.json', 'w') as f:
80 json.dump(bokeh_options, f, indent=2)
81
82 if sys.version_info[0] >= 3:
83 from bokeh.command.bootstrap import main
84 ctx = multiprocessing.get_context('spawn')
85 bokeh_proc = ctx.Process(target=main, args=(args,))
86 bokeh_proc.daemon = True
87 bokeh_proc.start()
88 else:
89 bokeh_proc = subprocess.Popen(args)
90
91 logger.info(" Bokeh UI at: http://%s:%d/status/"
92 % (ip, bokeh_port))
93 except ImportError:
94 logger.info("Please install Bokeh to get Web UI")
95 except Exception as e:
96 logger.warn("Could not start Bokeh web UI", exc_info=True)
97
98 loop.start()
99 loop.close()
100 scheduler.stop()
101 bokeh_proc.terminate()
102
103 logger.info("End scheduler at %s:%d", ip, port)
104
105
106 def go():
107 check_python_3()
108 main()
109
110
111 if __name__ == '__main__':
112 go()
113
[end of distributed/cli/dscheduler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/distributed/cli/dscheduler.py b/distributed/cli/dscheduler.py
--- a/distributed/cli/dscheduler.py
+++ b/distributed/cli/dscheduler.py
@@ -62,7 +62,8 @@
dirname = os.path.dirname(distributed.__file__)
paths = [os.path.join(dirname, 'bokeh', name)
for name in ['status', 'tasks']]
- binname = sys.argv[0][:-len('dscheduler')] + 'bokeh'
+ binname = 'bokeh.bat' if 'win' in sys.platform else 'bokeh'
+ binname = os.path.join(os.path.dirname(sys.argv[0]), binname)
args = ([binname, 'serve'] + paths +
['--log-level', 'warning',
'--check-unused-sessions=50',
|
{"golden_diff": "diff --git a/distributed/cli/dscheduler.py b/distributed/cli/dscheduler.py\n--- a/distributed/cli/dscheduler.py\n+++ b/distributed/cli/dscheduler.py\n@@ -62,7 +62,8 @@\n dirname = os.path.dirname(distributed.__file__)\n paths = [os.path.join(dirname, 'bokeh', name)\n for name in ['status', 'tasks']]\n- binname = sys.argv[0][:-len('dscheduler')] + 'bokeh'\n+ binname = 'bokeh.bat' if 'win' in sys.platform else 'bokeh'\n+ binname = os.path.join(os.path.dirname(sys.argv[0]), binname)\n args = ([binname, 'serve'] + paths +\n ['--log-level', 'warning',\n '--check-unused-sessions=50',\n", "issue": "WindowsError on dscheduler\nSo it appears that Bokeh is not found, but I have Bokeh and everything is up to date.\n\nInteresting...\n\nhere lieth the traceback:\n\n```\ndistributed.scheduler - INFO - Start Scheduler at: 192.168.0.7:8786\ndistributed.scheduler - INFO - http at: 192.168.0.7:9786\ndistributed.scheduler - WARNING - Could not start Bokeh web UI\nTraceback (most recent call last):\n File \"C:\\Anaconda\\lib\\site-packages\\distributed\\cli\\dscheduler.py\", line 88, in main\n bokeh_proc = subprocess.Popen(args)\n File \"C:\\Anaconda\\lib\\subprocess.py\", line 710, in __init__\n errread, errwrite)\n File \"C:\\Anaconda\\lib\\subprocess.py\", line 958, in _execute_child\n startupinfo)\nWindowsError: [Error 2] The system cannot find the file specified\nTraceback (most recent call last):\n File \"C:\\Anaconda\\Scripts\\dscheduler-script.py\", line 5, in <module>\n sys.exit(distributed.cli.dscheduler.go())\n File \"C:\\Anaconda\\lib\\site-packages\\distributed\\cli\\dscheduler.py\", line 107, in go\n main()\n File \"C:\\Anaconda\\lib\\site-packages\\click\\core.py\", line 716, in __call__\n return self.main(*args, **kwargs)\n File \"C:\\Anaconda\\lib\\site-packages\\click\\core.py\", line 696, in main\n rv = self.invoke(ctx)\n File \"C:\\Anaconda\\lib\\site-packages\\click\\core.py\", line 889, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File \"C:\\Anaconda\\lib\\site-packages\\click\\core.py\", line 534, in invoke\n return callback(*args, **kwargs)\n File \"C:\\Anaconda\\lib\\site-packages\\distributed\\cli\\dscheduler.py\", line 100, in main\n bokeh_proc.terminate()\nUnboundLocalError: local variable 'bokeh_proc' referenced before assignment\n```\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport json\nimport logging\nimport multiprocessing\nimport os\nimport socket\nimport subprocess\nimport sys\nfrom time import sleep\n\nimport click\n\nimport distributed\nfrom distributed import Scheduler\nfrom distributed.utils import get_ip\nfrom distributed.http import HTTPScheduler\nfrom distributed.cli.utils import check_python_3\nfrom tornado.ioloop import IOLoop\n\nlogger = logging.getLogger('distributed.scheduler')\n\nimport signal\n\ndef handle_signal(sig, frame):\n IOLoop.instance().add_callback(IOLoop.instance().stop)\n\nsignal.signal(signal.SIGINT, handle_signal)\nsignal.signal(signal.SIGTERM, handle_signal)\n\n\[email protected]()\[email protected]('center', type=str, default='')\[email protected]('--port', type=int, default=8786, help=\"Serving port\")\[email protected]('--http-port', type=int, default=9786, help=\"HTTP port\")\[email protected]('--bokeh-port', type=int, default=8787, help=\"HTTP port\")\[email protected]('--bokeh/--no-bokeh', '_bokeh', default=True, show_default=True,\n required=False, help=\"Launch Bokeh Web UI\")\[email protected]('--host', type=str, default=None,\n help=\"IP or 
hostname of this server\")\[email protected]('--show/--no-show', default=False, help=\"Show web UI\")\[email protected]('--bokeh-whitelist', default=None, multiple=True,\n help=\"IP addresses to whitelist for bokeh.\")\ndef main(center, host, port, http_port, bokeh_port, show, _bokeh, bokeh_whitelist):\n given_host = host\n host = host or get_ip()\n ip = socket.gethostbyname(host)\n loop = IOLoop.current()\n scheduler = Scheduler(center, ip=ip,\n services={('http', http_port): HTTPScheduler})\n if center:\n loop.run_sync(scheduler.sync_center)\n scheduler.start(port)\n\n if _bokeh:\n try:\n import bokeh\n import distributed.bokeh\n hosts = ['%s:%d' % (h, bokeh_port) for h in\n ['localhost', '127.0.0.1', ip, socket.gethostname(),\n host] + list(bokeh_whitelist)]\n dirname = os.path.dirname(distributed.__file__)\n paths = [os.path.join(dirname, 'bokeh', name)\n for name in ['status', 'tasks']]\n binname = sys.argv[0][:-len('dscheduler')] + 'bokeh'\n args = ([binname, 'serve'] + paths +\n ['--log-level', 'warning',\n '--check-unused-sessions=50',\n '--unused-session-lifetime=1',\n '--port', str(bokeh_port)] +\n sum([['--host', host] for host in hosts], []))\n if show:\n args.append('--show')\n\n bokeh_options = {'host': host if given_host else '127.0.0.1',\n 'http-port': http_port,\n 'tcp-port': port,\n 'bokeh-port': bokeh_port}\n with open('.dask-web-ui.json', 'w') as f:\n json.dump(bokeh_options, f, indent=2)\n\n if sys.version_info[0] >= 3:\n from bokeh.command.bootstrap import main\n ctx = multiprocessing.get_context('spawn')\n bokeh_proc = ctx.Process(target=main, args=(args,))\n bokeh_proc.daemon = True\n bokeh_proc.start()\n else:\n bokeh_proc = subprocess.Popen(args)\n\n logger.info(\" Bokeh UI at: http://%s:%d/status/\"\n % (ip, bokeh_port))\n except ImportError:\n logger.info(\"Please install Bokeh to get Web UI\")\n except Exception as e:\n logger.warn(\"Could not start Bokeh web UI\", exc_info=True)\n\n loop.start()\n loop.close()\n scheduler.stop()\n bokeh_proc.terminate()\n\n logger.info(\"End scheduler at %s:%d\", ip, port)\n\n\ndef go():\n check_python_3()\n main()\n\n\nif __name__ == '__main__':\n go()\n", "path": "distributed/cli/dscheduler.py"}]}
| 2,178 | 178 |
| gh_patches_debug_64882 | rasdani/github-patches | git_diff | streamlit__streamlit-1931 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add screenshot test for syntax highlighting
</issue>
<code>
[start of e2e/scripts/st_code.py]
1 # Copyright 2018-2020 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import streamlit as st
16
17 st.code("# This code is awesome!")
18
19 st.code("")
20
[end of e2e/scripts/st_code.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/e2e/scripts/st_code.py b/e2e/scripts/st_code.py
--- a/e2e/scripts/st_code.py
+++ b/e2e/scripts/st_code.py
@@ -17,3 +17,9 @@
st.code("# This code is awesome!")
st.code("")
+
+code = """
+def hello():
+ print("Hello, Streamlit!")
+"""
+st.code(code, language="python")
|
{"golden_diff": "diff --git a/e2e/scripts/st_code.py b/e2e/scripts/st_code.py\n--- a/e2e/scripts/st_code.py\n+++ b/e2e/scripts/st_code.py\n@@ -17,3 +17,9 @@\n st.code(\"# This code is awesome!\")\n \n st.code(\"\")\n+\n+code = \"\"\"\n+def hello():\n+ print(\"Hello, Streamlit!\")\n+\"\"\"\n+st.code(code, language=\"python\")\n", "issue": "Add screenshot test for syntax highlighting\n\n", "before_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nst.code(\"# This code is awesome!\")\n\nst.code(\"\")\n", "path": "e2e/scripts/st_code.py"}]}
| 733 | 93 |
| gh_patches_debug_22091 | rasdani/github-patches | git_diff | PennyLaneAI__pennylane-2542 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Derivatives of `qml.CommutingEvolution`
In cleaning warnings from the test suite, I found this warning:
```
templates/test_subroutines/test_commuting_evolution.py::TestGradients::test_differentiable_hamiltonian
/Users/christina/Prog/pl/lib/python3.8/site-packages/autograd/tracer.py:14: UserWarning: Output seems independent of input.
warnings.warn("Output seems independent of input.")
```
The effect of `CommutingEvolution` on the state did not change with the time.
This problems seems to stem from [commuting_evolution.py Line 160](https://github.com/PennyLaneAI/pennylane/blob/e55f463bcbca1832505d9440d2f5144543f944fe/pennylane/templates/subroutines/commuting_evolution.py#L160):
```
return qml.templates.ApproxTimeEvolution(hamiltonian, time_and_coeffs[1], 1).decomposition()
```
This line incorrectly indexes into `time_and_coeffs` to try and extract the time. It should instead use `time_and_coeffs[0]`.
Upon fixing that bug, we now find that parameter shift no longer works for `CommutingEvolution`.
These two tests start to fail:
```
==================================================================================== short test summary info ====================================================================================
FAILED templates/test_subroutines/test_commuting_evolution.py::TestGradients::test_two_term_case - assert False
FAILED templates/test_subroutines/test_commuting_evolution.py::TestGradients::test_four_term_case - assert False
```
The tests that fail manually specify the frequencies. If the frequencies are specified, the operation sets its own `grad_recipe`. This `grad_recipe` gives incorrect results. I have yet to track down the problem with the `grad_recipe` property.
In summary, fixing the bug in the creation of the `ApproxTimeEvolution` operation allows finite difference derivatives and derivatives for operations without specified frequencies to work correctly. Since the finite-difference derivatives are now correct, we can see that the parameter-shift gradients never worked correctly.
</issue>
<code>
[start of pennylane/templates/subroutines/commuting_evolution.py]
1 # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 r"""
15 Contains the CommutingEvolution template.
16 """
17 # pylint: disable-msg=too-many-arguments,import-outside-toplevel
18 import pennylane as qml
19 from pennylane.operation import Operation, AnyWires
20
21
22 class CommutingEvolution(Operation):
23 r"""Applies the time-evolution operator for a Hamiltonian expressed as a linear combination
24 of mutually commuting Pauli words.
25
26 A commuting Hamiltonian is of the form
27
28 .. math:: H \ = \ \displaystyle\sum_{j} c_j P_j,
29
30 where :math:`P_j` are mutually commutative Pauli words and :math:`c_j` are real coefficients.
31 The time-evolution under a commuting Hamiltonian is given by a unitary of the form
32
33 .. math::
34
35 U(t) \ = \ e^{-i H t} \ = \exp(-i t \displaystyle\sum_j c_j P_j) =
36 \displaystyle\prod_j \exp(-i t c_j P_j).
37
38 If the Hamiltonian has a small number of unique eigenvalues, partial derivatives of observable
39 expectation values, i.e.
40
41 .. math:: \langle 0 | W(t)^\dagger O W(t) | 0 \rangle,
42
43 where :math:`W(t) = V U(t) Y` for some :math:`V` and :math:`Y`, taken with respect to
44 :math:`t` may be efficiently computed through generalized parameter shift rules. When
45 initialized, this template will automatically compute the parameter-shift rule if given the
46 Hamiltonian's eigenvalue frequencies, i.e., the unique positive differences between
47 eigenvalues.
48
49 .. warning::
50
51 This template uses the :class:`~.ApproxTimeEvolution` operation with ``n=1`` in order to
52 implement the time evolution, as a single-step Trotterization is exact for a commuting
53 Hamiltonian.
54
55 - If the input Hamiltonian contains Pauli words which do not commute, the
56 compilation of the time evolution operator to a sequence of gates will
57 not equate to the exact propagation under the given Hamiltonian.
58
59 - Furthermore, if the specified frequencies do not correspond to the
60 true eigenvalue frequency spectrum of the commuting Hamiltonian,
61 computed gradients will be incorrect in general.
62
63 Args:
64 hamiltonian (.Hamiltonian): The commuting Hamiltonian defining the time-evolution operator.
65 The Hamiltonian must be explicitly written
66 in terms of products of Pauli gates (:class:`~.PauliX`, :class:`~.PauliY`,
67 :class:`~.PauliZ`, and :class:`~.Identity`).
68 time (int or float): The time of evolution, namely the parameter :math:`t` in :math:`e^{- i H t}`.
69
70 Keyword args:
71 frequencies (tuple[int or float]): The unique positive differences between eigenvalues in
72 the spectrum of the Hamiltonian. If the frequencies are not given, the cost function
73 partial derivative will be computed using the standard two-term shift rule applied to
74 the constituent Pauli words in the Hamiltonian individually.
75
76 shifts (tuple[int or float]): The parameter shifts to use in obtaining the
77 generalized parameter shift rules. If unspecified, equidistant shifts are used.
78
79 .. details::
80 :title: Usage Details
81
82 The template is used inside a qnode:
83
84 .. code-block:: python
85
86 import pennylane as qml
87
88 n_wires = 2
89 dev = qml.device('default.qubit', wires=n_wires)
90
91 coeffs = [1, -1]
92 obs = [qml.PauliX(0) @ qml.PauliY(1), qml.PauliY(0) @ qml.PauliX(1)]
93 hamiltonian = qml.Hamiltonian(coeffs, obs)
94 frequencies = (2, 4)
95
96 @qml.qnode(dev)
97 def circuit(time):
98 qml.PauliX(0)
99 qml.CommutingEvolution(hamiltonian, time, frequencies)
100 return qml.expval(qml.PauliZ(0))
101
102 >>> circuit(1)
103 0.6536436208636115
104 """
105
106 num_wires = AnyWires
107 grad_method = None
108
109 def __init__(self, hamiltonian, time, frequencies=None, shifts=None, do_queue=True, id=None):
110 # pylint: disable=import-outside-toplevel
111 from pennylane.gradients.general_shift_rules import (
112 generate_shift_rule,
113 )
114
115 if not isinstance(hamiltonian, qml.Hamiltonian):
116 type_name = type(hamiltonian).__name__
117 raise TypeError(f"hamiltonian must be of type pennylane.Hamiltonian, got {type_name}")
118
119 trainable_hamiltonian = qml.math.requires_grad(hamiltonian.coeffs)
120 if frequencies is not None and not trainable_hamiltonian:
121 c, s = generate_shift_rule(frequencies, shifts).T
122 recipe = qml.math.stack([c, qml.math.ones_like(c), s]).T
123 self.grad_recipe = (recipe,) + (None,) * len(hamiltonian.data)
124 self.grad_method = "A"
125
126 self._hyperparameters = {
127 "hamiltonian": hamiltonian,
128 "frequencies": frequencies,
129 "shifts": shifts,
130 }
131
132 super().__init__(
133 time, *hamiltonian.parameters, wires=hamiltonian.wires, do_queue=do_queue, id=id
134 )
135
136 @staticmethod
137 def compute_decomposition(
138 *time_and_coeffs, wires, hamiltonian, **kwargs
139 ): # pylint: disable=arguments-differ,unused-argument
140 r"""Representation of the operator as a product of other operators.
141
142 .. math:: O = O_1 O_2 \dots O_n.
143
144 Args:
145 time_and_coeffs (list[tensor_like or float]): list of coefficients of the Hamiltonian, prepended by the time
146 variable
147 wires (Any or Iterable[Any]): wires that the operator acts on
148 hamiltonian (.Hamiltonian): The commuting Hamiltonian defining the time-evolution operator.
149 frequencies (tuple[int or float]): The unique positive differences between eigenvalues in
150 the spectrum of the Hamiltonian.
151 shifts (tuple[int or float]): The parameter shifts to use in obtaining the
152 generalized parameter shift rules. If unspecified, equidistant shifts are used.
153
154 .. seealso:: :meth:`~.CommutingEvolution.decomposition`.
155
156 Returns:
157 list[.Operator]: decomposition of the operator
158 """
159 # uses standard PauliRot decomposition through ApproxTimeEvolution.
160 hamiltonian = qml.Hamiltonian(time_and_coeffs[1:], hamiltonian.ops)
161 return qml.templates.ApproxTimeEvolution(hamiltonian, time_and_coeffs[1], 1).decomposition()
162
163 def adjoint(self): # pylint: disable=arguments-differ
164
165 hamiltonian = qml.Hamiltonian(self.parameters[1:], self.hyperparameters["hamiltonian"].ops)
166 time = self.parameters[0]
167 frequencies = self.hyperparameters["frequencies"]
168 shifts = self.hyperparameters["shifts"]
169
170 return CommutingEvolution(hamiltonian, -time, frequencies, shifts)
171
[end of pennylane/templates/subroutines/commuting_evolution.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pennylane/templates/subroutines/commuting_evolution.py b/pennylane/templates/subroutines/commuting_evolution.py
--- a/pennylane/templates/subroutines/commuting_evolution.py
+++ b/pennylane/templates/subroutines/commuting_evolution.py
@@ -135,7 +135,7 @@
@staticmethod
def compute_decomposition(
- *time_and_coeffs, wires, hamiltonian, **kwargs
+ time, *coeffs, wires, hamiltonian, **kwargs
): # pylint: disable=arguments-differ,unused-argument
r"""Representation of the operator as a product of other operators.
@@ -157,8 +157,8 @@
list[.Operator]: decomposition of the operator
"""
# uses standard PauliRot decomposition through ApproxTimeEvolution.
- hamiltonian = qml.Hamiltonian(time_and_coeffs[1:], hamiltonian.ops)
- return qml.templates.ApproxTimeEvolution(hamiltonian, time_and_coeffs[1], 1).decomposition()
+ hamiltonian = qml.Hamiltonian(coeffs, hamiltonian.ops)
+ return qml.ApproxTimeEvolution(hamiltonian, time, 1)
def adjoint(self): # pylint: disable=arguments-differ
|
{"golden_diff": "diff --git a/pennylane/templates/subroutines/commuting_evolution.py b/pennylane/templates/subroutines/commuting_evolution.py\n--- a/pennylane/templates/subroutines/commuting_evolution.py\n+++ b/pennylane/templates/subroutines/commuting_evolution.py\n@@ -135,7 +135,7 @@\n \r\n @staticmethod\r\n def compute_decomposition(\r\n- *time_and_coeffs, wires, hamiltonian, **kwargs\r\n+ time, *coeffs, wires, hamiltonian, **kwargs\r\n ): # pylint: disable=arguments-differ,unused-argument\r\n r\"\"\"Representation of the operator as a product of other operators.\r\n \r\n@@ -157,8 +157,8 @@\n list[.Operator]: decomposition of the operator\r\n \"\"\"\r\n # uses standard PauliRot decomposition through ApproxTimeEvolution.\r\n- hamiltonian = qml.Hamiltonian(time_and_coeffs[1:], hamiltonian.ops)\r\n- return qml.templates.ApproxTimeEvolution(hamiltonian, time_and_coeffs[1], 1).decomposition()\r\n+ hamiltonian = qml.Hamiltonian(coeffs, hamiltonian.ops)\r\n+ return qml.ApproxTimeEvolution(hamiltonian, time, 1)\r\n \r\n def adjoint(self): # pylint: disable=arguments-differ\n", "issue": "[BUG] Derivatives of `qml.CommutingEvolution`\nIn cleaning warnings from the test suite, I found this warning:\r\n\r\n```\r\ntemplates/test_subroutines/test_commuting_evolution.py::TestGradients::test_differentiable_hamiltonian\r\n /Users/christina/Prog/pl/lib/python3.8/site-packages/autograd/tracer.py:14: UserWarning: Output seems independent of input.\r\n warnings.warn(\"Output seems independent of input.\")\r\n```\r\n\r\nThe effect of `CommutingEvolution` on the state did not change with the time. \r\nThis problems seems to stem from [commuting_evolution.py Line 160](https://github.com/PennyLaneAI/pennylane/blob/e55f463bcbca1832505d9440d2f5144543f944fe/pennylane/templates/subroutines/commuting_evolution.py#L160):\r\n```\r\n return qml.templates.ApproxTimeEvolution(hamiltonian, time_and_coeffs[1], 1).decomposition()\r\n```\r\nThis line incorrectly indexes into `time_and_coeffs` to try and extract the time. It should instead use `time_and_coeffs[0]`.\r\n\r\nUpon fixing that bug, we now find that parameter shift no longer works for `CommutingEvolution`. \r\n\r\nThese two tests start to fail:\r\n```\r\n==================================================================================== short test summary info ====================================================================================\r\nFAILED templates/test_subroutines/test_commuting_evolution.py::TestGradients::test_two_term_case - assert False\r\nFAILED templates/test_subroutines/test_commuting_evolution.py::TestGradients::test_four_term_case - assert False\r\n```\r\n\r\nThe tests that fail manually specify the frequencies. If the frequencies are specified, the operation sets its own `grad_recipe`. This `grad_recipe` gives incorrect results. I have yet to track down the problem with the `grad_recipe` property.\r\n\r\n\r\nIn summary, fixing the bug in the creation of the `ApproxTimeEvolution` operation allows finite difference derivatives and derivatives for operations without specified frequencies to work correctly. 
Since the finite-difference derivatives are now correct, we can see that the parameter-shift gradients never worked correctly.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\nr\"\"\"\r\nContains the CommutingEvolution template.\r\n\"\"\"\r\n# pylint: disable-msg=too-many-arguments,import-outside-toplevel\r\nimport pennylane as qml\r\nfrom pennylane.operation import Operation, AnyWires\r\n\r\n\r\nclass CommutingEvolution(Operation):\r\n r\"\"\"Applies the time-evolution operator for a Hamiltonian expressed as a linear combination\r\n of mutually commuting Pauli words.\r\n\r\n A commuting Hamiltonian is of the form\r\n\r\n .. math:: H \\ = \\ \\displaystyle\\sum_{j} c_j P_j,\r\n\r\n where :math:`P_j` are mutually commutative Pauli words and :math:`c_j` are real coefficients.\r\n The time-evolution under a commuting Hamiltonian is given by a unitary of the form\r\n\r\n .. math::\r\n\r\n U(t) \\ = \\ e^{-i H t} \\ = \\exp(-i t \\displaystyle\\sum_j c_j P_j) =\r\n \\displaystyle\\prod_j \\exp(-i t c_j P_j).\r\n\r\n If the Hamiltonian has a small number of unique eigenvalues, partial derivatives of observable\r\n expectation values, i.e.\r\n\r\n .. math:: \\langle 0 | W(t)^\\dagger O W(t) | 0 \\rangle,\r\n\r\n where :math:`W(t) = V U(t) Y` for some :math:`V` and :math:`Y`, taken with respect to\r\n :math:`t` may be efficiently computed through generalized parameter shift rules. When\r\n initialized, this template will automatically compute the parameter-shift rule if given the\r\n Hamiltonian's eigenvalue frequencies, i.e., the unique positive differences between\r\n eigenvalues.\r\n\r\n .. warning::\r\n\r\n This template uses the :class:`~.ApproxTimeEvolution` operation with ``n=1`` in order to\r\n implement the time evolution, as a single-step Trotterization is exact for a commuting\r\n Hamiltonian.\r\n\r\n - If the input Hamiltonian contains Pauli words which do not commute, the\r\n compilation of the time evolution operator to a sequence of gates will\r\n not equate to the exact propagation under the given Hamiltonian.\r\n\r\n - Furthermore, if the specified frequencies do not correspond to the\r\n true eigenvalue frequency spectrum of the commuting Hamiltonian,\r\n computed gradients will be incorrect in general.\r\n\r\n Args:\r\n hamiltonian (.Hamiltonian): The commuting Hamiltonian defining the time-evolution operator.\r\n The Hamiltonian must be explicitly written\r\n in terms of products of Pauli gates (:class:`~.PauliX`, :class:`~.PauliY`,\r\n :class:`~.PauliZ`, and :class:`~.Identity`).\r\n time (int or float): The time of evolution, namely the parameter :math:`t` in :math:`e^{- i H t}`.\r\n\r\n Keyword args:\r\n frequencies (tuple[int or float]): The unique positive differences between eigenvalues in\r\n the spectrum of the Hamiltonian. 
If the frequencies are not given, the cost function\r\n partial derivative will be computed using the standard two-term shift rule applied to\r\n the constituent Pauli words in the Hamiltonian individually.\r\n\r\n shifts (tuple[int or float]): The parameter shifts to use in obtaining the\r\n generalized parameter shift rules. If unspecified, equidistant shifts are used.\r\n\r\n .. details::\r\n :title: Usage Details\r\n\r\n The template is used inside a qnode:\r\n\r\n .. code-block:: python\r\n\r\n import pennylane as qml\r\n\r\n n_wires = 2\r\n dev = qml.device('default.qubit', wires=n_wires)\r\n\r\n coeffs = [1, -1]\r\n obs = [qml.PauliX(0) @ qml.PauliY(1), qml.PauliY(0) @ qml.PauliX(1)]\r\n hamiltonian = qml.Hamiltonian(coeffs, obs)\r\n frequencies = (2, 4)\r\n\r\n @qml.qnode(dev)\r\n def circuit(time):\r\n qml.PauliX(0)\r\n qml.CommutingEvolution(hamiltonian, time, frequencies)\r\n return qml.expval(qml.PauliZ(0))\r\n\r\n >>> circuit(1)\r\n 0.6536436208636115\r\n \"\"\"\r\n\r\n num_wires = AnyWires\r\n grad_method = None\r\n\r\n def __init__(self, hamiltonian, time, frequencies=None, shifts=None, do_queue=True, id=None):\r\n # pylint: disable=import-outside-toplevel\r\n from pennylane.gradients.general_shift_rules import (\r\n generate_shift_rule,\r\n )\r\n\r\n if not isinstance(hamiltonian, qml.Hamiltonian):\r\n type_name = type(hamiltonian).__name__\r\n raise TypeError(f\"hamiltonian must be of type pennylane.Hamiltonian, got {type_name}\")\r\n\r\n trainable_hamiltonian = qml.math.requires_grad(hamiltonian.coeffs)\r\n if frequencies is not None and not trainable_hamiltonian:\r\n c, s = generate_shift_rule(frequencies, shifts).T\r\n recipe = qml.math.stack([c, qml.math.ones_like(c), s]).T\r\n self.grad_recipe = (recipe,) + (None,) * len(hamiltonian.data)\r\n self.grad_method = \"A\"\r\n\r\n self._hyperparameters = {\r\n \"hamiltonian\": hamiltonian,\r\n \"frequencies\": frequencies,\r\n \"shifts\": shifts,\r\n }\r\n\r\n super().__init__(\r\n time, *hamiltonian.parameters, wires=hamiltonian.wires, do_queue=do_queue, id=id\r\n )\r\n\r\n @staticmethod\r\n def compute_decomposition(\r\n *time_and_coeffs, wires, hamiltonian, **kwargs\r\n ): # pylint: disable=arguments-differ,unused-argument\r\n r\"\"\"Representation of the operator as a product of other operators.\r\n\r\n .. math:: O = O_1 O_2 \\dots O_n.\r\n\r\n Args:\r\n time_and_coeffs (list[tensor_like or float]): list of coefficients of the Hamiltonian, prepended by the time\r\n variable\r\n wires (Any or Iterable[Any]): wires that the operator acts on\r\n hamiltonian (.Hamiltonian): The commuting Hamiltonian defining the time-evolution operator.\r\n frequencies (tuple[int or float]): The unique positive differences between eigenvalues in\r\n the spectrum of the Hamiltonian.\r\n shifts (tuple[int or float]): The parameter shifts to use in obtaining the\r\n generalized parameter shift rules. If unspecified, equidistant shifts are used.\r\n\r\n .. 
seealso:: :meth:`~.CommutingEvolution.decomposition`.\r\n\r\n Returns:\r\n list[.Operator]: decomposition of the operator\r\n \"\"\"\r\n # uses standard PauliRot decomposition through ApproxTimeEvolution.\r\n hamiltonian = qml.Hamiltonian(time_and_coeffs[1:], hamiltonian.ops)\r\n return qml.templates.ApproxTimeEvolution(hamiltonian, time_and_coeffs[1], 1).decomposition()\r\n\r\n def adjoint(self): # pylint: disable=arguments-differ\r\n\r\n hamiltonian = qml.Hamiltonian(self.parameters[1:], self.hyperparameters[\"hamiltonian\"].ops)\r\n time = self.parameters[0]\r\n frequencies = self.hyperparameters[\"frequencies\"]\r\n shifts = self.hyperparameters[\"shifts\"]\r\n\r\n return CommutingEvolution(hamiltonian, -time, frequencies, shifts)\r\n", "path": "pennylane/templates/subroutines/commuting_evolution.py"}]}
| 3,199 | 305 |
| gh_patches_debug_41971 | rasdani/github-patches | git_diff | pymedusa__Medusa-3864 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Github Personal Access Token
### Before submitting your issue:
Enable debug logging in Medusa settings, reproduce the error (be sure to disable after the bug is fixed)
**Python Version**: `2.7.13 (default, Nov 24 2017, 17:33:09) [GCC 6.3.0 20170516]`
**Operating System**: `Linux-3.4.113-bananian-armv7l-with-debian-9.3`
**Locale**: `UTF-8`
**Branch**: [master](../tree/master)
**Database**: `44.9`
**Commit**: pymedusa/Medusa@d6eb72d4e1019e5d1286ae6ac1d7e4b318229ebe
**What you did:** I selected Github personal access token as the authentication method for submitting new issues, entered my token, and tried to submit an issue from within Medusa
**What happened:** It kept giving me the error message that I should enter my username/password
**What you expected:** I expected the issue to be submitted of course but I also expected to still have to enter my username, as is usual AFAIK when logging into Github through a 3rd party service
Unless I'm gravely mistaken (or made an error while entering my login info, but I triple checked it) I think the way of logging in with a git token is wrong, it's missing the username field, and can thus not work.
</issue>
<code>
[start of medusa/issue_submitter.py]
1 # coding=utf-8
2 """GitHub issue submitter."""
3 from __future__ import unicode_literals
4
5 import difflib
6 import locale
7 import logging
8 import platform
9 import sys
10 from builtins import object
11 from builtins import str
12 from datetime import datetime, timedelta
13
14 from github import InputFileContent
15 from github.GithubException import GithubException, RateLimitExceededException
16
17 from medusa import app, db
18 from medusa.classes import ErrorViewer
19 from medusa.github_client import authenticate, get_github_repo, token_authenticate
20 from medusa.logger.adapters.style import BraceAdapter
21
22 log = BraceAdapter(logging.getLogger(__name__))
23 log.logger.addHandler(logging.NullHandler())
24
25 ISSUE_REPORT = """
26 ### INFO
27 **Python Version**: `{python_version}`
28 **Operating System**: `{os}`
29 **Locale**: `{locale}`
30 **Branch**: [{branch}](../tree/{branch})
31 **Database**: `{db_major_version}.{db_minor_version}`
32 **Commit**: {org}/{repo}@{commit}
33 **Link to Log**: {log_url}
34 ### ERROR
35 {log_line}
36 ---
37 _STAFF NOTIFIED_: @{org}/support @{org}/moderators
38 """
39
40
41 class IssueSubmitter(object):
42 """GitHub issue submitter."""
43
44 MISSING_CREDENTIALS = 'Please set your GitHub Username and Password in the config. Unable to submit issue ticket to GitHub.'
45 DEBUG_NOT_ENABLED = 'Please enable Debug mode in the config. Unable to submit issue ticket to GitHub.'
46 NO_ISSUES = 'No issue to be submitted to GitHub.'
47 UNSUPPORTED_VERSION = 'Please update Medusa, unable to submit issue ticket to GitHub with an outdated version.'
48 ALREADY_RUNNING = 'An issue is already being submitted, please wait for it to complete.'
49 BAD_CREDENTIALS = 'Please check your Github credentials in Medusa settings. Bad Credentials error'
50 RATE_LIMIT = 'Please wait before submit new issues. Github Rate Limit Exceeded error'
51 GITHUB_EXCEPTION = 'Error trying to contact Github. Please try again'
52 EXISTING_ISSUE_LOCKED = 'Issue #{number} is locked, check GitHub to find info about the error.'
53 COMMENTED_EXISTING_ISSUE = 'Commented on existing issue #{number} successfully!'
54 ISSUE_CREATED = 'Your issue ticket #{number} was submitted successfully!'
55
56 TITLE_PREFIX = '[APP SUBMITTED]: '
57
58 def __init__(self):
59 """Initialize class with the default constructor."""
60 self.running = False
61
62 @staticmethod
63 def create_gist(github, logline):
64 """Create a private gist with log data for the specified log line."""
65 context_loglines = logline.get_context_loglines()
66 if context_loglines:
67 content = '\n'.join([str(ll) for ll in context_loglines])
68 return github.get_user().create_gist(False, {'application.log': InputFileContent(content)})
69
70 @staticmethod
71 def create_issue_data(logline, log_url):
72 """Create the issue data expected by github api to be submitted."""
73 try:
74 locale_name = locale.getdefaultlocale()[1]
75 except ValueError:
76 locale_name = 'unknown'
77
78 # Get current DB version
79 main_db_con = db.DBConnection()
80 cur_branch_major_db_version, cur_branch_minor_db_version = main_db_con.checkDBVersion()
81
82 commit = app.CUR_COMMIT_HASH
83 base_url = '../blob/{commit}'.format(commit=commit) if commit else None
84 return ISSUE_REPORT.format(
85 python_version=sys.version[:120].replace('\n', ''),
86 os=platform.platform(),
87 locale=locale_name,
88 branch=app.BRANCH,
89 org=app.GIT_ORG,
90 repo=app.GIT_REPO,
91 commit=commit,
92 db_major_version=cur_branch_major_db_version,
93 db_minor_version=cur_branch_minor_db_version,
94 log_url=log_url or '**No Log available**',
95 log_line=logline.format_to_html(base_url=base_url),
96 )
97
98 @classmethod
99 def find_similar_issues(cls, github_repo, loglines, max_age=timedelta(days=180)):
100 """Find similar issues in the GitHub repository."""
101 results = dict()
102 issues = github_repo.get_issues(state='all', since=datetime.now() - max_age)
103 for issue in issues:
104 if hasattr(issue, 'pull_request') and issue.pull_request:
105 continue
106 issue_title = issue.title
107 if issue_title.startswith(cls.TITLE_PREFIX):
108 issue_title = issue_title[len(cls.TITLE_PREFIX):]
109
110 for logline in loglines:
111 log_title = logline.issue_title
112 if cls.similar(log_title, issue_title):
113 results[logline.key] = issue
114
115 if len(results) >= len(loglines):
116 break
117
118 return results
119
120 @staticmethod
121 def similar(title1, title2, ratio=0.9):
122 """Compare title similarity."""
123 return difflib.SequenceMatcher(None, title1, title2).ratio() >= ratio
124
125 def submit_github_issue(self, version_checker, max_issues=500):
126 """Submit errors to github."""
127 def result(message, level=logging.WARNING):
128 log.log(level, message)
129 return [(message, None)]
130
131 if not app.DEBUG:
132 return result(self.DEBUG_NOT_ENABLED)
133
134 if not (app.GIT_USERNAME and app.GIT_PASSWORD) or app.GIT_TOKEN:
135 return result(self.MISSING_CREDENTIALS)
136
137 if not ErrorViewer.errors:
138 return result(self.NO_ISSUES, logging.INFO)
139
140 if not app.DEVELOPER and version_checker.need_update():
141 return result(self.UNSUPPORTED_VERSION)
142
143 if self.running:
144 return result(self.ALREADY_RUNNING)
145
146 self.running = True
147 try:
148 if app.GIT_AUTH_TYPE:
149 github = token_authenticate(app.GIT_TOKEN)
150 else:
151 github = authenticate(app.GIT_USERNAME, app.GIT_PASSWORD)
152 if not github:
153 return result(self.BAD_CREDENTIALS)
154
155 github_repo = get_github_repo(app.GIT_ORG, app.GIT_REPO, gh=github)
156 loglines = ErrorViewer.errors[:max_issues]
157 similar_issues = self.find_similar_issues(github_repo, loglines)
158
159 return self.submit_issues(github, github_repo, loglines, similar_issues)
160 except RateLimitExceededException:
161 return result(self.RATE_LIMIT)
162 except (GithubException, IOError):
163 return result(self.GITHUB_EXCEPTION)
164 finally:
165 self.running = False
166
167 @classmethod
168 def submit_issues(cls, github, github_repo, loglines, similar_issues):
169 """Submit issues to github."""
170 results = []
171 for line in loglines:
172 gist = cls.create_gist(github, line)
173 message = cls.create_issue_data(line, log_url=gist.html_url if gist else None)
174 similar_issue = similar_issues.get(line.key)
175 issue_id = None
176 if similar_issue:
177 if similar_issue.raw_data['locked']:
178 submitter_result = cls.EXISTING_ISSUE_LOCKED.format(number=similar_issue.number)
179 log.warning(submitter_result)
180 else:
181 similar_issue.create_comment(message)
182 issue_id = similar_issue.number
183 submitter_result = cls.COMMENTED_EXISTING_ISSUE.format(number=issue_id)
184 log.info(submitter_result)
185 ErrorViewer.remove(line)
186 else:
187 issue = github_repo.create_issue('{prefix}{title}'.format(prefix=cls.TITLE_PREFIX, title=line.issue_title), message)
188 issue_id = issue.number
189 submitter_result = cls.ISSUE_CREATED.format(number=issue_id)
190 log.info(submitter_result)
191 ErrorViewer.remove(line)
192 results.append((submitter_result, issue_id))
193
194 return results
195
[end of medusa/issue_submitter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/medusa/issue_submitter.py b/medusa/issue_submitter.py
--- a/medusa/issue_submitter.py
+++ b/medusa/issue_submitter.py
@@ -12,7 +12,7 @@
from datetime import datetime, timedelta
from github import InputFileContent
-from github.GithubException import GithubException, RateLimitExceededException
+from github.GithubException import GithubException, RateLimitExceededException, UnknownObjectException
from medusa import app, db
from medusa.classes import ErrorViewer
@@ -41,14 +41,16 @@
class IssueSubmitter(object):
"""GitHub issue submitter."""
- MISSING_CREDENTIALS = 'Please set your GitHub Username and Password in the config. Unable to submit issue ticket to GitHub.'
- DEBUG_NOT_ENABLED = 'Please enable Debug mode in the config. Unable to submit issue ticket to GitHub.'
+ MISSING_CREDENTIALS = 'Please set your GitHub Username and Password in the config. Unable to submit issue ticket to GitHub.'
+ MISSING_CREDENTIALS_TOKEN = 'Please set your GitHub personal access token in the config. Unable to submit issue ticket to GitHub.'
+ DEBUG_NOT_ENABLED = 'Please enable Debug mode in the config. Unable to submit issue ticket to GitHub.'
NO_ISSUES = 'No issue to be submitted to GitHub.'
UNSUPPORTED_VERSION = 'Please update Medusa, unable to submit issue ticket to GitHub with an outdated version.'
ALREADY_RUNNING = 'An issue is already being submitted, please wait for it to complete.'
BAD_CREDENTIALS = 'Please check your Github credentials in Medusa settings. Bad Credentials error'
RATE_LIMIT = 'Please wait before submit new issues. Github Rate Limit Exceeded error'
GITHUB_EXCEPTION = 'Error trying to contact Github. Please try again'
+ GITHUB_UNKNOWNOBJECTEXCEPTION = 'GitHub returned an error "Not Found". If using a token, make sure the proper scopes are selected.'
EXISTING_ISSUE_LOCKED = 'Issue #{number} is locked, check GitHub to find info about the error.'
COMMENTED_EXISTING_ISSUE = 'Commented on existing issue #{number} successfully!'
ISSUE_CREATED = 'Your issue ticket #{number} was submitted successfully!'
@@ -131,7 +133,10 @@
if not app.DEBUG:
return result(self.DEBUG_NOT_ENABLED)
- if not (app.GIT_USERNAME and app.GIT_PASSWORD) or app.GIT_TOKEN:
+ if app.GIT_AUTH_TYPE == 1 and not app.GIT_TOKEN:
+ return result(self.MISSING_CREDENTIALS_TOKEN)
+
+ if app.GIT_AUTH_TYPE == 0 and not (app.GIT_USERNAME and app.GIT_PASSWORD):
return result(self.MISSING_CREDENTIALS)
if not ErrorViewer.errors:
@@ -159,7 +164,10 @@
return self.submit_issues(github, github_repo, loglines, similar_issues)
except RateLimitExceededException:
return result(self.RATE_LIMIT)
- except (GithubException, IOError):
+ except (GithubException, IOError) as error:
+ # If the api return http status 404, authentication or permission issue(token right to create gists)
+ if isinstance(error, UnknownObjectException):
+ return result(self.GITHUB_UNKNOWNOBJECTEXCEPTION)
return result(self.GITHUB_EXCEPTION)
finally:
self.running = False
|
{"golden_diff": "diff --git a/medusa/issue_submitter.py b/medusa/issue_submitter.py\n--- a/medusa/issue_submitter.py\n+++ b/medusa/issue_submitter.py\n@@ -12,7 +12,7 @@\n from datetime import datetime, timedelta\n \n from github import InputFileContent\n-from github.GithubException import GithubException, RateLimitExceededException\n+from github.GithubException import GithubException, RateLimitExceededException, UnknownObjectException\n \n from medusa import app, db\n from medusa.classes import ErrorViewer\n@@ -41,14 +41,16 @@\n class IssueSubmitter(object):\n \"\"\"GitHub issue submitter.\"\"\"\n \n- MISSING_CREDENTIALS = 'Please set your GitHub Username and Password in the config. Unable to submit issue ticket to GitHub.'\n- DEBUG_NOT_ENABLED = 'Please enable Debug mode in the config. Unable to submit issue ticket to GitHub.'\n+ MISSING_CREDENTIALS = 'Please set your GitHub Username and Password in the config. Unable to submit issue ticket to GitHub.'\n+ MISSING_CREDENTIALS_TOKEN = 'Please set your GitHub personal access token in the config. Unable to submit issue ticket to GitHub.'\n+ DEBUG_NOT_ENABLED = 'Please enable Debug mode in the config. Unable to submit issue ticket to GitHub.'\n NO_ISSUES = 'No issue to be submitted to GitHub.'\n UNSUPPORTED_VERSION = 'Please update Medusa, unable to submit issue ticket to GitHub with an outdated version.'\n ALREADY_RUNNING = 'An issue is already being submitted, please wait for it to complete.'\n BAD_CREDENTIALS = 'Please check your Github credentials in Medusa settings. Bad Credentials error'\n RATE_LIMIT = 'Please wait before submit new issues. Github Rate Limit Exceeded error'\n GITHUB_EXCEPTION = 'Error trying to contact Github. Please try again'\n+ GITHUB_UNKNOWNOBJECTEXCEPTION = 'GitHub returned an error \"Not Found\". 
If using a token, make sure the proper scopes are selected.'\n EXISTING_ISSUE_LOCKED = 'Issue #{number} is locked, check GitHub to find info about the error.'\n COMMENTED_EXISTING_ISSUE = 'Commented on existing issue #{number} successfully!'\n ISSUE_CREATED = 'Your issue ticket #{number} was submitted successfully!'\n@@ -131,7 +133,10 @@\n if not app.DEBUG:\n return result(self.DEBUG_NOT_ENABLED)\n \n- if not (app.GIT_USERNAME and app.GIT_PASSWORD) or app.GIT_TOKEN:\n+ if app.GIT_AUTH_TYPE == 1 and not app.GIT_TOKEN:\n+ return result(self.MISSING_CREDENTIALS_TOKEN)\n+\n+ if app.GIT_AUTH_TYPE == 0 and not (app.GIT_USERNAME and app.GIT_PASSWORD):\n return result(self.MISSING_CREDENTIALS)\n \n if not ErrorViewer.errors:\n@@ -159,7 +164,10 @@\n return self.submit_issues(github, github_repo, loglines, similar_issues)\n except RateLimitExceededException:\n return result(self.RATE_LIMIT)\n- except (GithubException, IOError):\n+ except (GithubException, IOError) as error:\n+ # If the api return http status 404, authentication or permission issue(token right to create gists)\n+ if isinstance(error, UnknownObjectException):\n+ return result(self.GITHUB_UNKNOWNOBJECTEXCEPTION)\n return result(self.GITHUB_EXCEPTION)\n finally:\n self.running = False\n", "issue": "Github Personal Acces Token\n### Before submitting your issue:\r\n\r\nEnable debug logging in Medusa settings, reproduce the error (be sure to disable after the bug is fixed)\r\n**Python Version**: `2.7.13 (default, Nov 24 2017, 17:33:09) [GCC 6.3.0 20170516]`\r\n**Operating System**: `Linux-3.4.113-bananian-armv7l-with-debian-9.3`\r\n**Locale**: `UTF-8`\r\n**Branch**: [master](../tree/master)\r\n**Database**: `44.9`\r\n**Commit**: pymedusa/Medusa@d6eb72d4e1019e5d1286ae6ac1d7e4b318229ebe\r\n**What you did:** I selected Github personal acces token as authentication method for submitting new issues, entered my token, and tried to submit an issue from within Medusa\r\n**What happened:** It kept giving me the error message that I should enter my username/password\r\n**What you expected:** I expected the issue to be submitted of course but I also expected to still have to enter my username, as is usual AFAIK when logging into Github through a 3rd party service \r\n\r\nUnless I'm gravely mistaken (or made an error while entering my login info, but I triple checked it) I think the way of logging in with a git token is wrong, it's missing the username field, and can thus not work.\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"GitHub issue submitter.\"\"\"\nfrom __future__ import unicode_literals\n\nimport difflib\nimport locale\nimport logging\nimport platform\nimport sys\nfrom builtins import object\nfrom builtins import str\nfrom datetime import datetime, timedelta\n\nfrom github import InputFileContent\nfrom github.GithubException import GithubException, RateLimitExceededException\n\nfrom medusa import app, db\nfrom medusa.classes import ErrorViewer\nfrom medusa.github_client import authenticate, get_github_repo, token_authenticate\nfrom medusa.logger.adapters.style import BraceAdapter\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\nISSUE_REPORT = \"\"\"\n### INFO\n**Python Version**: `{python_version}`\n**Operating System**: `{os}`\n**Locale**: `{locale}`\n**Branch**: [{branch}](../tree/{branch})\n**Database**: `{db_major_version}.{db_minor_version}`\n**Commit**: {org}/{repo}@{commit}\n**Link to Log**: {log_url}\n### ERROR\n{log_line}\n---\n_STAFF NOTIFIED_: @{org}/support 
@{org}/moderators\n\"\"\"\n\n\nclass IssueSubmitter(object):\n \"\"\"GitHub issue submitter.\"\"\"\n\n MISSING_CREDENTIALS = 'Please set your GitHub Username and Password in the config. Unable to submit issue ticket to GitHub.'\n DEBUG_NOT_ENABLED = 'Please enable Debug mode in the config. Unable to submit issue ticket to GitHub.'\n NO_ISSUES = 'No issue to be submitted to GitHub.'\n UNSUPPORTED_VERSION = 'Please update Medusa, unable to submit issue ticket to GitHub with an outdated version.'\n ALREADY_RUNNING = 'An issue is already being submitted, please wait for it to complete.'\n BAD_CREDENTIALS = 'Please check your Github credentials in Medusa settings. Bad Credentials error'\n RATE_LIMIT = 'Please wait before submit new issues. Github Rate Limit Exceeded error'\n GITHUB_EXCEPTION = 'Error trying to contact Github. Please try again'\n EXISTING_ISSUE_LOCKED = 'Issue #{number} is locked, check GitHub to find info about the error.'\n COMMENTED_EXISTING_ISSUE = 'Commented on existing issue #{number} successfully!'\n ISSUE_CREATED = 'Your issue ticket #{number} was submitted successfully!'\n\n TITLE_PREFIX = '[APP SUBMITTED]: '\n\n def __init__(self):\n \"\"\"Initialize class with the default constructor.\"\"\"\n self.running = False\n\n @staticmethod\n def create_gist(github, logline):\n \"\"\"Create a private gist with log data for the specified log line.\"\"\"\n context_loglines = logline.get_context_loglines()\n if context_loglines:\n content = '\\n'.join([str(ll) for ll in context_loglines])\n return github.get_user().create_gist(False, {'application.log': InputFileContent(content)})\n\n @staticmethod\n def create_issue_data(logline, log_url):\n \"\"\"Create the issue data expected by github api to be submitted.\"\"\"\n try:\n locale_name = locale.getdefaultlocale()[1]\n except ValueError:\n locale_name = 'unknown'\n\n # Get current DB version\n main_db_con = db.DBConnection()\n cur_branch_major_db_version, cur_branch_minor_db_version = main_db_con.checkDBVersion()\n\n commit = app.CUR_COMMIT_HASH\n base_url = '../blob/{commit}'.format(commit=commit) if commit else None\n return ISSUE_REPORT.format(\n python_version=sys.version[:120].replace('\\n', ''),\n os=platform.platform(),\n locale=locale_name,\n branch=app.BRANCH,\n org=app.GIT_ORG,\n repo=app.GIT_REPO,\n commit=commit,\n db_major_version=cur_branch_major_db_version,\n db_minor_version=cur_branch_minor_db_version,\n log_url=log_url or '**No Log available**',\n log_line=logline.format_to_html(base_url=base_url),\n )\n\n @classmethod\n def find_similar_issues(cls, github_repo, loglines, max_age=timedelta(days=180)):\n \"\"\"Find similar issues in the GitHub repository.\"\"\"\n results = dict()\n issues = github_repo.get_issues(state='all', since=datetime.now() - max_age)\n for issue in issues:\n if hasattr(issue, 'pull_request') and issue.pull_request:\n continue\n issue_title = issue.title\n if issue_title.startswith(cls.TITLE_PREFIX):\n issue_title = issue_title[len(cls.TITLE_PREFIX):]\n\n for logline in loglines:\n log_title = logline.issue_title\n if cls.similar(log_title, issue_title):\n results[logline.key] = issue\n\n if len(results) >= len(loglines):\n break\n\n return results\n\n @staticmethod\n def similar(title1, title2, ratio=0.9):\n \"\"\"Compare title similarity.\"\"\"\n return difflib.SequenceMatcher(None, title1, title2).ratio() >= ratio\n\n def submit_github_issue(self, version_checker, max_issues=500):\n \"\"\"Submit errors to github.\"\"\"\n def result(message, level=logging.WARNING):\n log.log(level, 
message)\n return [(message, None)]\n\n if not app.DEBUG:\n return result(self.DEBUG_NOT_ENABLED)\n\n if not (app.GIT_USERNAME and app.GIT_PASSWORD) or app.GIT_TOKEN:\n return result(self.MISSING_CREDENTIALS)\n\n if not ErrorViewer.errors:\n return result(self.NO_ISSUES, logging.INFO)\n\n if not app.DEVELOPER and version_checker.need_update():\n return result(self.UNSUPPORTED_VERSION)\n\n if self.running:\n return result(self.ALREADY_RUNNING)\n\n self.running = True\n try:\n if app.GIT_AUTH_TYPE:\n github = token_authenticate(app.GIT_TOKEN)\n else:\n github = authenticate(app.GIT_USERNAME, app.GIT_PASSWORD)\n if not github:\n return result(self.BAD_CREDENTIALS)\n\n github_repo = get_github_repo(app.GIT_ORG, app.GIT_REPO, gh=github)\n loglines = ErrorViewer.errors[:max_issues]\n similar_issues = self.find_similar_issues(github_repo, loglines)\n\n return self.submit_issues(github, github_repo, loglines, similar_issues)\n except RateLimitExceededException:\n return result(self.RATE_LIMIT)\n except (GithubException, IOError):\n return result(self.GITHUB_EXCEPTION)\n finally:\n self.running = False\n\n @classmethod\n def submit_issues(cls, github, github_repo, loglines, similar_issues):\n \"\"\"Submit issues to github.\"\"\"\n results = []\n for line in loglines:\n gist = cls.create_gist(github, line)\n message = cls.create_issue_data(line, log_url=gist.html_url if gist else None)\n similar_issue = similar_issues.get(line.key)\n issue_id = None\n if similar_issue:\n if similar_issue.raw_data['locked']:\n submitter_result = cls.EXISTING_ISSUE_LOCKED.format(number=similar_issue.number)\n log.warning(submitter_result)\n else:\n similar_issue.create_comment(message)\n issue_id = similar_issue.number\n submitter_result = cls.COMMENTED_EXISTING_ISSUE.format(number=issue_id)\n log.info(submitter_result)\n ErrorViewer.remove(line)\n else:\n issue = github_repo.create_issue('{prefix}{title}'.format(prefix=cls.TITLE_PREFIX, title=line.issue_title), message)\n issue_id = issue.number\n submitter_result = cls.ISSUE_CREATED.format(number=issue_id)\n log.info(submitter_result)\n ErrorViewer.remove(line)\n results.append((submitter_result, issue_id))\n\n return results\n", "path": "medusa/issue_submitter.py"}]}
| 2,991 | 742 |
gh_patches_debug_14740
|
rasdani/github-patches
|
git_diff
|
mdn__kuma-6178
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ClientError (Throttling) in request_cdn_cache_invalidation()
https://sentry.prod.mozaws.net/operations/mdn-prod/issues/6766908/
```
ClientError: An error occurred (Throttling) when calling the CreateInvalidation operation (reached max retries: 4): Rate exceeded
(1 additional frame(s) were not displayed)
...
File "newrelic/hooks/application_celery.py", line 85, in wrapper
return wrapped(*args, **kwargs)
File "celery/app/trace.py", line 648, in __protected_call__
return self.run(*args, **kwargs)
File "kuma/api/tasks.py", line 180, in request_cdn_cache_invalidation
invalidation = client.create_invalidation(
File "botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "botocore/client.py", line 661, in _make_api_call
raise error_class(parsed_response, operation_name)
ClientError: An error occurred (Throttling) when calling the CreateInvalidation operation (reached max retries: 4): Rate exceeded
```
</issue>
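The accepted diff later in this record addresses the throttling by wrapping the Celery task with `redo.retriable`, so a throttled `CreateInvalidation` call is retried instead of raising. A minimal standalone sketch of that retry idea — it assumes the third-party `redo` package is installed, and the function and argument names here are illustrative rather than kuma's actual API:

```python
# Sketch only: retry the CloudFront invalidation when AWS throttles the request.
# Assumes the third-party "redo" package; argument names are illustrative.
import boto3
from botocore.exceptions import ClientError
from redo import retriable


@retriable(retry_exceptions=(ClientError,))
def create_invalidation(distribution_id, paths, caller_reference):
    # redo re-invokes the whole function with a delay between attempts,
    # so a transient "Throttling" ClientError no longer kills the task.
    client = boto3.client("cloudfront")
    return client.create_invalidation(
        DistributionId=distribution_id,
        InvalidationBatch={
            "Paths": {"Quantity": len(paths), "Items": paths},
            "CallerReference": caller_reference,
        },
    )
```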
<code>
[start of kuma/api/tasks.py]
1
2
3 import json
4 import time
5
6 import boto3
7 from celery import task
8 from django.conf import settings
9 from django.utils.module_loading import import_string
10
11 from kuma.core.utils import chunked
12 from kuma.wiki.models import Document
13
14 from .v1.views import document_api_data, get_content_based_redirect, get_s3_key
15
16
17 _s3_resource = None
18 S3_MAX_KEYS_PER_DELETE = 1000
19
20
21 def get_s3_resource(config=None):
22 """
23 Get or create the S3 resource. This function is not thread-safe, since it
24 uses the default session, rather than a separate session for each thread.
25 We do not use threads however, so we don't have to handle them.
26 """
27 global _s3_resource
28 if _s3_resource is None:
29 _s3_resource = boto3.resource('s3', config=config)
30 return _s3_resource
31
32
33 # The global cloudfront client object to be lazily defined
34 _cloudfront_client = None
35
36
37 def get_cloudfront_client(config=None):
38 """
39 Get or create the CloudFront client. This function is not
40 thread-safe, since it uses the default session, rather than
41 a separate session for each thread.
42 We do not use threads however, so we don't have to handle them.
43 """
44 global _cloudfront_client
45 if _cloudfront_client is None:
46 _cloudfront_client = boto3.client('cloudfront', config=config)
47 return _cloudfront_client
48
49
50 def get_s3_bucket(config=None):
51 """
52 Get the S3 bucket using the name configured in the environment, otherwise
53 return None.
54 """
55 if not settings.MDN_API_S3_BUCKET_NAME:
56 return None
57 s3 = get_s3_resource(config=config)
58 return s3.Bucket(settings.MDN_API_S3_BUCKET_NAME)
59
60
61 @task
62 def unpublish(doc_locale_slug_pairs, log=None, completion_message=None,
63 invalidate_cdn_cache=True):
64 """
65 Delete one or more documents from the S3 bucket serving the document API.
66 """
67 if not log:
68 log = unpublish.get_logger()
69
70 s3_bucket = get_s3_bucket()
71 if not s3_bucket:
72 log.info('Skipping unpublish of {!r}: no S3 bucket configured'.format(
73 doc_locale_slug_pairs))
74 return
75
76 keys_to_delete = (get_s3_key(locale=locale, slug=slug)
77 for locale, slug in doc_locale_slug_pairs)
78
79 for chunk in chunked(keys_to_delete, S3_MAX_KEYS_PER_DELETE):
80 response = s3_bucket.delete_objects(
81 Delete={
82 'Objects': [{'Key': key} for key in chunk]
83 }
84 )
85 for info in response.get('Deleted', ()):
86 log.info('Unpublished {}'.format(info['Key']))
87 for info in response.get('Errors', ()):
88 log.error('Unable to unpublish {}: ({}) {}'.format(
89 info['Key'], info['Code'], info['Message']))
90
91 if completion_message:
92 log.info(completion_message)
93
94 if invalidate_cdn_cache:
95 request_cdn_cache_invalidation.delay(doc_locale_slug_pairs)
96
97
98 @task
99 def publish(doc_pks, log=None, completion_message=None,
100 invalidate_cdn_cache=True):
101 """
102 Publish one or more documents to the S3 bucket serving the document API.
103 """
104 if not log:
105 log = publish.get_logger()
106
107 s3_bucket = get_s3_bucket()
108 if not s3_bucket:
109 log.info(
110 'Skipping publish of {!r}: no S3 bucket configured'.format(doc_pks))
111 return
112
113 if invalidate_cdn_cache:
114 # Use this to turn the document IDs into pairs of (locale, slug).
115 doc_locale_slug_pairs = []
116
117 for pk in doc_pks:
118 try:
119 doc = Document.objects.get(pk=pk)
120 except Document.DoesNotExist:
121 log.error('Document with pk={} does not exist'.format(pk))
122 continue
123
124 if invalidate_cdn_cache:
125 # Build up this list for the benefit of triggering a
126 # CDN cache invalidation.
127 doc_locale_slug_pairs.append((doc.locale, doc.slug))
128
129 kwargs = dict(
130 ACL='public-read',
131 Key=get_s3_key(doc),
132 ContentType='application/json',
133 ContentLanguage=doc.locale,
134 )
135 redirect = get_content_based_redirect(doc)
136 if redirect:
137 redirect_url, is_redirect_to_document = redirect
138 if is_redirect_to_document:
139 kwargs.update(WebsiteRedirectLocation=redirect_url)
140 data = document_api_data(redirect_url=redirect_url)
141 kwargs.update(Body=json.dumps(data))
142 else:
143 data = document_api_data(doc)
144 kwargs.update(Body=json.dumps(data))
145 s3_object = s3_bucket.put_object(**kwargs)
146 log.info('Published {!r}'.format(s3_object))
147
148 if completion_message:
149 log.info(completion_message)
150
151 if invalidate_cdn_cache and doc_locale_slug_pairs:
152 request_cdn_cache_invalidation.delay(doc_locale_slug_pairs)
153
154
155 @task
156 def request_cdn_cache_invalidation(doc_locale_slug_pairs, log=None):
157 """
158 Trigger an attempt to purge the given documents from one or more
159 of the configured CloudFront distributions.
160 """
161 if not log:
162 log = request_cdn_cache_invalidation.get_logger()
163
164 client = get_cloudfront_client()
165 for label, conf in settings.MDN_CLOUDFRONT_DISTRIBUTIONS.items():
166 if not conf['id']:
167 log.info('No Distribution ID available for CloudFront {!r}'.format(
168 label
169 ))
170 continue
171 transform_function = import_string(conf['transform_function'])
172 paths = (
173 transform_function(locale, slug)
174 for locale, slug in doc_locale_slug_pairs
175 )
176 # In case the transform function decided to "opt-out" on a particular
177 # (locale, slug) it might return a falsy value.
178 paths = [x for x in paths if x]
179 if paths:
180 invalidation = client.create_invalidation(
181 DistributionId=conf['id'],
182 InvalidationBatch={
183 'Paths': {
184 'Quantity': len(paths),
185 'Items': paths
186 },
187 # The 'CallerReference' just needs to be a unique string.
188 # By using a timestamp we get slightly more information
189 # than using a UUID or a random string. But it needs to
190 # be sufficiently "different" that's why we use 6
191 # significant figures to avoid the unlikely chance that
192 # this code gets executed concurrently within a small
193 # time window.
194 'CallerReference': '{:.6f}'.format(time.time())
195 }
196 )
197 log.info(
198 'Issued cache invalidation for {!r} in {} distribution'
199 ' (received with {})'.format(
200 paths,
201 label,
202 invalidation['ResponseMetadata']['HTTPStatusCode']
203 )
204 )
205
[end of kuma/api/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kuma/api/tasks.py b/kuma/api/tasks.py
--- a/kuma/api/tasks.py
+++ b/kuma/api/tasks.py
@@ -4,9 +4,11 @@
import time
import boto3
+from botocore.exceptions import ClientError
from celery import task
from django.conf import settings
from django.utils.module_loading import import_string
+from redo import retriable
from kuma.core.utils import chunked
from kuma.wiki.models import Document
@@ -153,6 +155,7 @@
@task
+@retriable(retry_exceptions=(ClientError,))
def request_cdn_cache_invalidation(doc_locale_slug_pairs, log=None):
"""
Trigger an attempt to purge the given documents from one or more
|
{"golden_diff": "diff --git a/kuma/api/tasks.py b/kuma/api/tasks.py\n--- a/kuma/api/tasks.py\n+++ b/kuma/api/tasks.py\n@@ -4,9 +4,11 @@\n import time\n \n import boto3\n+from botocore.exceptions import ClientError\n from celery import task\n from django.conf import settings\n from django.utils.module_loading import import_string\n+from redo import retriable\n \n from kuma.core.utils import chunked\n from kuma.wiki.models import Document\n@@ -153,6 +155,7 @@\n \n \n @task\n+@retriable(retry_exceptions=(ClientError,))\n def request_cdn_cache_invalidation(doc_locale_slug_pairs, log=None):\n \"\"\"\n Trigger an attempt to purge the given documents from one or more\n", "issue": "ClientError (Throttling) in request_cdn_cache_invalidation()\nhttps://sentry.prod.mozaws.net/operations/mdn-prod/issues/6766908/\n\n```\nClientError: An error occurred (Throttling) when calling the CreateInvalidation operation (reached max retries: 4): Rate exceeded\n(1 additional frame(s) were not displayed)\n...\n File \"newrelic/hooks/application_celery.py\", line 85, in wrapper\n return wrapped(*args, **kwargs)\n File \"celery/app/trace.py\", line 648, in __protected_call__\n return self.run(*args, **kwargs)\n File \"kuma/api/tasks.py\", line 180, in request_cdn_cache_invalidation\n invalidation = client.create_invalidation(\n File \"botocore/client.py\", line 357, in _api_call\n return self._make_api_call(operation_name, kwargs)\n File \"botocore/client.py\", line 661, in _make_api_call\n raise error_class(parsed_response, operation_name)\n\nClientError: An error occurred (Throttling) when calling the CreateInvalidation operation (reached max retries: 4): Rate exceeded\n```\n", "before_files": [{"content": "\n\nimport json\nimport time\n\nimport boto3\nfrom celery import task\nfrom django.conf import settings\nfrom django.utils.module_loading import import_string\n\nfrom kuma.core.utils import chunked\nfrom kuma.wiki.models import Document\n\nfrom .v1.views import document_api_data, get_content_based_redirect, get_s3_key\n\n\n_s3_resource = None\nS3_MAX_KEYS_PER_DELETE = 1000\n\n\ndef get_s3_resource(config=None):\n \"\"\"\n Get or create the S3 resource. This function is not thread-safe, since it\n uses the default session, rather than a separate session for each thread.\n We do not use threads however, so we don't have to handle them.\n \"\"\"\n global _s3_resource\n if _s3_resource is None:\n _s3_resource = boto3.resource('s3', config=config)\n return _s3_resource\n\n\n# The global cloudfront client object to be lazily defined\n_cloudfront_client = None\n\n\ndef get_cloudfront_client(config=None):\n \"\"\"\n Get or create the CloudFront client. 
This function is not\n thread-safe, since it uses the default session, rather than\n a separate session for each thread.\n We do not use threads however, so we don't have to handle them.\n \"\"\"\n global _cloudfront_client\n if _cloudfront_client is None:\n _cloudfront_client = boto3.client('cloudfront', config=config)\n return _cloudfront_client\n\n\ndef get_s3_bucket(config=None):\n \"\"\"\n Get the S3 bucket using the name configured in the environment, otherwise\n return None.\n \"\"\"\n if not settings.MDN_API_S3_BUCKET_NAME:\n return None\n s3 = get_s3_resource(config=config)\n return s3.Bucket(settings.MDN_API_S3_BUCKET_NAME)\n\n\n@task\ndef unpublish(doc_locale_slug_pairs, log=None, completion_message=None,\n invalidate_cdn_cache=True):\n \"\"\"\n Delete one or more documents from the S3 bucket serving the document API.\n \"\"\"\n if not log:\n log = unpublish.get_logger()\n\n s3_bucket = get_s3_bucket()\n if not s3_bucket:\n log.info('Skipping unpublish of {!r}: no S3 bucket configured'.format(\n doc_locale_slug_pairs))\n return\n\n keys_to_delete = (get_s3_key(locale=locale, slug=slug)\n for locale, slug in doc_locale_slug_pairs)\n\n for chunk in chunked(keys_to_delete, S3_MAX_KEYS_PER_DELETE):\n response = s3_bucket.delete_objects(\n Delete={\n 'Objects': [{'Key': key} for key in chunk]\n }\n )\n for info in response.get('Deleted', ()):\n log.info('Unpublished {}'.format(info['Key']))\n for info in response.get('Errors', ()):\n log.error('Unable to unpublish {}: ({}) {}'.format(\n info['Key'], info['Code'], info['Message']))\n\n if completion_message:\n log.info(completion_message)\n\n if invalidate_cdn_cache:\n request_cdn_cache_invalidation.delay(doc_locale_slug_pairs)\n\n\n@task\ndef publish(doc_pks, log=None, completion_message=None,\n invalidate_cdn_cache=True):\n \"\"\"\n Publish one or more documents to the S3 bucket serving the document API.\n \"\"\"\n if not log:\n log = publish.get_logger()\n\n s3_bucket = get_s3_bucket()\n if not s3_bucket:\n log.info(\n 'Skipping publish of {!r}: no S3 bucket configured'.format(doc_pks))\n return\n\n if invalidate_cdn_cache:\n # Use this to turn the document IDs into pairs of (locale, slug).\n doc_locale_slug_pairs = []\n\n for pk in doc_pks:\n try:\n doc = Document.objects.get(pk=pk)\n except Document.DoesNotExist:\n log.error('Document with pk={} does not exist'.format(pk))\n continue\n\n if invalidate_cdn_cache:\n # Build up this list for the benefit of triggering a\n # CDN cache invalidation.\n doc_locale_slug_pairs.append((doc.locale, doc.slug))\n\n kwargs = dict(\n ACL='public-read',\n Key=get_s3_key(doc),\n ContentType='application/json',\n ContentLanguage=doc.locale,\n )\n redirect = get_content_based_redirect(doc)\n if redirect:\n redirect_url, is_redirect_to_document = redirect\n if is_redirect_to_document:\n kwargs.update(WebsiteRedirectLocation=redirect_url)\n data = document_api_data(redirect_url=redirect_url)\n kwargs.update(Body=json.dumps(data))\n else:\n data = document_api_data(doc)\n kwargs.update(Body=json.dumps(data))\n s3_object = s3_bucket.put_object(**kwargs)\n log.info('Published {!r}'.format(s3_object))\n\n if completion_message:\n log.info(completion_message)\n\n if invalidate_cdn_cache and doc_locale_slug_pairs:\n request_cdn_cache_invalidation.delay(doc_locale_slug_pairs)\n\n\n@task\ndef request_cdn_cache_invalidation(doc_locale_slug_pairs, log=None):\n \"\"\"\n Trigger an attempt to purge the given documents from one or more\n of the configured CloudFront distributions.\n \"\"\"\n if not log:\n log = 
request_cdn_cache_invalidation.get_logger()\n\n client = get_cloudfront_client()\n for label, conf in settings.MDN_CLOUDFRONT_DISTRIBUTIONS.items():\n if not conf['id']:\n log.info('No Distribution ID available for CloudFront {!r}'.format(\n label\n ))\n continue\n transform_function = import_string(conf['transform_function'])\n paths = (\n transform_function(locale, slug)\n for locale, slug in doc_locale_slug_pairs\n )\n # In case the transform function decided to \"opt-out\" on a particular\n # (locale, slug) it might return a falsy value.\n paths = [x for x in paths if x]\n if paths:\n invalidation = client.create_invalidation(\n DistributionId=conf['id'],\n InvalidationBatch={\n 'Paths': {\n 'Quantity': len(paths),\n 'Items': paths\n },\n # The 'CallerReference' just needs to be a unique string.\n # By using a timestamp we get slightly more information\n # than using a UUID or a random string. But it needs to\n # be sufficiently \"different\" that's why we use 6\n # significant figures to avoid the unlikely chance that\n # this code gets executed concurrently within a small\n # time window.\n 'CallerReference': '{:.6f}'.format(time.time())\n }\n )\n log.info(\n 'Issued cache invalidation for {!r} in {} distribution'\n ' (received with {})'.format(\n paths,\n label,\n invalidation['ResponseMetadata']['HTTPStatusCode']\n )\n )\n", "path": "kuma/api/tasks.py"}]}
| 2,792 | 166 |
gh_patches_debug_34655
|
rasdani/github-patches
|
git_diff
|
modal-labs__modal-examples-708
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mixtral tutorial doesn't work without huggingface access token
The tutorial for running mixtral on VLLM doesn't work since the model cannot be downloaded without a huggingface access token. This is because mixtral is now a gated model: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
```
Image build for im-1P0Aou6cl9H3BAwictbALw failed with the exception: │
│ GatedRepoError('401 Client Error. (Request ID: │
│ Root=1-66213747-475d6ad5261bb9eb4931c4fd;025f8bf1-0bb2-42ac-86a0-743e752004a0)\n\nCannot access gated repo for │
│ url https://huggingface.co/api/models/mistralai/Mixtral-8x7B-Instruct-v0.1/revision/main.\nRepo model │
│ mistralai/Mixtral-8x7B-Instruct-v0.1 is gated. You must be authenticated to access it.')
```
Affected Tutorial: https://modal.com/docs/examples/vllm_mixtral
Affected Code: https://github.com/modal-labs/modal-examples/blob/main/06_gpu_and_ml/llm-serving/vllm_mixtral.py
This can be fixed by applying an approach similar to the one used [here](https://github.com/modal-labs/modal-examples/pull/539) (adding an env var for a `HF_TOKEN`) to the [function call](https://github.com/modal-labs/modal-examples/blob/main/06_gpu_and_ml/llm-serving/vllm_mixtral.py#L45-L49) where the model is downloaded.
Also the tutorial needs to be updated to inform the user that a huggingface access token is required.
</issue>
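The accepted diff later in this record resolves this by attaching a Modal secret to the image-build step so the download can authenticate against the gated repo. A condensed, hypothetical sketch of that pattern — the secret name `huggingface-secret` and the `HF_TOKEN` variable follow the accepted diff and must already be configured in the user's Modal and Hugging Face accounts; this is not the full tutorial code:

```python
# Sketch only: give the weight-download step a Hugging Face token via a Modal secret.
import os

import modal

MODEL_DIR = "/model"
MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1"


def download_model_to_image(model_dir, model_name):
    # Recent huggingface_hub versions read HF_TOKEN from the environment,
    # so exposing the secret to this function is enough to access a gated repo.
    from huggingface_hub import snapshot_download

    os.makedirs(model_dir, exist_ok=True)
    snapshot_download(model_name, local_dir=model_dir, ignore_patterns=["*.pt", "*.bin"])


image = (
    modal.Image.debian_slim()
    .pip_install("huggingface_hub==0.22.2", "hf-transfer==0.1.6")
    .run_function(
        download_model_to_image,
        kwargs={"model_dir": MODEL_DIR, "model_name": MODEL_NAME},
        # The secret must define HF_TOKEN for an account that accepted the model's terms.
        secrets=[modal.Secret.from_name("huggingface-secret")],
    )
)
```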
<code>
[start of 06_gpu_and_ml/llm-serving/vllm_mixtral.py]
1 # # Fast inference with vLLM (Mixtral 8x7B)
2 #
3 # In this example, we show how to run basic inference, using [`vLLM`](https://github.com/vllm-project/vllm)
4 # to take advantage of PagedAttention, which speeds up sequential inferences with optimized key-value caching.
5 #
6 # We are running the [Mixtral 8x7B Instruct](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) model here,
7 # which is a mixture-of-experts model finetuned for conversation.
8 # You can expect ~3 minute cold starts.
9 # For a single request, the throughput is over 50 tokens/second.
10 # The larger the batch of prompts, the higher the throughput (up to hundreds of tokens per second).
11 #
12 # ## Setup
13 #
14 # First we import the components we need from `modal`.
15
16 import os
17 import time
18
19 import modal
20
21 MODEL_DIR = "/model"
22 MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1"
23 GPU_CONFIG = modal.gpu.A100(memory=80, count=2)
24
25
26 # ## Define a container image
27 #
28 # We want to create a Modal image which has the model weights pre-saved to a directory. The benefit of this
29 # is that the container no longer has to re-download the model from Huggingface - instead, it will take
30 # advantage of Modal's internal filesystem for faster cold starts.
31 #
32 # ### Download the weights
33 #
34 # We can download the model to a particular directory using the HuggingFace utility function `snapshot_download`.
35 #
36 # Mixtral is beefy, at nearly 100 GB in `safetensors` format, so this can take some time -- at least a few minutes.
37 #
38 # Tip: avoid using global variables in this function. Changes to code outside this function will not be detected and the download step will not re-run.
39 def download_model_to_image(model_dir, model_name):
40 from huggingface_hub import snapshot_download
41 from transformers.utils import move_cache
42
43 os.makedirs(model_dir, exist_ok=True)
44
45 snapshot_download(
46 model_name,
47 local_dir=model_dir,
48 ignore_patterns=["*.pt", "*.bin"], # Using safetensors
49 )
50 move_cache()
51
52
53 # ### Image definition
54 # We’ll start from a Dockerhub image recommended by `vLLM`, and use
55 # run_function to run the function defined above to ensure the weights of
56 # the model are saved within the container image.
57
58 vllm_image = (
59 modal.Image.debian_slim()
60 .pip_install(
61 "vllm==0.4.0.post1",
62 "torch==2.1.2",
63 "transformers==4.39.3",
64 "ray==2.10.0",
65 "hf-transfer==0.1.6",
66 "huggingface_hub==0.22.2",
67 )
68 .env({"HF_HUB_ENABLE_HF_TRANSFER": "1"})
69 .run_function(
70 download_model_to_image,
71 timeout=60 * 20,
72 kwargs={"model_dir": MODEL_DIR, "model_name": MODEL_NAME},
73 )
74 )
75
76 stub = modal.Stub("example-vllm-mixtral")
77
78
79 # ## The model class
80 #
81 # The inference function is best represented with Modal's [class syntax](/docs/guide/lifecycle-functions) and the `@enter` decorator.
82 # This enables us to load the model into memory just once every time a container starts up, and keep it cached
83 # on the GPU for each subsequent invocation of the function.
84 #
85 # The `vLLM` library allows the code to remain quite clean. We do have to patch the multi-GPU setup due to issues with Ray.
86 @stub.cls(
87 gpu=GPU_CONFIG,
88 timeout=60 * 10,
89 container_idle_timeout=60 * 10,
90 allow_concurrent_inputs=10,
91 image=vllm_image,
92 )
93 class Model:
94 @modal.enter()
95 def start_engine(self):
96 from vllm.engine.arg_utils import AsyncEngineArgs
97 from vllm.engine.async_llm_engine import AsyncLLMEngine
98
99 print("🥶 cold starting inference")
100 start = time.monotonic_ns()
101
102 engine_args = AsyncEngineArgs(
103 model=MODEL_DIR,
104 tensor_parallel_size=GPU_CONFIG.count,
105 gpu_memory_utilization=0.90,
106 enforce_eager=False, # capture the graph for faster inference, but slower cold starts
107 disable_log_stats=True, # disable logging so we can stream tokens
108 disable_log_requests=True,
109 )
110 self.template = "<s> [INST] {user} [/INST] "
111
112 # this can take some time!
113 self.engine = AsyncLLMEngine.from_engine_args(engine_args)
114 duration_s = (time.monotonic_ns() - start) / 1e9
115 print(f"🏎️ engine started in {duration_s:.0f}s")
116
117 @modal.method()
118 async def completion_stream(self, user_question):
119 from vllm import SamplingParams
120 from vllm.utils import random_uuid
121
122 sampling_params = SamplingParams(
123 temperature=0.75,
124 max_tokens=128,
125 repetition_penalty=1.1,
126 )
127
128 request_id = random_uuid()
129 result_generator = self.engine.generate(
130 self.template.format(user=user_question),
131 sampling_params,
132 request_id,
133 )
134 index, num_tokens = 0, 0
135 start = time.monotonic_ns()
136 async for output in result_generator:
137 if (
138 output.outputs[0].text
139 and "\ufffd" == output.outputs[0].text[-1]
140 ):
141 continue
142 text_delta = output.outputs[0].text[index:]
143 index = len(output.outputs[0].text)
144 num_tokens = len(output.outputs[0].token_ids)
145
146 yield text_delta
147 duration_s = (time.monotonic_ns() - start) / 1e9
148
149 yield (
150 f"\n\tGenerated {num_tokens} tokens from {MODEL_NAME} in {duration_s:.1f}s,"
151 f" throughput = {num_tokens / duration_s:.0f} tokens/second on {GPU_CONFIG}.\n"
152 )
153
154 @modal.exit()
155 def stop_engine(self):
156 if GPU_CONFIG.count > 1:
157 import ray
158
159 ray.shutdown()
160
161
162 # ## Run the model
163 # We define a [`local_entrypoint`](/docs/guide/apps#entrypoints-for-ephemeral-apps) to call our remote function
164 # sequentially for a list of inputs. You can run this locally with `modal run -q vllm_mixtral.py`. The `q` flag
165 # enables the text to stream in your local terminal.
166 @stub.local_entrypoint()
167 def main():
168 questions = [
169 "Implement a Python function to compute the Fibonacci numbers.",
170 "What is the fable involving a fox and grapes?",
171 "What were the major contributing factors to the fall of the Roman Empire?",
172 "Describe the city of the future, considering advances in technology, environmental changes, and societal shifts.",
173 "What is the product of 9 and 8?",
174 "Who was Emperor Norton I, and what was his significance in San Francisco's history?",
175 ]
176 model = Model()
177 for question in questions:
178 print("Sending new request:", question, "\n\n")
179 for text in model.completion_stream.remote_gen(question):
180 print(text, end="", flush=text.endswith("\n"))
181
182
183 # ## Deploy and invoke the model
184 # Once we deploy this model with `modal deploy text_generation_inference.py`,
185 # we can invoke inference from other apps, sharing the same pool
186 # of GPU containers with all other apps we might need.
187 #
188 # ```
189 # $ python
190 # >>> import modal
191 # >>> f = modal.Function.lookup("example-tgi-Mixtral-8x7B-Instruct-v0.1", "Model.generate")
192 # >>> f.remote("What is the story about the fox and grapes?")
193 # 'The story about the fox and grapes ...
194 # ```
195
196 # ## Coupling a frontend web application
197 #
198 # We can stream inference from a FastAPI backend, also deployed on Modal.
199 #
200 # You can try our deployment [here](https://modal-labs--vllm-mixtral.modal.run).
201
202 from pathlib import Path
203
204 from modal import Mount, asgi_app
205
206 frontend_path = Path(__file__).parent.parent / "llm-frontend"
207
208
209 @stub.function(
210 mounts=[Mount.from_local_dir(frontend_path, remote_path="/assets")],
211 keep_warm=1,
212 allow_concurrent_inputs=20,
213 timeout=60 * 10,
214 )
215 @asgi_app()
216 def vllm_mixtral():
217 import json
218
219 import fastapi
220 import fastapi.staticfiles
221 from fastapi.responses import StreamingResponse
222
223 web_app = fastapi.FastAPI()
224
225 @web_app.get("/stats")
226 async def stats():
227 stats = await Model().completion_stream.get_current_stats.aio()
228 return {
229 "backlog": stats.backlog,
230 "num_total_runners": stats.num_total_runners,
231 "model": MODEL_NAME + " (vLLM)",
232 }
233
234 @web_app.get("/completion/{question}")
235 async def completion(question: str):
236 from urllib.parse import unquote
237
238 async def generate():
239 async for text in Model().completion_stream.remote_gen.aio(
240 unquote(question)
241 ):
242 yield f"data: {json.dumps(dict(text=text), ensure_ascii=False)}\n\n"
243
244 return StreamingResponse(generate(), media_type="text/event-stream")
245
246 web_app.mount(
247 "/", fastapi.staticfiles.StaticFiles(directory="/assets", html=True)
248 )
249 return web_app
250
[end of 06_gpu_and_ml/llm-serving/vllm_mixtral.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/06_gpu_and_ml/llm-serving/vllm_mixtral.py b/06_gpu_and_ml/llm-serving/vllm_mixtral.py
--- a/06_gpu_and_ml/llm-serving/vllm_mixtral.py
+++ b/06_gpu_and_ml/llm-serving/vllm_mixtral.py
@@ -20,6 +20,7 @@
MODEL_DIR = "/model"
MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+MODEL_REVISION = "1e637f2d7cb0a9d6fb1922f305cb784995190a83"
GPU_CONFIG = modal.gpu.A100(memory=80, count=2)
@@ -33,10 +34,15 @@
#
# We can download the model to a particular directory using the HuggingFace utility function `snapshot_download`.
#
-# Mixtral is beefy, at nearly 100 GB in `safetensors` format, so this can take some time -- at least a few minutes.
+# For this step to work on a [gated model](https://huggingface.co/docs/hub/en/models-gated)
+# like Mixtral 8x7B, the `HF_TOKEN` environment variable must be set.
+#
+# After [creating a HuggingFace access token](https://huggingface.co/settings/tokens)
+# and accepting the [terms of use](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1),
+# head to the [secrets page](https://modal.com/secrets) to share it with Modal as `huggingface-secret`.
#
-# Tip: avoid using global variables in this function. Changes to code outside this function will not be detected and the download step will not re-run.
-def download_model_to_image(model_dir, model_name):
+# Mixtral is beefy, at nearly 100 GB in `safetensors` format, so this can take some time -- at least a few minutes.
+def download_model_to_image(model_dir, model_name, model_revision):
from huggingface_hub import snapshot_download
from transformers.utils import move_cache
@@ -44,6 +50,7 @@
snapshot_download(
model_name,
+ revision=model_revision,
local_dir=model_dir,
ignore_patterns=["*.pt", "*.bin"], # Using safetensors
)
@@ -69,7 +76,12 @@
.run_function(
download_model_to_image,
timeout=60 * 20,
- kwargs={"model_dir": MODEL_DIR, "model_name": MODEL_NAME},
+ kwargs={
+ "model_dir": MODEL_DIR,
+ "model_name": MODEL_NAME,
+ "model_revision": MODEL_REVISION,
+ },
+ secrets=[modal.Secret.from_name("huggingface-secret")],
)
)
|
{"golden_diff": "diff --git a/06_gpu_and_ml/llm-serving/vllm_mixtral.py b/06_gpu_and_ml/llm-serving/vllm_mixtral.py\n--- a/06_gpu_and_ml/llm-serving/vllm_mixtral.py\n+++ b/06_gpu_and_ml/llm-serving/vllm_mixtral.py\n@@ -20,6 +20,7 @@\n \n MODEL_DIR = \"/model\"\n MODEL_NAME = \"mistralai/Mixtral-8x7B-Instruct-v0.1\"\n+MODEL_REVISION = \"1e637f2d7cb0a9d6fb1922f305cb784995190a83\"\n GPU_CONFIG = modal.gpu.A100(memory=80, count=2)\n \n \n@@ -33,10 +34,15 @@\n #\n # We can download the model to a particular directory using the HuggingFace utility function `snapshot_download`.\n #\n-# Mixtral is beefy, at nearly 100 GB in `safetensors` format, so this can take some time -- at least a few minutes.\n+# For this step to work on a [gated model](https://huggingface.co/docs/hub/en/models-gated)\n+# like Mixtral 8x7B, the `HF_TOKEN` environment variable must be set.\n+#\n+# After [creating a HuggingFace access token](https://huggingface.co/settings/tokens)\n+# and accepting the [terms of use](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1),\n+# head to the [secrets page](https://modal.com/secrets) to share it with Modal as `huggingface-secret`.\n #\n-# Tip: avoid using global variables in this function. Changes to code outside this function will not be detected and the download step will not re-run.\n-def download_model_to_image(model_dir, model_name):\n+# Mixtral is beefy, at nearly 100 GB in `safetensors` format, so this can take some time -- at least a few minutes.\n+def download_model_to_image(model_dir, model_name, model_revision):\n from huggingface_hub import snapshot_download\n from transformers.utils import move_cache\n \n@@ -44,6 +50,7 @@\n \n snapshot_download(\n model_name,\n+ revision=model_revision,\n local_dir=model_dir,\n ignore_patterns=[\"*.pt\", \"*.bin\"], # Using safetensors\n )\n@@ -69,7 +76,12 @@\n .run_function(\n download_model_to_image,\n timeout=60 * 20,\n- kwargs={\"model_dir\": MODEL_DIR, \"model_name\": MODEL_NAME},\n+ kwargs={\n+ \"model_dir\": MODEL_DIR,\n+ \"model_name\": MODEL_NAME,\n+ \"model_revision\": MODEL_REVISION,\n+ },\n+ secrets=[modal.Secret.from_name(\"huggingface-secret\")],\n )\n )\n", "issue": "Mixtral tutorial doesn't work without huggingface access token\nThe tutorial for running mixtral on VLLM doesn't work since the model cannot be downloaded without a huggingface access token. This is because mixtral is now a gated model: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1\r\n\r\n```\r\nImage build for im-1P0Aou6cl9H3BAwictbALw failed with the exception: \u2502\r\n\u2502 GatedRepoError('401 Client Error. (Request ID: \u2502\r\n\u2502 Root=1-66213747-475d6ad5261bb9eb4931c4fd;025f8bf1-0bb2-42ac-86a0-743e752004a0)\\n\\nCannot access gated repo for \u2502\r\n\u2502 url https://huggingface.co/api/models/mistralai/Mixtral-8x7B-Instruct-v0.1/revision/main.\\nRepo model \u2502\r\n\u2502 mistralai/Mixtral-8x7B-Instruct-v0.1 is gated. 
You must be authenticated to access it.')\r\n```\r\n\r\nAffected Tutorial: https://modal.com/docs/examples/vllm_mixtral\r\nAffected Code: https://github.com/modal-labs/modal-examples/blob/main/06_gpu_and_ml/llm-serving/vllm_mixtral.py\r\n\r\nThis can be fixed using a similar approach as done [here]( https://github.com/modal-labs/modal-examples/pull/539) (adding an env var for a `HF_TOKEN`) to the [function call](https://github.com/modal-labs/modal-examples/blob/main/06_gpu_and_ml/llm-serving/vllm_mixtral.py#L45-L49) where the model is downloaded.\r\nAlso the tutorial needs to be updated to inform the user that a huggingface access token is required.\n", "before_files": [{"content": "# # Fast inference with vLLM (Mixtral 8x7B)\n#\n# In this example, we show how to run basic inference, using [`vLLM`](https://github.com/vllm-project/vllm)\n# to take advantage of PagedAttention, which speeds up sequential inferences with optimized key-value caching.\n#\n# We are running the [Mixtral 8x7B Instruct](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) model here,\n# which is a mixture-of-experts model finetuned for conversation.\n# You can expect ~3 minute cold starts.\n# For a single request, the throughput is over 50 tokens/second.\n# The larger the batch of prompts, the higher the throughput (up to hundreds of tokens per second).\n#\n# ## Setup\n#\n# First we import the components we need from `modal`.\n\nimport os\nimport time\n\nimport modal\n\nMODEL_DIR = \"/model\"\nMODEL_NAME = \"mistralai/Mixtral-8x7B-Instruct-v0.1\"\nGPU_CONFIG = modal.gpu.A100(memory=80, count=2)\n\n\n# ## Define a container image\n#\n# We want to create a Modal image which has the model weights pre-saved to a directory. The benefit of this\n# is that the container no longer has to re-download the model from Huggingface - instead, it will take\n# advantage of Modal's internal filesystem for faster cold starts.\n#\n# ### Download the weights\n#\n# We can download the model to a particular directory using the HuggingFace utility function `snapshot_download`.\n#\n# Mixtral is beefy, at nearly 100 GB in `safetensors` format, so this can take some time -- at least a few minutes.\n#\n# Tip: avoid using global variables in this function. 
Changes to code outside this function will not be detected and the download step will not re-run.\ndef download_model_to_image(model_dir, model_name):\n from huggingface_hub import snapshot_download\n from transformers.utils import move_cache\n\n os.makedirs(model_dir, exist_ok=True)\n\n snapshot_download(\n model_name,\n local_dir=model_dir,\n ignore_patterns=[\"*.pt\", \"*.bin\"], # Using safetensors\n )\n move_cache()\n\n\n# ### Image definition\n# We\u2019ll start from a Dockerhub image recommended by `vLLM`, and use\n# run_function to run the function defined above to ensure the weights of\n# the model are saved within the container image.\n\nvllm_image = (\n modal.Image.debian_slim()\n .pip_install(\n \"vllm==0.4.0.post1\",\n \"torch==2.1.2\",\n \"transformers==4.39.3\",\n \"ray==2.10.0\",\n \"hf-transfer==0.1.6\",\n \"huggingface_hub==0.22.2\",\n )\n .env({\"HF_HUB_ENABLE_HF_TRANSFER\": \"1\"})\n .run_function(\n download_model_to_image,\n timeout=60 * 20,\n kwargs={\"model_dir\": MODEL_DIR, \"model_name\": MODEL_NAME},\n )\n)\n\nstub = modal.Stub(\"example-vllm-mixtral\")\n\n\n# ## The model class\n#\n# The inference function is best represented with Modal's [class syntax](/docs/guide/lifecycle-functions) and the `@enter` decorator.\n# This enables us to load the model into memory just once every time a container starts up, and keep it cached\n# on the GPU for each subsequent invocation of the function.\n#\n# The `vLLM` library allows the code to remain quite clean. We do have to patch the multi-GPU setup due to issues with Ray.\[email protected](\n gpu=GPU_CONFIG,\n timeout=60 * 10,\n container_idle_timeout=60 * 10,\n allow_concurrent_inputs=10,\n image=vllm_image,\n)\nclass Model:\n @modal.enter()\n def start_engine(self):\n from vllm.engine.arg_utils import AsyncEngineArgs\n from vllm.engine.async_llm_engine import AsyncLLMEngine\n\n print(\"\ud83e\udd76 cold starting inference\")\n start = time.monotonic_ns()\n\n engine_args = AsyncEngineArgs(\n model=MODEL_DIR,\n tensor_parallel_size=GPU_CONFIG.count,\n gpu_memory_utilization=0.90,\n enforce_eager=False, # capture the graph for faster inference, but slower cold starts\n disable_log_stats=True, # disable logging so we can stream tokens\n disable_log_requests=True,\n )\n self.template = \"<s> [INST] {user} [/INST] \"\n\n # this can take some time!\n self.engine = AsyncLLMEngine.from_engine_args(engine_args)\n duration_s = (time.monotonic_ns() - start) / 1e9\n print(f\"\ud83c\udfce\ufe0f engine started in {duration_s:.0f}s\")\n\n @modal.method()\n async def completion_stream(self, user_question):\n from vllm import SamplingParams\n from vllm.utils import random_uuid\n\n sampling_params = SamplingParams(\n temperature=0.75,\n max_tokens=128,\n repetition_penalty=1.1,\n )\n\n request_id = random_uuid()\n result_generator = self.engine.generate(\n self.template.format(user=user_question),\n sampling_params,\n request_id,\n )\n index, num_tokens = 0, 0\n start = time.monotonic_ns()\n async for output in result_generator:\n if (\n output.outputs[0].text\n and \"\\ufffd\" == output.outputs[0].text[-1]\n ):\n continue\n text_delta = output.outputs[0].text[index:]\n index = len(output.outputs[0].text)\n num_tokens = len(output.outputs[0].token_ids)\n\n yield text_delta\n duration_s = (time.monotonic_ns() - start) / 1e9\n\n yield (\n f\"\\n\\tGenerated {num_tokens} tokens from {MODEL_NAME} in {duration_s:.1f}s,\"\n f\" throughput = {num_tokens / duration_s:.0f} tokens/second on {GPU_CONFIG}.\\n\"\n )\n\n @modal.exit()\n def 
stop_engine(self):\n if GPU_CONFIG.count > 1:\n import ray\n\n ray.shutdown()\n\n\n# ## Run the model\n# We define a [`local_entrypoint`](/docs/guide/apps#entrypoints-for-ephemeral-apps) to call our remote function\n# sequentially for a list of inputs. You can run this locally with `modal run -q vllm_mixtral.py`. The `q` flag\n# enables the text to stream in your local terminal.\[email protected]_entrypoint()\ndef main():\n questions = [\n \"Implement a Python function to compute the Fibonacci numbers.\",\n \"What is the fable involving a fox and grapes?\",\n \"What were the major contributing factors to the fall of the Roman Empire?\",\n \"Describe the city of the future, considering advances in technology, environmental changes, and societal shifts.\",\n \"What is the product of 9 and 8?\",\n \"Who was Emperor Norton I, and what was his significance in San Francisco's history?\",\n ]\n model = Model()\n for question in questions:\n print(\"Sending new request:\", question, \"\\n\\n\")\n for text in model.completion_stream.remote_gen(question):\n print(text, end=\"\", flush=text.endswith(\"\\n\"))\n\n\n# ## Deploy and invoke the model\n# Once we deploy this model with `modal deploy text_generation_inference.py`,\n# we can invoke inference from other apps, sharing the same pool\n# of GPU containers with all other apps we might need.\n#\n# ```\n# $ python\n# >>> import modal\n# >>> f = modal.Function.lookup(\"example-tgi-Mixtral-8x7B-Instruct-v0.1\", \"Model.generate\")\n# >>> f.remote(\"What is the story about the fox and grapes?\")\n# 'The story about the fox and grapes ...\n# ```\n\n# ## Coupling a frontend web application\n#\n# We can stream inference from a FastAPI backend, also deployed on Modal.\n#\n# You can try our deployment [here](https://modal-labs--vllm-mixtral.modal.run).\n\nfrom pathlib import Path\n\nfrom modal import Mount, asgi_app\n\nfrontend_path = Path(__file__).parent.parent / \"llm-frontend\"\n\n\[email protected](\n mounts=[Mount.from_local_dir(frontend_path, remote_path=\"/assets\")],\n keep_warm=1,\n allow_concurrent_inputs=20,\n timeout=60 * 10,\n)\n@asgi_app()\ndef vllm_mixtral():\n import json\n\n import fastapi\n import fastapi.staticfiles\n from fastapi.responses import StreamingResponse\n\n web_app = fastapi.FastAPI()\n\n @web_app.get(\"/stats\")\n async def stats():\n stats = await Model().completion_stream.get_current_stats.aio()\n return {\n \"backlog\": stats.backlog,\n \"num_total_runners\": stats.num_total_runners,\n \"model\": MODEL_NAME + \" (vLLM)\",\n }\n\n @web_app.get(\"/completion/{question}\")\n async def completion(question: str):\n from urllib.parse import unquote\n\n async def generate():\n async for text in Model().completion_stream.remote_gen.aio(\n unquote(question)\n ):\n yield f\"data: {json.dumps(dict(text=text), ensure_ascii=False)}\\n\\n\"\n\n return StreamingResponse(generate(), media_type=\"text/event-stream\")\n\n web_app.mount(\n \"/\", fastapi.staticfiles.StaticFiles(directory=\"/assets\", html=True)\n )\n return web_app\n", "path": "06_gpu_and_ml/llm-serving/vllm_mixtral.py"}]}
| 3,781 | 664 |
gh_patches_debug_12659
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-1611
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make symbol.get bulletproof against duplicated symbols
@lebr0nli pointed out an interesting case with `symbol.get` failing when Pwntools+Pwndbg is used: https://github.com/pwndbg/pwndbg/pull/1605#discussion_r1126105118
The bug actually does not require Pwntools at all and was partially caused by our `_add_main_to_symbols` hook. I am going to remove that hook in https://github.com/pwndbg/pwndbg/pull/1609 if tests pass, since it seems redundant.
However, that does not really fix the problem, as there may always be duplicated symbols such that `info symbol main` would show:
```
pwndbg> info symbol main
main in section .text of /pwndbg/bug/vaccine
main in section .text of /pwndbg/bug/vaccine
```
This can probably be mitigated by doing something like:
```py
loc_string, *_ = result.split(...)
```
instead of:
```py
loc_string, _ = result.split(...)
```
In the `symbol.get` implementation.
This would also mean we would effectively always assume the first symbol is the one we want to use, but I also do not see another good way to resolve the duplicated-symbol issue.
</issue>
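The fix that was eventually accepted (see the diff later in this record) keeps only the first line of the `info symbol` output before parsing it. A simplified standalone sketch of that idea — not the actual pwndbg code, and omitting the overlay-section handling and IDA fallbacks that the real `symbol.get` performs:

```python
# Sketch only: parse GDB "info symbol" output while tolerating duplicated symbol lines.
def first_symbol_location(info_symbol_output: str) -> str:
    # GDB prints one line per match; when a symbol is duplicated, keep the first line.
    result = info_symbol_output.split("\n", 1)[0]

    if result.startswith("No symbol"):
        return ""
    if " in section " in result:
        loc_string, *_ = result.split(" in section ")
    elif " in load address range of " in result:
        loc_string, *_ = result.split(" in load address range of ")
    else:
        loc_string = ""
    # 'main + 87' -> 'main+87', matching the existing behaviour of symbol.get.
    return loc_string.replace(" + ", "+")


assert first_symbol_location(
    "main in section .text of /pwndbg/bug/vaccine\n"
    "main in section .text of /pwndbg/bug/vaccine\n"
) == "main"
```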
<code>
[start of pwndbg/gdblib/symbol.py]
1 """
2 Looking up addresses for function names / symbols, and
3 vice-versa.
4
5 Uses IDA when available if there isn't sufficient symbol
6 information available.
7 """
8 import re
9
10 import gdb
11
12 import pwndbg.gdblib.android
13 import pwndbg.gdblib.arch
14 import pwndbg.gdblib.elf
15 import pwndbg.gdblib.events
16 import pwndbg.gdblib.file
17 import pwndbg.gdblib.info
18 import pwndbg.gdblib.memory
19 import pwndbg.gdblib.qemu
20 import pwndbg.gdblib.remote
21 import pwndbg.gdblib.stack
22 import pwndbg.gdblib.vmmap
23 import pwndbg.ida
24 import pwndbg.lib.memoize
25
26
27 def _get_debug_file_directory():
28 """
29 Retrieve the debug file directory path.
30
31 The debug file directory path ('show debug-file-directory') is a comma-
32 separated list of directories which GDB will look in to find the binaries
33 currently loaded.
34 """
35 result = gdb.execute("show debug-file-directory", to_string=True, from_tty=False)
36 expr = r'The directory where separate debug symbols are searched for is "(.*)".\n'
37
38 match = re.search(expr, result)
39
40 if match:
41 return match.group(1)
42 return ""
43
44
45 def _set_debug_file_directory(d) -> None:
46 gdb.execute("set debug-file-directory %s" % d, to_string=True, from_tty=False)
47
48
49 def _add_debug_file_directory(d) -> None:
50 current = _get_debug_file_directory()
51 if current:
52 _set_debug_file_directory("%s:%s" % (current, d))
53 else:
54 _set_debug_file_directory(d)
55
56
57 if "/usr/lib/debug" not in _get_debug_file_directory():
58 _add_debug_file_directory("/usr/lib/debug")
59
60
61 @pwndbg.lib.memoize.reset_on_objfile
62 def get(address: int, gdb_only=False) -> str:
63 """
64 Retrieve the name for the symbol located at `address` - either from GDB or from IDA sync
65 Passing `gdb_only=True`
66 """
67 # Note: we do not return "" on `address < pwndbg.gdblib.memory.MMAP_MIN_ADDR`
68 # because this may be used to find out the symbol name on PIE binaries that weren't started yet
69 # and then their symbol addresses can be found by GDB on their (non-rebased) offsets
70
71 # Fast path: GDB's `info symbol` returns 'Numeric constant too large' here
72 if address >= ((1 << 64) - 1):
73 return ""
74
75 # This sucks, but there's not a GDB API for this.
76 result = gdb.execute("info symbol %#x" % int(address), to_string=True, from_tty=False)
77
78 if not gdb_only and result.startswith("No symbol"):
79 address = int(address)
80 exe = pwndbg.gdblib.elf.exe()
81 if exe:
82 exe_map = pwndbg.gdblib.vmmap.find(exe.address)
83 if exe_map and address in exe_map:
84 res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)
85 return res or ""
86
87 # See https://github.com/bminor/binutils-gdb/blob/d1702fea87aa62dff7de465464097dba63cc8c0f/gdb/printcmd.c#L1594-L1624
88 # The most often encountered formats looks like this:
89 # "main in section .text of /bin/bash"
90 # "main + 3 in section .text of /bin/bash"
91 # "system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6"
92 # "No symbol matches system-1"
93 # But there are some others that we have to account for as well
94 if " in section " in result:
95 loc_string, _ = result.split(" in section ")
96 elif " in load address range of " in result:
97 loc_string, _ = result.split(" in load address range of ")
98 elif " overlay section " in result:
99 result, _ = result.split(" overlay section ")
100 loc_string, _ = result.split(" in ")
101 else:
102 loc_string = ""
103
104 # If there is 'main + 87' we want to replace it with 'main+87' etc.
105 return loc_string.replace(" + ", "+")
106
107
108 @pwndbg.lib.memoize.reset_on_objfile
109 def address(symbol: str) -> int:
110 """
111 Get the address for `symbol`
112 """
113 try:
114 symbol_obj = gdb.lookup_symbol(symbol)[0]
115 if symbol_obj:
116 return int(symbol_obj.value().address)
117 except gdb.error as e:
118 # Symbol lookup only throws exceptions on errors, not if it failed to
119 # lookup a symbol. We want to raise these errors so we can handle them
120 # properly, but there are some we haven't figured out how to fix yet, so
121 # we ignore those here
122 skipped_exceptions = []
123
124 # This is exception is being thrown by the Go typeinfo tests, we should
125 # investigate why this is happening and see if we can explicitly check
126 # for it with `gdb.selected_frame()`
127 skipped_exceptions.append("No frame selected")
128
129 # If we try to look up a TLS variable when there is no TLS, this
130 # exception occurs. Ideally we should come up with a way to check for
131 # this case before calling `gdb.lookup_symbol`
132 skipped_exceptions.append("Cannot find thread-local")
133
134 if all(x not in str(e) for x in skipped_exceptions):
135 raise e
136
137 try:
138 # Unfortunately, `gdb.lookup_symbol` does not seem to handle all
139 # symbols, so we need to fallback to using `info address`. See
140 # https://sourceware.org/pipermail/gdb/2022-October/050362.html
141 address = pwndbg.gdblib.info.address(symbol)
142 if address is None or not pwndbg.gdblib.vmmap.find(address):
143 return None
144
145 return address
146
147 except gdb.error:
148 return None
149
150 try:
151 # TODO: We should properly check if we have a connection to the IDA server first
152 address = pwndbg.ida.LocByName(symbol)
153 if address:
154 return address
155 except Exception:
156 pass
157
158 return None
159
160
161 @pwndbg.lib.memoize.reset_on_objfile
162 @pwndbg.lib.memoize.reset_on_thread
163 def static_linkage_symbol_address(symbol: str) -> int:
164 """
165 Get the address for static linkage `symbol`
166 """
167
168 try:
169 if hasattr(gdb, "lookup_static_symbol"):
170 symbol_obj = gdb.lookup_static_symbol(symbol)
171 else:
172 # GDB < 9.x does not have `gdb.lookup_static_symbol`
173 # We will fallback to `gdb.lookup_symbol` here, but the drawback is that we might find incorrect symbol if there is a symbol with the same name which is not static linkage
174 # But this is better than just returning None
175 # TODO/FIXME: Find a way to get the static linkage symbol's address in GDB < 9.x
176 symbol_obj = gdb.lookup_symbol(symbol)[0]
177 return int(symbol_obj.value().address) if symbol_obj else None
178 except gdb.error:
179 return None
180
181
182 @pwndbg.lib.memoize.reset_on_stop
183 @pwndbg.lib.memoize.reset_on_start
184 def selected_frame_source_absolute_filename():
185 """
186 Retrieve the symbol table’s source absolute file name from the selected frame.
187
188 In case of missing symbol table or frame information, None is returned.
189 """
190 try:
191 frame = gdb.selected_frame()
192 except gdb.error:
193 return None
194
195 if not frame:
196 return None
197
198 sal = frame.find_sal()
199 if not sal:
200 return None
201
202 symtab = sal.symtab
203 if not symtab:
204 return None
205
206 return symtab.fullname()
207
[end of pwndbg/gdblib/symbol.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pwndbg/gdblib/symbol.py b/pwndbg/gdblib/symbol.py
--- a/pwndbg/gdblib/symbol.py
+++ b/pwndbg/gdblib/symbol.py
@@ -84,6 +84,10 @@
res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)
return res or ""
+ # If there are newlines, which means that there are multiple symbols for the address
+ # then use the first one (see also #1610)
+ result = result[: result.index("\n")]
+
# See https://github.com/bminor/binutils-gdb/blob/d1702fea87aa62dff7de465464097dba63cc8c0f/gdb/printcmd.c#L1594-L1624
# The most often encountered formats looks like this:
# "main in section .text of /bin/bash"
|
{"golden_diff": "diff --git a/pwndbg/gdblib/symbol.py b/pwndbg/gdblib/symbol.py\n--- a/pwndbg/gdblib/symbol.py\n+++ b/pwndbg/gdblib/symbol.py\n@@ -84,6 +84,10 @@\n res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)\n return res or \"\"\n \n+ # If there are newlines, which means that there are multiple symbols for the address\n+ # then use the first one (see also #1610)\n+ result = result[: result.index(\"\\n\")]\n+\n # See https://github.com/bminor/binutils-gdb/blob/d1702fea87aa62dff7de465464097dba63cc8c0f/gdb/printcmd.c#L1594-L1624\n # The most often encountered formats looks like this:\n # \"main in section .text of /bin/bash\"\n", "issue": "Make symbol.get bulletproof against duplicated symbols\n@lebr0nli pointed out an interesting case with `symbol.get` failing when Pwntools+Pwndbg is used: https://github.com/pwndbg/pwndbg/pull/1605#discussion_r1126105118\r\n\r\nThe bug actually does not require Pwntools at all and was partially caused by our `_add_main_to_symbols` hook. I am going to remove that hook in https://github.com/pwndbg/pwndbg/pull/1609 if tests passes, since it seems reduntant.\r\n\r\nHowever, that does not really fix the problem as there may always be duplicated symbols such that `info symbols main` would show:\r\n```\r\npwndbg> info symbol main\r\nmain in section .text of /pwndbg/bug/vaccine\r\nmain in section .text of /pwndbg/bug/vaccine\r\n```\r\n\r\nThis can probably be mitigated by doing something like:\r\n```py\r\nloc_string, *_ = result.split(...)\r\n```\r\n\r\ninstead of:\r\n```py\r\nloc_string, _ = result.split(...)\r\n```\r\n\r\nIn the `symbol.get` implementation.\r\n\r\nThis would also mean we would effectively always assume the first symbol is the one we want to use, but I also do not see a good other way to resolve the duplicated symbol issue.\n", "before_files": [{"content": "\"\"\"\nLooking up addresses for function names / symbols, and\nvice-versa.\n\nUses IDA when available if there isn't sufficient symbol\ninformation available.\n\"\"\"\nimport re\n\nimport gdb\n\nimport pwndbg.gdblib.android\nimport pwndbg.gdblib.arch\nimport pwndbg.gdblib.elf\nimport pwndbg.gdblib.events\nimport pwndbg.gdblib.file\nimport pwndbg.gdblib.info\nimport pwndbg.gdblib.memory\nimport pwndbg.gdblib.qemu\nimport pwndbg.gdblib.remote\nimport pwndbg.gdblib.stack\nimport pwndbg.gdblib.vmmap\nimport pwndbg.ida\nimport pwndbg.lib.memoize\n\n\ndef _get_debug_file_directory():\n \"\"\"\n Retrieve the debug file directory path.\n\n The debug file directory path ('show debug-file-directory') is a comma-\n separated list of directories which GDB will look in to find the binaries\n currently loaded.\n \"\"\"\n result = gdb.execute(\"show debug-file-directory\", to_string=True, from_tty=False)\n expr = r'The directory where separate debug symbols are searched for is \"(.*)\".\\n'\n\n match = re.search(expr, result)\n\n if match:\n return match.group(1)\n return \"\"\n\n\ndef _set_debug_file_directory(d) -> None:\n gdb.execute(\"set debug-file-directory %s\" % d, to_string=True, from_tty=False)\n\n\ndef _add_debug_file_directory(d) -> None:\n current = _get_debug_file_directory()\n if current:\n _set_debug_file_directory(\"%s:%s\" % (current, d))\n else:\n _set_debug_file_directory(d)\n\n\nif \"/usr/lib/debug\" not in _get_debug_file_directory():\n _add_debug_file_directory(\"/usr/lib/debug\")\n\n\[email protected]_on_objfile\ndef get(address: int, gdb_only=False) -> str:\n \"\"\"\n Retrieve the name for the symbol located at `address` - either from GDB or from IDA sync\n 
Passing `gdb_only=True`\n \"\"\"\n # Note: we do not return \"\" on `address < pwndbg.gdblib.memory.MMAP_MIN_ADDR`\n # because this may be used to find out the symbol name on PIE binaries that weren't started yet\n # and then their symbol addresses can be found by GDB on their (non-rebased) offsets\n\n # Fast path: GDB's `info symbol` returns 'Numeric constant too large' here\n if address >= ((1 << 64) - 1):\n return \"\"\n\n # This sucks, but there's not a GDB API for this.\n result = gdb.execute(\"info symbol %#x\" % int(address), to_string=True, from_tty=False)\n\n if not gdb_only and result.startswith(\"No symbol\"):\n address = int(address)\n exe = pwndbg.gdblib.elf.exe()\n if exe:\n exe_map = pwndbg.gdblib.vmmap.find(exe.address)\n if exe_map and address in exe_map:\n res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)\n return res or \"\"\n\n # See https://github.com/bminor/binutils-gdb/blob/d1702fea87aa62dff7de465464097dba63cc8c0f/gdb/printcmd.c#L1594-L1624\n # The most often encountered formats looks like this:\n # \"main in section .text of /bin/bash\"\n # \"main + 3 in section .text of /bin/bash\"\n # \"system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6\"\n # \"No symbol matches system-1\"\n # But there are some others that we have to account for as well\n if \" in section \" in result:\n loc_string, _ = result.split(\" in section \")\n elif \" in load address range of \" in result:\n loc_string, _ = result.split(\" in load address range of \")\n elif \" overlay section \" in result:\n result, _ = result.split(\" overlay section \")\n loc_string, _ = result.split(\" in \")\n else:\n loc_string = \"\"\n\n # If there is 'main + 87' we want to replace it with 'main+87' etc.\n return loc_string.replace(\" + \", \"+\")\n\n\[email protected]_on_objfile\ndef address(symbol: str) -> int:\n \"\"\"\n Get the address for `symbol`\n \"\"\"\n try:\n symbol_obj = gdb.lookup_symbol(symbol)[0]\n if symbol_obj:\n return int(symbol_obj.value().address)\n except gdb.error as e:\n # Symbol lookup only throws exceptions on errors, not if it failed to\n # lookup a symbol. We want to raise these errors so we can handle them\n # properly, but there are some we haven't figured out how to fix yet, so\n # we ignore those here\n skipped_exceptions = []\n\n # This is exception is being thrown by the Go typeinfo tests, we should\n # investigate why this is happening and see if we can explicitly check\n # for it with `gdb.selected_frame()`\n skipped_exceptions.append(\"No frame selected\")\n\n # If we try to look up a TLS variable when there is no TLS, this\n # exception occurs. Ideally we should come up with a way to check for\n # this case before calling `gdb.lookup_symbol`\n skipped_exceptions.append(\"Cannot find thread-local\")\n\n if all(x not in str(e) for x in skipped_exceptions):\n raise e\n\n try:\n # Unfortunately, `gdb.lookup_symbol` does not seem to handle all\n # symbols, so we need to fallback to using `info address`. 
See\n # https://sourceware.org/pipermail/gdb/2022-October/050362.html\n address = pwndbg.gdblib.info.address(symbol)\n if address is None or not pwndbg.gdblib.vmmap.find(address):\n return None\n\n return address\n\n except gdb.error:\n return None\n\n try:\n # TODO: We should properly check if we have a connection to the IDA server first\n address = pwndbg.ida.LocByName(symbol)\n if address:\n return address\n except Exception:\n pass\n\n return None\n\n\[email protected]_on_objfile\[email protected]_on_thread\ndef static_linkage_symbol_address(symbol: str) -> int:\n \"\"\"\n Get the address for static linkage `symbol`\n \"\"\"\n\n try:\n if hasattr(gdb, \"lookup_static_symbol\"):\n symbol_obj = gdb.lookup_static_symbol(symbol)\n else:\n # GDB < 9.x does not have `gdb.lookup_static_symbol`\n # We will fallback to `gdb.lookup_symbol` here, but the drawback is that we might find incorrect symbol if there is a symbol with the same name which is not static linkage\n # But this is better than just returning None\n # TODO/FIXME: Find a way to get the static linkage symbol's address in GDB < 9.x\n symbol_obj = gdb.lookup_symbol(symbol)[0]\n return int(symbol_obj.value().address) if symbol_obj else None\n except gdb.error:\n return None\n\n\[email protected]_on_stop\[email protected]_on_start\ndef selected_frame_source_absolute_filename():\n \"\"\"\n Retrieve the symbol table\u2019s source absolute file name from the selected frame.\n\n In case of missing symbol table or frame information, None is returned.\n \"\"\"\n try:\n frame = gdb.selected_frame()\n except gdb.error:\n return None\n\n if not frame:\n return None\n\n sal = frame.find_sal()\n if not sal:\n return None\n\n symtab = sal.symtab\n if not symtab:\n return None\n\n return symtab.fullname()\n", "path": "pwndbg/gdblib/symbol.py"}]}
| 3,119 | 225 |
gh_patches_debug_961
|
rasdani/github-patches
|
git_diff
|
urllib3__urllib3-2484
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Test development versions of Python
Uses the `3.11-dev` and `nightly` deadsnakes release streams to test urllib3 against in-development Python versions.
</issue>
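For orientation, adding a pre-release interpreter to a nox test matrix is usually just an extra entry in the session's `python` list. A sketch (the `3.11` entry and the `tests_impl` helper refer to the noxfile shown below):

```python
import nox


@nox.session(python=["3.7", "3.8", "3.9", "3.10", "3.11"])  # "3.11" is the pre-release here
def test(session: nox.Session) -> None:
    # `tests_impl` is the project's existing helper; shown only to illustrate
    # where the interpreter list lives.
    tests_impl(session)
```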
<code>
[start of noxfile.py]
1 import os
2 import shutil
3 import subprocess
4
5 import nox
6
7 SOURCE_FILES = [
8 "docs/",
9 "dummyserver/",
10 "src/",
11 "test/",
12 "noxfile.py",
13 "setup.py",
14 ]
15
16
17 def tests_impl(
18 session: nox.Session,
19 extras: str = "socks,secure,brotli",
20 byte_string_comparisons: bool = True,
21 ) -> None:
22 # Install deps and the package itself.
23 session.install("-r", "dev-requirements.txt")
24 session.install(f".[{extras}]")
25
26 # Show the pip version.
27 session.run("pip", "--version")
28 # Print the Python version and bytesize.
29 session.run("python", "--version")
30 session.run("python", "-c", "import struct; print(struct.calcsize('P') * 8)")
31 # Print OpenSSL information.
32 session.run("python", "-m", "OpenSSL.debug")
33
34 # Inspired from https://github.com/pyca/cryptography
35 # We use parallel mode and then combine here so that coverage.py will take
36 # the paths like .tox/pyXY/lib/pythonX.Y/site-packages/urllib3/__init__.py
37 # and collapse them into src/urllib3/__init__.py.
38
39 session.run(
40 "python",
41 *(("-bb",) if byte_string_comparisons else ()),
42 "-m",
43 "coverage",
44 "run",
45 "--parallel-mode",
46 "-m",
47 "pytest",
48 "-r",
49 "a",
50 "--tb=native",
51 "--no-success-flaky-report",
52 *(session.posargs or ("test/",)),
53 env={"PYTHONWARNINGS": "always::DeprecationWarning"},
54 )
55 session.run("coverage", "combine")
56 session.run("coverage", "report", "-m")
57 session.run("coverage", "xml")
58
59
60 @nox.session(python=["3.7", "3.8", "3.9", "3.10", "pypy"])
61 def test(session: nox.Session) -> None:
62 tests_impl(session)
63
64
65 @nox.session(python=["2.7"])
66 def unsupported_python2(session: nox.Session) -> None:
67 # Can't check both returncode and output with session.run
68 process = subprocess.run(
69 ["python", "setup.py", "install"],
70 env={**session.env},
71 text=True,
72 capture_output=True,
73 )
74 assert process.returncode == 1
75 print(process.stderr)
76 assert "Unsupported Python version" in process.stderr
77
78
79 @nox.session(python=["3"])
80 def test_brotlipy(session: nox.Session) -> None:
81 """Check that if 'brotlipy' is installed instead of 'brotli' or
82 'brotlicffi' that we still don't blow up.
83 """
84 session.install("brotlipy")
85 tests_impl(session, extras="socks,secure", byte_string_comparisons=False)
86
87
88 def git_clone(session: nox.Session, git_url: str) -> None:
89 session.run("git", "clone", "--depth", "1", git_url, external=True)
90
91
92 @nox.session()
93 def downstream_botocore(session: nox.Session) -> None:
94 root = os.getcwd()
95 tmp_dir = session.create_tmp()
96
97 session.cd(tmp_dir)
98 git_clone(session, "https://github.com/boto/botocore")
99 session.chdir("botocore")
100 session.run("git", "rev-parse", "HEAD", external=True)
101 session.run("python", "scripts/ci/install")
102
103 session.cd(root)
104 session.install(".", silent=False)
105 session.cd(f"{tmp_dir}/botocore")
106
107 session.run("python", "-c", "import urllib3; print(urllib3.__version__)")
108 session.run("python", "scripts/ci/run-tests")
109
110
111 @nox.session()
112 def downstream_requests(session: nox.Session) -> None:
113 root = os.getcwd()
114 tmp_dir = session.create_tmp()
115
116 session.cd(tmp_dir)
117 git_clone(session, "https://github.com/psf/requests")
118 session.chdir("requests")
119 session.run("git", "apply", f"{root}/ci/requests.patch", external=True)
120 session.run("git", "rev-parse", "HEAD", external=True)
121 session.install(".[socks]", silent=False)
122 session.install("-r", "requirements-dev.txt", silent=False)
123
124 session.cd(root)
125 session.install(".", silent=False)
126 session.cd(f"{tmp_dir}/requests")
127
128 session.run("python", "-c", "import urllib3; print(urllib3.__version__)")
129 session.run("pytest", "tests")
130
131
132 @nox.session()
133 def format(session: nox.Session) -> None:
134 """Run code formatters."""
135 session.install("pre-commit")
136 session.run("pre-commit", "--version")
137
138 process = subprocess.run(
139 ["pre-commit", "run", "--all-files"],
140 env=session.env,
141 text=True,
142 stdout=subprocess.PIPE,
143 stderr=subprocess.STDOUT,
144 )
145 # Ensure that pre-commit itself ran successfully
146 assert process.returncode in (0, 1)
147
148 lint(session)
149
150
151 @nox.session
152 def lint(session: nox.Session) -> None:
153 session.install("pre-commit")
154 session.run("pre-commit", "run", "--all-files")
155
156 mypy(session)
157
158
159 @nox.session(python="3.8")
160 def mypy(session: nox.Session) -> None:
161 """Run mypy."""
162 session.install("-r", "mypy-requirements.txt")
163 session.run("mypy", "--version")
164 session.run(
165 "mypy",
166 "dummyserver",
167 "noxfile.py",
168 "src/urllib3",
169 "test",
170 )
171
172
173 @nox.session
174 def docs(session: nox.Session) -> None:
175 session.install("-r", "docs/requirements.txt")
176 session.install(".[socks,secure,brotli]")
177
178 session.chdir("docs")
179 if os.path.exists("_build"):
180 shutil.rmtree("_build")
181 session.run("sphinx-build", "-b", "html", "-W", ".", "_build/html")
182
[end of noxfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -57,7 +57,7 @@
session.run("coverage", "xml")
[email protected](python=["3.7", "3.8", "3.9", "3.10", "pypy"])
[email protected](python=["3.7", "3.8", "3.9", "3.10", "3.11", "pypy"])
def test(session: nox.Session) -> None:
tests_impl(session)
|
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -57,7 +57,7 @@\n session.run(\"coverage\", \"xml\")\n \n \[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"pypy\"])\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\", \"pypy\"])\n def test(session: nox.Session) -> None:\n tests_impl(session)\n", "issue": "Test development versions of Python\nUses the `3.11-dev` and `nightly` deadsnakes release streams to test urllib3 against Python versions.\n", "before_files": [{"content": "import os\nimport shutil\nimport subprocess\n\nimport nox\n\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(\n session: nox.Session,\n extras: str = \"socks,secure,brotli\",\n byte_string_comparisons: bool = True,\n) -> None:\n # Install deps and the package itself.\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(f\".[{extras}]\")\n\n # Show the pip version.\n session.run(\"pip\", \"--version\")\n # Print the Python version and bytesize.\n session.run(\"python\", \"--version\")\n session.run(\"python\", \"-c\", \"import struct; print(struct.calcsize('P') * 8)\")\n # Print OpenSSL information.\n session.run(\"python\", \"-m\", \"OpenSSL.debug\")\n\n # Inspired from https://github.com/pyca/cryptography\n # We use parallel mode and then combine here so that coverage.py will take\n # the paths like .tox/pyXY/lib/pythonX.Y/site-packages/urllib3/__init__.py\n # and collapse them into src/urllib3/__init__.py.\n\n session.run(\n \"python\",\n *((\"-bb\",) if byte_string_comparisons else ()),\n \"-m\",\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n \"-r\",\n \"a\",\n \"--tb=native\",\n \"--no-success-flaky-report\",\n *(session.posargs or (\"test/\",)),\n env={\"PYTHONWARNINGS\": \"always::DeprecationWarning\"},\n )\n session.run(\"coverage\", \"combine\")\n session.run(\"coverage\", \"report\", \"-m\")\n session.run(\"coverage\", \"xml\")\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"pypy\"])\ndef test(session: nox.Session) -> None:\n tests_impl(session)\n\n\[email protected](python=[\"2.7\"])\ndef unsupported_python2(session: nox.Session) -> None:\n # Can't check both returncode and output with session.run\n process = subprocess.run(\n [\"python\", \"setup.py\", \"install\"],\n env={**session.env},\n text=True,\n capture_output=True,\n )\n assert process.returncode == 1\n print(process.stderr)\n assert \"Unsupported Python version\" in process.stderr\n\n\[email protected](python=[\"3\"])\ndef test_brotlipy(session: nox.Session) -> None:\n \"\"\"Check that if 'brotlipy' is installed instead of 'brotli' or\n 'brotlicffi' that we still don't blow up.\n \"\"\"\n session.install(\"brotlipy\")\n tests_impl(session, extras=\"socks,secure\", byte_string_comparisons=False)\n\n\ndef git_clone(session: nox.Session, git_url: str) -> None:\n session.run(\"git\", \"clone\", \"--depth\", \"1\", git_url, external=True)\n\n\[email protected]()\ndef downstream_botocore(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/boto/botocore\")\n session.chdir(\"botocore\")\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.run(\"python\", \"scripts/ci/install\")\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/botocore\")\n\n session.run(\"python\", \"-c\", 
\"import urllib3; print(urllib3.__version__)\")\n session.run(\"python\", \"scripts/ci/run-tests\")\n\n\[email protected]()\ndef downstream_requests(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/psf/requests\")\n session.chdir(\"requests\")\n session.run(\"git\", \"apply\", f\"{root}/ci/requests.patch\", external=True)\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.install(\".[socks]\", silent=False)\n session.install(\"-r\", \"requirements-dev.txt\", silent=False)\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/requests\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"pytest\", \"tests\")\n\n\[email protected]()\ndef format(session: nox.Session) -> None:\n \"\"\"Run code formatters.\"\"\"\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"--version\")\n\n process = subprocess.run(\n [\"pre-commit\", \"run\", \"--all-files\"],\n env=session.env,\n text=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n # Ensure that pre-commit itself ran successfully\n assert process.returncode in (0, 1)\n\n lint(session)\n\n\[email protected]\ndef lint(session: nox.Session) -> None:\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")\n\n mypy(session)\n\n\[email protected](python=\"3.8\")\ndef mypy(session: nox.Session) -> None:\n \"\"\"Run mypy.\"\"\"\n session.install(\"-r\", \"mypy-requirements.txt\")\n session.run(\"mypy\", \"--version\")\n session.run(\n \"mypy\",\n \"dummyserver\",\n \"noxfile.py\",\n \"src/urllib3\",\n \"test\",\n )\n\n\[email protected]\ndef docs(session: nox.Session) -> None:\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\".[socks,secure,brotli]\")\n\n session.chdir(\"docs\")\n if os.path.exists(\"_build\"):\n shutil.rmtree(\"_build\")\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-W\", \".\", \"_build/html\")\n", "path": "noxfile.py"}]}
| 2,342 | 134 |
gh_patches_debug_8185
|
rasdani/github-patches
|
git_diff
|
openstates__openstates-scrapers-2087
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ID: scrape fails on substitute representative
Once #2053 is merged, the people scraper is blowing up reading https://legislature.idaho.gov/house/membership/ .
Search down to "Scanlin", who you'll find is a "substitute" representative. There's a parsing problem because the scraper expects the name to be bold and the party to not be; in this case "Substitute" is bold only.
For a description of what a "Substitute" is, see http://www.spokesman.com/blogs/boise/2014/mar/16/idahos-legislative-substitute-law-unique/
Just fixing this would leave two legislators in that district, where, as in Highlander, "there can be only one". Representative Kloc, who Scanlin is substituting for, is directly above him.
Assuming that his roll call votes, committee memberships, etc. are registered to the substitute, I believe a fully correct fix for this would involve:
- lookahead for a "Substitute", skipping the current legislator if the next legislator is a substitute.
- rewriting the line parser to skip "Substitute" and just use the text (not bold format) for name/party parsing.
</issue>
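A minimal sketch of the skip-on-substitute idea outlined above; the helper name is hypothetical and the project's actual fix may differ:

```python
def is_sitting_member(inner) -> bool:
    # Hypothetical helper: treat rows whose info block mentions "Resigned" or
    # "Substitute" as inactive, so only one legislator per district is emitted.
    text = inner.text_content()
    return "Resigned" not in text and "Substitute" not in text
```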
<code>
[start of openstates/id/people.py]
1 import re
2 import lxml.html
3 from pupa.scrape import Scraper, Person
4
5 BASE_URL = 'https://legislature.idaho.gov/%s/membership/'
6 CHAMBERS = {'upper': 'senate', 'lower': 'house'}
7 PARTY = {
8 '(R)': 'Republican',
9 '(D)': 'Democratic',
10 }
11
12 phone_patterns = {
13 'office': re.compile(r'Statehouse'),
14 'business': re.compile(r'Bus'),
15 'home': re.compile(r'Home'),
16 }
17
18 parse_phone_pattern = re.compile(r'tel:(?:\+1)?(\d{10}$)')
19 fax_pattern = re.compile(r'fax\s+\((\d{3})\)\s+(\d{3})-(\d{4})', re.IGNORECASE)
20 address_pattern = re.compile(r', \d{5}')
21 address_replace_pattern = re.compile(r'(\d{5})')
22
23
24 def get_phones(el):
25 phones = {}
26 for link in el.xpath('p/a[@class = "mob-tel"]'):
27 prefix = link.getprevious().tail
28 for label, pattern in phone_patterns.items():
29 if pattern.search(prefix) is not None:
30 phones[label] = parse_phone(link.get('href'))
31 return phones
32
33
34 def parse_phone(phone):
35 res = parse_phone_pattern.search(phone)
36 if res is not None:
37 return res.groups()[0]
38
39
40 def get_fax(el):
41 res = fax_pattern.search(el.text_content())
42 if res is not None:
43 return ''.join(res.groups())
44
45
46 def get_address(el):
47 for br in el.xpath('p/br'):
48 piece = (br.tail or '').strip()
49 res = address_pattern.search(piece)
50 if res is not None:
51 return address_replace_pattern.sub(r'ID \1', piece).strip()
52
53
54 class IDPersonScraper(Scraper):
55 """Legislator data seems to be available for the current term only."""
56 jurisdiction = 'id'
57
58 def scrape(self, chamber=None):
59 if chamber:
60 yield from self.scrape_chamber(chamber)
61 else:
62 yield from self.scrape_chamber('upper')
63 yield from self.scrape_chamber('lower')
64
65 def scrape_chamber(self, chamber):
66 """
67 Scrapes legislators for the current term only
68 """
69 # self.validate_term(term, latest_only=True)
70 url = BASE_URL % CHAMBERS[chamber].lower()
71 index = self.get(url, verify=False).text
72 html = lxml.html.fromstring(index)
73 html.make_links_absolute(url)
74
75 rows = html.xpath('//div[contains(@class, "row-equal-height")]')
76
77 for row in rows:
78 img_url = row.xpath('.//img/@src')[0]
79
80 inner = row.xpath('.//div[@class="vc-column-innner-wrapper"]')[1]
81 if 'Resigned' in inner.text_content():
82 continue
83
84 name = inner.xpath('p/strong')[0].text.replace(u'\xa0', ' ').strip()
85 name = re.sub('\s+', ' ', name)
86 party = PARTY[inner.xpath('p/strong')[0].tail.strip()]
87 email = inner.xpath('p/strong/a')[0].text
88 district = inner.xpath('p/a')[0].text.replace('District ', '')
89
90 person_url = inner.xpath('p/a/@href')[0]
91 # skip roles for now
92 role = ''
93 # for com in inner.xpath('p/a[contains(@href, "committees")]'):
94 # role = com.tail.strip()
95
96 person = Person(name=name, district=district,
97 party=party, primary_org=chamber,
98 image=img_url, role=role)
99 phones = get_phones(inner)
100 phone = phones.get('home') or phones.get('business')
101 office_phone = phones.get('office')
102 address = get_address(inner)
103 fax = get_fax(inner)
104 if address:
105 person.add_contact_detail(type='address', value=address,
106 note='District Office')
107 if phone:
108 person.add_contact_detail(type='voice', value=phone,
109 note='District Office')
110 if fax:
111 person.add_contact_detail(type='fax', value=fax,
112 note='District Office')
113 if email:
114 person.add_contact_detail(type='email', value=email,
115 note='District Office')
116 if office_phone:
117 person.add_contact_detail(type='voice', value=office_phone,
118 note='Capitol Office')
119 person.add_source(url)
120 person.add_link(person_url)
121 yield person
122
[end of openstates/id/people.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/openstates/id/people.py b/openstates/id/people.py
--- a/openstates/id/people.py
+++ b/openstates/id/people.py
@@ -78,7 +78,8 @@
img_url = row.xpath('.//img/@src')[0]
inner = row.xpath('.//div[@class="vc-column-innner-wrapper"]')[1]
- if 'Resigned' in inner.text_content():
+ inner_text = inner.text_content()
+ if 'Resigned' in inner_text or 'Substitute' in inner_text:
continue
name = inner.xpath('p/strong')[0].text.replace(u'\xa0', ' ').strip()
|
{"golden_diff": "diff --git a/openstates/id/people.py b/openstates/id/people.py\n--- a/openstates/id/people.py\n+++ b/openstates/id/people.py\n@@ -78,7 +78,8 @@\n img_url = row.xpath('.//img/@src')[0]\n \n inner = row.xpath('.//div[@class=\"vc-column-innner-wrapper\"]')[1]\n- if 'Resigned' in inner.text_content():\n+ inner_text = inner.text_content()\n+ if 'Resigned' in inner_text or 'Substitute' in inner_text:\n continue\n \n name = inner.xpath('p/strong')[0].text.replace(u'\\xa0', ' ').strip()\n", "issue": "ID: scrape fails on substitute representative\nOnce #2053 is merged, the people scraper is blowing up reading https://legislature.idaho.gov/house/membership/ . \r\nSearch down to \"Scanlin\", who you'll find is a \"substitute\" representative. There's a parsing problem because the scraper expects the name to be bold and the party to not be; in this case \"Substitute\" is bold only.\r\n\r\nFor a description of what a \"Substitute\" is, see http://www.spokesman.com/blogs/boise/2014/mar/16/idahos-legislative-substitute-law-unique/ \r\n\r\nJust fixing this would leave two legislators in that district, where, as in Highlander, \"there can be only one\". Representative Kloc, who Scanlin is substituting for, is directly above him.\r\n\r\nAssuming that his roll call votes, committee memberships, etc. are registered to the substitute, I believe a fully correct fix for this would involve:\r\n- lookahead for a \"Substitute\", skipping the current legislator if the next legislator is a substitute.\r\n- rewriting the line parser to skip \"Substitute\" and just use the text (not bold format) for name/party parsing.\r\n\n", "before_files": [{"content": "import re\nimport lxml.html\nfrom pupa.scrape import Scraper, Person\n\nBASE_URL = 'https://legislature.idaho.gov/%s/membership/'\nCHAMBERS = {'upper': 'senate', 'lower': 'house'}\nPARTY = {\n '(R)': 'Republican',\n '(D)': 'Democratic',\n}\n\nphone_patterns = {\n 'office': re.compile(r'Statehouse'),\n 'business': re.compile(r'Bus'),\n 'home': re.compile(r'Home'),\n}\n\nparse_phone_pattern = re.compile(r'tel:(?:\\+1)?(\\d{10}$)')\nfax_pattern = re.compile(r'fax\\s+\\((\\d{3})\\)\\s+(\\d{3})-(\\d{4})', re.IGNORECASE)\naddress_pattern = re.compile(r', \\d{5}')\naddress_replace_pattern = re.compile(r'(\\d{5})')\n\n\ndef get_phones(el):\n phones = {}\n for link in el.xpath('p/a[@class = \"mob-tel\"]'):\n prefix = link.getprevious().tail\n for label, pattern in phone_patterns.items():\n if pattern.search(prefix) is not None:\n phones[label] = parse_phone(link.get('href'))\n return phones\n\n\ndef parse_phone(phone):\n res = parse_phone_pattern.search(phone)\n if res is not None:\n return res.groups()[0]\n\n\ndef get_fax(el):\n res = fax_pattern.search(el.text_content())\n if res is not None:\n return ''.join(res.groups())\n\n\ndef get_address(el):\n for br in el.xpath('p/br'):\n piece = (br.tail or '').strip()\n res = address_pattern.search(piece)\n if res is not None:\n return address_replace_pattern.sub(r'ID \\1', piece).strip()\n\n\nclass IDPersonScraper(Scraper):\n \"\"\"Legislator data seems to be available for the current term only.\"\"\"\n jurisdiction = 'id'\n\n def scrape(self, chamber=None):\n if chamber:\n yield from self.scrape_chamber(chamber)\n else:\n yield from self.scrape_chamber('upper')\n yield from self.scrape_chamber('lower')\n\n def scrape_chamber(self, chamber):\n \"\"\"\n Scrapes legislators for the current term only\n \"\"\"\n # self.validate_term(term, latest_only=True)\n url = BASE_URL % CHAMBERS[chamber].lower()\n index = 
self.get(url, verify=False).text\n html = lxml.html.fromstring(index)\n html.make_links_absolute(url)\n\n rows = html.xpath('//div[contains(@class, \"row-equal-height\")]')\n\n for row in rows:\n img_url = row.xpath('.//img/@src')[0]\n\n inner = row.xpath('.//div[@class=\"vc-column-innner-wrapper\"]')[1]\n if 'Resigned' in inner.text_content():\n continue\n\n name = inner.xpath('p/strong')[0].text.replace(u'\\xa0', ' ').strip()\n name = re.sub('\\s+', ' ', name)\n party = PARTY[inner.xpath('p/strong')[0].tail.strip()]\n email = inner.xpath('p/strong/a')[0].text\n district = inner.xpath('p/a')[0].text.replace('District ', '')\n\n person_url = inner.xpath('p/a/@href')[0]\n # skip roles for now\n role = ''\n # for com in inner.xpath('p/a[contains(@href, \"committees\")]'):\n # role = com.tail.strip()\n\n person = Person(name=name, district=district,\n party=party, primary_org=chamber,\n image=img_url, role=role)\n phones = get_phones(inner)\n phone = phones.get('home') or phones.get('business')\n office_phone = phones.get('office')\n address = get_address(inner)\n fax = get_fax(inner)\n if address:\n person.add_contact_detail(type='address', value=address,\n note='District Office')\n if phone:\n person.add_contact_detail(type='voice', value=phone,\n note='District Office')\n if fax:\n person.add_contact_detail(type='fax', value=fax,\n note='District Office')\n if email:\n person.add_contact_detail(type='email', value=email,\n note='District Office')\n if office_phone:\n person.add_contact_detail(type='voice', value=office_phone,\n note='Capitol Office')\n person.add_source(url)\n person.add_link(person_url)\n yield person\n", "path": "openstates/id/people.py"}]}
| 2,032 | 146 |
gh_patches_debug_4435
|
rasdani/github-patches
|
git_diff
|
pystiche__pystiche-544
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Documentation theme
We are currently using the [ReadTheDocs `sphinx` theme](https://github.com/readthedocs/sphinx_rtd_theme):
https://github.com/pystiche/pystiche/blob/65f4d787e44b1ffbf7e5b6e48298ed8c7460e5a9/docs/source/conf.py#L85-L90
A lot of other projects in the PyData ecosystem have migrated to one common [base theme](https://pydata-sphinx-theme.readthedocs.io/en/latest/). IMHO it looks a lot more modern in comparison.
In terms of accessibility, I think it would also be a good thing if we made the switch so users familiar with `numpy`'s, `pandas`', or any other of the many libraries using that theme are instantly familiar with how to navigate the documentation.
</issue>
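For reference, switching a Sphinx project to that theme is essentially a one-line change in `conf.py` (sketch, assuming the `pydata-sphinx-theme` package is added to the docs requirements):

```python
# docs/source/conf.py (sketch)
html_theme = "pydata_sphinx_theme"
```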
<code>
[start of docs/source/conf.py]
1 import contextlib
2 import os
3 import re
4 import shutil
5 import warnings
6 from datetime import datetime
7 from distutils.util import strtobool
8 from importlib_metadata import metadata as extract_metadata
9 from os import path
10 from unittest import mock
11 from urllib.parse import urljoin
12
13 from sphinx_gallery.sorting import ExampleTitleSortKey, ExplicitOrder
14 from tqdm import tqdm
15
16 import torch
17
18 from pystiche.misc import download_file
19
20 HERE = path.dirname(__file__)
21 PROJECT_ROOT = path.abspath(path.join(HERE, "..", ".."))
22
23
24 def get_bool_env_var(name, default=False):
25 try:
26 return bool(strtobool(os.environ[name]))
27 except KeyError:
28 return default
29
30
31 GITHUB_ACTIONS = get_bool_env_var("GITHUB_ACTIONS")
32 RTD = get_bool_env_var("READTHEDOCS")
33 CI = GITHUB_ACTIONS or RTD or get_bool_env_var("CI")
34
35
36 def project():
37 extension = None
38
39 metadata = extract_metadata("pystiche")
40 project = metadata["name"]
41 author = metadata["author"]
42 copyright = f"{datetime.now().year}, {author}"
43 release = metadata["version"]
44 version = release.split(".dev")[0]
45 config = dict(
46 project=project,
47 author=author,
48 copyright=copyright,
49 release=release,
50 version=version,
51 )
52
53 return extension, config
54
55
56 def autodoc():
57 extensions = [
58 "sphinx.ext.autodoc",
59 "sphinx.ext.napoleon",
60 "sphinx_autodoc_typehints",
61 ]
62
63 config = None
64
65 return extensions, config
66
67
68 def intersphinx():
69 extension = "sphinx.ext.intersphinx"
70 config = dict(
71 intersphinx_mapping={
72 "python": ("https://docs.python.org/3.6", None),
73 "torch": ("https://pytorch.org/docs/stable/", None),
74 "torchvision": ("https://pytorch.org/docs/stable/", None),
75 "PIL": ("https://pillow.readthedocs.io/en/stable/", None),
76 "numpy": ("https://numpy.org/doc/1.18/", None),
77 "requests": ("https://requests.readthedocs.io/en/stable/", None),
78 "matplotlib": ("https://matplotlib.org", None),
79 }
80 )
81 return extension, config
82
83
84 def html():
85 extension = None
86
87 config = dict(html_theme="sphinx_rtd_theme")
88
89 return extension, config
90
91
92 def latex():
93 extension = None
94
95 with open(path.join(HERE, "custom_cmds.tex"), "r") as fh:
96 custom_cmds = fh.read()
97 config = dict(
98 latex_elements={"preamble": custom_cmds},
99 mathjax_inline=[r"\(" + custom_cmds, r"\)"],
100 mathjax_display=[r"\[" + custom_cmds, r"\]"],
101 )
102
103 return extension, config
104
105
106 def bibtex():
107 extension = "sphinxcontrib.bibtex"
108
109 config = dict(bibtex_bibfiles=["references.bib"])
110
111 return extension, config
112
113
114 def doctest():
115 extension = "sphinx.ext.doctest"
116
117 doctest_global_setup = """
118 import torch
119 from torch import nn
120
121 import pystiche
122
123 import warnings
124 warnings.filterwarnings("ignore", category=FutureWarning)
125
126 from unittest import mock
127
128 patcher = mock.patch(
129 "pystiche.enc.models.utils.ModelMultiLayerEncoder.load_state_dict_from_url"
130 )
131 patcher.start()
132 """
133
134 doctest_global_cleanup = """
135 mock.patch.stopall()
136 """
137 config = dict(
138 doctest_global_setup=doctest_global_setup,
139 doctest_global_cleanup=doctest_global_cleanup,
140 )
141
142 return extension, config
143
144
145 def sphinx_gallery():
146 extension = "sphinx_gallery.gen_gallery"
147
148 plot_gallery = get_bool_env_var("PYSTICHE_PLOT_GALLERY", default=not CI)
149 download_gallery = get_bool_env_var("PYSTICHE_DOWNLOAD_GALLERY", default=CI)
150
151 def download():
152 nonlocal extension
153 nonlocal plot_gallery
154
155 # version and release are available as soon as the project config is loaded
156 version = globals()["version"]
157 release = globals()["release"]
158
159 base = "https://download.pystiche.org/galleries/"
160 is_dev = version != release
161 file = "main.zip" if is_dev else f"v{version}.zip"
162
163 url = urljoin(base, file)
164 print(f"Downloading pre-built galleries from {url}")
165 download_file(url, file)
166
167 with contextlib.suppress(FileNotFoundError):
168 shutil.rmtree(path.join(HERE, "galleries"))
169 shutil.unpack_archive(file, extract_dir=".")
170 os.remove(file)
171
172 extension = "sphinx_gallery.load_style"
173 plot_gallery = False
174
175 def show_cuda_memory(func):
176 torch.cuda.reset_peak_memory_stats()
177 out = func()
178
179 stats = torch.cuda.memory_stats()
180 peak_bytes_usage = stats["allocated_bytes.all.peak"]
181 memory = peak_bytes_usage / 1024 ** 2
182
183 return memory, out
184
185 def patch_tqdm():
186 patchers = [mock.patch("tqdm.std._supports_unicode", return_value=True)]
187
188 display = tqdm.display
189 close = tqdm.close
190 displayed = set()
191
192 def display_only_last(self, msg=None, pos=None):
193 if self.n != self.total or self in displayed:
194 return
195
196 display(self, msg=msg, pos=pos)
197 displayed.add(self)
198
199 patchers.append(mock.patch("tqdm.std.tqdm.display", new=display_only_last))
200
201 def close_(self):
202 close(self)
203 with contextlib.suppress(KeyError):
204 displayed.remove(self)
205
206 patchers.append(mock.patch("tqdm.std.tqdm.close", new=close_))
207
208 for patcher in patchers:
209 patcher.start()
210
211 class PysticheExampleTitleSortKey(ExampleTitleSortKey):
212 def __call__(self, filename):
213 # The beginner example *without* pystiche is placed before the example
214 # *with* to clarify the narrative.
215 if filename == "example_nst_without_pystiche.py":
216 return "1"
217 elif filename == "example_nst_with_pystiche.py":
218 return "2"
219 else:
220 return super().__call__(filename)
221
222 def filter_warnings():
223 # See #https://github.com/pytorch/pytorch/issues/60053
224 warnings.filterwarnings(
225 "ignore",
226 category=UserWarning,
227 message=(
228 re.escape(
229 "Named tensors and all their associated APIs are an experimental "
230 "feature and subject to change. Please do not use them for "
231 "anything important until they are released as stable. (Triggered "
232 "internally at /pytorch/c10/core/TensorImpl.h:1156.)"
233 )
234 ),
235 )
236
237 if download_gallery:
238 download()
239
240 if plot_gallery and not torch.cuda.is_available():
241 msg = (
242 "The galleries will be built, but CUDA is not available. "
243 "This will take a long time."
244 )
245 print(msg)
246
247 sphinx_gallery_conf = {
248 "examples_dirs": path.join(PROJECT_ROOT, "examples"),
249 "gallery_dirs": path.join("galleries", "examples"),
250 "filename_pattern": re.escape(os.sep) + r"example_\w+[.]py$",
251 "ignore_pattern": re.escape(os.sep) + r"_\w+[.]py$",
252 "line_numbers": True,
253 "remove_config_comments": True,
254 "plot_gallery": plot_gallery,
255 "subsection_order": ExplicitOrder(
256 [
257 path.join("..", "..", "examples", sub_gallery)
258 for sub_gallery in ("beginner", "advanced")
259 ]
260 ),
261 "within_subsection_order": PysticheExampleTitleSortKey,
262 "show_memory": show_cuda_memory if torch.cuda.is_available() else True,
263 }
264
265 config = dict(sphinx_gallery_conf=sphinx_gallery_conf)
266 filter_warnings()
267
268 patch_tqdm()
269 filter_warnings()
270
271 return extension, config
272
273
274 def logo():
275 extension = None
276
277 config = dict(html_logo="../../logo.svg")
278
279 return extension, config
280
281
282 extensions = []
283 for loader in (
284 project,
285 autodoc,
286 intersphinx,
287 html,
288 latex,
289 bibtex,
290 doctest,
291 sphinx_gallery,
292 logo,
293 ):
294 extension, config = loader()
295
296 if extension:
297 if isinstance(extension, str):
298 extension = (extension,)
299 extensions.extend(extension)
300
301 if config:
302 globals().update(config)
303
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -84,7 +84,16 @@
def html():
extension = None
- config = dict(html_theme="sphinx_rtd_theme")
+ config = dict(
+ html_theme="pydata_sphinx_theme",
+ html_theme_options=dict(show_prev_next=False, use_edit_page_button=True),
+ html_context=dict(
+ github_user="pystiche",
+ github_repo="pystiche",
+ github_version="main",
+ doc_path="docs/source",
+ ),
+ )
return extension, config
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -84,7 +84,16 @@\n def html():\n extension = None\n \n- config = dict(html_theme=\"sphinx_rtd_theme\")\n+ config = dict(\n+ html_theme=\"pydata_sphinx_theme\",\n+ html_theme_options=dict(show_prev_next=False, use_edit_page_button=True),\n+ html_context=dict(\n+ github_user=\"pystiche\",\n+ github_repo=\"pystiche\",\n+ github_version=\"main\",\n+ doc_path=\"docs/source\",\n+ ),\n+ )\n \n return extension, config\n", "issue": "Documentation theme\nWe are currently using the [ReadTheDocs `sphinx` theme](https://github.com/readthedocs/sphinx_rtd_theme):\r\n\r\nhttps://github.com/pystiche/pystiche/blob/65f4d787e44b1ffbf7e5b6e48298ed8c7460e5a9/docs/source/conf.py#L85-L90\r\n\r\nA lot of other projects in the PyData ecosystem have migrated to one common [base theme](https://pydata-sphinx-theme.readthedocs.io/en/latest/). IMHO it looks a lot more modern in comparison. \r\n\r\nIn terms of accessibility, I think it would be a also good thing if we made the switch so users familiar with `numpy`'s, `pandas`' or any other of the many libraries using that theme, are instantly familiar how to navigate the documentation.\r\n\r\n\n", "before_files": [{"content": "import contextlib\nimport os\nimport re\nimport shutil\nimport warnings\nfrom datetime import datetime\nfrom distutils.util import strtobool\nfrom importlib_metadata import metadata as extract_metadata\nfrom os import path\nfrom unittest import mock\nfrom urllib.parse import urljoin\n\nfrom sphinx_gallery.sorting import ExampleTitleSortKey, ExplicitOrder\nfrom tqdm import tqdm\n\nimport torch\n\nfrom pystiche.misc import download_file\n\nHERE = path.dirname(__file__)\nPROJECT_ROOT = path.abspath(path.join(HERE, \"..\", \"..\"))\n\n\ndef get_bool_env_var(name, default=False):\n try:\n return bool(strtobool(os.environ[name]))\n except KeyError:\n return default\n\n\nGITHUB_ACTIONS = get_bool_env_var(\"GITHUB_ACTIONS\")\nRTD = get_bool_env_var(\"READTHEDOCS\")\nCI = GITHUB_ACTIONS or RTD or get_bool_env_var(\"CI\")\n\n\ndef project():\n extension = None\n\n metadata = extract_metadata(\"pystiche\")\n project = metadata[\"name\"]\n author = metadata[\"author\"]\n copyright = f\"{datetime.now().year}, {author}\"\n release = metadata[\"version\"]\n version = release.split(\".dev\")[0]\n config = dict(\n project=project,\n author=author,\n copyright=copyright,\n release=release,\n version=version,\n )\n\n return extension, config\n\n\ndef autodoc():\n extensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.napoleon\",\n \"sphinx_autodoc_typehints\",\n ]\n\n config = None\n\n return extensions, config\n\n\ndef intersphinx():\n extension = \"sphinx.ext.intersphinx\"\n config = dict(\n intersphinx_mapping={\n \"python\": (\"https://docs.python.org/3.6\", None),\n \"torch\": (\"https://pytorch.org/docs/stable/\", None),\n \"torchvision\": (\"https://pytorch.org/docs/stable/\", None),\n \"PIL\": (\"https://pillow.readthedocs.io/en/stable/\", None),\n \"numpy\": (\"https://numpy.org/doc/1.18/\", None),\n \"requests\": (\"https://requests.readthedocs.io/en/stable/\", None),\n \"matplotlib\": (\"https://matplotlib.org\", None),\n }\n )\n return extension, config\n\n\ndef html():\n extension = None\n\n config = dict(html_theme=\"sphinx_rtd_theme\")\n\n return extension, config\n\n\ndef latex():\n extension = None\n\n with open(path.join(HERE, \"custom_cmds.tex\"), \"r\") as fh:\n custom_cmds = fh.read()\n config = dict(\n 
latex_elements={\"preamble\": custom_cmds},\n mathjax_inline=[r\"\\(\" + custom_cmds, r\"\\)\"],\n mathjax_display=[r\"\\[\" + custom_cmds, r\"\\]\"],\n )\n\n return extension, config\n\n\ndef bibtex():\n extension = \"sphinxcontrib.bibtex\"\n\n config = dict(bibtex_bibfiles=[\"references.bib\"])\n\n return extension, config\n\n\ndef doctest():\n extension = \"sphinx.ext.doctest\"\n\n doctest_global_setup = \"\"\"\nimport torch\nfrom torch import nn\n\nimport pystiche\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\n\nfrom unittest import mock\n\npatcher = mock.patch(\n \"pystiche.enc.models.utils.ModelMultiLayerEncoder.load_state_dict_from_url\"\n)\npatcher.start()\n\"\"\"\n\n doctest_global_cleanup = \"\"\"\nmock.patch.stopall()\n\"\"\"\n config = dict(\n doctest_global_setup=doctest_global_setup,\n doctest_global_cleanup=doctest_global_cleanup,\n )\n\n return extension, config\n\n\ndef sphinx_gallery():\n extension = \"sphinx_gallery.gen_gallery\"\n\n plot_gallery = get_bool_env_var(\"PYSTICHE_PLOT_GALLERY\", default=not CI)\n download_gallery = get_bool_env_var(\"PYSTICHE_DOWNLOAD_GALLERY\", default=CI)\n\n def download():\n nonlocal extension\n nonlocal plot_gallery\n\n # version and release are available as soon as the project config is loaded\n version = globals()[\"version\"]\n release = globals()[\"release\"]\n\n base = \"https://download.pystiche.org/galleries/\"\n is_dev = version != release\n file = \"main.zip\" if is_dev else f\"v{version}.zip\"\n\n url = urljoin(base, file)\n print(f\"Downloading pre-built galleries from {url}\")\n download_file(url, file)\n\n with contextlib.suppress(FileNotFoundError):\n shutil.rmtree(path.join(HERE, \"galleries\"))\n shutil.unpack_archive(file, extract_dir=\".\")\n os.remove(file)\n\n extension = \"sphinx_gallery.load_style\"\n plot_gallery = False\n\n def show_cuda_memory(func):\n torch.cuda.reset_peak_memory_stats()\n out = func()\n\n stats = torch.cuda.memory_stats()\n peak_bytes_usage = stats[\"allocated_bytes.all.peak\"]\n memory = peak_bytes_usage / 1024 ** 2\n\n return memory, out\n\n def patch_tqdm():\n patchers = [mock.patch(\"tqdm.std._supports_unicode\", return_value=True)]\n\n display = tqdm.display\n close = tqdm.close\n displayed = set()\n\n def display_only_last(self, msg=None, pos=None):\n if self.n != self.total or self in displayed:\n return\n\n display(self, msg=msg, pos=pos)\n displayed.add(self)\n\n patchers.append(mock.patch(\"tqdm.std.tqdm.display\", new=display_only_last))\n\n def close_(self):\n close(self)\n with contextlib.suppress(KeyError):\n displayed.remove(self)\n\n patchers.append(mock.patch(\"tqdm.std.tqdm.close\", new=close_))\n\n for patcher in patchers:\n patcher.start()\n\n class PysticheExampleTitleSortKey(ExampleTitleSortKey):\n def __call__(self, filename):\n # The beginner example *without* pystiche is placed before the example\n # *with* to clarify the narrative.\n if filename == \"example_nst_without_pystiche.py\":\n return \"1\"\n elif filename == \"example_nst_with_pystiche.py\":\n return \"2\"\n else:\n return super().__call__(filename)\n\n def filter_warnings():\n # See #https://github.com/pytorch/pytorch/issues/60053\n warnings.filterwarnings(\n \"ignore\",\n category=UserWarning,\n message=(\n re.escape(\n \"Named tensors and all their associated APIs are an experimental \"\n \"feature and subject to change. Please do not use them for \"\n \"anything important until they are released as stable. 
(Triggered \"\n \"internally at /pytorch/c10/core/TensorImpl.h:1156.)\"\n )\n ),\n )\n\n if download_gallery:\n download()\n\n if plot_gallery and not torch.cuda.is_available():\n msg = (\n \"The galleries will be built, but CUDA is not available. \"\n \"This will take a long time.\"\n )\n print(msg)\n\n sphinx_gallery_conf = {\n \"examples_dirs\": path.join(PROJECT_ROOT, \"examples\"),\n \"gallery_dirs\": path.join(\"galleries\", \"examples\"),\n \"filename_pattern\": re.escape(os.sep) + r\"example_\\w+[.]py$\",\n \"ignore_pattern\": re.escape(os.sep) + r\"_\\w+[.]py$\",\n \"line_numbers\": True,\n \"remove_config_comments\": True,\n \"plot_gallery\": plot_gallery,\n \"subsection_order\": ExplicitOrder(\n [\n path.join(\"..\", \"..\", \"examples\", sub_gallery)\n for sub_gallery in (\"beginner\", \"advanced\")\n ]\n ),\n \"within_subsection_order\": PysticheExampleTitleSortKey,\n \"show_memory\": show_cuda_memory if torch.cuda.is_available() else True,\n }\n\n config = dict(sphinx_gallery_conf=sphinx_gallery_conf)\n filter_warnings()\n\n patch_tqdm()\n filter_warnings()\n\n return extension, config\n\n\ndef logo():\n extension = None\n\n config = dict(html_logo=\"../../logo.svg\")\n\n return extension, config\n\n\nextensions = []\nfor loader in (\n project,\n autodoc,\n intersphinx,\n html,\n latex,\n bibtex,\n doctest,\n sphinx_gallery,\n logo,\n):\n extension, config = loader()\n\n if extension:\n if isinstance(extension, str):\n extension = (extension,)\n extensions.extend(extension)\n\n if config:\n globals().update(config)\n", "path": "docs/source/conf.py"}]}
| 3,413 | 149 |
gh_patches_debug_2085
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-556
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add StadtService Brühl
Add Source for StadtService Brühl
Update stadtservice_bruehl_de.md
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py]
1 import datetime
2 import logging
3
4 import requests
5 from bs4 import BeautifulSoup
6 from waste_collection_schedule import Collection # type: ignore[attr-defined]
7 from waste_collection_schedule.service.ICS import ICS
8
9 TITLE = "StadtService Brühl"
10 DESCRIPTION = "Source für Abfallkalender StadtService Brühl"
11 URL = "https://stadtservice-bruehl.de"
12 TEST_CASES = {"TEST1": {"strasse": "Badorfer Straße", "hnr": "1"}}
13
14 _LOGGER = logging.getLogger(__name__)
15
16
17 class Source:
18 def __init__(self, strasse, hnr):
19 self._strasse = strasse
20 self._hnr = hnr
21 self._ics = ICS()
22
23 def fetch(self):
24
25 today = datetime.date.today()
26 year = today.year
27 # Get District
28 data = {
29 "street": self._strasse,
30 "street_number": self._hnr,
31 "send_street_and_nummber_data": "",
32 }
33
34 r = requests.post(
35 "https://services.stadtservice-bruehl.de/abfallkalender/", data=data
36 )
37 r.raise_for_status()
38
39 soup = BeautifulSoup(r.text, "html.parser")
40
41 for tag in soup.find_all("input", type="hidden"):
42 # print(tag["name"])
43 # print(tag["value"])
44 if tag["name"] == "post_district":
45 post_district = tag["value"]
46
47 if post_district == "":
48 raise Exception("Unable to get district")
49
50 # print(post_district);
51 # Get ICAL
52 data = {
53 "post_year": year,
54 "post_district": post_district,
55 "post_street_name": self._strasse,
56 "post_street_number": self._hnr,
57 "checked_waste_type_hausmuell": "on",
58 "checked_waste_type_gelber_sack": "on",
59 "checked_waste_type_altpapier": "on",
60 "checked_waste_type_bio": "on",
61 "checked_waste_type_weihnachtsbaeume": "on",
62 "checked_waste_type_strassenlaub": "on",
63 "form_page_id": "9",
64 "reminder_time": "8",
65 "send_ics_download_configurator_data": "",
66 }
67
68 r = requests.post(
69 "https://services.stadtservice-bruehl.de/abfallkalender/individuellen-abfuhrkalender-herunterladen/",
70 data=data,
71 )
72 r.raise_for_status()
73
74 dates = self._ics.convert(r.text)
75
76 entries = []
77 for d in dates:
78 entries.append(Collection(d[0], d[1]))
79
80 return entries
81
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py
@@ -18,7 +18,7 @@
def __init__(self, strasse, hnr):
self._strasse = strasse
self._hnr = hnr
- self._ics = ICS()
+ self._ics = ICS(regex="(.*?) \\- ", split_at=", ")
def fetch(self):
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py\n@@ -18,7 +18,7 @@\n def __init__(self, strasse, hnr):\n self._strasse = strasse\n self._hnr = hnr\n- self._ics = ICS()\n+ self._ics = ICS(regex=\"(.*?) \\\\- \", split_at=\", \")\n \n def fetch(self):\n", "issue": "Add StadtService Br\u00fchl\nAdd Source for StadtService Br\u00fchl\r\n\r\nUpdate stadtservice_bruehl_de.md\n", "before_files": [{"content": "import datetime\nimport logging\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"StadtService Br\u00fchl\"\nDESCRIPTION = \"Source f\u00fcr Abfallkalender StadtService Br\u00fchl\"\nURL = \"https://stadtservice-bruehl.de\"\nTEST_CASES = {\"TEST1\": {\"strasse\": \"Badorfer Stra\u00dfe\", \"hnr\": \"1\"}}\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, strasse, hnr):\n self._strasse = strasse\n self._hnr = hnr\n self._ics = ICS()\n\n def fetch(self):\n\n today = datetime.date.today()\n year = today.year\n # Get District\n data = {\n \"street\": self._strasse,\n \"street_number\": self._hnr,\n \"send_street_and_nummber_data\": \"\",\n }\n\n r = requests.post(\n \"https://services.stadtservice-bruehl.de/abfallkalender/\", data=data\n )\n r.raise_for_status()\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n for tag in soup.find_all(\"input\", type=\"hidden\"):\n # print(tag[\"name\"])\n # print(tag[\"value\"])\n if tag[\"name\"] == \"post_district\":\n post_district = tag[\"value\"]\n\n if post_district == \"\":\n raise Exception(\"Unable to get district\")\n\n # print(post_district);\n # Get ICAL\n data = {\n \"post_year\": year,\n \"post_district\": post_district,\n \"post_street_name\": self._strasse,\n \"post_street_number\": self._hnr,\n \"checked_waste_type_hausmuell\": \"on\",\n \"checked_waste_type_gelber_sack\": \"on\",\n \"checked_waste_type_altpapier\": \"on\",\n \"checked_waste_type_bio\": \"on\",\n \"checked_waste_type_weihnachtsbaeume\": \"on\",\n \"checked_waste_type_strassenlaub\": \"on\",\n \"form_page_id\": \"9\",\n \"reminder_time\": \"8\",\n \"send_ics_download_configurator_data\": \"\",\n }\n\n r = requests.post(\n \"https://services.stadtservice-bruehl.de/abfallkalender/individuellen-abfuhrkalender-herunterladen/\",\n data=data,\n )\n r.raise_for_status()\n\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py"}]}
| 1,331 | 168 |
gh_patches_debug_4795
|
rasdani/github-patches
|
git_diff
|
netket__netket-211
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Get rid of netket::Qubits class
`netket::Qubits` is really just a `CustomHilbert` with `localstates = {0.0, 1.0}`. No need for code duplication.
</issue>
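The equivalence the issue describes can be expressed directly on the Python side; a sketch assuming `CustomHilbert` is available from `netket.hilbert`:

```python
def Qubit(graph):
    # A qubit Hilbert space is just a custom Hilbert space with two local states.
    return CustomHilbert(graph, local_states=[0, 1])
```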
<code>
[start of netket/hilbert.py]
1 from ._C_netket.hilbert import *
2
[end of netket/hilbert.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/netket/hilbert.py b/netket/hilbert.py
--- a/netket/hilbert.py
+++ b/netket/hilbert.py
@@ -1 +1,22 @@
from ._C_netket.hilbert import *
+
+
+def Qubit(graph):
+ """
+ Constructs a new ``Qubit`` given a graph.
+
+ Args:
+ graph: Graph representation of sites.
+
+ Examples:
+ Simple qubit hilbert space.
+
+ ```python
+ >>> from netket.graph import Hypercube
+ >>> from netket.hilbert import Qubit
+ >>> g = Hypercube(length=10,n_dim=2,pbc=True)
+ >>> hi = Qubit(graph=g)
+ >>> print(hi.size)
+ 100
+ """
+ return CustomHilbert(graph, local_states=[0, 1])
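The wrapper introduced above is nothing more than a `CustomHilbert` restricted to the local states `[0, 1]`. A minimal sketch of how it can be exercised, assuming `Hypercube` and `CustomHilbert` behave as the docstring in the patch describes:

```python
from netket.graph import Hypercube
from netket.hilbert import CustomHilbert, Qubit

g = Hypercube(length=10, n_dim=2, pbc=True)

hi = Qubit(graph=g)                                  # convenience wrapper
equivalent = CustomHilbert(g, local_states=[0, 1])   # what it expands to

assert hi.size == equivalent.size == 100             # 10 x 10 lattice sites
```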
|
{"golden_diff": "diff --git a/netket/hilbert.py b/netket/hilbert.py\n--- a/netket/hilbert.py\n+++ b/netket/hilbert.py\n@@ -1 +1,22 @@\n from ._C_netket.hilbert import *\n+\n+\n+def Qubit(graph):\n+ \"\"\"\n+ Constructs a new ``Qubit`` given a graph.\n+\n+ Args:\n+ graph: Graph representation of sites.\n+\n+ Examples:\n+ Simple qubit hilbert space.\n+\n+ ```python\n+ >>> from netket.graph import Hypercube\n+ >>> from netket.hilbert import Qubit\n+ >>> g = Hypercube(length=10,n_dim=2,pbc=True)\n+ >>> hi = Qubit(graph=g)\n+ >>> print(hi.size)\n+ 100\n+ \"\"\"\n+ return CustomHilbert(graph, local_states=[0, 1])\n", "issue": "Get rid of netket::Qubits class\n`netket::Qubits` is really just a `CustomHilbert` with `localstates = {0.0, 1.0}`. No need for code duplication.\n", "before_files": [{"content": "from ._C_netket.hilbert import *\n", "path": "netket/hilbert.py"}]}
| 598 | 201 |
gh_patches_debug_16453
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-18206
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cast
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/manipulation.py]
1 # global
2 import ivy
3 from ivy.functional.frontends.paddle.func_wrapper import (
4 to_ivy_arrays_and_back,
5 )
6 from ivy.func_wrapper import with_unsupported_dtypes
7
8
9 @to_ivy_arrays_and_back
10 def reshape(x, shape):
11 return ivy.reshape(x, shape)
12
13
14 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
15 @to_ivy_arrays_and_back
16 def abs(x, name=None):
17 return ivy.abs(x)
18
19
20 absolute = abs
21
22
23 @to_ivy_arrays_and_back
24 def stack(x, axis=0, name=None):
25 return ivy.stack(x, axis=axis)
26
27
28 @with_unsupported_dtypes({"2.5.0 and below": ("int8", "int16")}, "paddle")
29 @to_ivy_arrays_and_back
30 def concat(x, axis, name=None):
31 return ivy.concat(x, axis=axis)
32
33
34 @with_unsupported_dtypes(
35 {"2.5.0 and below": ("int8", "uint8", "int16", "float16")},
36 "paddle",
37 )
38 @to_ivy_arrays_and_back
39 def tile(x, repeat_times, name=None):
40 return ivy.tile(x, repeats=repeat_times)
41
42
43 @with_unsupported_dtypes(
44 {"2.5.0 and below": ("int16", "complex64", "complex128")},
45 "paddle",
46 )
47 @to_ivy_arrays_and_back
48 def split(x, num_or_sections, axis=0, name=None):
49 return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)
50
51
52 @with_unsupported_dtypes(
53 {"2.5.0 and below": ("float16", "bfloat16", "int8", "int16")},
54 "paddle",
55 )
56 @to_ivy_arrays_and_back
57 def squeeze(x, axis=None, name=None):
58 return ivy.squeeze(x, axis=axis)
59
[end of ivy/functional/frontends/paddle/tensor/manipulation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py
--- a/ivy/functional/frontends/paddle/tensor/manipulation.py
+++ b/ivy/functional/frontends/paddle/tensor/manipulation.py
@@ -3,7 +3,10 @@
from ivy.functional.frontends.paddle.func_wrapper import (
to_ivy_arrays_and_back,
)
-from ivy.func_wrapper import with_unsupported_dtypes
+from ivy.func_wrapper import (
+ with_unsupported_dtypes,
+ with_supported_dtypes,
+)
@to_ivy_arrays_and_back
@@ -56,3 +59,22 @@
@to_ivy_arrays_and_back
def squeeze(x, axis=None, name=None):
return ivy.squeeze(x, axis=axis)
+
+
+@with_supported_dtypes(
+ {
+ "2.5.0 and below": (
+ "bool",
+ "float16",
+ "float32",
+ "float64",
+ "int32",
+ "int64",
+ "uint8",
+ )
+ },
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def cast(x, dtype):
+ return ivy.astype(x, dtype)
|
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py\n--- a/ivy/functional/frontends/paddle/tensor/manipulation.py\n+++ b/ivy/functional/frontends/paddle/tensor/manipulation.py\n@@ -3,7 +3,10 @@\n from ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n )\n-from ivy.func_wrapper import with_unsupported_dtypes\n+from ivy.func_wrapper import (\n+ with_unsupported_dtypes,\n+ with_supported_dtypes,\n+)\n \n \n @to_ivy_arrays_and_back\n@@ -56,3 +59,22 @@\n @to_ivy_arrays_and_back\n def squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n+\n+\n+@with_supported_dtypes(\n+ {\n+ \"2.5.0 and below\": (\n+ \"bool\",\n+ \"float16\",\n+ \"float32\",\n+ \"float64\",\n+ \"int32\",\n+ \"int64\",\n+ \"uint8\",\n+ )\n+ },\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def cast(x, dtype):\n+ return ivy.astype(x, dtype)\n", "issue": "cast\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\n\n@to_ivy_arrays_and_back\ndef reshape(x, shape):\n return ivy.reshape(x, shape)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\nabsolute = abs\n\n\n@to_ivy_arrays_and_back\ndef stack(x, axis=0, name=None):\n return ivy.stack(x, axis=axis)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef concat(x, axis, name=None):\n return ivy.concat(x, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef tile(x, repeat_times, name=None):\n return ivy.tile(x, repeats=repeat_times)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef split(x, num_or_sections, axis=0, name=None):\n return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"float16\", \"bfloat16\", \"int8\", \"int16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n", "path": "ivy/functional/frontends/paddle/tensor/manipulation.py"}]}
| 1,111 | 298 |
gh_patches_debug_19481
|
rasdani/github-patches
|
git_diff
|
rlworkgroup__garage-1770
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Export TaskSamplers in __init__
This will put them in the public API and make them visible in the API docs
</issue>
<code>
[start of src/garage/experiment/__init__.py]
1 """Experiment functions."""
2 from garage.experiment.local_runner import LocalRunner
3 from garage.experiment.local_tf_runner import LocalTFRunner
4 from garage.experiment.meta_evaluator import MetaEvaluator
5 from garage.experiment.snapshotter import SnapshotConfig, Snapshotter
6 from garage.experiment.task_sampler import TaskSampler
7
8 __all__ = [
9 'LocalRunner',
10 'LocalTFRunner',
11 'MetaEvaluator',
12 'Snapshotter',
13 'SnapshotConfig',
14 'TaskSampler',
15 ]
16
[end of src/garage/experiment/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/garage/experiment/__init__.py b/src/garage/experiment/__init__.py
--- a/src/garage/experiment/__init__.py
+++ b/src/garage/experiment/__init__.py
@@ -1,9 +1,15 @@
"""Experiment functions."""
+# yapf: disable
from garage.experiment.local_runner import LocalRunner
from garage.experiment.local_tf_runner import LocalTFRunner
from garage.experiment.meta_evaluator import MetaEvaluator
from garage.experiment.snapshotter import SnapshotConfig, Snapshotter
-from garage.experiment.task_sampler import TaskSampler
+from garage.experiment.task_sampler import (ConstructEnvsSampler,
+ EnvPoolSampler,
+ SetTaskSampler,
+ TaskSampler)
+
+# yapf: enable
__all__ = [
'LocalRunner',
@@ -12,4 +18,7 @@
'Snapshotter',
'SnapshotConfig',
'TaskSampler',
+ 'ConstructEnvsSampler',
+ 'EnvPoolSampler',
+ 'SetTaskSampler',
]
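With the re-export in place, the samplers become importable straight from `garage.experiment` (and are therefore picked up by the API docs) instead of only from the `task_sampler` submodule. A short sketch, assuming the package layout shown in the diff:

```python
import garage.experiment as experiment
from garage.experiment.task_sampler import EnvPoolSampler

# Both names resolve to the same class once the re-export is applied.
assert experiment.EnvPoolSampler is EnvPoolSampler
```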
|
{"golden_diff": "diff --git a/src/garage/experiment/__init__.py b/src/garage/experiment/__init__.py\n--- a/src/garage/experiment/__init__.py\n+++ b/src/garage/experiment/__init__.py\n@@ -1,9 +1,15 @@\n \"\"\"Experiment functions.\"\"\"\n+# yapf: disable\n from garage.experiment.local_runner import LocalRunner\n from garage.experiment.local_tf_runner import LocalTFRunner\n from garage.experiment.meta_evaluator import MetaEvaluator\n from garage.experiment.snapshotter import SnapshotConfig, Snapshotter\n-from garage.experiment.task_sampler import TaskSampler\n+from garage.experiment.task_sampler import (ConstructEnvsSampler,\n+ EnvPoolSampler,\n+ SetTaskSampler,\n+ TaskSampler)\n+\n+# yapf: enable\n \n __all__ = [\n 'LocalRunner',\n@@ -12,4 +18,7 @@\n 'Snapshotter',\n 'SnapshotConfig',\n 'TaskSampler',\n+ 'ConstructEnvsSampler',\n+ 'EnvPoolSampler',\n+ 'SetTaskSampler',\n ]\n", "issue": "Export TaskSamplers in __init__\nThis will put them in the public API and make them visible in the API docs\n", "before_files": [{"content": "\"\"\"Experiment functions.\"\"\"\nfrom garage.experiment.local_runner import LocalRunner\nfrom garage.experiment.local_tf_runner import LocalTFRunner\nfrom garage.experiment.meta_evaluator import MetaEvaluator\nfrom garage.experiment.snapshotter import SnapshotConfig, Snapshotter\nfrom garage.experiment.task_sampler import TaskSampler\n\n__all__ = [\n 'LocalRunner',\n 'LocalTFRunner',\n 'MetaEvaluator',\n 'Snapshotter',\n 'SnapshotConfig',\n 'TaskSampler',\n]\n", "path": "src/garage/experiment/__init__.py"}]}
| 691 | 227 |
gh_patches_debug_26652
|
rasdani/github-patches
|
git_diff
|
easybuilders__easybuild-framework-2786
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue running tests
```
[mboisson@build-node easybuild-framework]$ python -O -m test.framework.suite test_update
INFO: This is (based on) vsc.install.shared_setup 0.10.17
Traceback (most recent call last):
File "/cvmfs/soft.computecanada.ca/nix/store/4x0hqnpd0hfh62m3apkxmz8hz3hlsikx-python-2.7.13-env/lib/python2.7/runpy.py", line 174, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/cvmfs/soft.computecanada.ca/nix/store/4x0hqnpd0hfh62m3apkxmz8hz3hlsikx-python-2.7.13-env/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/home/mboisson/git/easybuild-framework/test/framework/suite.py", line 44, in <module>
import test.framework.asyncprocess as a
File "/home/mboisson/git/easybuild-framework/test/framework/asyncprocess.py", line 34, in <module>
from test.framework.utilities import EnhancedTestCase
File "/home/mboisson/git/easybuild-framework/test/framework/utilities.py", line 71, in <module>
go = EasyBuildOptions(go_useconfigfiles=False)
File "easybuild/tools/options.py", line 246, in __init__
super(EasyBuildOptions, self).__init__(*args, **kwargs)
File "/cvmfs/soft.computecanada.ca/nix/store/4x0hqnpd0hfh62m3apkxmz8hz3hlsikx-python-2.7.13-env/lib/python2.7/site-packages/vsc/utils/generaloption.py", line 982, in __init__
self.main_options()
File "/cvmfs/soft.computecanada.ca/nix/store/4x0hqnpd0hfh62m3apkxmz8hz3hlsikx-python-2.7.13-env/lib/python2.7/site-packages/vsc/utils/generaloption.py", line 1058, in main_options
fn()
File "easybuild/tools/options.py", line 493, in config_options
'choice', 'store', DEFAULT_REPOSITORY, sorted(avail_repositories().keys())),
File "easybuild/tools/repository/repository.py", line 152, in avail_repositories
raise EasyBuildError("avail_repositories: FileRepository missing from list of repositories")
easybuild.tools.build_log.EasyBuildError: 'avail_repositories: FileRepository missing from list of repositories'
```
Opening an issue for @boegel
</issue>
<code>
[start of easybuild/tools/utilities.py]
1 # #
2 # Copyright 2012-2019 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 # #
25 """
26 Module with various utility functions
27
28 :author: Kenneth Hoste (Ghent University)
29 """
30 import glob
31 import os
32 import string
33 import sys
34 from vsc.utils import fancylogger
35
36 from easybuild.tools.build_log import EasyBuildError, print_msg
37 from easybuild.tools.config import build_option
38
39
40 _log = fancylogger.getLogger('tools.utilities')
41
42
43 # a list of all ascii characters
44 ASCII_CHARS = string.maketrans('', '')
45 # a list of all unwanted ascii characters (we only want to keep digits, letters and _)
46 UNWANTED_CHARS = ASCII_CHARS.translate(ASCII_CHARS, string.digits + string.ascii_letters + "_")
47
48
49 def read_environment(env_vars, strict=False):
50 """NO LONGER SUPPORTED: use read_environment from easybuild.tools.environment instead"""
51 _log.nosupport("read_environment has been moved to easybuild.tools.environment", '2.0')
52
53
54 def flatten(lst):
55 """Flatten a list of lists."""
56 res = []
57 for x in lst:
58 res.extend(x)
59 return res
60
61
62 def quote_str(val, escape_newline=False, prefer_single_quotes=False):
63 """
64 Obtain a new value to be used in string replacement context.
65
66 For non-string values, it just returns the exact same value.
67
68 For string values, it tries to escape the string in quotes, e.g.,
69 foo becomes 'foo', foo'bar becomes "foo'bar",
70 foo'bar"baz becomes \"\"\"foo'bar"baz\"\"\", etc.
71
72 :param escape_newline: wrap strings that include a newline in triple quotes
73 """
74
75 if isinstance(val, basestring):
76 # forced triple double quotes
77 if ("'" in val and '"' in val) or (escape_newline and '\n' in val):
78 return '"""%s"""' % val
79 # single quotes to escape double quote used in strings
80 elif '"' in val:
81 return "'%s'" % val
82 # if single quotes are preferred, use single quotes;
83 # unless a space or a single quote are in the string
84 elif prefer_single_quotes and "'" not in val and ' ' not in val:
85 return "'%s'" % val
86 # fallback on double quotes (required in tcl syntax)
87 else:
88 return '"%s"' % val
89 else:
90 return val
91
92
93 def quote_py_str(val):
94 """Version of quote_str specific for generating use in Python context (e.g., easyconfig parameters)."""
95 return quote_str(val, escape_newline=True, prefer_single_quotes=True)
96
97
98 def remove_unwanted_chars(inputstring):
99 """Remove unwanted characters from the given string and return a copy
100
101 All non-letter and non-numeral characters are considered unwanted except for underscore ('_'), see UNWANTED_CHARS.
102 """
103 return inputstring.translate(ASCII_CHARS, UNWANTED_CHARS)
104
105
106 def import_available_modules(namespace):
107 """
108 Import all available module in the specified namespace.
109
110 :param namespace: The namespace to import modules from.
111 """
112 modules = []
113 for path in sys.path:
114 for module in sorted(glob.glob(os.path.sep.join([path] + namespace.split('.') + ['*.py']))):
115 if not module.endswith('__init__.py'):
116 mod_name = module.split(os.path.sep)[-1].split('.')[0]
117 modpath = '.'.join([namespace, mod_name])
118 _log.debug("importing module %s", modpath)
119 try:
120 mod = __import__(modpath, globals(), locals(), [''])
121 except ImportError as err:
122 raise EasyBuildError("import_available_modules: Failed to import %s: %s", modpath, err)
123 modules.append(mod)
124 return modules
125
126
127 def only_if_module_is_available(modnames, pkgname=None, url=None):
128 """Decorator to guard functions/methods against missing required module with specified name."""
129 if pkgname and url is None:
130 url = 'https://pypi.python.org/pypi/%s' % pkgname
131
132 if isinstance(modnames, basestring):
133 modnames = (modnames,)
134
135 def wrap(orig):
136 """Decorated function, raises ImportError if specified module is not available."""
137 try:
138 imported = None
139 for modname in modnames:
140 try:
141 __import__(modname)
142 imported = modname
143 break
144 except ImportError:
145 pass
146
147 if imported is None:
148 raise ImportError("None of the specified modules %s is available" % ', '.join(modnames))
149 else:
150 return orig
151
152 except ImportError as err:
153 def error(*args, **kwargs):
154 msg = "%s; required module '%s' is not available" % (err, modname)
155 if pkgname:
156 msg += " (provided by Python package %s, available from %s)" % (pkgname, url)
157 elif url:
158 msg += " (available from %s)" % url
159 raise EasyBuildError("ImportError: %s", msg)
160 return error
161
162 return wrap
163
164
165 def trace_msg(message, silent=False):
166 """Print trace message."""
167 if build_option('trace'):
168 print_msg(' >> ' + message, prefix=False)
169
[end of easybuild/tools/utilities.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/easybuild/tools/utilities.py b/easybuild/tools/utilities.py
--- a/easybuild/tools/utilities.py
+++ b/easybuild/tools/utilities.py
@@ -111,7 +111,16 @@
"""
modules = []
for path in sys.path:
- for module in sorted(glob.glob(os.path.sep.join([path] + namespace.split('.') + ['*.py']))):
+
+ cand_modpath_glob = os.path.sep.join([path] + namespace.split('.') + ['*.py'])
+
+ # if sys.path entry being considered is the empty string
+ # (which corresponds to Python packages/modules in current working directory being considered),
+ # we need to strip off / from the start of the path
+ if path == '' and cand_modpath_glob.startswith(os.path.sep):
+ cand_modpath_glob = cand_modpath_glob.lstrip(os.path.sep)
+
+ for module in sorted(glob.glob(cand_modpath_glob)):
if not module.endswith('__init__.py'):
mod_name = module.split(os.path.sep)[-1].split('.')[0]
modpath = '.'.join([namespace, mod_name])
@@ -120,7 +129,10 @@
mod = __import__(modpath, globals(), locals(), [''])
except ImportError as err:
raise EasyBuildError("import_available_modules: Failed to import %s: %s", modpath, err)
- modules.append(mod)
+
+ if mod not in modules:
+ modules.append(mod)
+
return modules
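The guard added above targets the case where `sys.path` contains the empty string (meaning the current working directory): joining it with `os.path.sep` produces a pattern with a leading separator, so the glob searches the filesystem root instead of the working directory. A small standalone illustration of that corner case:

```python
import glob
import os

namespace = "easybuild.tools.repository"
path = ""  # sys.path entry that stands for the current working directory

pattern = os.path.sep.join([path] + namespace.split(".") + ["*.py"])
print(pattern)  # '/easybuild/tools/repository/*.py' on POSIX: an absolute
                # path, so glob.glob(pattern) would search from the root.

# The fix strips the leading separator in exactly this situation:
if path == "" and pattern.startswith(os.path.sep):
    pattern = pattern.lstrip(os.path.sep)

print(glob.glob(pattern))  # now resolved relative to the working directory
```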
|
{"golden_diff": "diff --git a/easybuild/tools/utilities.py b/easybuild/tools/utilities.py\n--- a/easybuild/tools/utilities.py\n+++ b/easybuild/tools/utilities.py\n@@ -111,7 +111,16 @@\n \"\"\"\n modules = []\n for path in sys.path:\n- for module in sorted(glob.glob(os.path.sep.join([path] + namespace.split('.') + ['*.py']))):\n+\n+ cand_modpath_glob = os.path.sep.join([path] + namespace.split('.') + ['*.py'])\n+\n+ # if sys.path entry being considered is the empty string\n+ # (which corresponds to Python packages/modules in current working directory being considered),\n+ # we need to strip off / from the start of the path\n+ if path == '' and cand_modpath_glob.startswith(os.path.sep):\n+ cand_modpath_glob = cand_modpath_glob.lstrip(os.path.sep)\n+\n+ for module in sorted(glob.glob(cand_modpath_glob)):\n if not module.endswith('__init__.py'):\n mod_name = module.split(os.path.sep)[-1].split('.')[0]\n modpath = '.'.join([namespace, mod_name])\n@@ -120,7 +129,10 @@\n mod = __import__(modpath, globals(), locals(), [''])\n except ImportError as err:\n raise EasyBuildError(\"import_available_modules: Failed to import %s: %s\", modpath, err)\n- modules.append(mod)\n+\n+ if mod not in modules:\n+ modules.append(mod)\n+\n return modules\n", "issue": "Issue running tests\n```\r\n[mboisson@build-node easybuild-framework]$ python -O -m test.framework.suite test_update\r\nINFO: This is (based on) vsc.install.shared_setup 0.10.17\r\nTraceback (most recent call last):\r\n File \"/cvmfs/soft.computecanada.ca/nix/store/4x0hqnpd0hfh62m3apkxmz8hz3hlsikx-python-2.7.13-env/lib/python2.7/runpy.py\", line 174, in _run_module_as_main\r\n \"__main__\", fname, loader, pkg_name)\r\n File \"/cvmfs/soft.computecanada.ca/nix/store/4x0hqnpd0hfh62m3apkxmz8hz3hlsikx-python-2.7.13-env/lib/python2.7/runpy.py\", line 72, in _run_code\r\n exec code in run_globals\r\n File \"/home/mboisson/git/easybuild-framework/test/framework/suite.py\", line 44, in <module>\r\n import test.framework.asyncprocess as a\r\n File \"/home/mboisson/git/easybuild-framework/test/framework/asyncprocess.py\", line 34, in <module>\r\n from test.framework.utilities import EnhancedTestCase\r\n File \"/home/mboisson/git/easybuild-framework/test/framework/utilities.py\", line 71, in <module>\r\n go = EasyBuildOptions(go_useconfigfiles=False)\r\n File \"easybuild/tools/options.py\", line 246, in __init__\r\n super(EasyBuildOptions, self).__init__(*args, **kwargs)\r\n File \"/cvmfs/soft.computecanada.ca/nix/store/4x0hqnpd0hfh62m3apkxmz8hz3hlsikx-python-2.7.13-env/lib/python2.7/site-packages/vsc/utils/generaloption.py\", line 982, in __init__\r\n self.main_options()\r\n File \"/cvmfs/soft.computecanada.ca/nix/store/4x0hqnpd0hfh62m3apkxmz8hz3hlsikx-python-2.7.13-env/lib/python2.7/site-packages/vsc/utils/generaloption.py\", line 1058, in main_options\r\n fn()\r\n File \"easybuild/tools/options.py\", line 493, in config_options\r\n 'choice', 'store', DEFAULT_REPOSITORY, sorted(avail_repositories().keys())),\r\n File \"easybuild/tools/repository/repository.py\", line 152, in avail_repositories\r\n raise EasyBuildError(\"avail_repositories: FileRepository missing from list of repositories\")\r\neasybuild.tools.build_log.EasyBuildError: 'avail_repositories: FileRepository missing from list of repositories'\r\n``` \r\n\r\nOpening an issue for @boegel \n", "before_files": [{"content": "# #\n# Copyright 2012-2019 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support 
of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n# #\n\"\"\"\nModule with various utility functions\n\n:author: Kenneth Hoste (Ghent University)\n\"\"\"\nimport glob\nimport os\nimport string\nimport sys\nfrom vsc.utils import fancylogger\n\nfrom easybuild.tools.build_log import EasyBuildError, print_msg\nfrom easybuild.tools.config import build_option\n\n\n_log = fancylogger.getLogger('tools.utilities')\n\n\n# a list of all ascii characters\nASCII_CHARS = string.maketrans('', '')\n# a list of all unwanted ascii characters (we only want to keep digits, letters and _)\nUNWANTED_CHARS = ASCII_CHARS.translate(ASCII_CHARS, string.digits + string.ascii_letters + \"_\")\n\n\ndef read_environment(env_vars, strict=False):\n \"\"\"NO LONGER SUPPORTED: use read_environment from easybuild.tools.environment instead\"\"\"\n _log.nosupport(\"read_environment has been moved to easybuild.tools.environment\", '2.0')\n\n\ndef flatten(lst):\n \"\"\"Flatten a list of lists.\"\"\"\n res = []\n for x in lst:\n res.extend(x)\n return res\n\n\ndef quote_str(val, escape_newline=False, prefer_single_quotes=False):\n \"\"\"\n Obtain a new value to be used in string replacement context.\n\n For non-string values, it just returns the exact same value.\n\n For string values, it tries to escape the string in quotes, e.g.,\n foo becomes 'foo', foo'bar becomes \"foo'bar\",\n foo'bar\"baz becomes \\\"\\\"\\\"foo'bar\"baz\\\"\\\"\\\", etc.\n\n :param escape_newline: wrap strings that include a newline in triple quotes\n \"\"\"\n\n if isinstance(val, basestring):\n # forced triple double quotes\n if (\"'\" in val and '\"' in val) or (escape_newline and '\\n' in val):\n return '\"\"\"%s\"\"\"' % val\n # single quotes to escape double quote used in strings\n elif '\"' in val:\n return \"'%s'\" % val\n # if single quotes are preferred, use single quotes;\n # unless a space or a single quote are in the string\n elif prefer_single_quotes and \"'\" not in val and ' ' not in val:\n return \"'%s'\" % val\n # fallback on double quotes (required in tcl syntax)\n else:\n return '\"%s\"' % val\n else:\n return val\n\n\ndef quote_py_str(val):\n \"\"\"Version of quote_str specific for generating use in Python context (e.g., easyconfig parameters).\"\"\"\n return quote_str(val, escape_newline=True, prefer_single_quotes=True)\n\n\ndef remove_unwanted_chars(inputstring):\n \"\"\"Remove unwanted characters from the given string and return a copy\n\n All non-letter and non-numeral characters are considered unwanted except for underscore ('_'), see UNWANTED_CHARS.\n \"\"\"\n return inputstring.translate(ASCII_CHARS, UNWANTED_CHARS)\n\n\ndef import_available_modules(namespace):\n \"\"\"\n Import all available module in the 
specified namespace.\n\n :param namespace: The namespace to import modules from.\n \"\"\"\n modules = []\n for path in sys.path:\n for module in sorted(glob.glob(os.path.sep.join([path] + namespace.split('.') + ['*.py']))):\n if not module.endswith('__init__.py'):\n mod_name = module.split(os.path.sep)[-1].split('.')[0]\n modpath = '.'.join([namespace, mod_name])\n _log.debug(\"importing module %s\", modpath)\n try:\n mod = __import__(modpath, globals(), locals(), [''])\n except ImportError as err:\n raise EasyBuildError(\"import_available_modules: Failed to import %s: %s\", modpath, err)\n modules.append(mod)\n return modules\n\n\ndef only_if_module_is_available(modnames, pkgname=None, url=None):\n \"\"\"Decorator to guard functions/methods against missing required module with specified name.\"\"\"\n if pkgname and url is None:\n url = 'https://pypi.python.org/pypi/%s' % pkgname\n\n if isinstance(modnames, basestring):\n modnames = (modnames,)\n\n def wrap(orig):\n \"\"\"Decorated function, raises ImportError if specified module is not available.\"\"\"\n try:\n imported = None\n for modname in modnames:\n try:\n __import__(modname)\n imported = modname\n break\n except ImportError:\n pass\n\n if imported is None:\n raise ImportError(\"None of the specified modules %s is available\" % ', '.join(modnames))\n else:\n return orig\n\n except ImportError as err:\n def error(*args, **kwargs):\n msg = \"%s; required module '%s' is not available\" % (err, modname)\n if pkgname:\n msg += \" (provided by Python package %s, available from %s)\" % (pkgname, url)\n elif url:\n msg += \" (available from %s)\" % url\n raise EasyBuildError(\"ImportError: %s\", msg)\n return error\n\n return wrap\n\n\ndef trace_msg(message, silent=False):\n \"\"\"Print trace message.\"\"\"\n if build_option('trace'):\n print_msg(' >> ' + message, prefix=False)\n", "path": "easybuild/tools/utilities.py"}]}
| 2,959 | 340 |
gh_patches_debug_2540
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-23070
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
exponential_
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/random.py]
1 # global
2 import ivy
3 from ivy.func_wrapper import with_supported_dtypes
4 from ivy.func_wrapper import with_supported_device_and_dtypes, with_unsupported_dtypes
5 from ivy.functional.frontends.paddle.func_wrapper import (
6 to_ivy_arrays_and_back,
7 )
8
9
10 @with_supported_dtypes(
11 {"2.5.1 and below": ("float32", "float64")},
12 "paddle",
13 )
14 @to_ivy_arrays_and_back
15 def normal(mean=0.0, std=1.0, shape=None, name=None):
16 return ivy.random_normal(mean=mean, std=std, shape=shape)
17
18
19 @with_supported_dtypes(
20 {"2.5.1 and below": ("float32", "float64")},
21 "paddle",
22 )
23 @to_ivy_arrays_and_back
24 def poisson(x, name=None):
25 return ivy.poisson(x, shape=None, device=None, dtype=None, seed=None, out=None)
26
27
28 @with_supported_device_and_dtypes(
29 {
30 "2.5.1 and above": {
31 "cpu": (
32 "bfloat16",
33 "float32",
34 "float64",
35 ),
36 "gpu": (
37 "bfloat16",
38 "float16",
39 "float32",
40 "float64",
41 ),
42 },
43 "2.4.2 and below": {
44 "cpu": (
45 "float32",
46 "float64",
47 ),
48 "gpu": (
49 "float16",
50 "float32",
51 "float64",
52 ),
53 },
54 },
55 "paddle",
56 )
57 @to_ivy_arrays_and_back
58 def rand(shape, dtype=None, name=None):
59 return ivy.random_uniform(low=0.0, high=1.0, shape=shape, dtype=dtype, seed=None)
60
61
62 @to_ivy_arrays_and_back
63 def randint(low=0, high=None, shape=[1], dtype=None, name=None):
64 return ivy.randint(low, high, shape=shape, dtype=dtype)
65
66
67 @with_unsupported_dtypes(
68 {"2.5.1 and below": ("int16", "float16", "bfloat16", "uint8")},
69 "paddle",
70 )
71 @to_ivy_arrays_and_back
72 def randint_like(x, low=0, high=None, dtype=None, name=None):
73 if high is None:
74 high = low
75 low = 0
76 if high <= 0:
77 raise ivy.exceptions.IvyError(
78 "If high is None, low must be greater than 0, but received low = 0."
79 )
80 return ivy.randint(low, high, shape=x.shape, dtype=dtype, seed=None)
81
82
83 def randn(shape, dtype=None, name=None):
84 if dtype not in ["float32", "float64"]:
85 raise ivy.exceptions.IvyError(
86 "Unsupported dtype for randn, only float32 and float64 are supported, "
87 )
88 return ivy.random_normal(shape=shape, dtype=dtype, seed=None)
89
90
91 @with_supported_dtypes(
92 {"2.5.1 and below": ("float32", "float64")},
93 "paddle",
94 )
95 @to_ivy_arrays_and_back
96 def standard_normal(shape, dtype=None, name=None):
97 return ivy.random_normal(mean=0, std=1, shape=shape, dtype=dtype)
98
99
100 @with_supported_dtypes(
101 {"2.5.1 and below": ("float32", "float64")},
102 "paddle",
103 )
104 @to_ivy_arrays_and_back
105 def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
106 return ivy.random_uniform(low=min, high=max, shape=shape, dtype=dtype, seed=seed)
107
108
109 @with_supported_dtypes(
110 {"2.5.1 and below": ("float32", "float64")},
111 "paddle",
112 )
113 @to_ivy_arrays_and_back
114 def uniform_(x, min=-1.0, max=1.0, seed=0, name=None):
115 x = ivy.array(x)
116 return ivy.random_uniform(
117 low=min, high=max, shape=x.shape, dtype=x.dtype, seed=seed
118 )
119
[end of ivy/functional/frontends/paddle/tensor/random.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ivy/functional/frontends/paddle/tensor/random.py b/ivy/functional/frontends/paddle/tensor/random.py
--- a/ivy/functional/frontends/paddle/tensor/random.py
+++ b/ivy/functional/frontends/paddle/tensor/random.py
@@ -7,6 +7,15 @@
)
+@with_supported_dtypes(
+ {"2.5.1 and below": ("float32", "float64")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def exponential_(x, lam=1.0, name=None):
+ return ivy.multiply(lam, ivy.exp(ivy.multiply(-lam, x)))
+
+
@with_supported_dtypes(
{"2.5.1 and below": ("float32", "float64")},
"paddle",
|
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/random.py b/ivy/functional/frontends/paddle/tensor/random.py\n--- a/ivy/functional/frontends/paddle/tensor/random.py\n+++ b/ivy/functional/frontends/paddle/tensor/random.py\n@@ -7,6 +7,15 @@\n )\n \n \n+@with_supported_dtypes(\n+ {\"2.5.1 and below\": (\"float32\", \"float64\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def exponential_(x, lam=1.0, name=None):\n+ return ivy.multiply(lam, ivy.exp(ivy.multiply(-lam, x)))\n+\n+\n @with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n", "issue": "exponential_\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.func_wrapper import with_supported_device_and_dtypes, with_unsupported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef normal(mean=0.0, std=1.0, shape=None, name=None):\n return ivy.random_normal(mean=mean, std=std, shape=shape)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef poisson(x, name=None):\n return ivy.poisson(x, shape=None, device=None, dtype=None, seed=None, out=None)\n\n\n@with_supported_device_and_dtypes(\n {\n \"2.5.1 and above\": {\n \"cpu\": (\n \"bfloat16\",\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\n \"bfloat16\",\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n },\n \"2.4.2 and below\": {\n \"cpu\": (\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n },\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef rand(shape, dtype=None, name=None):\n return ivy.random_uniform(low=0.0, high=1.0, shape=shape, dtype=dtype, seed=None)\n\n\n@to_ivy_arrays_and_back\ndef randint(low=0, high=None, shape=[1], dtype=None, name=None):\n return ivy.randint(low, high, shape=shape, dtype=dtype)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"int16\", \"float16\", \"bfloat16\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef randint_like(x, low=0, high=None, dtype=None, name=None):\n if high is None:\n high = low\n low = 0\n if high <= 0:\n raise ivy.exceptions.IvyError(\n \"If high is None, low must be greater than 0, but received low = 0.\"\n )\n return ivy.randint(low, high, shape=x.shape, dtype=dtype, seed=None)\n\n\ndef randn(shape, dtype=None, name=None):\n if dtype not in [\"float32\", \"float64\"]:\n raise ivy.exceptions.IvyError(\n \"Unsupported dtype for randn, only float32 and float64 are supported, \"\n )\n return ivy.random_normal(shape=shape, dtype=dtype, seed=None)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef standard_normal(shape, dtype=None, name=None):\n return ivy.random_normal(mean=0, std=1, shape=shape, dtype=dtype)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):\n return ivy.random_uniform(low=min, high=max, shape=shape, dtype=dtype, seed=seed)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef uniform_(x, min=-1.0, max=1.0, seed=0, name=None):\n x = ivy.array(x)\n return ivy.random_uniform(\n low=min, 
high=max, shape=x.shape, dtype=x.dtype, seed=seed\n )\n", "path": "ivy/functional/frontends/paddle/tensor/random.py"}]}
| 1,748 | 186 |
gh_patches_debug_341
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-981
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
JSON Merge Patch deserialization is broken
Merge-patch operations are broken. It looks like a deserialization problem, which may have been introduced with #790. Also, we should definitely include some view tests for this content-type. My bad :/
```
echo '{"data": {"aaa": "bbb"}}' | http patch localhost:8888/v1/buckets/b1 Content-Type:application/merge-patch+json -a aaa:
HTTP/1.1 400 Bad Request
Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff
Content-Length: 331
Content-Type: application/json; charset=UTF-8
Date: Sun, 11 Dec 2016 23:47:26 GMT
Server: waitress
{
"code": 400,
"details": [
{
"description": "\"{\"data\": {\"aaa\": \"bbb\"}}\n\" is not a mapping type: Does not implement dict-like functionality.",
"location": "body",
"name": ""
}
],
"errno": 107,
"error": "Invalid parameters",
"message": "\"{\"data\": {\"aaa\": \"bbb\"}}\n\" is not a mapping type: Does not implement dict-like functionality."
}
```
</issue>
<code>
[start of setup.py]
1 import platform
2 import codecs
3 import os
4 from setuptools import setup, find_packages
5
6 here = os.path.abspath(os.path.dirname(__file__))
7
8
9 def read_file(filename):
10 """Open a related file and return its content."""
11 with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:
12 content = f.read()
13 return content
14
15 README = read_file('README.rst')
16 CHANGELOG = read_file('CHANGELOG.rst')
17 CONTRIBUTORS = read_file('CONTRIBUTORS.rst')
18
19 installed_with_pypy = platform.python_implementation() == 'PyPy'
20
21 REQUIREMENTS = [
22 'colander',
23 'colorama',
24 'cornice >= 2.1',
25 'jsonschema',
26 'jsonpatch',
27 'python-dateutil',
28 'pyramid_multiauth >= 0.8', # User on policy selected event.
29 'transaction',
30 'pyramid_tm',
31 'requests',
32 'six',
33 'structlog >= 16.1.0',
34 'enum34',
35 'waitress',
36 ]
37
38 if installed_with_pypy:
39 # We install psycopg2cffi instead of psycopg2 when dealing with pypy
40 # Note: JSONB support landed after psycopg2cffi 2.7.0
41 POSTGRESQL_REQUIRES = [
42 'SQLAlchemy',
43 'psycopg2cffi>2.7.0',
44 'zope.sqlalchemy',
45 ]
46 else:
47 # ujson is not pypy compliant, as it uses the CPython C API
48 REQUIREMENTS.append('ujson >= 1.35')
49 POSTGRESQL_REQUIRES = [
50 'SQLAlchemy',
51 'psycopg2>2.5',
52 'zope.sqlalchemy',
53 ]
54
55 REDIS_REQUIRES = [
56 'kinto_redis'
57 ]
58
59 SETUP_REQUIRES = [
60 'pytest-runner'
61 ]
62
63 TEST_REQUIREMENTS = [
64 'pytest',
65 'WebTest'
66 ]
67
68 DEPENDENCY_LINKS = [
69 ]
70
71 MONITORING_REQUIRES = [
72 'raven',
73 'statsd',
74 'newrelic',
75 'werkzeug',
76 ]
77
78 ENTRY_POINTS = {
79 'paste.app_factory': [
80 'main = kinto:main',
81 ],
82 'console_scripts': [
83 'kinto = kinto.__main__:main'
84 ],
85 }
86
87
88 setup(name='kinto',
89 version='5.0.1.dev0',
90 description='Kinto Web Service - Store, Sync, Share, and Self-Host.',
91 long_description=README + "\n\n" + CHANGELOG + "\n\n" + CONTRIBUTORS,
92 license='Apache License (2.0)',
93 classifiers=[
94 "Programming Language :: Python",
95 "Programming Language :: Python :: 2",
96 "Programming Language :: Python :: 2.7",
97 "Programming Language :: Python :: 3",
98 "Programming Language :: Python :: 3.4",
99 "Programming Language :: Python :: 3.5",
100 "Programming Language :: Python :: Implementation :: CPython",
101 "Programming Language :: Python :: Implementation :: PyPy",
102 "Topic :: Internet :: WWW/HTTP",
103 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
104 "License :: OSI Approved :: Apache Software License"
105 ],
106 keywords="web sync json storage services",
107 author='Mozilla Services',
108 author_email='[email protected]',
109 url='https://github.com/Kinto/kinto',
110 packages=find_packages(),
111 package_data={'': ['*.rst', '*.py']},
112 include_package_data=True,
113 zip_safe=False,
114 setup_requires=SETUP_REQUIRES,
115 tests_require=TEST_REQUIREMENTS,
116 install_requires=REQUIREMENTS,
117 extras_require={
118 'redis': REDIS_REQUIRES,
119 'postgresql': POSTGRESQL_REQUIRES,
120 'monitoring': MONITORING_REQUIRES,
121 ":python_version=='2.7'": ["functools32", "futures"],
122 },
123 test_suite="tests",
124 dependency_links=DEPENDENCY_LINKS,
125 entry_points=ENTRY_POINTS)
126
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@
REQUIREMENTS = [
'colander',
'colorama',
- 'cornice >= 2.1',
+ 'cornice >= 2.3',
'jsonschema',
'jsonpatch',
'python-dateutil',
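For reference, the failing request from the report can be replayed with `requests`; the URL, credentials and payload below are the illustrative values from the issue, and with `cornice >= 2.3` installed the merge-patch body is expected to deserialize as a mapping instead of producing the 400:

```python
import json
import requests

resp = requests.patch(
    "http://localhost:8888/v1/buckets/b1",
    data=json.dumps({"data": {"aaa": "bbb"}}),
    headers={"Content-Type": "application/merge-patch+json"},
    auth=("aaa", ""),  # same throwaway credentials as in the report
)
print(resp.status_code)  # 200 expected once the cornice requirement is bumped
```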
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -21,7 +21,7 @@\n REQUIREMENTS = [\n 'colander',\n 'colorama',\n- 'cornice >= 2.1',\n+ 'cornice >= 2.3',\n 'jsonschema',\n 'jsonpatch',\n 'python-dateutil',\n", "issue": "JSON Merge Patch deserialization is broken \nMerge-patch operations are broken. It looks like a deserialization problem, which may have been introduced with #790. Also, we should definitely include some view tests for this content-type. My bad :/ \r\n \r\n```\r\necho '{\"data\": {\"aaa\": \"bbb\"}}' | http patch localhost:8888/v1/buckets/b1 Content-Type:application/merge-patch+json -a aaa:\r\nHTTP/1.1 400 Bad Request\r\nAccess-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff\r\nContent-Length: 331\r\nContent-Type: application/json; charset=UTF-8\r\nDate: Sun, 11 Dec 2016 23:47:26 GMT\r\nServer: waitress\r\n\r\n{\r\n \"code\": 400,\r\n \"details\": [\r\n {\r\n \"description\": \"\\\"{\\\"data\\\": {\\\"aaa\\\": \\\"bbb\\\"}}\\n\\\" is not a mapping type: Does not implement dict-like functionality.\",\r\n \"location\": \"body\",\r\n \"name\": \"\"\r\n }\r\n ],\r\n \"errno\": 107,\r\n \"error\": \"Invalid parameters\",\r\n \"message\": \"\\\"{\\\"data\\\": {\\\"aaa\\\": \\\"bbb\\\"}}\\n\\\" is not a mapping type: Does not implement dict-like functionality.\"\r\n}\r\n```\nJSON Merge Patch deserialization is broken \nMerge-patch operations are broken. It looks like a deserialization problem, which may have been introduced with #790. Also, we should definitely include some view tests for this content-type. My bad :/ \r\n \r\n```\r\necho '{\"data\": {\"aaa\": \"bbb\"}}' | http patch localhost:8888/v1/buckets/b1 Content-Type:application/merge-patch+json -a aaa:\r\nHTTP/1.1 400 Bad Request\r\nAccess-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff\r\nContent-Length: 331\r\nContent-Type: application/json; charset=UTF-8\r\nDate: Sun, 11 Dec 2016 23:47:26 GMT\r\nServer: waitress\r\n\r\n{\r\n \"code\": 400,\r\n \"details\": [\r\n {\r\n \"description\": \"\\\"{\\\"data\\\": {\\\"aaa\\\": \\\"bbb\\\"}}\\n\\\" is not a mapping type: Does not implement dict-like functionality.\",\r\n \"location\": \"body\",\r\n \"name\": \"\"\r\n }\r\n ],\r\n \"errno\": 107,\r\n \"error\": \"Invalid parameters\",\r\n \"message\": \"\\\"{\\\"data\\\": {\\\"aaa\\\": \\\"bbb\\\"}}\\n\\\" is not a mapping type: Does not implement dict-like functionality.\"\r\n}\r\n```\nJSON Merge Patch deserialization is broken \nMerge-patch operations are broken. It looks like a deserialization problem, which may have been introduced with #790. Also, we should definitely include some view tests for this content-type. 
My bad :/ \r\n \r\n```\r\necho '{\"data\": {\"aaa\": \"bbb\"}}' | http patch localhost:8888/v1/buckets/b1 Content-Type:application/merge-patch+json -a aaa:\r\nHTTP/1.1 400 Bad Request\r\nAccess-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff\r\nContent-Length: 331\r\nContent-Type: application/json; charset=UTF-8\r\nDate: Sun, 11 Dec 2016 23:47:26 GMT\r\nServer: waitress\r\n\r\n{\r\n \"code\": 400,\r\n \"details\": [\r\n {\r\n \"description\": \"\\\"{\\\"data\\\": {\\\"aaa\\\": \\\"bbb\\\"}}\\n\\\" is not a mapping type: Does not implement dict-like functionality.\",\r\n \"location\": \"body\",\r\n \"name\": \"\"\r\n }\r\n ],\r\n \"errno\": 107,\r\n \"error\": \"Invalid parameters\",\r\n \"message\": \"\\\"{\\\"data\\\": {\\\"aaa\\\": \\\"bbb\\\"}}\\n\\\" is not a mapping type: Does not implement dict-like functionality.\"\r\n}\r\n```\n", "before_files": [{"content": "import platform\nimport codecs\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\ninstalled_with_pypy = platform.python_implementation() == 'PyPy'\n\nREQUIREMENTS = [\n 'colander',\n 'colorama',\n 'cornice >= 2.1',\n 'jsonschema',\n 'jsonpatch',\n 'python-dateutil',\n 'pyramid_multiauth >= 0.8', # User on policy selected event.\n 'transaction',\n 'pyramid_tm',\n 'requests',\n 'six',\n 'structlog >= 16.1.0',\n 'enum34',\n 'waitress',\n]\n\nif installed_with_pypy:\n # We install psycopg2cffi instead of psycopg2 when dealing with pypy\n # Note: JSONB support landed after psycopg2cffi 2.7.0\n POSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2cffi>2.7.0',\n 'zope.sqlalchemy',\n ]\nelse:\n # ujson is not pypy compliant, as it uses the CPython C API\n REQUIREMENTS.append('ujson >= 1.35')\n POSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2>2.5',\n 'zope.sqlalchemy',\n ]\n\nREDIS_REQUIRES = [\n 'kinto_redis'\n]\n\nSETUP_REQUIRES = [\n 'pytest-runner'\n]\n\nTEST_REQUIREMENTS = [\n 'pytest',\n 'WebTest'\n]\n\nDEPENDENCY_LINKS = [\n]\n\nMONITORING_REQUIRES = [\n 'raven',\n 'statsd',\n 'newrelic',\n 'werkzeug',\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\n\nsetup(name='kinto',\n version='5.0.1.dev0',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description=README + \"\\n\\n\" + CHANGELOG + \"\\n\\n\" + CONTRIBUTORS,\n license='Apache License (2.0)',\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\"\n ],\n keywords=\"web sync json storage services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n package_data={'': ['*.rst', 
'*.py']},\n include_package_data=True,\n zip_safe=False,\n setup_requires=SETUP_REQUIRES,\n tests_require=TEST_REQUIREMENTS,\n install_requires=REQUIREMENTS,\n extras_require={\n 'redis': REDIS_REQUIRES,\n 'postgresql': POSTGRESQL_REQUIRES,\n 'monitoring': MONITORING_REQUIRES,\n \":python_version=='2.7'\": [\"functools32\", \"futures\"],\n },\n test_suite=\"tests\",\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS)\n", "path": "setup.py"}]}
| 2,547 | 84 |
gh_patches_debug_6006
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-6091
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
After upgrading to 8.21, Sentry upgrade wizard hangs on auth.allow-registration
When upgrading my on-premise installation from 8.20 to 8.21 (using docker-compose), the wizard prompted me to set the “Anonymize Beacon” option. When clicking “Continue”, the following error message pops up:
> An invalid value for (auth.allow-registration) was passed to the server.
There was no such option `auth.allow-registration` set in either `sentry.conf.py` or `config.yml`.
The call to `https://sentry.example.com/api/0/internal/options/` returned a 400, with the following params:
```
system.admin-email: [email protected]
system.url-prefix | https://sentry.example.com
auth.allow-registration | 0
beacon.anonymous | true
```
I worked around that by resending the request, changing 0 to False. My install is now fine.
Running `sentry config get auth.allow-registration`, whether before the fix or after, returns:
```
type: BOOLEAN
from config: <not set>
current: 0
```
This looks somewhat similar to #5699.
Hope that helps!
</issue>
<code>
[start of src/sentry/utils/types.py]
1 """
2 sentry.utils.types
3 ~~~~~~~~~~~~~~~~~~
4
5 :copyright: (c) 2010-2016 by the Sentry Team, see AUTHORS for more details.
6 :license: BSD, see LICENSE for more details.
7 """
8 from __future__ import absolute_import, print_function
9
10 import six
11
12 from yaml.parser import ParserError
13 from yaml.scanner import ScannerError
14
15 from sentry.utils.yaml import safe_load
16
17 __all__ = ('InvalidTypeError', 'Any', 'Bool', 'Int', 'Float', 'String', 'Dict', 'Sequence', )
18
19
20 class InvalidTypeError(TypeError):
21 pass
22
23
24 class Type(object):
25 """Base Type that provides type coersion"""
26 name = ''
27 # Default value to be returned when initializing
28 default = None
29 # Types that do not need to be coerced
30 expected_types = ()
31 # Types that are acceptable for coersion
32 compatible_types = six.string_types
33
34 def __call__(self, value=None):
35 if value is None:
36 return self._default()
37 if self.test(value):
38 return value
39 if isinstance(value, self.compatible_types):
40 rv = self.convert(value)
41 # Make sure convert was able to do the right thing
42 # and give us the type we were expecting
43 if self.test(rv):
44 return rv
45 raise InvalidTypeError('{!r} is not a valid {}'.format(value, repr(self)))
46
47 def convert(self, value):
48 return value
49
50 def _default(self):
51 return self.default
52
53 def test(self, value):
54 """Check if the value is the correct type or not"""
55 return isinstance(value, self.expected_types)
56
57 def __repr__(self):
58 return self.name
59
60
61 class AnyType(Type):
62 """A type that accepts any value and does no coersion"""
63 name = 'any'
64 expected_types = (object, )
65 compatible_types = (object, )
66
67
68 class BoolType(Type):
69 "Coerce a boolean from a string"
70 name = 'boolean'
71 default = False
72 expected_types = (bool, )
73
74 def convert(self, value):
75 value = value.lower()
76 if value in ('y', 'yes', 't', 'true', '1', 'on'):
77 return True
78 if value in ('n', 'no', 'f', 'false', '0', 'off'):
79 return False
80
81
82 class IntType(Type):
83 """Coerce an integer from a string"""
84 name = 'integer'
85 default = 0
86 expected_types = six.integer_types
87
88 def convert(self, value):
89 try:
90 return int(value)
91 except ValueError:
92 return
93
94
95 class FloatType(Type):
96 """Coerce a float from a string or integer"""
97 name = 'float'
98 default = 0.0
99 expected_types = (float, )
100 compatible_types = six.string_types + six.integer_types + (float, )
101
102 def convert(self, value):
103 try:
104 return float(value)
105 except ValueError:
106 return
107
108
109 class StringType(Type):
110 """String type without any coersion, must be a string"""
111 name = 'string'
112 default = u''
113 expected_types = six.string_types
114 compatible_types = six.string_types
115
116
117 class DictType(Type):
118 """Coerce a dict out of a json/yaml string"""
119 name = 'dictionary'
120 expected_types = (dict, )
121
122 def _default(self):
123 # make sure we create a fresh dict each time
124 return {}
125
126 def convert(self, value):
127 try:
128 return safe_load(value)
129 except (AttributeError, ParserError, ScannerError):
130 return
131
132
133 class SequenceType(Type):
134 """Coerce a tuple out of a json/yaml string or a list"""
135 name = 'sequence'
136 default = ()
137 expected_types = (tuple, list)
138 compatible_types = six.string_types + (tuple, list)
139
140 def convert(self, value):
141 if isinstance(value, six.string_types):
142 try:
143 value = safe_load(value)
144 except (AttributeError, ParserError, ScannerError):
145 return
146 if isinstance(value, list):
147 value = tuple(value)
148 return value
149
150
151 # Initialize singletons of each type for easy reuse
152 Any = AnyType()
153 Bool = BoolType()
154 Int = IntType()
155 Float = FloatType()
156 String = StringType()
157 Dict = DictType()
158 Sequence = SequenceType()
159
160 # Mapping for basic types into what their Type is
161 _type_mapping = {
162 bool: Bool,
163 int: Int,
164 float: Float,
165 six.binary_type: String,
166 six.text_type: String,
167 dict: Dict,
168 tuple: Sequence,
169 list: Sequence,
170 }
171
172
173 def type_from_value(value):
174 """Fetch Type based on a primitive value"""
175 return _type_mapping[type(value)]
176
[end of src/sentry/utils/types.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/sentry/utils/types.py b/src/sentry/utils/types.py
--- a/src/sentry/utils/types.py
+++ b/src/sentry/utils/types.py
@@ -70,8 +70,11 @@
name = 'boolean'
default = False
expected_types = (bool, )
+ compatible_types = six.string_types + six.integer_types
def convert(self, value):
+ if isinstance(value, six.integer_types):
+ return bool(value)
value = value.lower()
if value in ('y', 'yes', 't', 'true', '1', 'on'):
return True
|
{"golden_diff": "diff --git a/src/sentry/utils/types.py b/src/sentry/utils/types.py\n--- a/src/sentry/utils/types.py\n+++ b/src/sentry/utils/types.py\n@@ -70,8 +70,11 @@\n name = 'boolean'\n default = False\n expected_types = (bool, )\n+ compatible_types = six.string_types + six.integer_types\n \n def convert(self, value):\n+ if isinstance(value, six.integer_types):\n+ return bool(value)\n value = value.lower()\n if value in ('y', 'yes', 't', 'true', '1', 'on'):\n return True\n", "issue": "After upgrading to 8.21, Sentry upgrade wizard hangs on auth.allow-registration\nWhen upgrading my onpremise installation form 8.20 to 8.21 (using docker-compose), the wizard prompted me to set the \u201cAnonymize Beacon\u201d option. When clicking \u201cContinue\u201d, the following error message pops up:\r\n> An invalid value for (auth.allow-registration) was passed to the server.\r\n\r\nThere was no such option `auth.allow-registration` set in either `sentry.conf.py` or `config.yml`.\r\n\r\nThe call to `https://sentry.example.com/api/0/internal/options/` returned a 400, with the following params:\r\n```\r\nsystem.admin-email: [email protected]\r\nsystem.url-prefix | https://sentry.example.com\r\nauth.allow-registration | 0\r\nbeacon.anonymous | true\r\n```\r\nI worked around that by resending the request, changing 0 for False. My install is now fine\r\n\r\nRunning `sentry config get auth.allow-registration`, whether before the fix or after, returns:\r\n```\r\n type: BOOLEAN\r\n from config: <not set>\r\n current: 0\r\n```\r\nThis looks somehow similar to #5699.\r\n\r\nHope that helps!\n", "before_files": [{"content": "\"\"\"\nsentry.utils.types\n~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2010-2016 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport six\n\nfrom yaml.parser import ParserError\nfrom yaml.scanner import ScannerError\n\nfrom sentry.utils.yaml import safe_load\n\n__all__ = ('InvalidTypeError', 'Any', 'Bool', 'Int', 'Float', 'String', 'Dict', 'Sequence', )\n\n\nclass InvalidTypeError(TypeError):\n pass\n\n\nclass Type(object):\n \"\"\"Base Type that provides type coersion\"\"\"\n name = ''\n # Default value to be returned when initializing\n default = None\n # Types that do not need to be coerced\n expected_types = ()\n # Types that are acceptable for coersion\n compatible_types = six.string_types\n\n def __call__(self, value=None):\n if value is None:\n return self._default()\n if self.test(value):\n return value\n if isinstance(value, self.compatible_types):\n rv = self.convert(value)\n # Make sure convert was able to do the right thing\n # and give us the type we were expecting\n if self.test(rv):\n return rv\n raise InvalidTypeError('{!r} is not a valid {}'.format(value, repr(self)))\n\n def convert(self, value):\n return value\n\n def _default(self):\n return self.default\n\n def test(self, value):\n \"\"\"Check if the value is the correct type or not\"\"\"\n return isinstance(value, self.expected_types)\n\n def __repr__(self):\n return self.name\n\n\nclass AnyType(Type):\n \"\"\"A type that accepts any value and does no coersion\"\"\"\n name = 'any'\n expected_types = (object, )\n compatible_types = (object, )\n\n\nclass BoolType(Type):\n \"Coerce a boolean from a string\"\n name = 'boolean'\n default = False\n expected_types = (bool, )\n\n def convert(self, value):\n value = value.lower()\n if value in ('y', 'yes', 't', 'true', '1', 'on'):\n return True\n if value in ('n', 
'no', 'f', 'false', '0', 'off'):\n return False\n\n\nclass IntType(Type):\n \"\"\"Coerce an integer from a string\"\"\"\n name = 'integer'\n default = 0\n expected_types = six.integer_types\n\n def convert(self, value):\n try:\n return int(value)\n except ValueError:\n return\n\n\nclass FloatType(Type):\n \"\"\"Coerce a float from a string or integer\"\"\"\n name = 'float'\n default = 0.0\n expected_types = (float, )\n compatible_types = six.string_types + six.integer_types + (float, )\n\n def convert(self, value):\n try:\n return float(value)\n except ValueError:\n return\n\n\nclass StringType(Type):\n \"\"\"String type without any coersion, must be a string\"\"\"\n name = 'string'\n default = u''\n expected_types = six.string_types\n compatible_types = six.string_types\n\n\nclass DictType(Type):\n \"\"\"Coerce a dict out of a json/yaml string\"\"\"\n name = 'dictionary'\n expected_types = (dict, )\n\n def _default(self):\n # make sure we create a fresh dict each time\n return {}\n\n def convert(self, value):\n try:\n return safe_load(value)\n except (AttributeError, ParserError, ScannerError):\n return\n\n\nclass SequenceType(Type):\n \"\"\"Coerce a tuple out of a json/yaml string or a list\"\"\"\n name = 'sequence'\n default = ()\n expected_types = (tuple, list)\n compatible_types = six.string_types + (tuple, list)\n\n def convert(self, value):\n if isinstance(value, six.string_types):\n try:\n value = safe_load(value)\n except (AttributeError, ParserError, ScannerError):\n return\n if isinstance(value, list):\n value = tuple(value)\n return value\n\n\n# Initialize singletons of each type for easy reuse\nAny = AnyType()\nBool = BoolType()\nInt = IntType()\nFloat = FloatType()\nString = StringType()\nDict = DictType()\nSequence = SequenceType()\n\n# Mapping for basic types into what their Type is\n_type_mapping = {\n bool: Bool,\n int: Int,\n float: Float,\n six.binary_type: String,\n six.text_type: String,\n dict: Dict,\n tuple: Sequence,\n list: Sequence,\n}\n\n\ndef type_from_value(value):\n \"\"\"Fetch Type based on a primitive value\"\"\"\n return _type_mapping[type(value)]\n", "path": "src/sentry/utils/types.py"}]}
| 2,253 | 136 |
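The golden diff above makes `BoolType` accept integers, so the `0` sent by the setup wizard is coerced instead of rejected. A minimal standalone sketch of that coercion behaviour, assuming a simplified stand-in class rather than the real `sentry.utils.types` module:

```python
import six


class BoolType(object):
    """Simplified stand-in for sentry.utils.types.BoolType (illustrative only)."""

    expected_types = (bool,)
    # The fix: integers are now acceptable inputs for coercion.
    compatible_types = six.string_types + six.integer_types

    def convert(self, value):
        # Integers such as 0/1 are coerced directly instead of falling through
        # to the string path, which is what produced the "invalid value" error.
        if isinstance(value, six.integer_types):
            return bool(value)
        value = value.lower()
        if value in ('y', 'yes', 't', 'true', '1', 'on'):
            return True
        if value in ('n', 'no', 'f', 'false', '0', 'off'):
            return False


assert BoolType().convert(0) is False
assert BoolType().convert('off') is False
assert BoolType().convert(1) is True
```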
gh_patches_debug_27774
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmpose-946
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
A dataset bug makes topdown training very slow, wasting about 3 minutes every epoch
I found a dataset bug; I tested it on several servers (including 8 A100s with a 96-core CPU) and it happened on all of them. Every epoch, this bug wastes about 3 minutes. I can locate the bug, but I don't know why it happens. It seems to happen only when launching distributed training.
Bug location: when you launch a topdown method, e.g. topdown_heatmap/coco/res50_coco_256x192.py, go to /mmcv/runner/epoch_based_runner.py, around line 48, where there is the following code:
self.call_hook('before_train_epoch')
time.sleep(2) # Prevent possible deadlock during epoch transition
for i, data_batch in enumerate(self.data_loader):
self._inner_iter = i
At the beginning of every epoch, the `for i, data_batch in enumerate(self.data_loader):` line takes about 3 minutes, which makes the training very slow.
You can modify the original code to the code below to reproduce this issue; it only happens at the very beginning of each epoch.
self.call_hook('before_train_epoch')
time.sleep(2) # Prevent possible deadlock during epoch transition
print('before_train_epoch, time:{}'.format(time.time()-start_time))
start_time = time.time()
for i, data_batch in enumerate(self.data_loader):
self._inner_iter = i
print('before_train_iter_load_data, time:{}'.format(time.time()-start_time))
Here is my system information:
Python: 3.7.10 | packaged by conda-forge | (default, Feb 19 2021, 16:07:37) [GCC 9.3.0]
CUDA available: True GPU 0,1,2,3,4,5,6,7: A100-SXM4-40GB
CUDA_HOME: /usr/local/cuda-11.1
NVCC: Build cuda_11.1.TC455_06.29190527_0 GCC: gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0
PyTorch: 1.8.1+cu111
PyTorch compiling details: PyTorch built with:
- GCC 7.3
- C++ Version: 201402
- Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications
- Intel(R) MKL-DNN v1.7.0 (Git Hash 7aed236906b1f7a05c0917e5257a1af05e9ff683)
- OpenMP 201511 (a.k.a. OpenMP 4.5)
- NNPACK is enabled
- CPU capability usage: AVX2
- CUDA Runtime 11.1
- NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-
gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86
- CuDNN 8.0.5
- Magma 2.5.2
- Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.1, CUDNN_VERSION=8.0.5, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated
-fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -O2 -fPIC -Wno-narrowing -Wall -We
xtra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable
-Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-ps
abi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -f
no-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.8.1, USE_CUDA=
ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON,
TorchVision: 0.9.1+cu111
OpenCV: 4.5.3
MMCV: 1.3.8
MMCV Compiler: GCC 7.5
MMCV CUDA Compiler: 11.1
MMPose: 0.15.0+51b4b45
</issue>
<code>
[start of mmpose/apis/train.py]
1 # Copyright (c) OpenMMLab. All rights reserved.
2 import warnings
3
4 import torch
5 from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
6 from mmcv.runner import DistSamplerSeedHook, EpochBasedRunner, OptimizerHook
7
8 from mmpose.core import DistEvalHook, EvalHook, build_optimizers
9 from mmpose.core.distributed_wrapper import DistributedDataParallelWrapper
10 from mmpose.datasets import build_dataloader, build_dataset
11 from mmpose.utils import get_root_logger
12
13 try:
14 from mmcv.runner import Fp16OptimizerHook
15 except ImportError:
16 warnings.warn(
17 'Fp16OptimizerHook from mmpose will be deprecated from '
18 'v0.15.0. Please install mmcv>=1.1.4', DeprecationWarning)
19 from mmpose.core import Fp16OptimizerHook
20
21
22 def train_model(model,
23 dataset,
24 cfg,
25 distributed=False,
26 validate=False,
27 timestamp=None,
28 meta=None):
29 """Train model entry function.
30
31 Args:
32 model (nn.Module): The model to be trained.
33 dataset (Dataset): Train dataset.
34 cfg (dict): The config dict for training.
35 distributed (bool): Whether to use distributed training.
36 Default: False.
37 validate (bool): Whether to do evaluation. Default: False.
38 timestamp (str | None): Local time for runner. Default: None.
39 meta (dict | None): Meta dict to record some important information.
40 Default: None
41 """
42 logger = get_root_logger(cfg.log_level)
43
44 # prepare data loaders
45 dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
46 dataloader_setting = dict(
47 samples_per_gpu=cfg.data.get('samples_per_gpu', {}),
48 workers_per_gpu=cfg.data.get('workers_per_gpu', {}),
49 # cfg.gpus will be ignored if distributed
50 num_gpus=len(cfg.gpu_ids),
51 dist=distributed,
52 seed=cfg.seed)
53 dataloader_setting = dict(dataloader_setting,
54 **cfg.data.get('train_dataloader', {}))
55
56 data_loaders = [
57 build_dataloader(ds, **dataloader_setting) for ds in dataset
58 ]
59
60 # determine wether use adversarial training precess or not
61 use_adverserial_train = cfg.get('use_adversarial_train', False)
62
63 # put model on gpus
64 if distributed:
65 find_unused_parameters = cfg.get('find_unused_parameters', True)
66 # Sets the `find_unused_parameters` parameter in
67 # torch.nn.parallel.DistributedDataParallel
68
69 if use_adverserial_train:
70 # Use DistributedDataParallelWrapper for adversarial training
71 model = DistributedDataParallelWrapper(
72 model,
73 device_ids=[torch.cuda.current_device()],
74 broadcast_buffers=False,
75 find_unused_parameters=find_unused_parameters)
76 else:
77 model = MMDistributedDataParallel(
78 model.cuda(),
79 device_ids=[torch.cuda.current_device()],
80 broadcast_buffers=False,
81 find_unused_parameters=find_unused_parameters)
82 else:
83 model = MMDataParallel(
84 model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
85
86 # build runner
87 optimizer = build_optimizers(model, cfg.optimizer)
88
89 runner = EpochBasedRunner(
90 model,
91 optimizer=optimizer,
92 work_dir=cfg.work_dir,
93 logger=logger,
94 meta=meta)
95 # an ugly workaround to make .log and .log.json filenames the same
96 runner.timestamp = timestamp
97
98 if use_adverserial_train:
99 # The optimizer step process is included in the train_step function
100 # of the model, so the runner should NOT include optimizer hook.
101 optimizer_config = None
102 else:
103 # fp16 setting
104 fp16_cfg = cfg.get('fp16', None)
105 if fp16_cfg is not None:
106 optimizer_config = Fp16OptimizerHook(
107 **cfg.optimizer_config, **fp16_cfg, distributed=distributed)
108 elif distributed and 'type' not in cfg.optimizer_config:
109 optimizer_config = OptimizerHook(**cfg.optimizer_config)
110 else:
111 optimizer_config = cfg.optimizer_config
112
113 # register hooks
114 runner.register_training_hooks(cfg.lr_config, optimizer_config,
115 cfg.checkpoint_config, cfg.log_config,
116 cfg.get('momentum_config', None))
117 if distributed:
118 runner.register_hook(DistSamplerSeedHook())
119
120 # register eval hooks
121 if validate:
122 eval_cfg = cfg.get('evaluation', {})
123 val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
124 dataloader_setting = dict(
125 samples_per_gpu=1,
126 workers_per_gpu=cfg.data.get('workers_per_gpu', {}),
127 # cfg.gpus will be ignored if distributed
128 num_gpus=len(cfg.gpu_ids),
129 dist=distributed,
130 drop_last=False,
131 shuffle=False)
132 dataloader_setting = dict(dataloader_setting,
133 **cfg.data.get('val_dataloader', {}))
134 val_dataloader = build_dataloader(val_dataset, **dataloader_setting)
135 eval_hook = DistEvalHook if distributed else EvalHook
136 runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
137
138 if cfg.resume_from:
139 runner.resume(cfg.resume_from)
140 elif cfg.load_from:
141 runner.load_checkpoint(cfg.load_from)
142 runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
143
[end of mmpose/apis/train.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mmpose/apis/train.py b/mmpose/apis/train.py
--- a/mmpose/apis/train.py
+++ b/mmpose/apis/train.py
@@ -43,19 +43,33 @@
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
- dataloader_setting = dict(
- samples_per_gpu=cfg.data.get('samples_per_gpu', {}),
- workers_per_gpu=cfg.data.get('workers_per_gpu', {}),
- # cfg.gpus will be ignored if distributed
- num_gpus=len(cfg.gpu_ids),
- dist=distributed,
- seed=cfg.seed)
- dataloader_setting = dict(dataloader_setting,
- **cfg.data.get('train_dataloader', {}))
-
- data_loaders = [
- build_dataloader(ds, **dataloader_setting) for ds in dataset
- ]
+ # step 1: give default values and override (if exist) from cfg.data
+ loader_cfg = {
+ **dict(
+ seed=cfg.get('seed'),
+ drop_last=False,
+ dist=distributed,
+ num_gpus=len(cfg.gpu_ids)),
+ **({} if torch.__version__ != 'parrots' else dict(
+ prefetch_num=2,
+ pin_memory=False,
+ )),
+ **dict((k, cfg.data[k]) for k in [
+ 'samples_per_gpu',
+ 'workers_per_gpu',
+ 'shuffle',
+ 'seed',
+ 'drop_last',
+ 'prefetch_num',
+ 'pin_memory',
+ 'persistent_workers',
+ ] if k in cfg.data)
+ }
+
+ # step 2: cfg.data.train_dataloader has highest priority
+ train_loader_cfg = dict(loader_cfg, **cfg.data.get('train_dataloader', {}))
+
+ data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]
# determine wether use adversarial training precess or not
use_adverserial_train = cfg.get('use_adversarial_train', False)
|
{"golden_diff": "diff --git a/mmpose/apis/train.py b/mmpose/apis/train.py\n--- a/mmpose/apis/train.py\n+++ b/mmpose/apis/train.py\n@@ -43,19 +43,33 @@\n \n # prepare data loaders\n dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]\n- dataloader_setting = dict(\n- samples_per_gpu=cfg.data.get('samples_per_gpu', {}),\n- workers_per_gpu=cfg.data.get('workers_per_gpu', {}),\n- # cfg.gpus will be ignored if distributed\n- num_gpus=len(cfg.gpu_ids),\n- dist=distributed,\n- seed=cfg.seed)\n- dataloader_setting = dict(dataloader_setting,\n- **cfg.data.get('train_dataloader', {}))\n-\n- data_loaders = [\n- build_dataloader(ds, **dataloader_setting) for ds in dataset\n- ]\n+ # step 1: give default values and override (if exist) from cfg.data\n+ loader_cfg = {\n+ **dict(\n+ seed=cfg.get('seed'),\n+ drop_last=False,\n+ dist=distributed,\n+ num_gpus=len(cfg.gpu_ids)),\n+ **({} if torch.__version__ != 'parrots' else dict(\n+ prefetch_num=2,\n+ pin_memory=False,\n+ )),\n+ **dict((k, cfg.data[k]) for k in [\n+ 'samples_per_gpu',\n+ 'workers_per_gpu',\n+ 'shuffle',\n+ 'seed',\n+ 'drop_last',\n+ 'prefetch_num',\n+ 'pin_memory',\n+ 'persistent_workers',\n+ ] if k in cfg.data)\n+ }\n+\n+ # step 2: cfg.data.train_dataloader has highest priority\n+ train_loader_cfg = dict(loader_cfg, **cfg.data.get('train_dataloader', {}))\n+\n+ data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]\n \n # determine wether use adversarial training precess or not\n use_adverserial_train = cfg.get('use_adversarial_train', False)\n", "issue": "a dataset bug causing topdown training very slow, wasting 3 min every epoch\ni found a dataset bug, i test it on several server(including 8 a100 with 96 core cpu), it all happened. for every epoch, this bug cause about 3min time wasting. i jsut can locat the bug, but i don't known why it happen. it seems only happen when distribution launching.\r\n\r\nbug loaction: when you lauch a topdown method, eg, topdown_heatmap/coco/res50_coco_256x192.py, go to /mmcv/runner/epoch_based_runner.py, about line 48. 
there is such func\r\n\r\n self.call_hook('before_train_epoch')\r\n time.sleep(2) # Prevent possible deadlock during epoch transition\r\n for i, data_batch in enumerate(self.data_loader):\r\n self._inner_iter = i\r\n\r\nat the every epoch begining, the ( for i, data_batch in enumerate(self.data_loader): ) takes about 3min, it make the training very slow.\r\n\r\nyou can modify the ori code to the code below to reproduce this issue, this only happen at very epoch begining.\r\n\r\n self.call_hook('before_train_epoch')\r\n time.sleep(2) # Prevent possible deadlock during epoch transition\r\n print('before_train_epoch, time:{}'.format(time.time()-start_time))\r\n start_time = time.time()\r\n for i, data_batch in enumerate(self.data_loader):\r\n self._inner_iter = i\r\n print('before_train_iter_load_data, time:{}'.format(time.time()-start_time))\r\n\r\nhere is my sys information\r\nPython: 3.7.10 | packaged by conda-forge | (default, Feb 19 2021, 16:07:37) [GCC 9.3.0] \r\nCUDA available: True GPU 0,1,2,3,4,5,6,7: A100-SXM4-40GB \r\nCUDA_HOME: /usr/local/cuda-11.1 \r\nNVCC: Build cuda_11.1.TC455_06.29190527_0 GCC: gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0 \r\nPyTorch: 1.8.1+cu111 \r\nPyTorch compiling details: PyTorch built with: \r\n - GCC 7.3 \r\n - C++ Version: 201402 \r\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\r\n - Intel(R) MKL-DNN v1.7.0 (Git Hash 7aed236906b1f7a05c0917e5257a1af05e9ff683)\r\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\r\n - NNPACK is enabled \r\n - CPU capability usage: AVX2 \r\n - CUDA Runtime 11.1\r\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-\r\ngencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\r\n - CuDNN 8.0.5 \r\n - Magma 2.5.2 \r\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.1, CUDNN_VERSION=8.0.5, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated\r\n-fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -O2 -fPIC -Wno-narrowing -Wall -We\r\nxtra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable\r\n-Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-ps\r\nabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -f\r\nno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.8.1, USE_CUDA=\r\nON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON,\r\n \r\nTorchVision: 0.9.1+cu111 \r\nOpenCV: 4.5.3 \r\nMMCV: 1.3.8 \r\nMMCV Compiler: GCC 7.5\r\nMMCV CUDA Compiler: 11.1\r\nMMPose: 0.15.0+51b4b45\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. 
All rights reserved.\nimport warnings\n\nimport torch\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\nfrom mmcv.runner import DistSamplerSeedHook, EpochBasedRunner, OptimizerHook\n\nfrom mmpose.core import DistEvalHook, EvalHook, build_optimizers\nfrom mmpose.core.distributed_wrapper import DistributedDataParallelWrapper\nfrom mmpose.datasets import build_dataloader, build_dataset\nfrom mmpose.utils import get_root_logger\n\ntry:\n from mmcv.runner import Fp16OptimizerHook\nexcept ImportError:\n warnings.warn(\n 'Fp16OptimizerHook from mmpose will be deprecated from '\n 'v0.15.0. Please install mmcv>=1.1.4', DeprecationWarning)\n from mmpose.core import Fp16OptimizerHook\n\n\ndef train_model(model,\n dataset,\n cfg,\n distributed=False,\n validate=False,\n timestamp=None,\n meta=None):\n \"\"\"Train model entry function.\n\n Args:\n model (nn.Module): The model to be trained.\n dataset (Dataset): Train dataset.\n cfg (dict): The config dict for training.\n distributed (bool): Whether to use distributed training.\n Default: False.\n validate (bool): Whether to do evaluation. Default: False.\n timestamp (str | None): Local time for runner. Default: None.\n meta (dict | None): Meta dict to record some important information.\n Default: None\n \"\"\"\n logger = get_root_logger(cfg.log_level)\n\n # prepare data loaders\n dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]\n dataloader_setting = dict(\n samples_per_gpu=cfg.data.get('samples_per_gpu', {}),\n workers_per_gpu=cfg.data.get('workers_per_gpu', {}),\n # cfg.gpus will be ignored if distributed\n num_gpus=len(cfg.gpu_ids),\n dist=distributed,\n seed=cfg.seed)\n dataloader_setting = dict(dataloader_setting,\n **cfg.data.get('train_dataloader', {}))\n\n data_loaders = [\n build_dataloader(ds, **dataloader_setting) for ds in dataset\n ]\n\n # determine wether use adversarial training precess or not\n use_adverserial_train = cfg.get('use_adversarial_train', False)\n\n # put model on gpus\n if distributed:\n find_unused_parameters = cfg.get('find_unused_parameters', True)\n # Sets the `find_unused_parameters` parameter in\n # torch.nn.parallel.DistributedDataParallel\n\n if use_adverserial_train:\n # Use DistributedDataParallelWrapper for adversarial training\n model = DistributedDataParallelWrapper(\n model,\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n model = MMDistributedDataParallel(\n model.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n model = MMDataParallel(\n model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)\n\n # build runner\n optimizer = build_optimizers(model, cfg.optimizer)\n\n runner = EpochBasedRunner(\n model,\n optimizer=optimizer,\n work_dir=cfg.work_dir,\n logger=logger,\n meta=meta)\n # an ugly workaround to make .log and .log.json filenames the same\n runner.timestamp = timestamp\n\n if use_adverserial_train:\n # The optimizer step process is included in the train_step function\n # of the model, so the runner should NOT include optimizer hook.\n optimizer_config = None\n else:\n # fp16 setting\n fp16_cfg = cfg.get('fp16', None)\n if fp16_cfg is not None:\n optimizer_config = Fp16OptimizerHook(\n **cfg.optimizer_config, **fp16_cfg, distributed=distributed)\n elif distributed and 'type' not in cfg.optimizer_config:\n optimizer_config = OptimizerHook(**cfg.optimizer_config)\n else:\n optimizer_config = 
cfg.optimizer_config\n\n # register hooks\n runner.register_training_hooks(cfg.lr_config, optimizer_config,\n cfg.checkpoint_config, cfg.log_config,\n cfg.get('momentum_config', None))\n if distributed:\n runner.register_hook(DistSamplerSeedHook())\n\n # register eval hooks\n if validate:\n eval_cfg = cfg.get('evaluation', {})\n val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))\n dataloader_setting = dict(\n samples_per_gpu=1,\n workers_per_gpu=cfg.data.get('workers_per_gpu', {}),\n # cfg.gpus will be ignored if distributed\n num_gpus=len(cfg.gpu_ids),\n dist=distributed,\n drop_last=False,\n shuffle=False)\n dataloader_setting = dict(dataloader_setting,\n **cfg.data.get('val_dataloader', {}))\n val_dataloader = build_dataloader(val_dataset, **dataloader_setting)\n eval_hook = DistEvalHook if distributed else EvalHook\n runner.register_hook(eval_hook(val_dataloader, **eval_cfg))\n\n if cfg.resume_from:\n runner.resume(cfg.resume_from)\n elif cfg.load_from:\n runner.load_checkpoint(cfg.load_from)\n runner.run(data_loaders, cfg.workflow, cfg.total_epochs)\n", "path": "mmpose/apis/train.py"}]}
| 3,380 | 467 |
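The golden diff above lets keys such as `persistent_workers` flow from `cfg.data` into the train dataloader. A minimal sketch of what that flag does in plain PyTorch, assuming a toy placeholder dataset rather than any mmpose class — with `persistent_workers=True` (PyTorch >= 1.7) worker processes survive across epochs, so re-entering `enumerate(loader)` at each epoch start avoids the respawn cost described in the issue:

```python
import torch
from torch.utils.data import DataLoader, Dataset


class ToyDataset(Dataset):
    """Placeholder dataset used only to illustrate the loader flags."""

    def __len__(self):
        return 1024

    def __getitem__(self, idx):
        return torch.zeros(3, 256, 192), idx


if __name__ == '__main__':
    # persistent_workers=True keeps worker processes alive between epochs, so
    # the start of each epoch does not pay the worker spawn / dataset
    # re-initialisation cost again.
    loader = DataLoader(
        ToyDataset(),
        batch_size=64,
        num_workers=4,
        pin_memory=True,
        persistent_workers=True,
    )
    for epoch in range(2):
        for i, batch in enumerate(loader):
            pass
```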
gh_patches_debug_9805
|
rasdani/github-patches
|
git_diff
|
qutebrowser__qutebrowser-3175
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnicodeEncodeError with os.path.isdir in NetworkManager.createRequest
Report with `LC_ALL=C`:
```
10:56:42 DEBUG init app:process_pos_args:275 Startup URL file:///tmp/people.inf.elte.hu/mersaai/helpvizsgahoz/cpp%20vizsg%ED%B3%83%ED%B2%A1ra/index.html
[...]
10:56:42 ERROR misc utils:wrapper:616 Error in qutebrowser.browser.network.networkmanager.NetworkManager.createRequest
Traceback (most recent call last):
File "/usr/lib/python3.5/site-packages/qutebrowser/utils/utils.py", line 614, in wrapper
return func(*args, **kwargs)
File "/usr/lib/python3.5/site-packages/qutebrowser/browser/network/networkmanager.py", line 365, in createRequest
op, req, outgoing_data)
File "/usr/lib/python3.5/site-packages/qutebrowser/browser/network/filescheme.py", line 116, in createRequest
if os.path.isdir(path):
File "/usr/lib/python3.5/genericpath.py", line 42, in isdir
st = os.stat(s)
UnicodeEncodeError: 'ascii' codec can't encode characters in position 55-60: ordinal not in range(128)
```
</issue>
<code>
[start of qutebrowser/browser/webkit/network/filescheme.py]
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
4 # Copyright 2015-2017 Antoni Boucher (antoyo) <[email protected]>
5 #
6 # This file is part of qutebrowser.
7 #
8 # qutebrowser is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # qutebrowser is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
20 #
21 # pylint complains when using .render() on jinja templates, so we make it shut
22 # up for this whole module.
23
24 """Handler functions for file:... pages."""
25
26 import os
27
28 from qutebrowser.browser.webkit.network import schemehandler, networkreply
29 from qutebrowser.utils import jinja
30
31
32 def get_file_list(basedir, all_files, filterfunc):
33 """Get a list of files filtered by a filter function and sorted by name.
34
35 Args:
36 basedir: The parent directory of all files.
37 all_files: The list of files to filter and sort.
38 filterfunc: The filter function.
39
40 Return:
41 A list of dicts. Each dict contains the name and absname keys.
42 """
43 items = []
44 for filename in all_files:
45 absname = os.path.join(basedir, filename)
46 if filterfunc(absname):
47 items.append({'name': filename, 'absname': absname})
48 return sorted(items, key=lambda v: v['name'].lower())
49
50
51 def is_root(directory):
52 """Check if the directory is the root directory.
53
54 Args:
55 directory: The directory to check.
56
57 Return:
58 Whether the directory is a root directory or not.
59 """
60 # If you're curious as why this works:
61 # dirname('/') = '/'
62 # dirname('/home') = '/'
63 # dirname('/home/') = '/home'
64 # dirname('/home/foo') = '/home'
65 # basically, for files (no trailing slash) it removes the file part, and
66 # for directories, it removes the trailing slash, so the only way for this
67 # to be equal is if the directory is the root directory.
68 return os.path.dirname(directory) == directory
69
70
71 def parent_dir(directory):
72 """Return the parent directory for the given directory.
73
74 Args:
75 directory: The path to the directory.
76
77 Return:
78 The path to the parent directory.
79 """
80 return os.path.normpath(os.path.join(directory, os.pardir))
81
82
83 def dirbrowser_html(path):
84 """Get the directory browser web page.
85
86 Args:
87 path: The directory path.
88
89 Return:
90 The HTML of the web page.
91 """
92 title = "Browse directory: {}".format(path)
93
94 if is_root(path):
95 parent = None
96 else:
97 parent = parent_dir(path)
98
99 try:
100 all_files = os.listdir(path)
101 except OSError as e:
102 html = jinja.render('error.html',
103 title="Error while reading directory",
104 url='file:///{}'.format(path), error=str(e))
105 return html.encode('UTF-8', errors='xmlcharrefreplace')
106
107 files = get_file_list(path, all_files, os.path.isfile)
108 directories = get_file_list(path, all_files, os.path.isdir)
109 html = jinja.render('dirbrowser.html', title=title, url=path,
110 parent=parent, files=files, directories=directories)
111 return html.encode('UTF-8', errors='xmlcharrefreplace')
112
113
114 class FileSchemeHandler(schemehandler.SchemeHandler):
115
116 """Scheme handler for file: URLs."""
117
118 def createRequest(self, _op, request, _outgoing_data):
119 """Create a new request.
120
121 Args:
122 request: const QNetworkRequest & req
123 _op: Operation op
124 _outgoing_data: QIODevice * outgoingData
125
126 Return:
127 A QNetworkReply for directories, None for files.
128 """
129 path = request.url().toLocalFile()
130 if os.path.isdir(path):
131 data = dirbrowser_html(path)
132 return networkreply.FixedDataNetworkReply(
133 request, data, 'text/html', self.parent())
134
[end of qutebrowser/browser/webkit/network/filescheme.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/qutebrowser/browser/webkit/network/filescheme.py b/qutebrowser/browser/webkit/network/filescheme.py
--- a/qutebrowser/browser/webkit/network/filescheme.py
+++ b/qutebrowser/browser/webkit/network/filescheme.py
@@ -127,7 +127,10 @@
A QNetworkReply for directories, None for files.
"""
path = request.url().toLocalFile()
- if os.path.isdir(path):
- data = dirbrowser_html(path)
- return networkreply.FixedDataNetworkReply(
- request, data, 'text/html', self.parent())
+ try:
+ if os.path.isdir(path):
+ data = dirbrowser_html(path)
+ return networkreply.FixedDataNetworkReply(
+ request, data, 'text/html', self.parent())
+ except UnicodeEncodeError:
+ return None
|
{"golden_diff": "diff --git a/qutebrowser/browser/webkit/network/filescheme.py b/qutebrowser/browser/webkit/network/filescheme.py\n--- a/qutebrowser/browser/webkit/network/filescheme.py\n+++ b/qutebrowser/browser/webkit/network/filescheme.py\n@@ -127,7 +127,10 @@\n A QNetworkReply for directories, None for files.\n \"\"\"\n path = request.url().toLocalFile()\n- if os.path.isdir(path):\n- data = dirbrowser_html(path)\n- return networkreply.FixedDataNetworkReply(\n- request, data, 'text/html', self.parent())\n+ try:\n+ if os.path.isdir(path):\n+ data = dirbrowser_html(path)\n+ return networkreply.FixedDataNetworkReply(\n+ request, data, 'text/html', self.parent())\n+ except UnicodeEncodeError:\n+ return None\n", "issue": "UnicodeEncodeError with os.path.isdir in NetworkManager.createRequest\nReport with `LC_ALL=C`:\n\n```\n10:56:42 DEBUG init app:process_pos_args:275 Startup URL file:///tmp/people.inf.elte.hu/mersaai/helpvizsgahoz/cpp%20vizsg%ED%B3%83%ED%B2%A1ra/index.html\n[...]\n10:56:42 ERROR misc utils:wrapper:616 Error in qutebrowser.browser.network.networkmanager.NetworkManager.createRequest\nTraceback (most recent call last):\n File \"/usr/lib/python3.5/site-packages/qutebrowser/utils/utils.py\", line 614, in wrapper\n return func(*args, **kwargs)\n File \"/usr/lib/python3.5/site-packages/qutebrowser/browser/network/networkmanager.py\", line 365, in createRequest\n op, req, outgoing_data)\n File \"/usr/lib/python3.5/site-packages/qutebrowser/browser/network/filescheme.py\", line 116, in createRequest\n if os.path.isdir(path):\n File \"/usr/lib/python3.5/genericpath.py\", line 42, in isdir\n st = os.stat(s)\nUnicodeEncodeError: 'ascii' codec can't encode characters in position 55-60: ordinal not in range(128)\n```\n\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>\n# Copyright 2015-2017 Antoni Boucher (antoyo) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n#\n# pylint complains when using .render() on jinja templates, so we make it shut\n# up for this whole module.\n\n\"\"\"Handler functions for file:... pages.\"\"\"\n\nimport os\n\nfrom qutebrowser.browser.webkit.network import schemehandler, networkreply\nfrom qutebrowser.utils import jinja\n\n\ndef get_file_list(basedir, all_files, filterfunc):\n \"\"\"Get a list of files filtered by a filter function and sorted by name.\n\n Args:\n basedir: The parent directory of all files.\n all_files: The list of files to filter and sort.\n filterfunc: The filter function.\n\n Return:\n A list of dicts. 
Each dict contains the name and absname keys.\n \"\"\"\n items = []\n for filename in all_files:\n absname = os.path.join(basedir, filename)\n if filterfunc(absname):\n items.append({'name': filename, 'absname': absname})\n return sorted(items, key=lambda v: v['name'].lower())\n\n\ndef is_root(directory):\n \"\"\"Check if the directory is the root directory.\n\n Args:\n directory: The directory to check.\n\n Return:\n Whether the directory is a root directory or not.\n \"\"\"\n # If you're curious as why this works:\n # dirname('/') = '/'\n # dirname('/home') = '/'\n # dirname('/home/') = '/home'\n # dirname('/home/foo') = '/home'\n # basically, for files (no trailing slash) it removes the file part, and\n # for directories, it removes the trailing slash, so the only way for this\n # to be equal is if the directory is the root directory.\n return os.path.dirname(directory) == directory\n\n\ndef parent_dir(directory):\n \"\"\"Return the parent directory for the given directory.\n\n Args:\n directory: The path to the directory.\n\n Return:\n The path to the parent directory.\n \"\"\"\n return os.path.normpath(os.path.join(directory, os.pardir))\n\n\ndef dirbrowser_html(path):\n \"\"\"Get the directory browser web page.\n\n Args:\n path: The directory path.\n\n Return:\n The HTML of the web page.\n \"\"\"\n title = \"Browse directory: {}\".format(path)\n\n if is_root(path):\n parent = None\n else:\n parent = parent_dir(path)\n\n try:\n all_files = os.listdir(path)\n except OSError as e:\n html = jinja.render('error.html',\n title=\"Error while reading directory\",\n url='file:///{}'.format(path), error=str(e))\n return html.encode('UTF-8', errors='xmlcharrefreplace')\n\n files = get_file_list(path, all_files, os.path.isfile)\n directories = get_file_list(path, all_files, os.path.isdir)\n html = jinja.render('dirbrowser.html', title=title, url=path,\n parent=parent, files=files, directories=directories)\n return html.encode('UTF-8', errors='xmlcharrefreplace')\n\n\nclass FileSchemeHandler(schemehandler.SchemeHandler):\n\n \"\"\"Scheme handler for file: URLs.\"\"\"\n\n def createRequest(self, _op, request, _outgoing_data):\n \"\"\"Create a new request.\n\n Args:\n request: const QNetworkRequest & req\n _op: Operation op\n _outgoing_data: QIODevice * outgoingData\n\n Return:\n A QNetworkReply for directories, None for files.\n \"\"\"\n path = request.url().toLocalFile()\n if os.path.isdir(path):\n data = dirbrowser_html(path)\n return networkreply.FixedDataNetworkReply(\n request, data, 'text/html', self.parent())\n", "path": "qutebrowser/browser/webkit/network/filescheme.py"}]}
| 2,167 | 187 |
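The golden diff above guards the `os.path.isdir()` call, since under an ASCII locale (such as `LC_ALL=C` with the Python 3.5 in the report) `os.stat()` cannot encode a non-ASCII path and raises `UnicodeEncodeError`. A small sketch of that guard pattern, assuming a hypothetical helper name and sample path rather than qutebrowser's actual handler:

```python
import os


def is_local_dir(path):
    """Guarded directory check mirroring the pattern in the diff (illustrative only)."""
    try:
        return os.path.isdir(path)
    except UnicodeEncodeError:
        # With an ASCII filesystem encoding, os.stat() cannot encode the
        # non-ASCII characters in the path; treat such paths as "not a
        # directory" instead of crashing the request handler.
        return False


print(is_local_dir('/tmp/cpp vizsgára'))  # non-ASCII path similar to the report's URL
```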
gh_patches_debug_41370
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-2520
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Do not include unconfirmed tables for users with viewer & editor access
## Description
1. GET requests to `/api/db/v0/tables/`, `/api/db/v0/tables/<table_id>` and `common_data.tables` currently include unconfirmed tables for all users.
2. Requests to `/api/db/v0/schemas/` and `/api/db/v0/schemas/<schema_id>/` return a `num_tables` count that covers all tables, including ones that are not confirmed.
Confirmation can currently only be performed by a schema manager or a db manager. Users with viewer and editor access should not be able to see tables that aren't confirmed yet.
The UI currently hides unconfirmed tables for case (1), but this should happen at the API level. The count issue mentioned in (2), however, is currently visible in the UI, leading to an inconsistent count being shown between the DB page and the schema page.
## Expected behavior
* Endpoints and common_data that contain tables should not include unconfirmed tables for users with viewer and editor access, i.e. the permission check should take `import_verified` into account. This includes:
- Table list
- Table get
- common_data.tables
* `schema.num_tables` should also take this into account.
</issue>
<code>
[start of mathesar/api/db/permissions/table.py]
1 from django.db.models import Q
2 from rest_access_policy import AccessPolicy
3
4 from mathesar.models.users import DatabaseRole, Role, SchemaRole
5
6
7 class TableAccessPolicy(AccessPolicy):
8 """
9 Anyone can view Table as long as they have
10 at least a Viewer access to the schema or its database
11 Create Access is restricted to superusers or managers of the schema or the database the table is part of.
12 Only superuser or schema/database manager can delete/modify/update the Table
13 """
14
15 statements = [
16 {
17 # Restrictions for the create method is done by the Serializers when creating the schema,
18 # As the permissions depend on the database object.
19 'action': [
20 'list',
21 'retrieve',
22 'create',
23 'type_suggestions',
24 'dependents',
25 'ui_dependents',
26 'joinable_tables',
27 ],
28 'principal': 'authenticated',
29 'effect': 'allow',
30 },
31
32 {
33 'action': [
34 'destroy',
35 'update',
36 'partial_update',
37 'split_table',
38 'move_columns',
39 'previews',
40 'existing_import',
41 'map_imported_columns'
42 ],
43 'principal': 'authenticated',
44 'effect': 'allow',
45 'condition_expression': ['(is_superuser or is_table_manager)']
46 },
47 ]
48
49 @classmethod
50 def _scope_queryset(cls, request, qs, allowed_roles):
51 if not (request.user.is_superuser or request.user.is_anonymous):
52 permissible_database_role_filter = (
53 Q(schema__database__database_role__role__in=allowed_roles)
54 & Q(schema__database__database_role__user=request.user)
55 )
56 permissible_schema_roles_filter = (
57 Q(schema__schema_role__role__in=allowed_roles) & Q(schema__schema_role__user=request.user)
58 )
59 qs = qs.filter(permissible_database_role_filter | permissible_schema_roles_filter)
60 return qs
61
62 @classmethod
63 def scope_queryset(cls, request, qs):
64 """
65 Used for scoping the queryset of Serializer RelatedField which reference a Table
66 """
67 allowed_roles = (Role.MANAGER.value,)
68
69 if request.method.lower() == 'get':
70 allowed_roles = allowed_roles + (Role.EDITOR.value, Role.VIEWER.value)
71 return TableAccessPolicy._scope_queryset(request, qs, allowed_roles)
72
73 @classmethod
74 def scope_viewset_queryset(cls, request, qs):
75 """
76 Used for scoping queryset of the TableViewSet.
77 It is used for listing all the table the user has Viewer access.
78 Restrictions are then applied based on the request method using the Policy statements.
79 This helps us to throw correct error status code instead of a 404 error code
80 """
81 allowed_roles = (Role.MANAGER.value, Role.EDITOR.value, Role.VIEWER.value)
82 return TableAccessPolicy._scope_queryset(request, qs, allowed_roles)
83
84 def is_table_manager(self, request, view, action):
85 # Table access control is based on Schema and Database Roles as of now
86 # TODO Include Table Role based access when Table Roles are introduced
87 table = view.get_object()
88 is_schema_manager = SchemaRole.objects.filter(
89 user=request.user,
90 schema=table.schema,
91 role=Role.MANAGER.value
92 ).exists()
93 is_db_manager = DatabaseRole.objects.filter(
94 user=request.user,
95 database=table.schema.database,
96 role=Role.MANAGER.value
97 ).exists()
98 return is_db_manager or is_schema_manager
99
[end of mathesar/api/db/permissions/table.py]
[start of mathesar/api/serializers/schemas.py]
1 from rest_access_policy import PermittedSlugRelatedField
2 from rest_framework import serializers
3
4 from mathesar.api.db.permissions.database import DatabaseAccessPolicy
5 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
6 from mathesar.models.base import Database, Schema
7
8
9 class SchemaSerializer(MathesarErrorMessageMixin, serializers.HyperlinkedModelSerializer):
10 name = serializers.CharField()
11 # Restrict access to databases with create access.
12 # Unlike PermittedPkRelatedField this field uses a slug instead of an id
13 # Refer https://rsinger86.github.io/drf-access-policy/policy_reuse/
14 database = PermittedSlugRelatedField(
15 access_policy=DatabaseAccessPolicy,
16 slug_field='name',
17 queryset=Database.current_objects.all()
18 )
19 description = serializers.CharField(
20 required=False, allow_blank=True, default=None, allow_null=True
21 )
22 num_tables = serializers.SerializerMethodField()
23 num_queries = serializers.SerializerMethodField()
24
25 class Meta:
26 model = Schema
27 fields = [
28 'id', 'name', 'database', 'has_dependents', 'description',
29 'num_tables', 'num_queries'
30 ]
31
32 def get_num_tables(self, obj):
33 return obj.tables.count()
34
35 def get_num_queries(self, obj):
36 return sum(t.queries.count() for t in obj.tables.all())
37
[end of mathesar/api/serializers/schemas.py]
[start of mathesar/api/db/viewsets/schemas.py]
1 from django_filters import rest_framework as filters
2 from rest_access_policy import AccessViewSetMixin
3 from rest_framework import status, viewsets
4 from rest_framework.decorators import action
5 from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
6 from rest_framework.response import Response
7
8 from mathesar.api.db.permissions.schema import SchemaAccessPolicy
9 from mathesar.api.dj_filters import SchemaFilter
10 from mathesar.api.pagination import DefaultLimitOffsetPagination
11 from mathesar.api.serializers.dependents import DependentSerializer, DependentFilterSerializer
12 from mathesar.api.serializers.schemas import SchemaSerializer
13 from mathesar.models.base import Schema
14 from mathesar.utils.schemas import create_schema_and_object
15 from mathesar.api.exceptions.validation_exceptions.exceptions import EditingPublicSchemaIsDisallowed
16
17
18 class SchemaViewSet(AccessViewSetMixin, viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):
19 serializer_class = SchemaSerializer
20 pagination_class = DefaultLimitOffsetPagination
21 filter_backends = (filters.DjangoFilterBackend,)
22 filterset_class = SchemaFilter
23 access_policy = SchemaAccessPolicy
24
25 def get_queryset(self):
26 qs = Schema.objects.all().order_by('-created_at')
27 return self.access_policy.scope_viewset_queryset(self.request, qs)
28
29 def create(self, request):
30 serializer = SchemaSerializer(data=request.data, context={'request': request})
31 serializer.is_valid(raise_exception=True)
32 database_name = serializer.validated_data['database'].name
33 schema = create_schema_and_object(
34 serializer.validated_data['name'],
35 database_name,
36 comment=serializer.validated_data.get('description')
37 )
38 serializer = SchemaSerializer(schema)
39 return Response(serializer.data, status=status.HTTP_201_CREATED)
40
41 def partial_update(self, request, pk=None):
42 serializer = SchemaSerializer(
43 data=request.data, context={'request': request}, partial=True
44 )
45 serializer.is_valid(raise_exception=True)
46
47 schema = self.get_object()
48
49 # We forbid editing the public schema
50 if schema.name == "public":
51 raise EditingPublicSchemaIsDisallowed()
52
53 schema.update_sa_schema(serializer.validated_data)
54
55 # Reload the schema to avoid cached properties
56 schema = self.get_object()
57 schema.clear_name_cache()
58 serializer = SchemaSerializer(schema, context={'request': request})
59 return Response(serializer.data)
60
61 def destroy(self, request, pk=None):
62 schema = self.get_object()
63 schema.delete_sa_schema()
64 return Response(status=status.HTTP_204_NO_CONTENT)
65
66 @action(methods=['get'], detail=True)
67 def dependents(self, request, pk=None):
68 serializer = DependentFilterSerializer(data=request.GET)
69 serializer.is_valid(raise_exception=True)
70 types_exclude = serializer.validated_data['exclude']
71
72 schema = self.get_object()
73 serializer = DependentSerializer(schema.get_dependents(types_exclude), many=True, context={'request': request})
74 return Response(serializer.data)
75
[end of mathesar/api/db/viewsets/schemas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mathesar/api/db/permissions/table.py b/mathesar/api/db/permissions/table.py
--- a/mathesar/api/db/permissions/table.py
+++ b/mathesar/api/db/permissions/table.py
@@ -49,13 +49,38 @@
@classmethod
def _scope_queryset(cls, request, qs, allowed_roles):
if not (request.user.is_superuser or request.user.is_anonymous):
- permissible_database_role_filter = (
+ db_role_access_filter = (
Q(schema__database__database_role__role__in=allowed_roles)
& Q(schema__database__database_role__user=request.user)
)
+ schema_role_access_filter = (
+ Q(schema__schema_role__role__in=allowed_roles)
+ & Q(schema__schema_role__user=request.user)
+ )
+
+ # Filters to check whether user is a db/schema manager
+ is_database_manager = (
+ Q(schema__database__database_role__role='manager')
+ & Q(schema__database__database_role__user=request.user)
+ )
+ is_schema_manager = (
+ Q(schema__schema_role__role='manager')
+ & Q(schema__schema_role__user=request.user)
+ )
+
+ # Filter for confirmed tables
+ cnf_table_filter = (Q(import_verified=True) | Q(import_verified__isnull=True))
+
+ # Filters for the purpose of including/removing unconfirmed tables based on user's role
+ permissible_database_role_filter = (
+ is_database_manager & Q(import_verified=False)
+ | cnf_table_filter & db_role_access_filter
+ )
permissible_schema_roles_filter = (
- Q(schema__schema_role__role__in=allowed_roles) & Q(schema__schema_role__user=request.user)
+ is_schema_manager & Q(import_verified=False)
+ | cnf_table_filter & schema_role_access_filter
)
+
qs = qs.filter(permissible_database_role_filter | permissible_schema_roles_filter)
return qs
diff --git a/mathesar/api/db/viewsets/schemas.py b/mathesar/api/db/viewsets/schemas.py
--- a/mathesar/api/db/viewsets/schemas.py
+++ b/mathesar/api/db/viewsets/schemas.py
@@ -35,7 +35,7 @@
database_name,
comment=serializer.validated_data.get('description')
)
- serializer = SchemaSerializer(schema)
+ serializer = SchemaSerializer(schema, context={'request': request})
return Response(serializer.data, status=status.HTTP_201_CREATED)
def partial_update(self, request, pk=None):
diff --git a/mathesar/api/serializers/schemas.py b/mathesar/api/serializers/schemas.py
--- a/mathesar/api/serializers/schemas.py
+++ b/mathesar/api/serializers/schemas.py
@@ -1,9 +1,11 @@
from rest_access_policy import PermittedSlugRelatedField
from rest_framework import serializers
+from mathesar.api.db.permissions.table import TableAccessPolicy
+
from mathesar.api.db.permissions.database import DatabaseAccessPolicy
from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
-from mathesar.models.base import Database, Schema
+from mathesar.models.base import Database, Schema, Table
class SchemaSerializer(MathesarErrorMessageMixin, serializers.HyperlinkedModelSerializer):
@@ -30,7 +32,9 @@
]
def get_num_tables(self, obj):
- return obj.tables.count()
+ qs = Table.objects.filter(schema=obj)
+ count = TableAccessPolicy.scope_queryset(self.context['request'], qs).count()
+ return count
def get_num_queries(self, obj):
return sum(t.queries.count() for t in obj.tables.all())
|
{"golden_diff": "diff --git a/mathesar/api/db/permissions/table.py b/mathesar/api/db/permissions/table.py\n--- a/mathesar/api/db/permissions/table.py\n+++ b/mathesar/api/db/permissions/table.py\n@@ -49,13 +49,38 @@\n @classmethod\n def _scope_queryset(cls, request, qs, allowed_roles):\n if not (request.user.is_superuser or request.user.is_anonymous):\n- permissible_database_role_filter = (\n+ db_role_access_filter = (\n Q(schema__database__database_role__role__in=allowed_roles)\n & Q(schema__database__database_role__user=request.user)\n )\n+ schema_role_access_filter = (\n+ Q(schema__schema_role__role__in=allowed_roles)\n+ & Q(schema__schema_role__user=request.user)\n+ )\n+\n+ # Filters to check whether user is a db/schema manager\n+ is_database_manager = (\n+ Q(schema__database__database_role__role='manager')\n+ & Q(schema__database__database_role__user=request.user)\n+ )\n+ is_schema_manager = (\n+ Q(schema__schema_role__role='manager')\n+ & Q(schema__schema_role__user=request.user)\n+ )\n+\n+ # Filter for confirmed tables\n+ cnf_table_filter = (Q(import_verified=True) | Q(import_verified__isnull=True))\n+\n+ # Filters for the purpose of including/removing unconfirmed tables based on user's role\n+ permissible_database_role_filter = (\n+ is_database_manager & Q(import_verified=False)\n+ | cnf_table_filter & db_role_access_filter\n+ )\n permissible_schema_roles_filter = (\n- Q(schema__schema_role__role__in=allowed_roles) & Q(schema__schema_role__user=request.user)\n+ is_schema_manager & Q(import_verified=False)\n+ | cnf_table_filter & schema_role_access_filter\n )\n+\n qs = qs.filter(permissible_database_role_filter | permissible_schema_roles_filter)\n return qs\n \ndiff --git a/mathesar/api/db/viewsets/schemas.py b/mathesar/api/db/viewsets/schemas.py\n--- a/mathesar/api/db/viewsets/schemas.py\n+++ b/mathesar/api/db/viewsets/schemas.py\n@@ -35,7 +35,7 @@\n database_name,\n comment=serializer.validated_data.get('description')\n )\n- serializer = SchemaSerializer(schema)\n+ serializer = SchemaSerializer(schema, context={'request': request})\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n \n def partial_update(self, request, pk=None):\ndiff --git a/mathesar/api/serializers/schemas.py b/mathesar/api/serializers/schemas.py\n--- a/mathesar/api/serializers/schemas.py\n+++ b/mathesar/api/serializers/schemas.py\n@@ -1,9 +1,11 @@\n from rest_access_policy import PermittedSlugRelatedField\n from rest_framework import serializers\n \n+from mathesar.api.db.permissions.table import TableAccessPolicy\n+\n from mathesar.api.db.permissions.database import DatabaseAccessPolicy\n from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\n-from mathesar.models.base import Database, Schema\n+from mathesar.models.base import Database, Schema, Table\n \n \n class SchemaSerializer(MathesarErrorMessageMixin, serializers.HyperlinkedModelSerializer):\n@@ -30,7 +32,9 @@\n ]\n \n def get_num_tables(self, obj):\n- return obj.tables.count()\n+ qs = Table.objects.filter(schema=obj)\n+ count = TableAccessPolicy.scope_queryset(self.context['request'], qs).count()\n+ return count\n \n def get_num_queries(self, obj):\n return sum(t.queries.count() for t in obj.tables.all())\n", "issue": "Do not include unconfirmed tables for users with viewer & editor access\n## Description\r\n1. GET requests to `/api/db/v0/tables/`, `/api/db/v0/tables/<table_id>` and `common_data.tables` currently include unconfirmed tables for all users.\r\n2. 
Request to `/api/db/v0/schemas/` and `/api/db/v0/schemas/<schema_id>/` have count for `num_tables` which contains count of all tables including ones that are not confirmed.\r\n\r\nConfirmation can currently only be performed by a schema manager or a db manager. Users with viewer and editor access cannot should not be able to see tables that aren't confirmed yet.\r\n\r\nThe UI currently hides unconfirmed tables for case (1), but this should happen at an API level. The count issue mentioned in (2) however, is currently visible on the UI leading to inconsistent count being shown between DB page and schema page.\r\n\r\n## Expected behavior\r\n* Endpoints and common_data that contains tables should not include unconfirmed tables for users with viewer and editor access. i.e. Permission check should take `import_verified` into account. This includes:\r\n - Table list\r\n - Table get\r\n - common_data.tables\r\n* `schema.num_tables` should also take this into account.\n", "before_files": [{"content": "from django.db.models import Q\nfrom rest_access_policy import AccessPolicy\n\nfrom mathesar.models.users import DatabaseRole, Role, SchemaRole\n\n\nclass TableAccessPolicy(AccessPolicy):\n \"\"\"\n Anyone can view Table as long as they have\n at least a Viewer access to the schema or its database\n Create Access is restricted to superusers or managers of the schema or the database the table is part of.\n Only superuser or schema/database manager can delete/modify/update the Table\n \"\"\"\n\n statements = [\n {\n # Restrictions for the create method is done by the Serializers when creating the schema,\n # As the permissions depend on the database object.\n 'action': [\n 'list',\n 'retrieve',\n 'create',\n 'type_suggestions',\n 'dependents',\n 'ui_dependents',\n 'joinable_tables',\n ],\n 'principal': 'authenticated',\n 'effect': 'allow',\n },\n\n {\n 'action': [\n 'destroy',\n 'update',\n 'partial_update',\n 'split_table',\n 'move_columns',\n 'previews',\n 'existing_import',\n 'map_imported_columns'\n ],\n 'principal': 'authenticated',\n 'effect': 'allow',\n 'condition_expression': ['(is_superuser or is_table_manager)']\n },\n ]\n\n @classmethod\n def _scope_queryset(cls, request, qs, allowed_roles):\n if not (request.user.is_superuser or request.user.is_anonymous):\n permissible_database_role_filter = (\n Q(schema__database__database_role__role__in=allowed_roles)\n & Q(schema__database__database_role__user=request.user)\n )\n permissible_schema_roles_filter = (\n Q(schema__schema_role__role__in=allowed_roles) & Q(schema__schema_role__user=request.user)\n )\n qs = qs.filter(permissible_database_role_filter | permissible_schema_roles_filter)\n return qs\n\n @classmethod\n def scope_queryset(cls, request, qs):\n \"\"\"\n Used for scoping the queryset of Serializer RelatedField which reference a Table\n \"\"\"\n allowed_roles = (Role.MANAGER.value,)\n\n if request.method.lower() == 'get':\n allowed_roles = allowed_roles + (Role.EDITOR.value, Role.VIEWER.value)\n return TableAccessPolicy._scope_queryset(request, qs, allowed_roles)\n\n @classmethod\n def scope_viewset_queryset(cls, request, qs):\n \"\"\"\n Used for scoping queryset of the TableViewSet.\n It is used for listing all the table the user has Viewer access.\n Restrictions are then applied based on the request method using the Policy statements.\n This helps us to throw correct error status code instead of a 404 error code\n \"\"\"\n allowed_roles = (Role.MANAGER.value, Role.EDITOR.value, Role.VIEWER.value)\n return 
TableAccessPolicy._scope_queryset(request, qs, allowed_roles)\n\n def is_table_manager(self, request, view, action):\n # Table access control is based on Schema and Database Roles as of now\n # TODO Include Table Role based access when Table Roles are introduced\n table = view.get_object()\n is_schema_manager = SchemaRole.objects.filter(\n user=request.user,\n schema=table.schema,\n role=Role.MANAGER.value\n ).exists()\n is_db_manager = DatabaseRole.objects.filter(\n user=request.user,\n database=table.schema.database,\n role=Role.MANAGER.value\n ).exists()\n return is_db_manager or is_schema_manager\n", "path": "mathesar/api/db/permissions/table.py"}, {"content": "from rest_access_policy import PermittedSlugRelatedField\nfrom rest_framework import serializers\n\nfrom mathesar.api.db.permissions.database import DatabaseAccessPolicy\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.models.base import Database, Schema\n\n\nclass SchemaSerializer(MathesarErrorMessageMixin, serializers.HyperlinkedModelSerializer):\n name = serializers.CharField()\n # Restrict access to databases with create access.\n # Unlike PermittedPkRelatedField this field uses a slug instead of an id\n # Refer https://rsinger86.github.io/drf-access-policy/policy_reuse/\n database = PermittedSlugRelatedField(\n access_policy=DatabaseAccessPolicy,\n slug_field='name',\n queryset=Database.current_objects.all()\n )\n description = serializers.CharField(\n required=False, allow_blank=True, default=None, allow_null=True\n )\n num_tables = serializers.SerializerMethodField()\n num_queries = serializers.SerializerMethodField()\n\n class Meta:\n model = Schema\n fields = [\n 'id', 'name', 'database', 'has_dependents', 'description',\n 'num_tables', 'num_queries'\n ]\n\n def get_num_tables(self, obj):\n return obj.tables.count()\n\n def get_num_queries(self, obj):\n return sum(t.queries.count() for t in obj.tables.all())\n", "path": "mathesar/api/serializers/schemas.py"}, {"content": "from django_filters import rest_framework as filters\nfrom rest_access_policy import AccessViewSetMixin\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\nfrom rest_framework.response import Response\n\nfrom mathesar.api.db.permissions.schema import SchemaAccessPolicy\nfrom mathesar.api.dj_filters import SchemaFilter\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.dependents import DependentSerializer, DependentFilterSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.models.base import Schema\nfrom mathesar.utils.schemas import create_schema_and_object\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import EditingPublicSchemaIsDisallowed\n\n\nclass SchemaViewSet(AccessViewSetMixin, viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n serializer_class = SchemaSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = SchemaFilter\n access_policy = SchemaAccessPolicy\n\n def get_queryset(self):\n qs = Schema.objects.all().order_by('-created_at')\n return self.access_policy.scope_viewset_queryset(self.request, qs)\n\n def create(self, request):\n serializer = SchemaSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n database_name = serializer.validated_data['database'].name\n 
schema = create_schema_and_object(\n serializer.validated_data['name'],\n database_name,\n comment=serializer.validated_data.get('description')\n )\n serializer = SchemaSerializer(schema)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None):\n serializer = SchemaSerializer(\n data=request.data, context={'request': request}, partial=True\n )\n serializer.is_valid(raise_exception=True)\n\n schema = self.get_object()\n\n # We forbid editing the public schema\n if schema.name == \"public\":\n raise EditingPublicSchemaIsDisallowed()\n\n schema.update_sa_schema(serializer.validated_data)\n\n # Reload the schema to avoid cached properties\n schema = self.get_object()\n schema.clear_name_cache()\n serializer = SchemaSerializer(schema, context={'request': request})\n return Response(serializer.data)\n\n def destroy(self, request, pk=None):\n schema = self.get_object()\n schema.delete_sa_schema()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(methods=['get'], detail=True)\n def dependents(self, request, pk=None):\n serializer = DependentFilterSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n types_exclude = serializer.validated_data['exclude']\n\n schema = self.get_object()\n serializer = DependentSerializer(schema.get_dependents(types_exclude), many=True, context={'request': request})\n return Response(serializer.data)\n", "path": "mathesar/api/db/viewsets/schemas.py"}]}
| 2,896 | 802 |
gh_patches_debug_22884
|
rasdani/github-patches
|
git_diff
|
HypothesisWorks__hypothesis-601
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Changelog doesn't check for date for *this* release
https://github.com/HypothesisWorks/hypothesis-python/pull/580 is currently passing its build but shouldn't be because its date in the changelog says TBD. However the current date was present for *another* release.
The changelog check should check for the current date occurring on the same line as the current version number.
</issue>
<code>
[start of scripts/check-changelog.py]
1 #!/usr/bin/env python
2
3 # coding=utf-8
4 #
5 # This file is part of Hypothesis, which may be found at
6 # https://github.com/HypothesisWorks/hypothesis-python
7 #
8 # Most of this work is copyright (C) 2013-2017 David R. MacIver
9 # ([email protected]), but it contains contributions by others. See
10 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
11 # consult the git log if you need to determine who owns an individual
12 # contribution.
13 #
14 # This Source Code Form is subject to the terms of the Mozilla Public License,
15 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
16 # obtain one at http://mozilla.org/MPL/2.0/.
17 #
18 # END HEADER
19
20 from __future__ import division, print_function, absolute_import
21
22 import os
23 import sys
24 from datetime import datetime, timedelta
25
26 import hypothesistooling as tools
27
28 sys.path.append(os.path.dirname(__file__)) # noqa
29
30
31 if __name__ == '__main__':
32
33 if not tools.has_source_changes():
34 print('No source changes found')
35 sys.exit(0)
36
37 changelog = tools.changelog()
38
39 if '\n%s - ' % (tools.__version__,) not in changelog:
40 print(
41 'The current version (%s) isn\'t mentioned in the changelog' % (
42 tools.__version__,))
43 sys.exit(1)
44
45 now = datetime.utcnow()
46
47 hour = timedelta(hours=1)
48
49 acceptable_dates = {
50 d.strftime('%Y-%m-%d')
51 for d in (now, now + hour, now - hour)
52 }
53
54 when = ' or '.join(sorted(acceptable_dates))
55
56 if not any(d in changelog for d in acceptable_dates):
57 print((
58 'The current date (%s) isn\'t mentioned in the changelog. '
59 'Remember this will be released as soon as you merge to master!'
60 ) % (when,))
61 sys.exit(1)
62
[end of scripts/check-changelog.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/check-changelog.py b/scripts/check-changelog.py
--- a/scripts/check-changelog.py
+++ b/scripts/check-changelog.py
@@ -34,28 +34,18 @@
print('No source changes found')
sys.exit(0)
- changelog = tools.changelog()
-
- if '\n%s - ' % (tools.__version__,) not in changelog:
- print(
- 'The current version (%s) isn\'t mentioned in the changelog' % (
- tools.__version__,))
- sys.exit(1)
-
now = datetime.utcnow()
-
hour = timedelta(hours=1)
-
- acceptable_dates = {
- d.strftime('%Y-%m-%d')
+ acceptable_lines = sorted(set(
+ '{} - {}'.format(tools.__version__, d.strftime('%Y-%m-%d'))
for d in (now, now + hour, now - hour)
- }
-
- when = ' or '.join(sorted(acceptable_dates))
-
- if not any(d in changelog for d in acceptable_dates):
- print((
- 'The current date (%s) isn\'t mentioned in the changelog. '
- 'Remember this will be released as soon as you merge to master!'
- ) % (when,))
+ ))
+
+ for line in tools.changelog().split('\n'):
+ if line.strip() in acceptable_lines:
+ break
+ else:
+ print('No line with version and current date (%s) in the changelog. '
+ 'Remember this will be released as soon as you merge to master!'
+ % ' or '.join(repr(line) for line in acceptable_lines))
sys.exit(1)
|
{"golden_diff": "diff --git a/scripts/check-changelog.py b/scripts/check-changelog.py\n--- a/scripts/check-changelog.py\n+++ b/scripts/check-changelog.py\n@@ -34,28 +34,18 @@\n print('No source changes found')\n sys.exit(0)\n \n- changelog = tools.changelog()\n-\n- if '\\n%s - ' % (tools.__version__,) not in changelog:\n- print(\n- 'The current version (%s) isn\\'t mentioned in the changelog' % (\n- tools.__version__,))\n- sys.exit(1)\n-\n now = datetime.utcnow()\n-\n hour = timedelta(hours=1)\n-\n- acceptable_dates = {\n- d.strftime('%Y-%m-%d')\n+ acceptable_lines = sorted(set(\n+ '{} - {}'.format(tools.__version__, d.strftime('%Y-%m-%d'))\n for d in (now, now + hour, now - hour)\n- }\n-\n- when = ' or '.join(sorted(acceptable_dates))\n-\n- if not any(d in changelog for d in acceptable_dates):\n- print((\n- 'The current date (%s) isn\\'t mentioned in the changelog. '\n- 'Remember this will be released as soon as you merge to master!'\n- ) % (when,))\n+ ))\n+\n+ for line in tools.changelog().split('\\n'):\n+ if line.strip() in acceptable_lines:\n+ break\n+ else:\n+ print('No line with version and current date (%s) in the changelog. '\n+ 'Remember this will be released as soon as you merge to master!'\n+ % ' or '.join(repr(line) for line in acceptable_lines))\n sys.exit(1)\n", "issue": "Changelog doesn't check for date for *this* release\nhttps://github.com/HypothesisWorks/hypothesis-python/pull/580 is currently passing its build but shouldn't be because its date in the changelog says TBD. However the current date was present for *another* release.\r\n\r\nThe changelog check should check for the current date occurring on the same line as the current version number.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2017 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\nfrom datetime import datetime, timedelta\n\nimport hypothesistooling as tools\n\nsys.path.append(os.path.dirname(__file__)) # noqa\n\n\nif __name__ == '__main__':\n\n if not tools.has_source_changes():\n print('No source changes found')\n sys.exit(0)\n\n changelog = tools.changelog()\n\n if '\\n%s - ' % (tools.__version__,) not in changelog:\n print(\n 'The current version (%s) isn\\'t mentioned in the changelog' % (\n tools.__version__,))\n sys.exit(1)\n\n now = datetime.utcnow()\n\n hour = timedelta(hours=1)\n\n acceptable_dates = {\n d.strftime('%Y-%m-%d')\n for d in (now, now + hour, now - hour)\n }\n\n when = ' or '.join(sorted(acceptable_dates))\n\n if not any(d in changelog for d in acceptable_dates):\n print((\n 'The current date (%s) isn\\'t mentioned in the changelog. '\n 'Remember this will be released as soon as you merge to master!'\n ) % (when,))\n sys.exit(1)\n", "path": "scripts/check-changelog.py"}]}
| 1,195 | 379 |
gh_patches_debug_1746
|
rasdani/github-patches
|
git_diff
|
beeware__toga-998
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Have tutorial 3 open up pybee.org by default
**Is your feature request related to a problem? Please describe.**
https://toga.readthedocs.io/en/latest/tutorial/tutorial-3.html shows a screenshot of pybee.org being opened, but the code defaults to github.com.
**Describe the solution you'd like**
Have the code direct to pybee.org by default.
**Describe alternatives you've considered**
An updated screenshot of the tutorial to show github.com.
</issue>
<code>
[start of examples/tutorial3/tutorial/app.py]
1 import toga
2 from toga.style.pack import CENTER, COLUMN, ROW, Pack
3
4
5 class Graze(toga.App):
6 def startup(self):
7 self.main_window = toga.MainWindow(title=self.name)
8
9 self.webview = toga.WebView(style=Pack(flex=1))
10 self.url_input = toga.TextInput(
11 initial='https://github.com/',
12 style=Pack(flex=1)
13 )
14
15 box = toga.Box(
16 children=[
17 toga.Box(
18 children=[
19 self.url_input,
20 toga.Button('Go', on_press=self.load_page, style=Pack(width=50, padding_left=5)),
21 ],
22 style=Pack(
23 direction=ROW,
24 alignment=CENTER,
25 padding=5,
26 )
27 ),
28 self.webview,
29 ],
30 style=Pack(
31 direction=COLUMN
32 )
33 )
34
35 self.main_window.content = box
36 self.webview.url = self.url_input.value
37
38 # Show the main window
39 self.main_window.show()
40
41 def load_page(self, widget):
42 self.webview.url = self.url_input.value
43
44
45 def main():
46 return Graze('Graze', 'org.beeware.graze')
47
48
49 if __name__ == '__main__':
50 main().main_loop()
51
[end of examples/tutorial3/tutorial/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/tutorial3/tutorial/app.py b/examples/tutorial3/tutorial/app.py
--- a/examples/tutorial3/tutorial/app.py
+++ b/examples/tutorial3/tutorial/app.py
@@ -8,7 +8,7 @@
self.webview = toga.WebView(style=Pack(flex=1))
self.url_input = toga.TextInput(
- initial='https://github.com/',
+ initial='https://beeware.org/',
style=Pack(flex=1)
)
|
{"golden_diff": "diff --git a/examples/tutorial3/tutorial/app.py b/examples/tutorial3/tutorial/app.py\n--- a/examples/tutorial3/tutorial/app.py\n+++ b/examples/tutorial3/tutorial/app.py\n@@ -8,7 +8,7 @@\n \n self.webview = toga.WebView(style=Pack(flex=1))\n self.url_input = toga.TextInput(\n- initial='https://github.com/',\n+ initial='https://beeware.org/',\n style=Pack(flex=1)\n )\n", "issue": "Have tutorial 3 open up pybee.org by default\n**Is your feature request related to a problem? Please describe.**\r\nhttps://toga.readthedocs.io/en/latest/tutorial/tutorial-3.html shows a screenshot of pybee.org being opened, but the code defaults to github.com.\r\n\r\n**Describe the solution you'd like**\r\nHave the code direct to pybee.org by default.\r\n\r\n**Describe alternatives you've considered**\r\nAn updated screenshot of the tutorial to show github.com.\r\n\n", "before_files": [{"content": "import toga\nfrom toga.style.pack import CENTER, COLUMN, ROW, Pack\n\n\nclass Graze(toga.App):\n def startup(self):\n self.main_window = toga.MainWindow(title=self.name)\n\n self.webview = toga.WebView(style=Pack(flex=1))\n self.url_input = toga.TextInput(\n initial='https://github.com/',\n style=Pack(flex=1)\n )\n\n box = toga.Box(\n children=[\n toga.Box(\n children=[\n self.url_input,\n toga.Button('Go', on_press=self.load_page, style=Pack(width=50, padding_left=5)),\n ],\n style=Pack(\n direction=ROW,\n alignment=CENTER,\n padding=5,\n )\n ),\n self.webview,\n ],\n style=Pack(\n direction=COLUMN\n )\n )\n\n self.main_window.content = box\n self.webview.url = self.url_input.value\n\n # Show the main window\n self.main_window.show()\n\n def load_page(self, widget):\n self.webview.url = self.url_input.value\n\n\ndef main():\n return Graze('Graze', 'org.beeware.graze')\n\n\nif __name__ == '__main__':\n main().main_loop()\n", "path": "examples/tutorial3/tutorial/app.py"}]}
| 1,007 | 103 |