| problem_id (string, 18-22 chars) | source (string, 1 value) | task_type (string, 1 value) | in_source_id (string, 13-58 chars) | prompt (string, 1.71k-18.9k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 465-23.6k chars) | num_tokens_prompt (int64, 556-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_22879
|
rasdani/github-patches
|
git_diff
|
techmatters__terraso-backend-1223
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add ability to change user password from Django admin
## Description
Add ability to change user password from Django admin.
</issue>
<code>
[start of terraso_backend/apps/core/admin.py]
1 # Copyright © 2021-2023 Technology Matters
2 #
3 # This program is free software: you can redistribute it and/or modify
4 # it under the terms of the GNU Affero General Public License as published
5 # by the Free Software Foundation, either version 3 of the License, or
6 # (at your option) any later version.
7 #
8 # This program is distributed in the hope that it will be useful,
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # GNU Affero General Public License for more details.
12 #
13 # You should have received a copy of the GNU Affero General Public License
14 # along with this program. If not, see https://www.gnu.org/licenses/.
15
16 from django.contrib import admin
17
18 from .models import (
19 Group,
20 Landscape,
21 LandscapeDevelopmentStrategy,
22 LandscapeGroup,
23 SharedResource,
24 TaxonomyTerm,
25 User,
26 UserPreference,
27 )
28
29
30 @admin.register(Group)
31 class GroupAdmin(admin.ModelAdmin):
32 list_display = ("name", "slug", "website", "created_at")
33
34 def get_queryset(self, request):
35 qs = super().get_queryset(request)
36 return qs.exclude(associated_landscapes__is_default_landscape_group=True)
37
38
39 @admin.register(Landscape)
40 class LandscapeAdmin(admin.ModelAdmin):
41 list_display = ("name", "slug", "location", "website", "created_at")
42 raw_id_fields = ("membership_list",)
43
44
45 class LandscapeDefaultGroup(Group):
46 class Meta:
47 proxy = True
48
49
50 @admin.register(LandscapeGroup)
51 class LandscapeGroupAdmin(admin.ModelAdmin):
52 list_display = ("landscape", "group")
53
54
55 class UserPreferenceInline(admin.TabularInline):
56 model = UserPreference
57
58
59 @admin.register(User)
60 class UserAdmin(admin.ModelAdmin):
61 list_display = ("email", "first_name", "last_name", "created_at", "is_staff")
62 inlines = [UserPreferenceInline]
63
64
65 @admin.register(TaxonomyTerm)
66 class TaxonomyTermAdmin(admin.ModelAdmin):
67 list_display = ("value_original", "type", "value_en", "value_es")
68
69
70 @admin.register(LandscapeDevelopmentStrategy)
71 class LandscapeDevelopmentStrategyAdmin(admin.ModelAdmin):
72 list_display = ("id", "landscape")
73
74
75 @admin.register(SharedResource)
76 class SharedResourceAdmin(admin.ModelAdmin):
77 list_display = ("id", "share_uuid", "share_access")
78
[end of terraso_backend/apps/core/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/terraso_backend/apps/core/admin.py b/terraso_backend/apps/core/admin.py
--- a/terraso_backend/apps/core/admin.py
+++ b/terraso_backend/apps/core/admin.py
@@ -14,6 +14,7 @@
# along with this program. If not, see https://www.gnu.org/licenses/.
from django.contrib import admin
+from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from .models import (
Group,
@@ -57,9 +58,28 @@
@admin.register(User)
-class UserAdmin(admin.ModelAdmin):
+class UserAdmin(DjangoUserAdmin):
+ ordering = ("email",)
list_display = ("email", "first_name", "last_name", "created_at", "is_staff")
+ search_fields = ("email", "first_name", "last_name")
inlines = [UserPreferenceInline]
+ fieldsets = (
+ (None, {"fields": ("email", "password")}),
+ ("Personal info", {"fields": ("first_name", "last_name")}),
+ (
+ "Permissions",
+ {
+ "fields": (
+ "is_active",
+ "is_staff",
+ "is_superuser",
+ "groups",
+ "user_permissions",
+ ),
+ },
+ ),
+ ("Important dates", {"fields": ("last_login", "date_joined")}),
+ )
@admin.register(TaxonomyTerm)
|
{"golden_diff": "diff --git a/terraso_backend/apps/core/admin.py b/terraso_backend/apps/core/admin.py\n--- a/terraso_backend/apps/core/admin.py\n+++ b/terraso_backend/apps/core/admin.py\n@@ -14,6 +14,7 @@\n # along with this program. If not, see https://www.gnu.org/licenses/.\n \n from django.contrib import admin\n+from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin\n \n from .models import (\n Group,\n@@ -57,9 +58,28 @@\n \n \n @admin.register(User)\n-class UserAdmin(admin.ModelAdmin):\n+class UserAdmin(DjangoUserAdmin):\n+ ordering = (\"email\",)\n list_display = (\"email\", \"first_name\", \"last_name\", \"created_at\", \"is_staff\")\n+ search_fields = (\"email\", \"first_name\", \"last_name\")\n inlines = [UserPreferenceInline]\n+ fieldsets = (\n+ (None, {\"fields\": (\"email\", \"password\")}),\n+ (\"Personal info\", {\"fields\": (\"first_name\", \"last_name\")}),\n+ (\n+ \"Permissions\",\n+ {\n+ \"fields\": (\n+ \"is_active\",\n+ \"is_staff\",\n+ \"is_superuser\",\n+ \"groups\",\n+ \"user_permissions\",\n+ ),\n+ },\n+ ),\n+ (\"Important dates\", {\"fields\": (\"last_login\", \"date_joined\")}),\n+ )\n \n \n @admin.register(TaxonomyTerm)\n", "issue": "Add ability to change user password from Django admin\n## Description\r\nAdd ability to change user password from Django admin.\n", "before_files": [{"content": "# Copyright \u00a9 2021-2023 Technology Matters\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see https://www.gnu.org/licenses/.\n\nfrom django.contrib import admin\n\nfrom .models import (\n Group,\n Landscape,\n LandscapeDevelopmentStrategy,\n LandscapeGroup,\n SharedResource,\n TaxonomyTerm,\n User,\n UserPreference,\n)\n\n\[email protected](Group)\nclass GroupAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"slug\", \"website\", \"created_at\")\n\n def get_queryset(self, request):\n qs = super().get_queryset(request)\n return qs.exclude(associated_landscapes__is_default_landscape_group=True)\n\n\[email protected](Landscape)\nclass LandscapeAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"slug\", \"location\", \"website\", \"created_at\")\n raw_id_fields = (\"membership_list\",)\n\n\nclass LandscapeDefaultGroup(Group):\n class Meta:\n proxy = True\n\n\[email protected](LandscapeGroup)\nclass LandscapeGroupAdmin(admin.ModelAdmin):\n list_display = (\"landscape\", \"group\")\n\n\nclass UserPreferenceInline(admin.TabularInline):\n model = UserPreference\n\n\[email protected](User)\nclass UserAdmin(admin.ModelAdmin):\n list_display = (\"email\", \"first_name\", \"last_name\", \"created_at\", \"is_staff\")\n inlines = [UserPreferenceInline]\n\n\[email protected](TaxonomyTerm)\nclass TaxonomyTermAdmin(admin.ModelAdmin):\n list_display = (\"value_original\", \"type\", \"value_en\", \"value_es\")\n\n\[email protected](LandscapeDevelopmentStrategy)\nclass LandscapeDevelopmentStrategyAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"landscape\")\n\n\[email protected](SharedResource)\nclass SharedResourceAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"share_uuid\", \"share_access\")\n", "path": "terraso_backend/apps/core/admin.py"}]}
| 1,221 | 321 |
gh_patches_debug_26602
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-1263
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build Image Missing Arguments
The build image function is missing some arguments that are present in the v1.24 api.
- shmsize - Size of /dev/shm in bytes. The size must be greater than 0. If omitted the system uses 64MB.
- labels – JSON map of string pairs for labels to set on the image.
See: https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/build-image-from-a-dockerfile
</issue>
<code>
[start of docker/api/build.py]
1 import logging
2 import os
3 import re
4 import json
5
6 from .. import constants
7 from .. import errors
8 from .. import auth
9 from .. import utils
10
11
12 log = logging.getLogger(__name__)
13
14
15 class BuildApiMixin(object):
16 def build(self, path=None, tag=None, quiet=False, fileobj=None,
17 nocache=False, rm=False, stream=False, timeout=None,
18 custom_context=False, encoding=None, pull=False,
19 forcerm=False, dockerfile=None, container_limits=None,
20 decode=False, buildargs=None, gzip=False):
21 remote = context = None
22 headers = {}
23 container_limits = container_limits or {}
24 if path is None and fileobj is None:
25 raise TypeError("Either path or fileobj needs to be provided.")
26 if gzip and encoding is not None:
27 raise errors.DockerException(
28 'Can not use custom encoding if gzip is enabled'
29 )
30
31 for key in container_limits.keys():
32 if key not in constants.CONTAINER_LIMITS_KEYS:
33 raise errors.DockerException(
34 'Invalid container_limits key {0}'.format(key)
35 )
36
37 if custom_context:
38 if not fileobj:
39 raise TypeError("You must specify fileobj with custom_context")
40 context = fileobj
41 elif fileobj is not None:
42 context = utils.mkbuildcontext(fileobj)
43 elif path.startswith(('http://', 'https://',
44 'git://', 'github.com/', 'git@')):
45 remote = path
46 elif not os.path.isdir(path):
47 raise TypeError("You must specify a directory to build in path")
48 else:
49 dockerignore = os.path.join(path, '.dockerignore')
50 exclude = None
51 if os.path.exists(dockerignore):
52 with open(dockerignore, 'r') as f:
53 exclude = list(filter(bool, f.read().splitlines()))
54 context = utils.tar(
55 path, exclude=exclude, dockerfile=dockerfile, gzip=gzip
56 )
57 encoding = 'gzip' if gzip else encoding
58
59 if utils.compare_version('1.8', self._version) >= 0:
60 stream = True
61
62 if dockerfile and utils.compare_version('1.17', self._version) < 0:
63 raise errors.InvalidVersion(
64 'dockerfile was only introduced in API version 1.17'
65 )
66
67 if utils.compare_version('1.19', self._version) < 0:
68 pull = 1 if pull else 0
69
70 u = self._url('/build')
71 params = {
72 't': tag,
73 'remote': remote,
74 'q': quiet,
75 'nocache': nocache,
76 'rm': rm,
77 'forcerm': forcerm,
78 'pull': pull,
79 'dockerfile': dockerfile,
80 }
81 params.update(container_limits)
82
83 if buildargs:
84 if utils.version_gte(self._version, '1.21'):
85 params.update({'buildargs': json.dumps(buildargs)})
86 else:
87 raise errors.InvalidVersion(
88 'buildargs was only introduced in API version 1.21'
89 )
90
91 if context is not None:
92 headers = {'Content-Type': 'application/tar'}
93 if encoding:
94 headers['Content-Encoding'] = encoding
95
96 if utils.compare_version('1.9', self._version) >= 0:
97 self._set_auth_headers(headers)
98
99 response = self._post(
100 u,
101 data=context,
102 params=params,
103 headers=headers,
104 stream=stream,
105 timeout=timeout,
106 )
107
108 if context is not None and not custom_context:
109 context.close()
110
111 if stream:
112 return self._stream_helper(response, decode=decode)
113 else:
114 output = self._result(response)
115 srch = r'Successfully built ([0-9a-f]+)'
116 match = re.search(srch, output)
117 if not match:
118 return None, output
119 return match.group(1), output
120
121 def _set_auth_headers(self, headers):
122 log.debug('Looking for auth config')
123
124 # If we don't have any auth data so far, try reloading the config
125 # file one more time in case anything showed up in there.
126 if not self._auth_configs:
127 log.debug("No auth config in memory - loading from filesystem")
128 self._auth_configs = auth.load_config()
129
130 # Send the full auth configuration (if any exists), since the build
131 # could use any (or all) of the registries.
132 if self._auth_configs:
133 log.debug(
134 'Sending auth config ({0})'.format(
135 ', '.join(repr(k) for k in self._auth_configs.keys())
136 )
137 )
138
139 if utils.compare_version('1.19', self._version) >= 0:
140 headers['X-Registry-Config'] = auth.encode_header(
141 self._auth_configs
142 )
143 else:
144 headers['X-Registry-Config'] = auth.encode_header({
145 'configs': self._auth_configs
146 })
147 else:
148 log.debug('No auth config found')
149
[end of docker/api/build.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docker/api/build.py b/docker/api/build.py
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -17,7 +17,8 @@
nocache=False, rm=False, stream=False, timeout=None,
custom_context=False, encoding=None, pull=False,
forcerm=False, dockerfile=None, container_limits=None,
- decode=False, buildargs=None, gzip=False):
+ decode=False, buildargs=None, gzip=False, shmsize=None,
+ labels=None):
remote = context = None
headers = {}
container_limits = container_limits or {}
@@ -88,6 +89,22 @@
'buildargs was only introduced in API version 1.21'
)
+ if shmsize:
+ if utils.version_gte(self._version, '1.22'):
+ params.update({'shmsize': shmsize})
+ else:
+ raise errors.InvalidVersion(
+ 'shmsize was only introduced in API version 1.22'
+ )
+
+ if labels:
+ if utils.version_gte(self._version, '1.23'):
+ params.update({'labels': json.dumps(labels)})
+ else:
+ raise errors.InvalidVersion(
+ 'labels was only introduced in API version 1.23'
+ )
+
if context is not None:
headers = {'Content-Type': 'application/tar'}
if encoding:
|
{"golden_diff": "diff --git a/docker/api/build.py b/docker/api/build.py\n--- a/docker/api/build.py\n+++ b/docker/api/build.py\n@@ -17,7 +17,8 @@\n nocache=False, rm=False, stream=False, timeout=None,\n custom_context=False, encoding=None, pull=False,\n forcerm=False, dockerfile=None, container_limits=None,\n- decode=False, buildargs=None, gzip=False):\n+ decode=False, buildargs=None, gzip=False, shmsize=None,\n+ labels=None):\n remote = context = None\n headers = {}\n container_limits = container_limits or {}\n@@ -88,6 +89,22 @@\n 'buildargs was only introduced in API version 1.21'\n )\n \n+ if shmsize:\n+ if utils.version_gte(self._version, '1.22'):\n+ params.update({'shmsize': shmsize})\n+ else:\n+ raise errors.InvalidVersion(\n+ 'shmsize was only introduced in API version 1.22'\n+ )\n+\n+ if labels:\n+ if utils.version_gte(self._version, '1.23'):\n+ params.update({'labels': json.dumps(labels)})\n+ else:\n+ raise errors.InvalidVersion(\n+ 'labels was only introduced in API version 1.23'\n+ )\n+\n if context is not None:\n headers = {'Content-Type': 'application/tar'}\n if encoding:\n", "issue": "Build Image Missing Arguments\nThe build image function is missing some arguments that are present in the v1.24 api.\n- shmsize - Size of /dev/shm in bytes. The size must be greater than 0. If omitted the system uses 64MB.\n- labels \u2013 JSON map of string pairs for labels to set on the image.\n\nSee: https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/build-image-from-a-dockerfile\n\n", "before_files": [{"content": "import logging\nimport os\nimport re\nimport json\n\nfrom .. import constants\nfrom .. import errors\nfrom .. import auth\nfrom .. import utils\n\n\nlog = logging.getLogger(__name__)\n\n\nclass BuildApiMixin(object):\n def build(self, path=None, tag=None, quiet=False, fileobj=None,\n nocache=False, rm=False, stream=False, timeout=None,\n custom_context=False, encoding=None, pull=False,\n forcerm=False, dockerfile=None, container_limits=None,\n decode=False, buildargs=None, gzip=False):\n remote = context = None\n headers = {}\n container_limits = container_limits or {}\n if path is None and fileobj is None:\n raise TypeError(\"Either path or fileobj needs to be provided.\")\n if gzip and encoding is not None:\n raise errors.DockerException(\n 'Can not use custom encoding if gzip is enabled'\n )\n\n for key in container_limits.keys():\n if key not in constants.CONTAINER_LIMITS_KEYS:\n raise errors.DockerException(\n 'Invalid container_limits key {0}'.format(key)\n )\n\n if custom_context:\n if not fileobj:\n raise TypeError(\"You must specify fileobj with custom_context\")\n context = fileobj\n elif fileobj is not None:\n context = utils.mkbuildcontext(fileobj)\n elif path.startswith(('http://', 'https://',\n 'git://', 'github.com/', 'git@')):\n remote = path\n elif not os.path.isdir(path):\n raise TypeError(\"You must specify a directory to build in path\")\n else:\n dockerignore = os.path.join(path, '.dockerignore')\n exclude = None\n if os.path.exists(dockerignore):\n with open(dockerignore, 'r') as f:\n exclude = list(filter(bool, f.read().splitlines()))\n context = utils.tar(\n path, exclude=exclude, dockerfile=dockerfile, gzip=gzip\n )\n encoding = 'gzip' if gzip else encoding\n\n if utils.compare_version('1.8', self._version) >= 0:\n stream = True\n\n if dockerfile and utils.compare_version('1.17', self._version) < 0:\n raise errors.InvalidVersion(\n 'dockerfile was only introduced in API version 1.17'\n )\n\n if utils.compare_version('1.19', self._version) 
< 0:\n pull = 1 if pull else 0\n\n u = self._url('/build')\n params = {\n 't': tag,\n 'remote': remote,\n 'q': quiet,\n 'nocache': nocache,\n 'rm': rm,\n 'forcerm': forcerm,\n 'pull': pull,\n 'dockerfile': dockerfile,\n }\n params.update(container_limits)\n\n if buildargs:\n if utils.version_gte(self._version, '1.21'):\n params.update({'buildargs': json.dumps(buildargs)})\n else:\n raise errors.InvalidVersion(\n 'buildargs was only introduced in API version 1.21'\n )\n\n if context is not None:\n headers = {'Content-Type': 'application/tar'}\n if encoding:\n headers['Content-Encoding'] = encoding\n\n if utils.compare_version('1.9', self._version) >= 0:\n self._set_auth_headers(headers)\n\n response = self._post(\n u,\n data=context,\n params=params,\n headers=headers,\n stream=stream,\n timeout=timeout,\n )\n\n if context is not None and not custom_context:\n context.close()\n\n if stream:\n return self._stream_helper(response, decode=decode)\n else:\n output = self._result(response)\n srch = r'Successfully built ([0-9a-f]+)'\n match = re.search(srch, output)\n if not match:\n return None, output\n return match.group(1), output\n\n def _set_auth_headers(self, headers):\n log.debug('Looking for auth config')\n\n # If we don't have any auth data so far, try reloading the config\n # file one more time in case anything showed up in there.\n if not self._auth_configs:\n log.debug(\"No auth config in memory - loading from filesystem\")\n self._auth_configs = auth.load_config()\n\n # Send the full auth configuration (if any exists), since the build\n # could use any (or all) of the registries.\n if self._auth_configs:\n log.debug(\n 'Sending auth config ({0})'.format(\n ', '.join(repr(k) for k in self._auth_configs.keys())\n )\n )\n\n if utils.compare_version('1.19', self._version) >= 0:\n headers['X-Registry-Config'] = auth.encode_header(\n self._auth_configs\n )\n else:\n headers['X-Registry-Config'] = auth.encode_header({\n 'configs': self._auth_configs\n })\n else:\n log.debug('No auth config found')\n", "path": "docker/api/build.py"}]}
| 2,068 | 321 |
gh_patches_debug_27671
|
rasdani/github-patches
|
git_diff
|
ocadotechnology__codeforlife-portal-417
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
From django administration page, in Portal, can't access Teachers or Students
Trying to access a Student or Teacher from the administration page leads to an error:
Failed to load resource: the server responded with a status of 500 (OK)
</issue>
<code>
[start of portal/admin.py]
1 # -*- coding: utf-8 -*-
2 # Code for Life
3 #
4 # Copyright (C) 2016, Ocado Innovation Limited
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU Affero General Public License as
8 # published by the Free Software Foundation, either version 3 of the
9 # License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU Affero General Public License for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 # ADDITIONAL TERMS – Section 7 GNU General Public Licence
20 #
21 # This licence does not grant any right, title or interest in any “Ocado” logos,
22 # trade names or the trademark “Ocado” or any other trademarks or domain names
23 # owned by Ocado Innovation Limited or the Ocado group of companies or any other
24 # distinctive brand features of “Ocado” as may be secured from time to time. You
25 # must not distribute any modification of this program using the trademark
26 # “Ocado” or claim any affiliation or association with Ocado or its employees.
27 #
28 # You are not authorised to use the name Ocado (or any of its trade names) or
29 # the names of any author or contributor in advertising or for publicity purposes
30 # pertaining to the distribution of this program, without the prior written
31 # authorisation of Ocado.
32 #
33 # Any propagation, distribution or conveyance of this program must include this
34 # copyright notice and these terms. You must not misrepresent the origins of this
35 # program; modified versions of the program must be marked as such and not
36 # identified as the original program.
37 from django.contrib import admin
38 from django.contrib.auth.models import User
39 from django.contrib.auth.admin import UserAdmin
40
41
42 from portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification
43
44
45 class ClassAdmin(admin.ModelAdmin):
46 search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']
47 list_filter = ['teacher']
48
49
50 class SchoolAdmin(admin.ModelAdmin):
51 search_fields = ['name', 'country', 'postcode', 'town']
52 list_filter = ['postcode', 'country']
53
54
55 class StudentAdmin(admin.ModelAdmin):
56 search_fields = ['new_user__first_name', 'new_user__last_name']
57 list_filter = ['class_field', 'class_field__teacher']
58
59
60 class TeacherAdmin(admin.ModelAdmin):
61 search_fields = ['new_user__first_name', 'new_user__last_name']
62 list_filter = ['school']
63
64
65 class UserProfileAdmin(admin.ModelAdmin):
66 search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']
67 list_filter = ['user__date_joined']
68 list_display = ['user', 'joined_recently']
69
70
71 class EmailVerificationAdmin(admin.ModelAdmin):
72 search_fields = ['new_user']
73
74
75 UserAdmin.list_display += ('date_joined',)
76 UserAdmin.list_filter += ('date_joined',)
77
78
79 admin.site.register(Class, ClassAdmin)
80 admin.site.register(Student, StudentAdmin)
81 admin.site.register(Guardian)
82 admin.site.register(Teacher, TeacherAdmin)
83 admin.site.register(School, SchoolAdmin)
84 admin.site.unregister(User)
85 admin.site.register(User, UserAdmin)
86 admin.site.register(UserProfile, UserProfileAdmin)
87 admin.site.register(FrontPageNews)
88 admin.site.register(EmailVerification, EmailVerificationAdmin)
89
[end of portal/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/portal/admin.py b/portal/admin.py
--- a/portal/admin.py
+++ b/portal/admin.py
@@ -45,6 +45,7 @@
class ClassAdmin(admin.ModelAdmin):
search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']
list_filter = ['teacher']
+ readonly_fields = ['teacher']
class SchoolAdmin(admin.ModelAdmin):
@@ -55,17 +56,22 @@
class StudentAdmin(admin.ModelAdmin):
search_fields = ['new_user__first_name', 'new_user__last_name']
list_filter = ['class_field', 'class_field__teacher']
+ readonly_fields = ['user', 'new_user']
+ raw_id_fields = ['class_field', 'pending_class_request']
class TeacherAdmin(admin.ModelAdmin):
search_fields = ['new_user__first_name', 'new_user__last_name']
list_filter = ['school']
+ readonly_fields = ['user', 'new_user']
+ raw_id_fields = ['school', 'pending_join_request']
class UserProfileAdmin(admin.ModelAdmin):
search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']
list_filter = ['user__date_joined']
list_display = ['user', 'joined_recently']
+ readonly_fields = ['user']
class EmailVerificationAdmin(admin.ModelAdmin):
|
{"golden_diff": "diff --git a/portal/admin.py b/portal/admin.py\n--- a/portal/admin.py\n+++ b/portal/admin.py\n@@ -45,6 +45,7 @@\n class ClassAdmin(admin.ModelAdmin):\n search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']\n list_filter = ['teacher']\n+ readonly_fields = ['teacher']\n \n \n class SchoolAdmin(admin.ModelAdmin):\n@@ -55,17 +56,22 @@\n class StudentAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['class_field', 'class_field__teacher']\n+ readonly_fields = ['user', 'new_user']\n+ raw_id_fields = ['class_field', 'pending_class_request']\n \n \n class TeacherAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['school']\n+ readonly_fields = ['user', 'new_user']\n+ raw_id_fields = ['school', 'pending_join_request']\n \n \n class UserProfileAdmin(admin.ModelAdmin):\n search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n+ readonly_fields = ['user']\n \n \n class EmailVerificationAdmin(admin.ModelAdmin):\n", "issue": "From django administration page, in Portal, can't access Teachers or Students\nTrying to access a Student or Teacher from the administration page leads to an error:\nFailed to load resource: the server responded with a status of 500 (OK)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2016, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. 
You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\nfrom django.contrib import admin\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.admin import UserAdmin\n\n\nfrom portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification\n\n\nclass ClassAdmin(admin.ModelAdmin):\n search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']\n list_filter = ['teacher']\n\n\nclass SchoolAdmin(admin.ModelAdmin):\n search_fields = ['name', 'country', 'postcode', 'town']\n list_filter = ['postcode', 'country']\n\n\nclass StudentAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['class_field', 'class_field__teacher']\n\n\nclass TeacherAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['school']\n\n\nclass UserProfileAdmin(admin.ModelAdmin):\n search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n\n\nclass EmailVerificationAdmin(admin.ModelAdmin):\n search_fields = ['new_user']\n\n\nUserAdmin.list_display += ('date_joined',)\nUserAdmin.list_filter += ('date_joined',)\n\n\nadmin.site.register(Class, ClassAdmin)\nadmin.site.register(Student, StudentAdmin)\nadmin.site.register(Guardian)\nadmin.site.register(Teacher, TeacherAdmin)\nadmin.site.register(School, SchoolAdmin)\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(UserProfile, UserProfileAdmin)\nadmin.site.register(FrontPageNews)\nadmin.site.register(EmailVerification, EmailVerificationAdmin)\n", "path": "portal/admin.py"}]}
| 1,540 | 303 |
gh_patches_debug_22348
|
rasdani/github-patches
|
git_diff
|
hedyorg__hedy-687
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Link to latests shared program is empty

(see link at the bottom)
</issue>
<code>
[start of website/teacher.py]
1 from website.auth import requires_login, is_teacher, current_user
2 import utils
3 import uuid
4 from flask import request, jsonify, redirect
5 from flask_helpers import render_template
6 import os
7 import hedyweb
8 TRANSLATIONS = hedyweb.Translations ()
9 from config import config
10 cookie_name = config ['session'] ['cookie_name']
11
12 def routes (app, database, requested_lang):
13 global DATABASE
14 DATABASE = database
15
16 from app import render_main_menu
17
18 @app.route('/class/<class_id>', methods=['GET'])
19 @requires_login
20 def get_class (user, class_id):
21 if not is_teacher (request):
22 return 'Only teachers can retrieve classes', 403
23 Class = DATABASE.get_class (class_id)
24 if not Class or Class ['teacher'] != user ['username']:
25 return 'No such class', 404
26 students = []
27 for student_username in Class.get ('students', []):
28 student = DATABASE.user_by_username (student_username)
29 programs = DATABASE.programs_for_user(student_username)
30 highest_level = max(program['level'] for program in programs) if len(programs) else 0
31 sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))
32 latest_shared = sorted_public_programs[-1] if sorted_public_programs else None
33 students.append ({'username': student_username, 'last_login': utils.mstoisostring (student ['last_login']), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})
34
35 if utils.is_testing_request (request):
36 return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']})
37 return render_template ('class-overview.html', lang=requested_lang (), auth=TRANSLATIONS.get_translations (requested_lang (), 'Auth'), menu=render_main_menu('my-profile'), username=current_user (request) ['username'], current_page='my-profile', class_info={'students': students, 'link': os.getenv ('BASE_URL') + '/hedy/l/' + Class ['link'], 'name': Class ['name'], 'id': Class ['id']})
38
39 @app.route('/class', methods=['POST'])
40 @requires_login
41 def create_class (user):
42 if not is_teacher (request):
43 return 'Only teachers can create classes', 403
44
45 body = request.json
46 # Validations
47 if not isinstance(body, dict):
48 return 'body must be an object', 400
49 if not isinstance(body.get('name'), str):
50 return 'name must be a string', 400
51
52 Class = {
53 'id': uuid.uuid4().hex,
54 'date': utils.timems (),
55 'teacher': user ['username'],
56 'link': utils.random_id_generator (7),
57 'name': body ['name']
58 }
59
60 DATABASE.store_class (Class)
61
62 return {}, 200
63
64 @app.route('/class/<class_id>', methods=['PUT'])
65 @requires_login
66 def update_class (user, class_id):
67 if not is_teacher (request):
68 return 'Only teachers can update classes', 403
69
70 body = request.json
71 # Validations
72 if not isinstance(body, dict):
73 return 'body must be an object', 400
74 if not isinstance(body.get('name'), str):
75 return 'name must be a string', 400
76
77 Class = DATABASE.get_class (class_id)
78 if not Class or Class ['teacher'] != user ['username']:
79 return 'No such class', 404
80
81 Class = DATABASE.update_class (class_id, body ['name'])
82
83 return {}, 200
84
85 @app.route('/class/<class_id>', methods=['DELETE'])
86 @requires_login
87 def delete_class (user, class_id):
88 Class = DATABASE.get_class (class_id)
89 if not Class or Class ['teacher'] != user ['username']:
90 return 'No such class', 404
91
92 DATABASE.delete_class (Class)
93
94 return {}, 200
95
96 @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])
97 def prejoin_class (class_id, link):
98 Class = DATABASE.get_class (class_id)
99 if not Class or Class ['link'] != link:
100 return 'No such class', 404
101 user = {}
102 if request.cookies.get (cookie_name):
103 token = DATABASE.get_token(request.cookies.get (cookie_name))
104 if token:
105 user = DATABASE.user_by_username(token ['username'])
106
107 return render_template ('class-prejoin.html', lang=requested_lang (), auth=TRANSLATIONS.get_translations (requested_lang (), 'Auth'), menu=render_main_menu('my-profile'), username=current_user (request) ['username'], current_page='my-profile', class_info={'link': os.getenv ('BASE_URL') + '/class/' + Class ['id'] + '/join/' + Class ['link'] + '?lang=' + requested_lang (), 'name': Class ['name']})
108
109 @app.route('/class/<class_id>/join/<link>', methods=['GET'])
110 @requires_login
111 def join_class (user, class_id, link):
112 Class = DATABASE.get_class (class_id)
113 if not Class or Class ['link'] != link:
114 return 'No such class', 404
115
116 DATABASE.add_student_to_class (Class ['id'], user ['username'])
117
118 return redirect(request.url.replace('/class/' + class_id + '/join/' + link, '/my-profile'), code=302)
119
120 @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])
121 @requires_login
122 def leave_class (user, class_id, student_id):
123
124 Class = DATABASE.get_class (class_id)
125 if not Class or Class ['teacher'] != user ['username']:
126 return 'No such class', 404
127
128 DATABASE.remove_student_from_class (Class ['id'], student_id)
129
130 return {}, 200
131
132 @app.route('/hedy/l/<link_id>', methods=['GET'])
133 def resolve_class_link (link_id):
134 Class = DATABASE.resolve_class_link (link_id)
135 if not Class:
136 return 'Invalid link', 404
137 return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)
138
[end of website/teacher.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/website/teacher.py b/website/teacher.py
--- a/website/teacher.py
+++ b/website/teacher.py
@@ -29,7 +29,11 @@
programs = DATABASE.programs_for_user(student_username)
highest_level = max(program['level'] for program in programs) if len(programs) else 0
sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))
- latest_shared = sorted_public_programs[-1] if sorted_public_programs else None
+ if sorted_public_programs:
+ latest_shared = sorted_public_programs[-1]
+ latest_shared['link'] = os.getenv ('BASE_URL') + f"hedy/{latest_shared['id']}/view"
+ else:
+ latest_shared = None
students.append ({'username': student_username, 'last_login': utils.mstoisostring (student ['last_login']), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})
if utils.is_testing_request (request):
|
{"golden_diff": "diff --git a/website/teacher.py b/website/teacher.py\n--- a/website/teacher.py\n+++ b/website/teacher.py\n@@ -29,7 +29,11 @@\n programs = DATABASE.programs_for_user(student_username)\n highest_level = max(program['level'] for program in programs) if len(programs) else 0\n sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))\n- latest_shared = sorted_public_programs[-1] if sorted_public_programs else None\n+ if sorted_public_programs:\n+ latest_shared = sorted_public_programs[-1]\n+ latest_shared['link'] = os.getenv ('BASE_URL') + f\"hedy/{latest_shared['id']}/view\"\n+ else:\n+ latest_shared = None\n students.append ({'username': student_username, 'last_login': utils.mstoisostring (student ['last_login']), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})\n \n if utils.is_testing_request (request):\n", "issue": "Link to latests shared program is empty\n\r\n\r\n(see link at the bottom)\n", "before_files": [{"content": "from website.auth import requires_login, is_teacher, current_user\nimport utils\nimport uuid\nfrom flask import request, jsonify, redirect\nfrom flask_helpers import render_template\nimport os\nimport hedyweb\nTRANSLATIONS = hedyweb.Translations ()\nfrom config import config\ncookie_name = config ['session'] ['cookie_name']\n\ndef routes (app, database, requested_lang):\n global DATABASE\n DATABASE = database\n\n from app import render_main_menu\n\n @app.route('/class/<class_id>', methods=['GET'])\n @requires_login\n def get_class (user, class_id):\n if not is_teacher (request):\n return 'Only teachers can retrieve classes', 403\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n students = []\n for student_username in Class.get ('students', []):\n student = DATABASE.user_by_username (student_username)\n programs = DATABASE.programs_for_user(student_username)\n highest_level = max(program['level'] for program in programs) if len(programs) else 0\n sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))\n latest_shared = sorted_public_programs[-1] if sorted_public_programs else None\n students.append ({'username': student_username, 'last_login': utils.mstoisostring (student ['last_login']), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})\n\n if utils.is_testing_request (request):\n return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n return render_template ('class-overview.html', lang=requested_lang (), auth=TRANSLATIONS.get_translations (requested_lang (), 'Auth'), menu=render_main_menu('my-profile'), username=current_user (request) ['username'], current_page='my-profile', class_info={'students': students, 'link': os.getenv ('BASE_URL') + '/hedy/l/' + Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n\n @app.route('/class', methods=['POST'])\n @requires_login\n def create_class (user):\n if not is_teacher (request):\n return 'Only teachers can create classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n Class = {\n 'id': uuid.uuid4().hex,\n 'date': utils.timems (),\n 'teacher': user ['username'],\n 'link': utils.random_id_generator 
(7),\n 'name': body ['name']\n }\n\n DATABASE.store_class (Class)\n\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['PUT'])\n @requires_login\n def update_class (user, class_id):\n if not is_teacher (request):\n return 'Only teachers can update classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n Class = DATABASE.update_class (class_id, body ['name'])\n\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['DELETE'])\n @requires_login\n def delete_class (user, class_id):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.delete_class (Class)\n\n return {}, 200\n\n @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])\n def prejoin_class (class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return 'No such class', 404\n user = {}\n if request.cookies.get (cookie_name):\n token = DATABASE.get_token(request.cookies.get (cookie_name))\n if token:\n user = DATABASE.user_by_username(token ['username'])\n\n return render_template ('class-prejoin.html', lang=requested_lang (), auth=TRANSLATIONS.get_translations (requested_lang (), 'Auth'), menu=render_main_menu('my-profile'), username=current_user (request) ['username'], current_page='my-profile', class_info={'link': os.getenv ('BASE_URL') + '/class/' + Class ['id'] + '/join/' + Class ['link'] + '?lang=' + requested_lang (), 'name': Class ['name']})\n\n @app.route('/class/<class_id>/join/<link>', methods=['GET'])\n @requires_login\n def join_class (user, class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return 'No such class', 404\n\n DATABASE.add_student_to_class (Class ['id'], user ['username'])\n\n return redirect(request.url.replace('/class/' + class_id + '/join/' + link, '/my-profile'), code=302)\n\n @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])\n @requires_login\n def leave_class (user, class_id, student_id):\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.remove_student_from_class (Class ['id'], student_id)\n\n return {}, 200\n\n @app.route('/hedy/l/<link_id>', methods=['GET'])\n def resolve_class_link (link_id):\n Class = DATABASE.resolve_class_link (link_id)\n if not Class:\n return 'Invalid link', 404\n return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)\n", "path": "website/teacher.py"}]}
| 2,338 | 242 |
gh_patches_debug_27236
|
rasdani/github-patches
|
git_diff
|
redis__redis-py-2324
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for WITHSUFFIXTRIE to FT.CREATE
RediSearch now supports another option (WITHSUFFIXTRIE) during index creation. We need to extend the [FT.CREATE](https://sourcegraph.com/github.com/RediSearch/RediSearch/-/blob/docs/commands/ft.create.md) calls to support this
</issue>
<code>
[start of redis/commands/search/field.py]
1 from typing import List
2
3 from redis import DataError
4
5
6 class Field:
7
8 NUMERIC = "NUMERIC"
9 TEXT = "TEXT"
10 WEIGHT = "WEIGHT"
11 GEO = "GEO"
12 TAG = "TAG"
13 VECTOR = "VECTOR"
14 SORTABLE = "SORTABLE"
15 NOINDEX = "NOINDEX"
16 AS = "AS"
17
18 def __init__(
19 self,
20 name: str,
21 args: List[str] = None,
22 sortable: bool = False,
23 no_index: bool = False,
24 as_name: str = None,
25 ):
26 if args is None:
27 args = []
28 self.name = name
29 self.args = args
30 self.args_suffix = list()
31 self.as_name = as_name
32
33 if sortable:
34 self.args_suffix.append(Field.SORTABLE)
35 if no_index:
36 self.args_suffix.append(Field.NOINDEX)
37
38 if no_index and not sortable:
39 raise ValueError("Non-Sortable non-Indexable fields are ignored")
40
41 def append_arg(self, value):
42 self.args.append(value)
43
44 def redis_args(self):
45 args = [self.name]
46 if self.as_name:
47 args += [self.AS, self.as_name]
48 args += self.args
49 args += self.args_suffix
50 return args
51
52
53 class TextField(Field):
54 """
55 TextField is used to define a text field in a schema definition
56 """
57
58 NOSTEM = "NOSTEM"
59 PHONETIC = "PHONETIC"
60
61 def __init__(
62 self,
63 name: str,
64 weight: float = 1.0,
65 no_stem: bool = False,
66 phonetic_matcher: str = None,
67 **kwargs,
68 ):
69 Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
70
71 if no_stem:
72 Field.append_arg(self, self.NOSTEM)
73 if phonetic_matcher and phonetic_matcher in [
74 "dm:en",
75 "dm:fr",
76 "dm:pt",
77 "dm:es",
78 ]:
79 Field.append_arg(self, self.PHONETIC)
80 Field.append_arg(self, phonetic_matcher)
81
82
83 class NumericField(Field):
84 """
85 NumericField is used to define a numeric field in a schema definition
86 """
87
88 def __init__(self, name: str, **kwargs):
89 Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)
90
91
92 class GeoField(Field):
93 """
94 GeoField is used to define a geo-indexing field in a schema definition
95 """
96
97 def __init__(self, name: str, **kwargs):
98 Field.__init__(self, name, args=[Field.GEO], **kwargs)
99
100
101 class TagField(Field):
102 """
103 TagField is a tag-indexing field with simpler compression and tokenization.
104 See http://redisearch.io/Tags/
105 """
106
107 SEPARATOR = "SEPARATOR"
108 CASESENSITIVE = "CASESENSITIVE"
109
110 def __init__(
111 self, name: str, separator: str = ",", case_sensitive: bool = False, **kwargs
112 ):
113 args = [Field.TAG, self.SEPARATOR, separator]
114 if case_sensitive:
115 args.append(self.CASESENSITIVE)
116
117 Field.__init__(self, name, args=args, **kwargs)
118
119
120 class VectorField(Field):
121 """
122 Allows vector similarity queries against the value in this attribute.
123 See https://oss.redis.com/redisearch/Vectors/#vector_fields.
124 """
125
126 def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):
127 """
128 Create Vector Field. Notice that Vector cannot have sortable or no_index tag,
129 although it's also a Field.
130
131 ``name`` is the name of the field.
132
133 ``algorithm`` can be "FLAT" or "HNSW".
134
135 ``attributes`` each algorithm can have specific attributes. Some of them
136 are mandatory and some of them are optional. See
137 https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm
138 for more information.
139 """
140 sort = kwargs.get("sortable", False)
141 noindex = kwargs.get("no_index", False)
142
143 if sort or noindex:
144 raise DataError("Cannot set 'sortable' or 'no_index' in Vector fields.")
145
146 if algorithm.upper() not in ["FLAT", "HNSW"]:
147 raise DataError(
148 "Realtime vector indexing supporting 2 Indexing Methods:"
149 "'FLAT' and 'HNSW'."
150 )
151
152 attr_li = []
153
154 for key, value in attributes.items():
155 attr_li.extend([key, value])
156
157 Field.__init__(
158 self, name, args=[Field.VECTOR, algorithm, len(attr_li), *attr_li], **kwargs
159 )
160
[end of redis/commands/search/field.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py
--- a/redis/commands/search/field.py
+++ b/redis/commands/search/field.py
@@ -64,6 +64,7 @@
weight: float = 1.0,
no_stem: bool = False,
phonetic_matcher: str = None,
+ withsuffixtrie: bool = False,
**kwargs,
):
Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
@@ -78,6 +79,8 @@
]:
Field.append_arg(self, self.PHONETIC)
Field.append_arg(self, phonetic_matcher)
+ if withsuffixtrie:
+ Field.append_arg(self, "WITHSUFFIXTRIE")
class NumericField(Field):
@@ -108,11 +111,18 @@
CASESENSITIVE = "CASESENSITIVE"
def __init__(
- self, name: str, separator: str = ",", case_sensitive: bool = False, **kwargs
+ self,
+ name: str,
+ separator: str = ",",
+ case_sensitive: bool = False,
+ withsuffixtrie: bool = False,
+ **kwargs,
):
args = [Field.TAG, self.SEPARATOR, separator]
if case_sensitive:
args.append(self.CASESENSITIVE)
+ if withsuffixtrie:
+ args.append("WITHSUFFIXTRIE")
Field.__init__(self, name, args=args, **kwargs)
|
{"golden_diff": "diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py\n--- a/redis/commands/search/field.py\n+++ b/redis/commands/search/field.py\n@@ -64,6 +64,7 @@\n weight: float = 1.0,\n no_stem: bool = False,\n phonetic_matcher: str = None,\n+ withsuffixtrie: bool = False,\n **kwargs,\n ):\n Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)\n@@ -78,6 +79,8 @@\n ]:\n Field.append_arg(self, self.PHONETIC)\n Field.append_arg(self, phonetic_matcher)\n+ if withsuffixtrie:\n+ Field.append_arg(self, \"WITHSUFFIXTRIE\")\n \n \n class NumericField(Field):\n@@ -108,11 +111,18 @@\n CASESENSITIVE = \"CASESENSITIVE\"\n \n def __init__(\n- self, name: str, separator: str = \",\", case_sensitive: bool = False, **kwargs\n+ self,\n+ name: str,\n+ separator: str = \",\",\n+ case_sensitive: bool = False,\n+ withsuffixtrie: bool = False,\n+ **kwargs,\n ):\n args = [Field.TAG, self.SEPARATOR, separator]\n if case_sensitive:\n args.append(self.CASESENSITIVE)\n+ if withsuffixtrie:\n+ args.append(\"WITHSUFFIXTRIE\")\n \n Field.__init__(self, name, args=args, **kwargs)\n", "issue": "Add support for WITHSUFFIXTRIE to FT.CREATE \nRediSearch now supports another option (WITHSUFFIXTRIE) during index creation. We need to extend the [FT.CREATE](https://sourcegraph.com/github.com/RediSearch/RediSearch/-/blob/docs/commands/ft.create.md) calls to support this\n", "before_files": [{"content": "from typing import List\n\nfrom redis import DataError\n\n\nclass Field:\n\n NUMERIC = \"NUMERIC\"\n TEXT = \"TEXT\"\n WEIGHT = \"WEIGHT\"\n GEO = \"GEO\"\n TAG = \"TAG\"\n VECTOR = \"VECTOR\"\n SORTABLE = \"SORTABLE\"\n NOINDEX = \"NOINDEX\"\n AS = \"AS\"\n\n def __init__(\n self,\n name: str,\n args: List[str] = None,\n sortable: bool = False,\n no_index: bool = False,\n as_name: str = None,\n ):\n if args is None:\n args = []\n self.name = name\n self.args = args\n self.args_suffix = list()\n self.as_name = as_name\n\n if sortable:\n self.args_suffix.append(Field.SORTABLE)\n if no_index:\n self.args_suffix.append(Field.NOINDEX)\n\n if no_index and not sortable:\n raise ValueError(\"Non-Sortable non-Indexable fields are ignored\")\n\n def append_arg(self, value):\n self.args.append(value)\n\n def redis_args(self):\n args = [self.name]\n if self.as_name:\n args += [self.AS, self.as_name]\n args += self.args\n args += self.args_suffix\n return args\n\n\nclass TextField(Field):\n \"\"\"\n TextField is used to define a text field in a schema definition\n \"\"\"\n\n NOSTEM = \"NOSTEM\"\n PHONETIC = \"PHONETIC\"\n\n def __init__(\n self,\n name: str,\n weight: float = 1.0,\n no_stem: bool = False,\n phonetic_matcher: str = None,\n **kwargs,\n ):\n Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)\n\n if no_stem:\n Field.append_arg(self, self.NOSTEM)\n if phonetic_matcher and phonetic_matcher in [\n \"dm:en\",\n \"dm:fr\",\n \"dm:pt\",\n \"dm:es\",\n ]:\n Field.append_arg(self, self.PHONETIC)\n Field.append_arg(self, phonetic_matcher)\n\n\nclass NumericField(Field):\n \"\"\"\n NumericField is used to define a numeric field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)\n\n\nclass GeoField(Field):\n \"\"\"\n GeoField is used to define a geo-indexing field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.GEO], **kwargs)\n\n\nclass TagField(Field):\n \"\"\"\n TagField is a tag-indexing field with 
simpler compression and tokenization.\n See http://redisearch.io/Tags/\n \"\"\"\n\n SEPARATOR = \"SEPARATOR\"\n CASESENSITIVE = \"CASESENSITIVE\"\n\n def __init__(\n self, name: str, separator: str = \",\", case_sensitive: bool = False, **kwargs\n ):\n args = [Field.TAG, self.SEPARATOR, separator]\n if case_sensitive:\n args.append(self.CASESENSITIVE)\n\n Field.__init__(self, name, args=args, **kwargs)\n\n\nclass VectorField(Field):\n \"\"\"\n Allows vector similarity queries against the value in this attribute.\n See https://oss.redis.com/redisearch/Vectors/#vector_fields.\n \"\"\"\n\n def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):\n \"\"\"\n Create Vector Field. Notice that Vector cannot have sortable or no_index tag,\n although it's also a Field.\n\n ``name`` is the name of the field.\n\n ``algorithm`` can be \"FLAT\" or \"HNSW\".\n\n ``attributes`` each algorithm can have specific attributes. Some of them\n are mandatory and some of them are optional. See\n https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm\n for more information.\n \"\"\"\n sort = kwargs.get(\"sortable\", False)\n noindex = kwargs.get(\"no_index\", False)\n\n if sort or noindex:\n raise DataError(\"Cannot set 'sortable' or 'no_index' in Vector fields.\")\n\n if algorithm.upper() not in [\"FLAT\", \"HNSW\"]:\n raise DataError(\n \"Realtime vector indexing supporting 2 Indexing Methods:\"\n \"'FLAT' and 'HNSW'.\"\n )\n\n attr_li = []\n\n for key, value in attributes.items():\n attr_li.extend([key, value])\n\n Field.__init__(\n self, name, args=[Field.VECTOR, algorithm, len(attr_li), *attr_li], **kwargs\n )\n", "path": "redis/commands/search/field.py"}]}
| 2,043 | 353 |
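The record above patches redis-py's `TextField` and `TagField` to accept a `withsuffixtrie` flag. A minimal sketch of how the new flag might be used when building an index, assuming a RediSearch-enabled server and a redis-py build that includes the patch; the index name, key prefix, and field names are illustrative assumptions, not part of the record:

```python
from redis import Redis
from redis.commands.search.field import TextField, TagField
from redis.commands.search.indexDefinition import IndexDefinition

r = Redis(host="localhost", port=6379)

# Fields opt in to the suffix trie so suffix and "contains" queries stay fast.
schema = (
    TextField("title", withsuffixtrie=True),
    TagField("category", separator=",", withsuffixtrie=True),
)

# "idx:docs" and the "doc:" key prefix are illustrative choices.
r.ft("idx:docs").create_index(schema, definition=IndexDefinition(prefix=["doc:"]))
```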
gh_patches_debug_37669
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-1577
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add JP prices
See https://github.com/tmrowco/electricitymap-contrib/pull/1543#issuecomment-411281685 by @tmslaine
</issue>
<code>
[start of parsers/JP.py]
1 #!/usr/bin/env python3
2 # coding=utf-8
3 import logging
4 # The arrow library is used to handle datetimes
5 import arrow
6 import pandas as pd
7 from . import occtonet
8
9 # Abbreviations
10 # JP-HKD : Hokkaido
11 # JP-TH : Tohoku
12 # JP-TK : Tokyo area
13 # JP-CB : Chubu
14 # JP-HR : Hokuriku
15 # JP-KN : Kansai
16 # JP-SK : Shikoku
17 # JP-KY : Kyushu
18 # JP-ON : Okinawa
19
20 def fetch_production(zone_key='JP-TK', session=None, target_datetime=None,
21 logger=logging.getLogger(__name__)):
22 """
23 Calculates production from consumption and imports for a given area
24 All production is mapped to unknown
25 """
26 if target_datetime:
27 raise NotImplementedError(
28 'This parser is not yet able to parse past dates')
29 exch_map = {
30 'JP-HKD':['JP-TH'],
31 'JP-TH':['JP-TK'],
32 'JP-TK':['JP-TH', 'JP-CB'],
33 'JP-CB':['JP-TK', 'JP-HR', 'JP-KN'],
34 'JP-HR':['JP-CB', 'JP-KN'],
35 'JP-KN':['JP-CB', 'JP-HR', 'JP-SK', 'JP-CG'],
36 'JP-SK':['JP-KN', 'JP-CG'],
37 'JP-CG':['JP-KN', 'JP-SK', 'JP-KY']
38 }
39 df = fetch_consumption_df(zone_key, target_datetime)
40 df['imports'] = 0
41 for zone in exch_map[zone_key]:
42 df2 = occtonet.fetch_exchange(zone_key, zone, target_datetime)
43 df2 = pd.DataFrame(df2)
44 exchname = df2.loc[0, 'sortedZoneKeys']
45 df2 = df2[['datetime', 'netFlow']]
46 df2.columns = ['datetime', exchname]
47 df = pd.merge(df, df2, how='inner', on='datetime')
48 if exchname.split('->')[-1] == zone_key:
49 df['imports'] = df['imports']+df[exchname]
50 else:
51 df['imports'] = df['imports']-df[exchname]
52 df['prod'] = df['cons']-df['imports']
53 df = df[['datetime', 'prod']]
54 # add a row to production for each entry in the dictionary:
55 sources = {
56 'JP-HKD':'denkiyoho.hepco.co.jp',
57 'JP-TH':'setsuden.tohoku-epco.co.jp',
58 'JP-TK':'www.tepco.co.jp',
59 'JP-CB':'denki-yoho.chuden.jp',
60 'JP-HR':'www.rikuden.co.jp/denki-yoho',
61 'JP-KN':'www.kepco.co.jp',
62 'JP-SK':'www.energia.co.jp',
63 'JP-CG':'www.yonden.co.jp'
64 }
65 datalist = []
66 for i in range(df.shape[0]):
67 data = {
68 'zoneKey': zone_key,
69 'datetime': df.loc[i, 'datetime'].to_pydatetime(),
70 'production': {
71 'biomass': None,
72 'coal': None,
73 'gas': None,
74 'hydro': None,
75 'nuclear': None,
76 'oil': None,
77 'solar': None,
78 'wind': None,
79 'geothermal': None,
80 'unknown': df.loc[i, 'prod']
81 },
82 'storage': {},
83 'source': ['occtonet.or.jp', sources[zone_key]]
84 }
85 datalist.append(data)
86 return datalist
87
88
89 def fetch_consumption_df(zone_key='JP-TK', target_datetime=None,
90 logger=logging.getLogger(__name__)):
91 """
92 Returns the consumption for an area as a pandas DataFrame
93 """
94 datestamp = arrow.get(target_datetime).to('Asia/Tokyo').strftime('%Y%m%d')
95 consumption_url = {
96 'JP-HKD': 'http://denkiyoho.hepco.co.jp/area/data/juyo_01_{}.csv'.format(datestamp),
97 'JP-TH': 'http://setsuden.tohoku-epco.co.jp/common/demand/juyo_02_{}.csv'.format(datestamp),
98 'JP-TK': 'http://www.tepco.co.jp/forecast/html/images/juyo-j.csv',
99 'JP-HR': 'http://www.rikuden.co.jp/denki-yoho/csv/juyo_05_{}.csv'.format(datestamp),
100 'JP-CB': 'http://denki-yoho.chuden.jp/denki_yoho_content_data/juyo_cepco003.csv',
101 'JP-KN': 'http://www.kepco.co.jp/yamasou/juyo1_kansai.csv',
102 'JP-CG': 'http://www.energia.co.jp/jukyuu/sys/juyo_07_{}.csv'.format(datestamp),
103 'JP-SK': 'http://www.yonden.co.jp/denkiyoho/juyo_shikoku.csv'
104 }
105 # First roughly 40 rows of the consumption files have hourly data,
106 # the parser skips to the rows with 5-min actual values
107 if zone_key == 'JP-KN':
108 startrow = 44
109 else:
110 startrow = 42
111 df = pd.read_csv(consumption_url[zone_key], skiprows=list(range(startrow)),
112 encoding='shift-jis')
113 df.columns = ['Date', 'Time', 'cons']
114 # Convert 万kW to MW
115 df['cons'] = 10*df['cons']
116 df = df.dropna()
117 df['datetime'] = df.apply(parse_dt, axis=1)
118 df = df[['datetime', 'cons']]
119 return df
120
121 def parse_dt(row):
122 """
123 Parses timestamps from date and time
124 """
125 return arrow.get(' '.join([row['Date'], row['Time']]).replace('/', '-'),
126 'YYYY-M-D H:mm').replace(tzinfo='Asia/Tokyo').datetime
127
128 if __name__ == '__main__':
129 """Main method, never used by the Electricity Map backend, but handy for testing."""
130
131 print('fetch_production() ->')
132 print(fetch_production())
133
[end of parsers/JP.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsers/JP.py b/parsers/JP.py
--- a/parsers/JP.py
+++ b/parsers/JP.py
@@ -3,6 +3,7 @@
import logging
# The arrow library is used to handle datetimes
import arrow
+import datetime as dt
import pandas as pd
from . import occtonet
@@ -16,6 +17,8 @@
# JP-SK : Shikoku
# JP-KY : Kyushu
# JP-ON : Okinawa
+# JP-CG : Chūgoku
+
def fetch_production(zone_key='JP-TK', session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
@@ -118,6 +121,47 @@
df = df[['datetime', 'cons']]
return df
+
+def fetch_price(zone_key='JP-TK', session=None, target_datetime=None,
+ logger=logging.getLogger(__name__)):
+ if target_datetime is None:
+ target_datetime = dt.datetime.now() + dt.timedelta(days=1)
+
+ # price files contain data for fiscal year and not calendar year.
+ if target_datetime.month <= 3:
+ fiscal_year = target_datetime.year - 1
+ else:
+ fiscal_year = target_datetime.year
+ url = 'http://www.jepx.org/market/excel/spot_{}.csv'.format(fiscal_year)
+ df = pd.read_csv(url)
+
+ df = df.iloc[:, [0, 1, 6, 7, 8, 9, 10, 11, 12, 13, 14]]
+ df.columns = ['Date', 'Period', 'JP-HKD', 'JP-TH', 'JP-TK', 'JP-CB',
+ 'JP-HR', 'JP-KN', 'JP-CG', 'JP-SK', 'JP-KY']
+
+ if zone_key not in df.columns[2:]:
+ return []
+
+ start = target_datetime - dt.timedelta(days=1)
+ df['Date'] = df['Date'].apply(lambda x: dt.datetime.strptime(x, '%Y/%m/%d'))
+ df = df[(df['Date'] >= start.date()) & (df['Date'] <= target_datetime.date())]
+
+ df['datetime'] = df.apply(lambda row: arrow.get(row['Date']).shift(
+ minutes=30 * (row['Period'] - 1)).replace(tzinfo='Asia/Tokyo'), axis=1)
+
+ data = list()
+ for row in df.iterrows():
+ data.append({
+ 'zoneKey': zone_key,
+ 'currency': 'JPY',
+ 'datetime': row[1]['datetime'].datetime,
+ 'price': row[1][zone_key],
+ 'source': 'jepx.org'
+ })
+
+ return data
+
+
def parse_dt(row):
"""
Parses timestamps from date and time
@@ -125,8 +169,11 @@
return arrow.get(' '.join([row['Date'], row['Time']]).replace('/', '-'),
'YYYY-M-D H:mm').replace(tzinfo='Asia/Tokyo').datetime
+
if __name__ == '__main__':
"""Main method, never used by the Electricity Map backend, but handy for testing."""
print('fetch_production() ->')
print(fetch_production())
+ print('fetch_price() ->')
+ print(fetch_price())
|
{"golden_diff": "diff --git a/parsers/JP.py b/parsers/JP.py\n--- a/parsers/JP.py\n+++ b/parsers/JP.py\n@@ -3,6 +3,7 @@\n import logging\n # The arrow library is used to handle datetimes\n import arrow\n+import datetime as dt\n import pandas as pd\n from . import occtonet\n \n@@ -16,6 +17,8 @@\n # JP-SK : Shikoku\n # JP-KY : Kyushu\n # JP-ON : Okinawa\n+# JP-CG : Ch\u016bgoku\n+\n \n def fetch_production(zone_key='JP-TK', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n@@ -118,6 +121,47 @@\n df = df[['datetime', 'cons']]\n return df\n \n+\n+def fetch_price(zone_key='JP-TK', session=None, target_datetime=None,\n+ logger=logging.getLogger(__name__)):\n+ if target_datetime is None:\n+ target_datetime = dt.datetime.now() + dt.timedelta(days=1)\n+\n+ # price files contain data for fiscal year and not calendar year.\n+ if target_datetime.month <= 3:\n+ fiscal_year = target_datetime.year - 1\n+ else:\n+ fiscal_year = target_datetime.year\n+ url = 'http://www.jepx.org/market/excel/spot_{}.csv'.format(fiscal_year)\n+ df = pd.read_csv(url)\n+\n+ df = df.iloc[:, [0, 1, 6, 7, 8, 9, 10, 11, 12, 13, 14]]\n+ df.columns = ['Date', 'Period', 'JP-HKD', 'JP-TH', 'JP-TK', 'JP-CB',\n+ 'JP-HR', 'JP-KN', 'JP-CG', 'JP-SK', 'JP-KY']\n+\n+ if zone_key not in df.columns[2:]:\n+ return []\n+\n+ start = target_datetime - dt.timedelta(days=1)\n+ df['Date'] = df['Date'].apply(lambda x: dt.datetime.strptime(x, '%Y/%m/%d'))\n+ df = df[(df['Date'] >= start.date()) & (df['Date'] <= target_datetime.date())]\n+\n+ df['datetime'] = df.apply(lambda row: arrow.get(row['Date']).shift(\n+ minutes=30 * (row['Period'] - 1)).replace(tzinfo='Asia/Tokyo'), axis=1)\n+\n+ data = list()\n+ for row in df.iterrows():\n+ data.append({\n+ 'zoneKey': zone_key,\n+ 'currency': 'JPY',\n+ 'datetime': row[1]['datetime'].datetime,\n+ 'price': row[1][zone_key],\n+ 'source': 'jepx.org'\n+ })\n+\n+ return data\n+\n+\n def parse_dt(row):\n \"\"\"\n Parses timestamps from date and time\n@@ -125,8 +169,11 @@\n return arrow.get(' '.join([row['Date'], row['Time']]).replace('/', '-'),\n 'YYYY-M-D H:mm').replace(tzinfo='Asia/Tokyo').datetime\n \n+\n if __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n \n print('fetch_production() ->')\n print(fetch_production())\n+ print('fetch_price() ->')\n+ print(fetch_price())\n", "issue": "Add JP prices\nSee https://github.com/tmrowco/electricitymap-contrib/pull/1543#issuecomment-411281685 by @tmslaine \n", "before_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\nimport logging\n# The arrow library is used to handle datetimes\nimport arrow\nimport pandas as pd\nfrom . 
import occtonet\n\n# Abbreviations\n# JP-HKD : Hokkaido\n# JP-TH : Tohoku\n# JP-TK : Tokyo area\n# JP-CB : Chubu\n# JP-HR : Hokuriku\n# JP-KN : Kansai\n# JP-SK : Shikoku\n# JP-KY : Kyushu\n# JP-ON : Okinawa\n\ndef fetch_production(zone_key='JP-TK', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n \"\"\"\n Calculates production from consumption and imports for a given area\n All production is mapped to unknown\n \"\"\"\n if target_datetime:\n raise NotImplementedError(\n 'This parser is not yet able to parse past dates')\n exch_map = {\n 'JP-HKD':['JP-TH'],\n 'JP-TH':['JP-TK'],\n 'JP-TK':['JP-TH', 'JP-CB'],\n 'JP-CB':['JP-TK', 'JP-HR', 'JP-KN'],\n 'JP-HR':['JP-CB', 'JP-KN'],\n 'JP-KN':['JP-CB', 'JP-HR', 'JP-SK', 'JP-CG'],\n 'JP-SK':['JP-KN', 'JP-CG'],\n 'JP-CG':['JP-KN', 'JP-SK', 'JP-KY']\n }\n df = fetch_consumption_df(zone_key, target_datetime)\n df['imports'] = 0\n for zone in exch_map[zone_key]:\n df2 = occtonet.fetch_exchange(zone_key, zone, target_datetime)\n df2 = pd.DataFrame(df2)\n exchname = df2.loc[0, 'sortedZoneKeys']\n df2 = df2[['datetime', 'netFlow']]\n df2.columns = ['datetime', exchname]\n df = pd.merge(df, df2, how='inner', on='datetime')\n if exchname.split('->')[-1] == zone_key:\n df['imports'] = df['imports']+df[exchname]\n else:\n df['imports'] = df['imports']-df[exchname]\n df['prod'] = df['cons']-df['imports']\n df = df[['datetime', 'prod']]\n # add a row to production for each entry in the dictionary:\n sources = {\n 'JP-HKD':'denkiyoho.hepco.co.jp',\n 'JP-TH':'setsuden.tohoku-epco.co.jp',\n 'JP-TK':'www.tepco.co.jp',\n 'JP-CB':'denki-yoho.chuden.jp',\n 'JP-HR':'www.rikuden.co.jp/denki-yoho',\n 'JP-KN':'www.kepco.co.jp',\n 'JP-SK':'www.energia.co.jp',\n 'JP-CG':'www.yonden.co.jp'\n }\n datalist = []\n for i in range(df.shape[0]):\n data = {\n 'zoneKey': zone_key,\n 'datetime': df.loc[i, 'datetime'].to_pydatetime(),\n 'production': {\n 'biomass': None,\n 'coal': None,\n 'gas': None,\n 'hydro': None,\n 'nuclear': None,\n 'oil': None,\n 'solar': None,\n 'wind': None,\n 'geothermal': None,\n 'unknown': df.loc[i, 'prod']\n },\n 'storage': {},\n 'source': ['occtonet.or.jp', sources[zone_key]]\n }\n datalist.append(data)\n return datalist\n\n\ndef fetch_consumption_df(zone_key='JP-TK', target_datetime=None,\n logger=logging.getLogger(__name__)):\n \"\"\"\n Returns the consumption for an area as a pandas DataFrame\n \"\"\"\n datestamp = arrow.get(target_datetime).to('Asia/Tokyo').strftime('%Y%m%d')\n consumption_url = {\n 'JP-HKD': 'http://denkiyoho.hepco.co.jp/area/data/juyo_01_{}.csv'.format(datestamp),\n 'JP-TH': 'http://setsuden.tohoku-epco.co.jp/common/demand/juyo_02_{}.csv'.format(datestamp),\n 'JP-TK': 'http://www.tepco.co.jp/forecast/html/images/juyo-j.csv',\n 'JP-HR': 'http://www.rikuden.co.jp/denki-yoho/csv/juyo_05_{}.csv'.format(datestamp),\n 'JP-CB': 'http://denki-yoho.chuden.jp/denki_yoho_content_data/juyo_cepco003.csv',\n 'JP-KN': 'http://www.kepco.co.jp/yamasou/juyo1_kansai.csv',\n 'JP-CG': 'http://www.energia.co.jp/jukyuu/sys/juyo_07_{}.csv'.format(datestamp),\n 'JP-SK': 'http://www.yonden.co.jp/denkiyoho/juyo_shikoku.csv'\n }\n # First roughly 40 rows of the consumption files have hourly data,\n # the parser skips to the rows with 5-min actual values \n if zone_key == 'JP-KN':\n startrow = 44\n else:\n startrow = 42\n df = pd.read_csv(consumption_url[zone_key], skiprows=list(range(startrow)),\n encoding='shift-jis')\n df.columns = ['Date', 'Time', 'cons']\n # Convert \u4e07kW to MW\n df['cons'] = 10*df['cons']\n df = 
df.dropna()\n df['datetime'] = df.apply(parse_dt, axis=1)\n df = df[['datetime', 'cons']]\n return df\n\ndef parse_dt(row):\n \"\"\"\n Parses timestamps from date and time\n \"\"\"\n return arrow.get(' '.join([row['Date'], row['Time']]).replace('/', '-'),\n 'YYYY-M-D H:mm').replace(tzinfo='Asia/Tokyo').datetime\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n", "path": "parsers/JP.py"}]}
| 2,278 | 777 |
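The golden diff above adds a `fetch_price` function that pulls JEPX day-ahead spot prices for Japanese zones. A rough usage sketch, assuming it is run from the repository root (so `parsers` imports as a package) with network access to jepx.org; the zone and target date are illustrative assumptions:

```python
import datetime as dt

from parsers import JP

# Day-ahead JEPX spot prices for the Tokyo area; omitting target_datetime
# would default to "now + 1 day" per the patch.
prices = JP.fetch_price(zone_key="JP-TK", target_datetime=dt.datetime(2018, 8, 10))

for entry in prices:
    print(entry["datetime"], entry["price"], entry["currency"])
```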
gh_patches_debug_43349
|
rasdani/github-patches
|
git_diff
|
deepchecks__deepchecks-1278
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Maximum feature cardinality in WholeDatasetDrift is not configurable
**Describe the bug**
When running the WholeDatasetDrift on a dataset with a feature that has cardinality greater than 255, the following error is thrown by the HistGradientBoostingClassifier:
```
/usr/local/lib/python3.9/dist-packages/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py in _check_categories(self, X)
186
187 if categories.size > self.max_bins:
--> 188 raise ValueError(
189 f"Categorical feature at index {f_idx} is "
190 "expected to have a "
ValueError: Categorical feature at index 30 is expected to have a cardinality <= 255
```
There is no way to adjust the max_bins parameter of HistGradientBoostingClassifier. Alternatively, the feature cardinality could be reduced before training the HistGradientBoostingClassifier using the max_num_categories parameter of WholeDatasetDrift
</issue>
<code>
[start of deepchecks/core/check_utils/whole_dataset_drift_utils.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Module containing common WholeDatasetDriftCheck (domain classifier drift) utils."""
12
13 from typing import List, Optional
14 import warnings
15
16 import numpy as np
17 import pandas as pd
18
19 from sklearn.pipeline import Pipeline
20 from sklearn.compose import ColumnTransformer
21
22 with warnings.catch_warnings():
23 warnings.simplefilter('ignore')
24 from sklearn.experimental import enable_hist_gradient_boosting # noqa # pylint: disable=unused-import
25
26 from sklearn.ensemble import HistGradientBoostingClassifier
27 from sklearn.metrics import roc_auc_score
28 from sklearn.preprocessing import OrdinalEncoder
29 from sklearn.model_selection import train_test_split
30 import plotly.graph_objects as go
31
32 from deepchecks.tabular import Dataset
33 from deepchecks.utils.distribution.plot import feature_distribution_traces, drift_score_bar_traces
34 from deepchecks.utils.features import N_TOP_MESSAGE, calculate_feature_importance_or_none
35 from deepchecks.utils.function import run_available_kwargs
36 from deepchecks.utils.strings import format_percent
37 from deepchecks.utils.typing import Hashable
38
39
40 def run_whole_dataset_drift(train_dataframe: pd.DataFrame, test_dataframe: pd.DataFrame,
41 numerical_features: List[Hashable], cat_features: List[Hashable], sample_size: int,
42 random_state: int, test_size: float, n_top_columns: int, min_feature_importance: float,
43 max_num_categories: Optional[int], min_meaningful_drift_score: float):
44 """Calculate whole dataset drift."""
45 domain_classifier = generate_model(numerical_features, cat_features, random_state)
46
47 train_sample_df = train_dataframe.sample(sample_size, random_state=random_state)
48 test_sample_df = test_dataframe.sample(sample_size, random_state=random_state)
49
50 # create new dataset, with label denoting whether sample belongs to test dataset
51 domain_class_df = pd.concat([train_sample_df, test_sample_df])
52 domain_class_labels = pd.Series([0] * len(train_sample_df) + [1] * len(test_sample_df))
53
54 x_train, x_test, y_train, y_test = train_test_split(domain_class_df, domain_class_labels,
55 stratify=domain_class_labels,
56 random_state=random_state,
57 test_size=test_size)
58
59 domain_classifier = domain_classifier.fit(x_train, y_train)
60
61 y_test.name = 'belongs_to_test'
62 domain_test_dataset = Dataset(pd.concat([x_test.reset_index(drop=True), y_test.reset_index(drop=True)], axis=1),
63 cat_features=cat_features, label='belongs_to_test')
64
65 # calculate feature importance of domain_classifier, containing the information which features separate
66 # the dataset best.
67 fi, importance_type = calculate_feature_importance_or_none(
68 domain_classifier,
69 domain_test_dataset,
70 force_permutation=True,
71 permutation_kwargs={'n_repeats': 10, 'random_state': random_state, 'timeout': 120}
72 )
73
74 fi = fi.sort_values(ascending=False) if fi is not None else None
75
76 domain_classifier_auc = roc_auc_score(y_test, domain_classifier.predict_proba(x_test)[:, 1])
77 drift_score = auc_to_drift_score(domain_classifier_auc)
78
79 values_dict = {
80 'domain_classifier_auc': domain_classifier_auc,
81 'domain_classifier_drift_score': drift_score,
82 'domain_classifier_feature_importance': fi.to_dict() if fi is not None else {},
83 }
84
85 feature_importance_note = f"""
86 <span>
87 The percents of explained dataset difference are the importance values for the feature calculated
88 using `{importance_type}`.
89 </span><br><br>
90 """
91
92 if fi is not None and drift_score > min_meaningful_drift_score:
93 top_fi = fi.head(n_top_columns)
94 top_fi = top_fi.loc[top_fi > min_feature_importance]
95 else:
96 top_fi = None
97
98 if top_fi is not None and len(top_fi):
99 score = values_dict['domain_classifier_drift_score']
100
101 displays = [feature_importance_note, build_drift_plot(score),
102 '<h3>Main features contributing to drift</h3>',
103 N_TOP_MESSAGE % n_top_columns]
104 displays += [display_dist(train_sample_df[feature], test_sample_df[feature], top_fi, cat_features,
105 max_num_categories)
106 for feature in top_fi.index]
107 else:
108 displays = None
109
110 return values_dict, displays
111
112
113 def generate_model(numerical_columns: List[Hashable], categorical_columns: List[Hashable],
114 random_state: int = 42) -> Pipeline:
115 """Generate the unfitted Domain Classifier model."""
116 categorical_transformer = Pipeline(
117 steps=[('encoder', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value',
118 unknown_value=np.nan,
119 dtype=np.float64))]
120 )
121
122 preprocessor = ColumnTransformer(
123 transformers=[
124 ('num', 'passthrough', numerical_columns),
125 ('cat', categorical_transformer, categorical_columns),
126 ]
127 )
128
129 return Pipeline(
130 steps=[('preprocessing', preprocessor),
131 ('model', HistGradientBoostingClassifier(
132 max_depth=2, max_iter=10, random_state=random_state,
133 categorical_features=[False] * len(numerical_columns) + [True] * len(categorical_columns)
134 ))])
135
136
137 def auc_to_drift_score(auc: float) -> float:
138 """Calculate the drift score, which is 2*auc - 1, with auc being the auc of the Domain Classifier.
139
140 Parameters
141 ----------
142 auc : float
143 auc of the Domain Classifier
144 """
145 return max(2 * auc - 1, 0)
146
147
148 def build_drift_plot(score):
149 """Build traffic light drift plot."""
150 bar_traces, x_axis, y_axis = drift_score_bar_traces(score)
151 x_axis['title'] = 'Drift score'
152 drift_plot = go.Figure(layout=dict(
153 title='Drift Score - Whole Dataset Total',
154 xaxis=x_axis,
155 yaxis=y_axis,
156 width=700,
157 height=200
158
159 ))
160
161 drift_plot.add_traces(bar_traces)
162 return drift_plot
163
164
165 def display_dist(train_column: pd.Series, test_column: pd.Series, fi_ser: pd.Series, cat_features,
166 max_num_categories: int = 10):
167 """Display a distribution comparison plot for the given columns."""
168 column_name = train_column.name
169
170 title = f'Feature: {column_name} - Explains {format_percent(fi_ser.loc[column_name])} of dataset difference'
171 traces, xaxis_layout, yaxis_layout = \
172 feature_distribution_traces(train_column.dropna(),
173 test_column.dropna(),
174 column_name,
175 is_categorical=column_name in cat_features,
176 max_num_categories=max_num_categories)
177
178 figure = go.Figure(layout=go.Layout(
179 title=title,
180 xaxis=xaxis_layout,
181 yaxis=yaxis_layout,
182 legend=dict(
183 title='Dataset',
184 yanchor='top',
185 y=0.9,
186 xanchor='left'),
187 width=700,
188 height=300
189 ))
190
191 figure.add_traces(traces)
192
193 return figure
194
[end of deepchecks/core/check_utils/whole_dataset_drift_utils.py]
[start of deepchecks/utils/distribution/rare_category_encoder.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Module of RareCategoryEncoder."""
12 from typing import List, Optional
13 from collections import defaultdict
14
15 import pandas as pd
16
17 from deepchecks.utils.typing import Hashable
18
19
20 __all__ = ['RareCategoryEncoder']
21
22
23 class RareCategoryEncoder:
24 """Encodes rare categories into an "other" parameter.
25
26 Note that this encoder assumes data is received as a DataFrame.
27
28 Parameters
29 ----------
30 max_num_categories : int , default: 10
31 Indicates the maximum number of unique categories in a single categorical column
32 (rare categories will be changed to a form of "other")
33 cols : Optional[List[Hashable]] , default: None
34 Columns to limit the encoder to work on. If non are given will work on all columns given in `fit`
35 """
36
37 DEFAULT_OTHER_VALUE = 'OTHER_RARE_CATEGORY'
38
39 def __init__(
40 self,
41 max_num_categories: int = 10,
42 cols: Optional[List[Hashable]] = None
43 ):
44 self.max_num_categories = max_num_categories
45 self.cols = cols
46 self._col_mapping = None
47
48 def fit(self, data: pd.DataFrame):
49 """Fit the encoder using given dataframe.
50
51 Parameters
52 ----------
53 data : pd.DataFrame
54 data to fit from
55 """
56 if self.cols is not None:
57 self._col_mapping = data[self.cols].apply(self._fit_for_series, axis=0)
58 else:
59 self._col_mapping = data.apply(self._fit_for_series, axis=0)
60
61 def transform(self, data: pd.DataFrame):
62 """Transform given data according to columns processed in `fit`.
63
64 Parameters
65 ----------
66 data : pd.DataFrame
67 data to transform
68 Returns
69 -------
70 DataFrame
71 transformed data
72 """
73 if self._col_mapping is None:
74 raise RuntimeError('Cannot transform without fitting first')
75
76 if self.cols is not None:
77 data = data.copy()
78 data[self.cols] = data[self.cols].apply(lambda s: s.map(self._col_mapping[s.name]))
79 else:
80 data = data.apply(lambda s: s.map(self._col_mapping[s.name]))
81 return data
82
83 def fit_transform(self, data: pd.DataFrame):
84 """Run `fit` and `transform` on given data.
85
86 Parameters
87 ----------
88 data : pd.DataFrame
89 data to fit on and transform
90 Returns
91 -------
92 DataFrame
93 transformed data
94 """
95 self.fit(data)
96 return self.transform(data)
97
98 def _fit_for_series(self, series: pd.Series):
99 top_values = list(series.value_counts().head(self.max_num_categories).index)
100 other_value = self._get_unique_other_value(series)
101 mapper = pd.Series(defaultdict(lambda: other_value, {k: k for k in top_values}), name=series.name)
102 return mapper
103
104 def _get_unique_other_value(self, series: pd.Series):
105 unique_values = list(series.unique())
106 other = self.DEFAULT_OTHER_VALUE
107 i = 0
108 while other in unique_values:
109 other = self.DEFAULT_OTHER_VALUE + str(i)
110 i += 1
111 return other
112
[end of deepchecks/utils/distribution/rare_category_encoder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/deepchecks/core/check_utils/whole_dataset_drift_utils.py b/deepchecks/core/check_utils/whole_dataset_drift_utils.py
--- a/deepchecks/core/check_utils/whole_dataset_drift_utils.py
+++ b/deepchecks/core/check_utils/whole_dataset_drift_utils.py
@@ -31,6 +31,7 @@
from deepchecks.tabular import Dataset
from deepchecks.utils.distribution.plot import feature_distribution_traces, drift_score_bar_traces
+from deepchecks.utils.distribution.rare_category_encoder import RareCategoryEncoder
from deepchecks.utils.features import N_TOP_MESSAGE, calculate_feature_importance_or_none
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import format_percent
@@ -114,7 +115,8 @@
random_state: int = 42) -> Pipeline:
"""Generate the unfitted Domain Classifier model."""
categorical_transformer = Pipeline(
- steps=[('encoder', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value',
+ steps=[('rare', RareCategoryEncoder(254)),
+ ('encoder', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value',
unknown_value=np.nan,
dtype=np.float64))]
)
diff --git a/deepchecks/utils/distribution/rare_category_encoder.py b/deepchecks/utils/distribution/rare_category_encoder.py
--- a/deepchecks/utils/distribution/rare_category_encoder.py
+++ b/deepchecks/utils/distribution/rare_category_encoder.py
@@ -45,18 +45,24 @@
self.cols = cols
self._col_mapping = None
- def fit(self, data: pd.DataFrame):
+ def fit(self, data: pd.DataFrame, y=None): # noqa # pylint: disable=unused-argument
"""Fit the encoder using given dataframe.
Parameters
----------
data : pd.DataFrame
data to fit from
+ y :
+ Unused, but needed for sklearn pipeline
"""
+ self._col_mapping = {}
+
if self.cols is not None:
- self._col_mapping = data[self.cols].apply(self._fit_for_series, axis=0)
+ for col in self.cols:
+ self._col_mapping[col] = self._fit_for_series(data[col])
else:
- self._col_mapping = data.apply(self._fit_for_series, axis=0)
+ for col in data.columns:
+ self._col_mapping[col] = self._fit_for_series(data[col])
def transform(self, data: pd.DataFrame):
"""Transform given data according to columns processed in `fit`.
@@ -78,15 +84,18 @@
data[self.cols] = data[self.cols].apply(lambda s: s.map(self._col_mapping[s.name]))
else:
data = data.apply(lambda s: s.map(self._col_mapping[s.name]))
+
return data
- def fit_transform(self, data: pd.DataFrame):
+ def fit_transform(self, data: pd.DataFrame, y=None): # noqa # pylint: disable=unused-argument
"""Run `fit` and `transform` on given data.
Parameters
----------
data : pd.DataFrame
data to fit on and transform
+ y :
+ Unused, but needed for sklearn pipeline
Returns
-------
DataFrame
@@ -98,7 +107,7 @@
def _fit_for_series(self, series: pd.Series):
top_values = list(series.value_counts().head(self.max_num_categories).index)
other_value = self._get_unique_other_value(series)
- mapper = pd.Series(defaultdict(lambda: other_value, {k: k for k in top_values}), name=series.name)
+ mapper = defaultdict(lambda: other_value, {k: k for k in top_values})
return mapper
def _get_unique_other_value(self, series: pd.Series):
|
{"golden_diff": "diff --git a/deepchecks/core/check_utils/whole_dataset_drift_utils.py b/deepchecks/core/check_utils/whole_dataset_drift_utils.py\n--- a/deepchecks/core/check_utils/whole_dataset_drift_utils.py\n+++ b/deepchecks/core/check_utils/whole_dataset_drift_utils.py\n@@ -31,6 +31,7 @@\n \n from deepchecks.tabular import Dataset\n from deepchecks.utils.distribution.plot import feature_distribution_traces, drift_score_bar_traces\n+from deepchecks.utils.distribution.rare_category_encoder import RareCategoryEncoder\n from deepchecks.utils.features import N_TOP_MESSAGE, calculate_feature_importance_or_none\n from deepchecks.utils.function import run_available_kwargs\n from deepchecks.utils.strings import format_percent\n@@ -114,7 +115,8 @@\n random_state: int = 42) -> Pipeline:\n \"\"\"Generate the unfitted Domain Classifier model.\"\"\"\n categorical_transformer = Pipeline(\n- steps=[('encoder', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value',\n+ steps=[('rare', RareCategoryEncoder(254)),\n+ ('encoder', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value',\n unknown_value=np.nan,\n dtype=np.float64))]\n )\ndiff --git a/deepchecks/utils/distribution/rare_category_encoder.py b/deepchecks/utils/distribution/rare_category_encoder.py\n--- a/deepchecks/utils/distribution/rare_category_encoder.py\n+++ b/deepchecks/utils/distribution/rare_category_encoder.py\n@@ -45,18 +45,24 @@\n self.cols = cols\n self._col_mapping = None\n \n- def fit(self, data: pd.DataFrame):\n+ def fit(self, data: pd.DataFrame, y=None): # noqa # pylint: disable=unused-argument\n \"\"\"Fit the encoder using given dataframe.\n \n Parameters\n ----------\n data : pd.DataFrame\n data to fit from\n+ y :\n+ Unused, but needed for sklearn pipeline\n \"\"\"\n+ self._col_mapping = {}\n+\n if self.cols is not None:\n- self._col_mapping = data[self.cols].apply(self._fit_for_series, axis=0)\n+ for col in self.cols:\n+ self._col_mapping[col] = self._fit_for_series(data[col])\n else:\n- self._col_mapping = data.apply(self._fit_for_series, axis=0)\n+ for col in data.columns:\n+ self._col_mapping[col] = self._fit_for_series(data[col])\n \n def transform(self, data: pd.DataFrame):\n \"\"\"Transform given data according to columns processed in `fit`.\n@@ -78,15 +84,18 @@\n data[self.cols] = data[self.cols].apply(lambda s: s.map(self._col_mapping[s.name]))\n else:\n data = data.apply(lambda s: s.map(self._col_mapping[s.name]))\n+\n return data\n \n- def fit_transform(self, data: pd.DataFrame):\n+ def fit_transform(self, data: pd.DataFrame, y=None): # noqa # pylint: disable=unused-argument\n \"\"\"Run `fit` and `transform` on given data.\n \n Parameters\n ----------\n data : pd.DataFrame\n data to fit on and transform\n+ y :\n+ Unused, but needed for sklearn pipeline\n Returns\n -------\n DataFrame\n@@ -98,7 +107,7 @@\n def _fit_for_series(self, series: pd.Series):\n top_values = list(series.value_counts().head(self.max_num_categories).index)\n other_value = self._get_unique_other_value(series)\n- mapper = pd.Series(defaultdict(lambda: other_value, {k: k for k in top_values}), name=series.name)\n+ mapper = defaultdict(lambda: other_value, {k: k for k in top_values})\n return mapper\n \n def _get_unique_other_value(self, series: pd.Series):\n", "issue": "[BUG] Maximum feature cardinality in WholeDatasetDrift is not configurable\n**Describe the bug**\r\nWhen running the WholeDatasetDrift on a dataset with a feature that has cardinality greater then 255, the following error is thrown by the 
HistGradientBoostingClassifier:\r\n\r\n```\r\n/usr/local/lib/python3.9/dist-packages/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py in _check_categories(self, X)\r\n 186 \r\n 187 if categories.size > self.max_bins:\r\n--> 188 raise ValueError(\r\n 189 f\"Categorical feature at index {f_idx} is \"\r\n 190 \"expected to have a \"\r\n\r\nValueError: Categorical feature at index 30 is expected to have a cardinality <= 255\r\n```\r\n\r\nThere is no way to adjust the max_bins parameter of HistGradientBoostingClassifier. Alternatively, the feature cardinality could be reduced before training the HistGradientBoostingClassifier using the max_num_categories parameter of WholeDatasetDrift\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module containing common WholeDatasetDriftCheck (domain classifier drift) utils.\"\"\"\n\nfrom typing import List, Optional\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\n\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore')\n from sklearn.experimental import enable_hist_gradient_boosting # noqa # pylint: disable=unused-import\n\nfrom sklearn.ensemble import HistGradientBoostingClassifier\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.model_selection import train_test_split\nimport plotly.graph_objects as go\n\nfrom deepchecks.tabular import Dataset\nfrom deepchecks.utils.distribution.plot import feature_distribution_traces, drift_score_bar_traces\nfrom deepchecks.utils.features import N_TOP_MESSAGE, calculate_feature_importance_or_none\nfrom deepchecks.utils.function import run_available_kwargs\nfrom deepchecks.utils.strings import format_percent\nfrom deepchecks.utils.typing import Hashable\n\n\ndef run_whole_dataset_drift(train_dataframe: pd.DataFrame, test_dataframe: pd.DataFrame,\n numerical_features: List[Hashable], cat_features: List[Hashable], sample_size: int,\n random_state: int, test_size: float, n_top_columns: int, min_feature_importance: float,\n max_num_categories: Optional[int], min_meaningful_drift_score: float):\n \"\"\"Calculate whole dataset drift.\"\"\"\n domain_classifier = generate_model(numerical_features, cat_features, random_state)\n\n train_sample_df = train_dataframe.sample(sample_size, random_state=random_state)\n test_sample_df = test_dataframe.sample(sample_size, random_state=random_state)\n\n # create new dataset, with label denoting whether sample belongs to test dataset\n domain_class_df = pd.concat([train_sample_df, test_sample_df])\n domain_class_labels = pd.Series([0] * len(train_sample_df) + [1] * len(test_sample_df))\n\n x_train, x_test, y_train, y_test = train_test_split(domain_class_df, domain_class_labels,\n stratify=domain_class_labels,\n random_state=random_state,\n test_size=test_size)\n\n domain_classifier = domain_classifier.fit(x_train, y_train)\n\n y_test.name = 'belongs_to_test'\n domain_test_dataset = 
Dataset(pd.concat([x_test.reset_index(drop=True), y_test.reset_index(drop=True)], axis=1),\n cat_features=cat_features, label='belongs_to_test')\n\n # calculate feature importance of domain_classifier, containing the information which features separate\n # the dataset best.\n fi, importance_type = calculate_feature_importance_or_none(\n domain_classifier,\n domain_test_dataset,\n force_permutation=True,\n permutation_kwargs={'n_repeats': 10, 'random_state': random_state, 'timeout': 120}\n )\n\n fi = fi.sort_values(ascending=False) if fi is not None else None\n\n domain_classifier_auc = roc_auc_score(y_test, domain_classifier.predict_proba(x_test)[:, 1])\n drift_score = auc_to_drift_score(domain_classifier_auc)\n\n values_dict = {\n 'domain_classifier_auc': domain_classifier_auc,\n 'domain_classifier_drift_score': drift_score,\n 'domain_classifier_feature_importance': fi.to_dict() if fi is not None else {},\n }\n\n feature_importance_note = f\"\"\"\n <span>\n The percents of explained dataset difference are the importance values for the feature calculated\n using `{importance_type}`.\n </span><br><br>\n \"\"\"\n\n if fi is not None and drift_score > min_meaningful_drift_score:\n top_fi = fi.head(n_top_columns)\n top_fi = top_fi.loc[top_fi > min_feature_importance]\n else:\n top_fi = None\n\n if top_fi is not None and len(top_fi):\n score = values_dict['domain_classifier_drift_score']\n\n displays = [feature_importance_note, build_drift_plot(score),\n '<h3>Main features contributing to drift</h3>',\n N_TOP_MESSAGE % n_top_columns]\n displays += [display_dist(train_sample_df[feature], test_sample_df[feature], top_fi, cat_features,\n max_num_categories)\n for feature in top_fi.index]\n else:\n displays = None\n\n return values_dict, displays\n\n\ndef generate_model(numerical_columns: List[Hashable], categorical_columns: List[Hashable],\n random_state: int = 42) -> Pipeline:\n \"\"\"Generate the unfitted Domain Classifier model.\"\"\"\n categorical_transformer = Pipeline(\n steps=[('encoder', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value',\n unknown_value=np.nan,\n dtype=np.float64))]\n )\n\n preprocessor = ColumnTransformer(\n transformers=[\n ('num', 'passthrough', numerical_columns),\n ('cat', categorical_transformer, categorical_columns),\n ]\n )\n\n return Pipeline(\n steps=[('preprocessing', preprocessor),\n ('model', HistGradientBoostingClassifier(\n max_depth=2, max_iter=10, random_state=random_state,\n categorical_features=[False] * len(numerical_columns) + [True] * len(categorical_columns)\n ))])\n\n\ndef auc_to_drift_score(auc: float) -> float:\n \"\"\"Calculate the drift score, which is 2*auc - 1, with auc being the auc of the Domain Classifier.\n\n Parameters\n ----------\n auc : float\n auc of the Domain Classifier\n \"\"\"\n return max(2 * auc - 1, 0)\n\n\ndef build_drift_plot(score):\n \"\"\"Build traffic light drift plot.\"\"\"\n bar_traces, x_axis, y_axis = drift_score_bar_traces(score)\n x_axis['title'] = 'Drift score'\n drift_plot = go.Figure(layout=dict(\n title='Drift Score - Whole Dataset Total',\n xaxis=x_axis,\n yaxis=y_axis,\n width=700,\n height=200\n\n ))\n\n drift_plot.add_traces(bar_traces)\n return drift_plot\n\n\ndef display_dist(train_column: pd.Series, test_column: pd.Series, fi_ser: pd.Series, cat_features,\n max_num_categories: int = 10):\n \"\"\"Display a distribution comparison plot for the given columns.\"\"\"\n column_name = train_column.name\n\n title = f'Feature: {column_name} - Explains {format_percent(fi_ser.loc[column_name])} 
of dataset difference'\n traces, xaxis_layout, yaxis_layout = \\\n feature_distribution_traces(train_column.dropna(),\n test_column.dropna(),\n column_name,\n is_categorical=column_name in cat_features,\n max_num_categories=max_num_categories)\n\n figure = go.Figure(layout=go.Layout(\n title=title,\n xaxis=xaxis_layout,\n yaxis=yaxis_layout,\n legend=dict(\n title='Dataset',\n yanchor='top',\n y=0.9,\n xanchor='left'),\n width=700,\n height=300\n ))\n\n figure.add_traces(traces)\n\n return figure\n", "path": "deepchecks/core/check_utils/whole_dataset_drift_utils.py"}, {"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module of RareCategoryEncoder.\"\"\"\nfrom typing import List, Optional\nfrom collections import defaultdict\n\nimport pandas as pd\n\nfrom deepchecks.utils.typing import Hashable\n\n\n__all__ = ['RareCategoryEncoder']\n\n\nclass RareCategoryEncoder:\n \"\"\"Encodes rare categories into an \"other\" parameter.\n\n Note that this encoder assumes data is received as a DataFrame.\n\n Parameters\n ----------\n max_num_categories : int , default: 10\n Indicates the maximum number of unique categories in a single categorical column\n (rare categories will be changed to a form of \"other\")\n cols : Optional[List[Hashable]] , default: None\n Columns to limit the encoder to work on. 
If non are given will work on all columns given in `fit`\n \"\"\"\n\n DEFAULT_OTHER_VALUE = 'OTHER_RARE_CATEGORY'\n\n def __init__(\n self,\n max_num_categories: int = 10,\n cols: Optional[List[Hashable]] = None\n ):\n self.max_num_categories = max_num_categories\n self.cols = cols\n self._col_mapping = None\n\n def fit(self, data: pd.DataFrame):\n \"\"\"Fit the encoder using given dataframe.\n\n Parameters\n ----------\n data : pd.DataFrame\n data to fit from\n \"\"\"\n if self.cols is not None:\n self._col_mapping = data[self.cols].apply(self._fit_for_series, axis=0)\n else:\n self._col_mapping = data.apply(self._fit_for_series, axis=0)\n\n def transform(self, data: pd.DataFrame):\n \"\"\"Transform given data according to columns processed in `fit`.\n\n Parameters\n ----------\n data : pd.DataFrame\n data to transform\n Returns\n -------\n DataFrame\n transformed data\n \"\"\"\n if self._col_mapping is None:\n raise RuntimeError('Cannot transform without fitting first')\n\n if self.cols is not None:\n data = data.copy()\n data[self.cols] = data[self.cols].apply(lambda s: s.map(self._col_mapping[s.name]))\n else:\n data = data.apply(lambda s: s.map(self._col_mapping[s.name]))\n return data\n\n def fit_transform(self, data: pd.DataFrame):\n \"\"\"Run `fit` and `transform` on given data.\n\n Parameters\n ----------\n data : pd.DataFrame\n data to fit on and transform\n Returns\n -------\n DataFrame\n transformed data\n \"\"\"\n self.fit(data)\n return self.transform(data)\n\n def _fit_for_series(self, series: pd.Series):\n top_values = list(series.value_counts().head(self.max_num_categories).index)\n other_value = self._get_unique_other_value(series)\n mapper = pd.Series(defaultdict(lambda: other_value, {k: k for k in top_values}), name=series.name)\n return mapper\n\n def _get_unique_other_value(self, series: pd.Series):\n unique_values = list(series.unique())\n other = self.DEFAULT_OTHER_VALUE\n i = 0\n while other in unique_values:\n other = self.DEFAULT_OTHER_VALUE + str(i)\n i += 1\n return other\n", "path": "deepchecks/utils/distribution/rare_category_encoder.py"}]}
| 3,906 | 848 |
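The diff above caps categorical cardinality by inserting a `RareCategoryEncoder(254)` step ahead of the ordinal encoder and makes the encoder usable inside an sklearn pipeline. A small standalone sketch of the patched encoder on toy data; the column names and values are made up for illustration:

```python
import pandas as pd

from deepchecks.utils.distribution.rare_category_encoder import RareCategoryEncoder

# Toy frame with one high-cardinality column.
df = pd.DataFrame({
    "city": [f"city_{i % 300}" for i in range(1000)],
    "color": ["red", "green", "blue", "blue"] * 250,
})

# Keep the 254 most frequent categories per column, as the patched
# domain-classifier pipeline does; rarer values become OTHER_RARE_CATEGORY.
encoder = RareCategoryEncoder(max_num_categories=254)
encoded = encoder.fit_transform(df)

print(encoded["city"].nunique())  # at most 255 distinct values (254 + "other")
```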
gh_patches_debug_22042
|
rasdani/github-patches
|
git_diff
|
MycroftAI__mycroft-core-2706
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Idea: Enhance Amazon Polly support
Amazon Polly works well using standard voices, I have it running perfectly under the latest Picroft image. However, there is no current support for 'neural' engine voices, as well as 'conversational' style SSML. Both of these provide exceptionally high quality text-to-speech audio and would be nice to have the ability to use with Mycroft.
This [post](https://community.mycroft.ai/t/regarding-polly-tts-support/8722/10) on the community forums goes into a little more detail on it.
Thanks!
</issue>
<code>
[start of mycroft/tts/polly_tts.py]
1 # Copyright 2017 Mycroft AI Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 from mycroft.tts.tts import TTS, TTSValidator
16 from mycroft.configuration import Configuration
17
18
19 class PollyTTS(TTS):
20 def __init__(self, lang="en-us", config=None):
21 import boto3
22 config = config or Configuration.get().get("tts", {}).get("polly", {})
23 super(PollyTTS, self).__init__(lang, config, PollyTTSValidator(self),
24 audio_ext="mp3",
25 ssml_tags=["speak", "say-as", "voice",
26 "prosody", "break",
27 "emphasis", "sub", "lang",
28 "phoneme", "w", "whisper",
29 "amazon:auto-breaths",
30 "p", "s", "amazon:effect",
31 "mark"])
32
33 self.voice = self.config.get("voice", "Matthew")
34 self.key_id = self.config.get("access_key_id", '')
35 self.key = self.config.get("secret_access_key", '')
36 self.region = self.config.get("region", 'us-east-1')
37 self.polly = boto3.Session(aws_access_key_id=self.key_id,
38 aws_secret_access_key=self.key,
39 region_name=self.region).client('polly')
40
41 def get_tts(self, sentence, wav_file):
42 text_type = "text"
43 if self.remove_ssml(sentence) != sentence:
44 text_type = "ssml"
45 sentence = sentence \
46 .replace("\\whispered", "/amazon:effect") \
47 .replace("whispered", "amazon:effect name=\"whispered\"")
48 response = self.polly.synthesize_speech(
49 OutputFormat=self.audio_ext,
50 Text=sentence,
51 TextType=text_type,
52 VoiceId=self.voice)
53
54 with open(wav_file, 'wb') as f:
55 f.write(response['AudioStream'].read())
56 return (wav_file, None) # No phonemes
57
58 def describe_voices(self, language_code="en-US"):
59 if language_code.islower():
60 a, b = language_code.split("-")
61 b = b.upper()
62 language_code = "-".join([a, b])
63 # example 'it-IT' useful to retrieve voices
64 voices = self.polly.describe_voices(LanguageCode=language_code)
65
66 return voices
67
68
69 class PollyTTSValidator(TTSValidator):
70 def __init__(self, tts):
71 super(PollyTTSValidator, self).__init__(tts)
72
73 def validate_lang(self):
74 # TODO
75 pass
76
77 def validate_dependencies(self):
78 try:
79 from boto3 import Session
80 except ImportError:
81 raise Exception(
82 'PollyTTS dependencies not installed, please run pip install '
83 'boto3 ')
84
85 def validate_connection(self):
86 try:
87 if not self.tts.voice:
88 raise Exception("Polly TTS Voice not configured")
89 output = self.tts.describe_voices()
90 except TypeError:
91 raise Exception(
92 'PollyTTS server could not be verified. Please check your '
93 'internet connection and credentials.')
94
95 def get_tts_class(self):
96 return PollyTTS
97
[end of mycroft/tts/polly_tts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mycroft/tts/polly_tts.py b/mycroft/tts/polly_tts.py
--- a/mycroft/tts/polly_tts.py
+++ b/mycroft/tts/polly_tts.py
@@ -34,6 +34,7 @@
self.key_id = self.config.get("access_key_id", '')
self.key = self.config.get("secret_access_key", '')
self.region = self.config.get("region", 'us-east-1')
+ self.engine = self.config.get("engine", "standard")
self.polly = boto3.Session(aws_access_key_id=self.key_id,
aws_secret_access_key=self.key,
region_name=self.region).client('polly')
@@ -49,7 +50,8 @@
OutputFormat=self.audio_ext,
Text=sentence,
TextType=text_type,
- VoiceId=self.voice)
+ VoiceId=self.voice,
+ Engine=self.engine)
with open(wav_file, 'wb') as f:
f.write(response['AudioStream'].read())
|
{"golden_diff": "diff --git a/mycroft/tts/polly_tts.py b/mycroft/tts/polly_tts.py\n--- a/mycroft/tts/polly_tts.py\n+++ b/mycroft/tts/polly_tts.py\n@@ -34,6 +34,7 @@\n self.key_id = self.config.get(\"access_key_id\", '')\n self.key = self.config.get(\"secret_access_key\", '')\n self.region = self.config.get(\"region\", 'us-east-1')\n+ self.engine = self.config.get(\"engine\", \"standard\")\n self.polly = boto3.Session(aws_access_key_id=self.key_id,\n aws_secret_access_key=self.key,\n region_name=self.region).client('polly')\n@@ -49,7 +50,8 @@\n OutputFormat=self.audio_ext,\n Text=sentence,\n TextType=text_type,\n- VoiceId=self.voice)\n+ VoiceId=self.voice,\n+ Engine=self.engine)\n \n with open(wav_file, 'wb') as f:\n f.write(response['AudioStream'].read())\n", "issue": "Idea: Enhance Amazon Polly support\nAmazon Polly works well using standard voices, I have it running perfectly under the latest Picroft image. However, there is no current support for 'neural' engine voices, as well as 'conversational' style SSML. Both of these provide exceptionally high quality text-to-speech audio and would be nice to have the ability to use with Mycroft.\r\n\r\nThis [post](https://community.mycroft.ai/t/regarding-polly-tts-support/8722/10) on the community forums goes in to a little more detail on it.\r\n\r\nThanks!\n", "before_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom mycroft.tts.tts import TTS, TTSValidator\nfrom mycroft.configuration import Configuration\n\n\nclass PollyTTS(TTS):\n def __init__(self, lang=\"en-us\", config=None):\n import boto3\n config = config or Configuration.get().get(\"tts\", {}).get(\"polly\", {})\n super(PollyTTS, self).__init__(lang, config, PollyTTSValidator(self),\n audio_ext=\"mp3\",\n ssml_tags=[\"speak\", \"say-as\", \"voice\",\n \"prosody\", \"break\",\n \"emphasis\", \"sub\", \"lang\",\n \"phoneme\", \"w\", \"whisper\",\n \"amazon:auto-breaths\",\n \"p\", \"s\", \"amazon:effect\",\n \"mark\"])\n\n self.voice = self.config.get(\"voice\", \"Matthew\")\n self.key_id = self.config.get(\"access_key_id\", '')\n self.key = self.config.get(\"secret_access_key\", '')\n self.region = self.config.get(\"region\", 'us-east-1')\n self.polly = boto3.Session(aws_access_key_id=self.key_id,\n aws_secret_access_key=self.key,\n region_name=self.region).client('polly')\n\n def get_tts(self, sentence, wav_file):\n text_type = \"text\"\n if self.remove_ssml(sentence) != sentence:\n text_type = \"ssml\"\n sentence = sentence \\\n .replace(\"\\\\whispered\", \"/amazon:effect\") \\\n .replace(\"whispered\", \"amazon:effect name=\\\"whispered\\\"\")\n response = self.polly.synthesize_speech(\n OutputFormat=self.audio_ext,\n Text=sentence,\n TextType=text_type,\n VoiceId=self.voice)\n\n with open(wav_file, 'wb') as f:\n f.write(response['AudioStream'].read())\n return (wav_file, None) # No phonemes\n\n def describe_voices(self, language_code=\"en-US\"):\n if language_code.islower():\n a, b = language_code.split(\"-\")\n b = 
b.upper()\n language_code = \"-\".join([a, b])\n # example 'it-IT' useful to retrieve voices\n voices = self.polly.describe_voices(LanguageCode=language_code)\n\n return voices\n\n\nclass PollyTTSValidator(TTSValidator):\n def __init__(self, tts):\n super(PollyTTSValidator, self).__init__(tts)\n\n def validate_lang(self):\n # TODO\n pass\n\n def validate_dependencies(self):\n try:\n from boto3 import Session\n except ImportError:\n raise Exception(\n 'PollyTTS dependencies not installed, please run pip install '\n 'boto3 ')\n\n def validate_connection(self):\n try:\n if not self.tts.voice:\n raise Exception(\"Polly TTS Voice not configured\")\n output = self.tts.describe_voices()\n except TypeError:\n raise Exception(\n 'PollyTTS server could not be verified. Please check your '\n 'internet connection and credentials.')\n\n def get_tts_class(self):\n return PollyTTS\n", "path": "mycroft/tts/polly_tts.py"}]}
| 1,666 | 227 |
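The Polly patch above simply threads a new `engine` config key through to `synthesize_speech`. Below is a minimal sketch of driving the patched class with that key; the voice name and credential values are placeholders, and it assumes a mycroft-core checkout that already includes the change plus working AWS credentials.

```python
# Hypothetical usage of the patched PollyTTS; credentials and voice are placeholders.
from mycroft.tts.polly_tts import PollyTTS

config = {
    "voice": "Joanna",               # pick a voice that supports the chosen engine
    "access_key_id": "AKIA...",      # placeholder AWS key id
    "secret_access_key": "...",      # placeholder AWS secret
    "region": "us-east-1",
    "engine": "neural",              # new key read by the patch; defaults to "standard"
}

tts = PollyTTS(lang="en-us", config=config)
tts.get_tts("Hello from the neural engine.", "hello.mp3")  # writes hello.mp3
```

The "conversational" SSML style mentioned in the issue is not covered by this diff; it only adds engine selection.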
gh_patches_debug_22555 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-967 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
gh: prefix doesn't work anymore
* Cookiecutter version: 1.5.1
* Template project url: `gh:*`
* Python version: 2.7.13
* Operating System: Linux
### Description:
cookiecutter does not honor prefixes anymore.
### What I've run:
Simply testing the example from the README doesn't work as expected:
``` bash
$ cookiecutter gh:audreyr/cookiecutter-pypackage
A valid repository for "gh:audreyr/cookiecutter-pypackage" could not be found in the following locations:
gh:audreyr/cookiecutter-pypackage
/home/me/.cookiecutters/gh:audreyr/cookiecutter-pypackage
```
The same commands using the full repository path works as expected:
```bash
$ cookiecutter https://github.com/audreyr/cookiecutter-pypackage
```
</issue>
<code>
[start of cookiecutter/config.py]
1 # -*- coding: utf-8 -*-
2
3 """Global configuration handling."""
4
5 from __future__ import unicode_literals
6 import copy
7 import logging
8 import os
9 import io
10
11 import poyo
12
13 from .exceptions import ConfigDoesNotExistException
14 from .exceptions import InvalidConfiguration
15
16
17 logger = logging.getLogger(__name__)
18
19 USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')
20
21 BUILTIN_ABBREVIATIONS = {
22 'gh': 'https://github.com/{0}.git',
23 'gl': 'https://gitlab.com/{0}.git',
24 'bb': 'https://bitbucket.org/{0}',
25 }
26
27 DEFAULT_CONFIG = {
28 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),
29 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),
30 'default_context': {},
31 'abbreviations': BUILTIN_ABBREVIATIONS,
32 }
33
34
35 def _expand_path(path):
36 """Expand both environment variables and user home in the given path."""
37 path = os.path.expandvars(path)
38 path = os.path.expanduser(path)
39 return path
40
41
42 def get_config(config_path):
43 """Retrieve the config from the specified path, returning a config dict."""
44 if not os.path.exists(config_path):
45 raise ConfigDoesNotExistException
46
47 logger.debug('config_path is {0}'.format(config_path))
48 with io.open(config_path, encoding='utf-8') as file_handle:
49 try:
50 yaml_dict = poyo.parse_string(file_handle.read())
51 except poyo.exceptions.PoyoException as e:
52 raise InvalidConfiguration(
53 'Unable to parse YAML file {}. Error: {}'
54 ''.format(config_path, e)
55 )
56
57 config_dict = copy.copy(DEFAULT_CONFIG)
58 config_dict.update(yaml_dict)
59
60 raw_replay_dir = config_dict['replay_dir']
61 config_dict['replay_dir'] = _expand_path(raw_replay_dir)
62
63 raw_cookies_dir = config_dict['cookiecutters_dir']
64 config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir)
65
66 return config_dict
67
68
69 def get_user_config(config_file=None, default_config=False):
70 """Return the user config as a dict.
71
72 If ``default_config`` is True, ignore ``config_file`` and return default
73 values for the config parameters.
74
75 If a path to a ``config_file`` is given, that is different from the default
76 location, load the user config from that.
77
78 Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG``
79 environment variable. If set, load the config from this path. This will
80 raise an error if the specified path is not valid.
81
82 If the environment variable is not set, try the default config file path
83 before falling back to the default config values.
84 """
85 # Do NOT load a config. Return defaults instead.
86 if default_config:
87 return copy.copy(DEFAULT_CONFIG)
88
89 # Load the given config file
90 if config_file and config_file is not USER_CONFIG_PATH:
91 return get_config(config_file)
92
93 try:
94 # Does the user set up a config environment variable?
95 env_config_file = os.environ['COOKIECUTTER_CONFIG']
96 except KeyError:
97 # Load an optional user config if it exists
98 # otherwise return the defaults
99 if os.path.exists(USER_CONFIG_PATH):
100 return get_config(USER_CONFIG_PATH)
101 else:
102 return copy.copy(DEFAULT_CONFIG)
103 else:
104 # There is a config environment variable. Try to load it.
105 # Do not check for existence, so invalid file paths raise an error.
106 return get_config(env_config_file)
107
[end of cookiecutter/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cookiecutter/config.py b/cookiecutter/config.py
--- a/cookiecutter/config.py
+++ b/cookiecutter/config.py
@@ -39,6 +39,25 @@
return path
+def merge_configs(default, overwrite):
+ """Recursively update a dict with the key/value pair of another.
+
+ Dict values that are dictionaries themselves will be updated, whilst
+ preserving existing keys.
+ """
+ new_config = copy.deepcopy(default)
+
+ for k, v in overwrite.items():
+ # Make sure to preserve existing items in
+ # nested dicts, for example `abbreviations`
+ if isinstance(v, dict):
+ new_config[k] = merge_configs(default[k], v)
+ else:
+ new_config[k] = v
+
+ return new_config
+
+
def get_config(config_path):
"""Retrieve the config from the specified path, returning a config dict."""
if not os.path.exists(config_path):
@@ -54,8 +73,7 @@
''.format(config_path, e)
)
- config_dict = copy.copy(DEFAULT_CONFIG)
- config_dict.update(yaml_dict)
+ config_dict = merge_configs(DEFAULT_CONFIG, yaml_dict)
raw_replay_dir = config_dict['replay_dir']
config_dict['replay_dir'] = _expand_path(raw_replay_dir)
|
{"golden_diff": "diff --git a/cookiecutter/config.py b/cookiecutter/config.py\n--- a/cookiecutter/config.py\n+++ b/cookiecutter/config.py\n@@ -39,6 +39,25 @@\n return path\n \n \n+def merge_configs(default, overwrite):\n+ \"\"\"Recursively update a dict with the key/value pair of another.\n+\n+ Dict values that are dictionaries themselves will be updated, whilst\n+ preserving existing keys.\n+ \"\"\"\n+ new_config = copy.deepcopy(default)\n+\n+ for k, v in overwrite.items():\n+ # Make sure to preserve existing items in\n+ # nested dicts, for example `abbreviations`\n+ if isinstance(v, dict):\n+ new_config[k] = merge_configs(default[k], v)\n+ else:\n+ new_config[k] = v\n+\n+ return new_config\n+\n+\n def get_config(config_path):\n \"\"\"Retrieve the config from the specified path, returning a config dict.\"\"\"\n if not os.path.exists(config_path):\n@@ -54,8 +73,7 @@\n ''.format(config_path, e)\n )\n \n- config_dict = copy.copy(DEFAULT_CONFIG)\n- config_dict.update(yaml_dict)\n+ config_dict = merge_configs(DEFAULT_CONFIG, yaml_dict)\n \n raw_replay_dir = config_dict['replay_dir']\n config_dict['replay_dir'] = _expand_path(raw_replay_dir)\n", "issue": "gh: prefix doesn't work anymore\n* Cookiecutter version: 1.5.1\r\n* Template project url: `gh:*`\r\n* Python version: 2.7.13\r\n* Operating System: Linux\r\n\r\n### Description:\r\n\r\ncookiecutter does not honor prefixes anymore.\r\n\r\n### What I've run:\r\n\r\nSimply testing the example from the README doesn't work as expected:\r\n\r\n``` bash\r\n$ cookiecutter gh:audreyr/cookiecutter-pypackage\r\nA valid repository for \"gh:audreyr/cookiecutter-pypackage\" could not be found in the following locations:\r\ngh:audreyr/cookiecutter-pypackage\r\n/home/me/.cookiecutters/gh:audreyr/cookiecutter-pypackage\r\n```\r\nThe same commands using the full repository path works as expected:\r\n\r\n```bash\r\n$ cookiecutter https://github.com/audreyr/cookiecutter-pypackage\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Global configuration handling.\"\"\"\n\nfrom __future__ import unicode_literals\nimport copy\nimport logging\nimport os\nimport io\n\nimport poyo\n\nfrom .exceptions import ConfigDoesNotExistException\nfrom .exceptions import InvalidConfiguration\n\n\nlogger = logging.getLogger(__name__)\n\nUSER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')\n\nBUILTIN_ABBREVIATIONS = {\n 'gh': 'https://github.com/{0}.git',\n 'gl': 'https://gitlab.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\nDEFAULT_CONFIG = {\n 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),\n 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),\n 'default_context': {},\n 'abbreviations': BUILTIN_ABBREVIATIONS,\n}\n\n\ndef _expand_path(path):\n \"\"\"Expand both environment variables and user home in the given path.\"\"\"\n path = os.path.expandvars(path)\n path = os.path.expanduser(path)\n return path\n\n\ndef get_config(config_path):\n \"\"\"Retrieve the config from the specified path, returning a config dict.\"\"\"\n if not os.path.exists(config_path):\n raise ConfigDoesNotExistException\n\n logger.debug('config_path is {0}'.format(config_path))\n with io.open(config_path, encoding='utf-8') as file_handle:\n try:\n yaml_dict = poyo.parse_string(file_handle.read())\n except poyo.exceptions.PoyoException as e:\n raise InvalidConfiguration(\n 'Unable to parse YAML file {}. 
Error: {}'\n ''.format(config_path, e)\n )\n\n config_dict = copy.copy(DEFAULT_CONFIG)\n config_dict.update(yaml_dict)\n\n raw_replay_dir = config_dict['replay_dir']\n config_dict['replay_dir'] = _expand_path(raw_replay_dir)\n\n raw_cookies_dir = config_dict['cookiecutters_dir']\n config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir)\n\n return config_dict\n\n\ndef get_user_config(config_file=None, default_config=False):\n \"\"\"Return the user config as a dict.\n\n If ``default_config`` is True, ignore ``config_file`` and return default\n values for the config parameters.\n\n If a path to a ``config_file`` is given, that is different from the default\n location, load the user config from that.\n\n Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG``\n environment variable. If set, load the config from this path. This will\n raise an error if the specified path is not valid.\n\n If the environment variable is not set, try the default config file path\n before falling back to the default config values.\n \"\"\"\n # Do NOT load a config. Return defaults instead.\n if default_config:\n return copy.copy(DEFAULT_CONFIG)\n\n # Load the given config file\n if config_file and config_file is not USER_CONFIG_PATH:\n return get_config(config_file)\n\n try:\n # Does the user set up a config environment variable?\n env_config_file = os.environ['COOKIECUTTER_CONFIG']\n except KeyError:\n # Load an optional user config if it exists\n # otherwise return the defaults\n if os.path.exists(USER_CONFIG_PATH):\n return get_config(USER_CONFIG_PATH)\n else:\n return copy.copy(DEFAULT_CONFIG)\n else:\n # There is a config environment variable. Try to load it.\n # Do not check for existence, so invalid file paths raise an error.\n return get_config(env_config_file)\n", "path": "cookiecutter/config.py"}]}
| 1,728 | 305 |
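The key move in the cookiecutter patch is replacing the flat `dict.update` with a recursive `merge_configs`, so nested defaults such as the built-in abbreviations survive a partial user override. A self-contained sketch of that behaviour, with the function body taken from the diff and a made-up user config:

```python
import copy

def merge_configs(default, overwrite):
    """Recursive merge as introduced by the patch above."""
    new_config = copy.deepcopy(default)
    for k, v in overwrite.items():
        if isinstance(v, dict):
            new_config[k] = merge_configs(default[k], v)
        else:
            new_config[k] = v
    return new_config

DEFAULTS = {
    "abbreviations": {
        "gh": "https://github.com/{0}.git",
        "gl": "https://gitlab.com/{0}.git",
        "bb": "https://bitbucket.org/{0}",
    }
}
# Hypothetical ~/.cookiecutterrc content that only adds one abbreviation.
user_rc = {"abbreviations": {"my": "https://example.org/{0}.git"}}

merged = merge_configs(DEFAULTS, user_rc)
assert merged["abbreviations"]["gh"] == "https://github.com/{0}.git"   # built-in kept
assert merged["abbreviations"]["my"] == "https://example.org/{0}.git"  # override added

# With the old copy.copy + update, "abbreviations" would have been replaced
# wholesale and the gh:/gl:/bb: prefixes lost, which is consistent with the
# reported gh: failure.
```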
gh_patches_debug_4279 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-3345 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reddit Movie Dialog no longer exists
**Bug description**
[Reddit Movie Dialog](https://parl.ai/docs/tasks.html#movie-dialog-reddit) no longer exists.
**Reproduction steps**
```
TrainModel.main(
# similar to before
task='empathetic_dialogues,blended_skill_talk,movie_dialog_reddit,convai2,persona_chat',
model='transformer/generator',
model_file='from_pretrained/model',
# initialize with a pretrained model
init_model='zoo:tutorial_transformer_generator/model',
# arguments we get from the pretrained model.
# Unfortunately, these must be looked up separately for each model.
n_heads=16, n_layers=8, n_positions=512, text_truncate=512,
label_truncate=128, ffn_size=2048, embedding_size=512,
activation='gelu', variant='xlm',
dict_lower=True, dict_tokenizer='bpe',
dict_file='zoo:tutorial_transformer_generator/model.dict',
learn_positional_embeddings=True,
# some training arguments, specific to this fine-tuning
# use a small learning rate with ADAM optimizer
lr=1e-5, optimizer='adam',
warmup_updates=100,
# early stopping on perplexity
validation_metric='ppl',
# train at most 10 minutes, and validate every 0.25 epochs
max_train_time=600, validation_every_n_epochs=0.25,
# depend on your gpu. If you have a V100, this is good
batchsize=12, fp16=True, fp16_impl='mem_efficient',
# speeds up validation
skip_generation=True,
# helps us cram more examples into our gpu at a time
dynamic_batching='full',
)
```
**Logs**
Please paste the command line output:
```
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-39-ff3044de39fe> in <module>()
36
37 # helps us cram more examples into our gpu at a time
---> 38 dynamic_batching='full',
39 )
15 frames
/usr/lib/python3.6/importlib/_bootstrap.py in _find_and_load_unlocked(name, import_)
ModuleNotFoundError: No module named 'parlai.tasks.movie_dialog_reddit'
---------------------------------------------------------------------------
NOTE: If your import is failing due to a missing package, you can
manually install dependencies using either !pip or !apt.
To view examples of installing some common dependencies, click the
"Open Examples" button below.
---------------------------------------------------------------------------```
</issue>
<code>
[start of docs/source/generate_task_list.py]
1 #!/usr/bin/env python3
2 # Copyright (c) Facebook, Inc. and its affiliates.
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 from parlai.tasks.task_list import task_list
7
8 MASTER = "https://github.com/facebookresearch/ParlAI/tree/master"
9
10 category_order = ['QA', 'Cloze', 'Goal', 'ChitChat', 'Negotiation', 'Visual', 'decanlp']
11 category_task_list = {x: [] for x in category_order}
12
13 fout = open('task_list.inc', 'w')
14
15 s = "They consist of: "
16 for t in category_order:
17 fout.write(f"1. {t} tasks\n")
18 fout.write("\n")
19
20 for task_dict in task_list:
21 tags = task_dict.get('tags', None)
22 for tag in tags:
23 if tag in category_task_list:
24 category_task_list[tag].append(task_dict)
25
26 for num_category, (category, tl) in enumerate(category_task_list.items()):
27 if num_category != 0:
28 fout.write("\n-----\n\n")
29
30 fout.write(f'## {category} Tasks\n')
31
32 for task_dict in tl:
33 id = task_dict.get('id', None)
34 display_name = task_dict.get('display_name', None)
35 task = task_dict.get('task', None)
36 tags = task_dict.get('tags', None)
37 description = task_dict.get('description', None)
38 notes = task_dict.get('notes', None)
39 code_urlend = task[: max(task.find(':'), len(task))]
40 code_url = f"{MASTER}/parlai/tasks/{code_urlend}"
41 links = task_dict.get("links", {})
42 assert isinstance(links, dict), f"task {id} is poorly formatted"
43 urls = [(k, v) for k, v in links.items()]
44 urls.append(("code", code_url))
45
46 urls_md = ", ".join(f"[{k}]({v})" for k, v in urls)
47 fout.write(f"### {display_name}\n")
48 fout.write(f"_Links_: {urls_md}\n\n")
49 if description:
50 fout.write(description + "\n")
51 if notes:
52 fout.write(":::{admonition,note} Notes\n")
53 fout.write(notes + "\n")
54 fout.write(":::\n")
55 fout.write("\n\n")
56
57 fout.close()
58
[end of docs/source/generate_task_list.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/source/generate_task_list.py b/docs/source/generate_task_list.py
--- a/docs/source/generate_task_list.py
+++ b/docs/source/generate_task_list.py
@@ -45,6 +45,7 @@
urls_md = ", ".join(f"[{k}]({v})" for k, v in urls)
fout.write(f"### {display_name}\n")
+ fout.write(f"_Usage_: `--task {task}`\n\n")
fout.write(f"_Links_: {urls_md}\n\n")
if description:
fout.write(description + "\n")
|
{"golden_diff": "diff --git a/docs/source/generate_task_list.py b/docs/source/generate_task_list.py\n--- a/docs/source/generate_task_list.py\n+++ b/docs/source/generate_task_list.py\n@@ -45,6 +45,7 @@\n \n urls_md = \", \".join(f\"[{k}]({v})\" for k, v in urls)\n fout.write(f\"### {display_name}\\n\")\n+ fout.write(f\"_Usage_: `--task {task}`\\n\\n\")\n fout.write(f\"_Links_: {urls_md}\\n\\n\")\n if description:\n fout.write(description + \"\\n\")\n", "issue": "Reddit Movie Dialog no longer exists\n**Bug description**\r\n[Reddit Movie Dialog](https://parl.ai/docs/tasks.html#movie-dialog-reddit) no longer exists.\r\n\r\n**Reproduction steps**\r\n```\r\nTrainModel.main(\r\n # similar to before\r\n task='empathetic_dialogues,blended_skill_talk,movie_dialog_reddit,convai2,persona_chat', \r\n model='transformer/generator',\r\n model_file='from_pretrained/model',\r\n \r\n # initialize with a pretrained model\r\n init_model='zoo:tutorial_transformer_generator/model',\r\n \r\n # arguments we get from the pretrained model.\r\n # Unfortunately, these must be looked up separately for each model.\r\n n_heads=16, n_layers=8, n_positions=512, text_truncate=512,\r\n label_truncate=128, ffn_size=2048, embedding_size=512,\r\n activation='gelu', variant='xlm',\r\n dict_lower=True, dict_tokenizer='bpe',\r\n dict_file='zoo:tutorial_transformer_generator/model.dict',\r\n learn_positional_embeddings=True,\r\n \r\n # some training arguments, specific to this fine-tuning\r\n # use a small learning rate with ADAM optimizer\r\n lr=1e-5, optimizer='adam',\r\n warmup_updates=100,\r\n # early stopping on perplexity\r\n validation_metric='ppl',\r\n # train at most 10 minutes, and validate every 0.25 epochs\r\n max_train_time=600, validation_every_n_epochs=0.25,\r\n \r\n # depend on your gpu. If you have a V100, this is good\r\n batchsize=12, fp16=True, fp16_impl='mem_efficient',\r\n \r\n # speeds up validation\r\n skip_generation=True,\r\n \r\n # helps us cram more examples into our gpu at a time\r\n dynamic_batching='full',\r\n)\r\n```\r\n\r\n**Logs**\r\nPlease paste the command line output:\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nModuleNotFoundError Traceback (most recent call last)\r\n<ipython-input-39-ff3044de39fe> in <module>()\r\n 36 \r\n 37 # helps us cram more examples into our gpu at a time\r\n---> 38 dynamic_batching='full',\r\n 39 )\r\n\r\n15 frames\r\n/usr/lib/python3.6/importlib/_bootstrap.py in _find_and_load_unlocked(name, import_)\r\n\r\nModuleNotFoundError: No module named 'parlai.tasks.movie_dialog_reddit'\r\n\r\n---------------------------------------------------------------------------\r\nNOTE: If your import is failing due to a missing package, you can\r\nmanually install dependencies using either !pip or !apt.\r\n\r\nTo view examples of installing some common dependencies, click the\r\n\"Open Examples\" button below.\r\n---------------------------------------------------------------------------```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom parlai.tasks.task_list import task_list\n\nMASTER = \"https://github.com/facebookresearch/ParlAI/tree/master\"\n\ncategory_order = ['QA', 'Cloze', 'Goal', 'ChitChat', 'Negotiation', 'Visual', 'decanlp']\ncategory_task_list = {x: [] for x in category_order}\n\nfout = open('task_list.inc', 'w')\n\ns = \"They consist of: \"\nfor t in category_order:\n fout.write(f\"1. {t} tasks\\n\")\nfout.write(\"\\n\")\n\nfor task_dict in task_list:\n tags = task_dict.get('tags', None)\n for tag in tags:\n if tag in category_task_list:\n category_task_list[tag].append(task_dict)\n\nfor num_category, (category, tl) in enumerate(category_task_list.items()):\n if num_category != 0:\n fout.write(\"\\n-----\\n\\n\")\n\n fout.write(f'## {category} Tasks\\n')\n\n for task_dict in tl:\n id = task_dict.get('id', None)\n display_name = task_dict.get('display_name', None)\n task = task_dict.get('task', None)\n tags = task_dict.get('tags', None)\n description = task_dict.get('description', None)\n notes = task_dict.get('notes', None)\n code_urlend = task[: max(task.find(':'), len(task))]\n code_url = f\"{MASTER}/parlai/tasks/{code_urlend}\"\n links = task_dict.get(\"links\", {})\n assert isinstance(links, dict), f\"task {id} is poorly formatted\"\n urls = [(k, v) for k, v in links.items()]\n urls.append((\"code\", code_url))\n\n urls_md = \", \".join(f\"[{k}]({v})\" for k, v in urls)\n fout.write(f\"### {display_name}\\n\")\n fout.write(f\"_Links_: {urls_md}\\n\\n\")\n if description:\n fout.write(description + \"\\n\")\n if notes:\n fout.write(\":::{admonition,note} Notes\\n\")\n fout.write(notes + \"\\n\")\n fout.write(\":::\\n\")\n fout.write(\"\\n\\n\")\n\nfout.close()\n", "path": "docs/source/generate_task_list.py"}]}
| 1,779 | 134 |
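For reference, the one-line ParlAI fix just adds a `_Usage_` line to each generated task entry. A small sketch of the markdown fragment the patched loop emits for a single entry; the `task_dict` values below are invented purely to show the output shape.

```python
# Invented task_dict, mirroring the fields the generator reads.
task_dict = {
    "display_name": "Example Task",
    "task": "example_task",
    "links": {"paper": "https://example.org/paper"},
    "description": "A made-up task used only to illustrate the output format.",
}

MASTER = "https://github.com/facebookresearch/ParlAI/tree/master"
task = task_dict["task"]
code_urlend = task[: max(task.find(":"), len(task))]
code_url = f"{MASTER}/parlai/tasks/{code_urlend}"
urls = list(task_dict["links"].items()) + [("code", code_url)]
urls_md = ", ".join(f"[{k}]({v})" for k, v in urls)

print(f"### {task_dict['display_name']}")
print(f"_Usage_: `--task {task}`")   # the line added by the patch
print(f"_Links_: {urls_md}")
print(task_dict["description"])
```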
gh_patches_debug_8735 | rasdani/github-patches | git_diff | pyro-ppl__pyro-3251 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docs builds are failing
Docs builds seem to have been failing since 1.8.5 release. We should fix this before the 1.8.6 release.
https://readthedocs.org/projects/pyro-ppl/builds/20847164/
<img width="812" alt="image" src="https://github.com/pyro-ppl/pyro/assets/648532/45149fae-a72d-481a-aaf9-73262d50aa92">
</issue>
<code>
[start of docs/source/conf.py]
1 # Copyright (c) 2017-2019 Uber Technologies, Inc.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import os
5 import sys
6
7 import sphinx_rtd_theme
8
9 # import pkg_resources
10
11 # -*- coding: utf-8 -*-
12 #
13 # Pyro documentation build configuration file, created by
14 # sphinx-quickstart on Thu Jun 15 17:16:14 2017.
15 #
16 # This file is execfile()d with the current directory set to its
17 # containing dir.
18 #
19 # Note that not all possible configuration values are present in this
20 # autogenerated file.
21 #
22 # All configuration values have a default; values that are commented out
23 # serve to show the default.
24
25 # If extensions (or modules to document with autodoc) are in another directory,
26 # add these directories to sys.path here. If the directory is relative to the
27 # documentation root, use os.path.abspath to make it absolute, like shown here.
28 #
29 sys.path.insert(0, os.path.abspath("../.."))
30
31 # -- General configuration ------------------------------------------------
32
33 # If your documentation needs a minimal Sphinx version, state it here.
34 #
35 # needs_sphinx = '1.0'
36
37 # Add any Sphinx extension module names here, as strings. They can be
38 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
39 # ones.
40 extensions = [
41 "sphinx.ext.intersphinx", #
42 "sphinx.ext.todo", #
43 "sphinx.ext.mathjax", #
44 "sphinx.ext.ifconfig", #
45 "sphinx.ext.viewcode", #
46 "sphinx.ext.githubpages", #
47 "sphinx.ext.graphviz", #
48 "sphinx.ext.autodoc",
49 "sphinx.ext.doctest",
50 'sphinx.ext.napoleon',
51 ]
52
53 # Disable documentation inheritance so as to avoid inheriting
54 # docstrings in a different format, e.g. when the parent class
55 # is a PyTorch class.
56
57 autodoc_inherit_docstrings = False
58
59 # Add any paths that contain templates here, relative to this directory.
60 templates_path = ["_templates"]
61
62 # The suffix(es) of source filenames.
63 # You can specify multiple suffix as a list of string:
64 #
65 # source_suffix = ['.rst', '.md']
66 source_suffix = ".rst"
67
68 # The master toctree document.
69 master_doc = "index"
70
71 # General information about the project.
72 project = u"Pyro"
73 copyright = u"2017-2018, Uber Technologies, Inc"
74 author = u"Uber AI Labs"
75
76 # The version info for the project you're documenting, acts as replacement for
77 # |version| and |release|, also used in various other places throughout the
78 # built documents.
79
80 version = ""
81
82 if "READTHEDOCS" not in os.environ:
83 # if developing locally, use pyro.__version__ as version
84 from pyro import __version__ # noqaE402
85
86 version = __version__
87
88 # release version
89 release = version
90
91 # The language for content autogenerated by Sphinx. Refer to documentation
92 # for a list of supported languages.
93 #
94 # This is also used if you do content translation via gettext catalogs.
95 # Usually you set "language" from the command line for these cases.
96 language = "en"
97
98 # List of patterns, relative to source directory, that match files and
99 # directories to ignore when looking for source files.
100 # This patterns also effect to html_static_path and html_extra_path
101 exclude_patterns = []
102
103 # The name of the Pygments (syntax highlighting) style to use.
104 pygments_style = "sphinx"
105
106 # If true, `todo` and `todoList` produce output, else they produce nothing.
107 todo_include_todos = True
108
109 # do not prepend module name to functions
110 add_module_names = False
111
112 # -- Options for HTML output ----------------------------------------------
113
114 # logo
115 html_logo = "_static/img/pyro_logo_wide.png"
116
117 # logo
118 html_favicon = "_static/img/favicon/favicon.ico"
119
120 # The theme to use for HTML and HTML Help pages. See the documentation for
121 # a list of builtin themes.
122 #
123 html_theme = "sphinx_rtd_theme"
124 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
125
126 # Theme options are theme-specific and customize the look and feel of a theme
127 # further. For a list of options available for each theme, see the
128 # documentation.
129
130 html_theme_options = {
131 "navigation_depth": 3,
132 "logo_only": True,
133 }
134
135 # Add any paths that contain custom static files (such as style sheets) here,
136 # relative to this directory. They are copied after the builtin static files,
137 # so a file named "default.css" will overwrite the builtin "default.css".
138 html_static_path = ["_static"]
139 html_style = "css/pyro.css"
140
141 # -- Options for HTMLHelp output ------------------------------------------
142
143 # Output file base name for HTML help builder.
144 htmlhelp_basename = "Pyrodoc"
145
146 # -- Options for LaTeX output ---------------------------------------------
147
148 latex_elements = {
149 # The paper size ('letterpaper' or 'a4paper').
150 #
151 # 'papersize': 'letterpaper',
152 # The font size ('10pt', '11pt' or '12pt').
153 #
154 # 'pointsize': '10pt',
155 # Additional stuff for the LaTeX preamble.
156 #
157 # 'preamble': '',
158 # Latex figure (float) alignment
159 #
160 # 'figure_align': 'htbp',
161 }
162
163 # Grouping the document tree into LaTeX files. List of tuples
164 # (source start file, target name, title,
165 # author, documentclass [howto, manual, or own class]).
166 latex_documents = [
167 # Disabled pdf builds to unblock readthedocs failed builds;
168 # see https://github.com/pyro-ppl/pyro/issues/3248
169 # (master_doc, "Pyro.tex", u"Pyro Documentation", u"Uber AI Labs", "manual"),
170 ]
171
172 # -- Options for manual page output ---------------------------------------
173
174 # One entry per manual page. List of tuples
175 # (source start file, name, description, authors, manual section).
176 man_pages = [(master_doc, "pyro", u"Pyro Documentation", [author], 1)]
177
178 # -- Options for Texinfo output -------------------------------------------
179
180 # Grouping the document tree into Texinfo files. List of tuples
181 # (source start file, target name, title, author,
182 # dir menu entry, description, category)
183 texinfo_documents = [
184 (
185 master_doc,
186 "Pyro",
187 u"Pyro Documentation",
188 author,
189 "Pyro",
190 "Deep Universal Probabilistic Programming.",
191 "Miscellaneous",
192 ),
193 ]
194
195 # Example configuration for intersphinx: refer to the Python standard library.
196 intersphinx_mapping = {
197 "python": ("https://docs.python.org/3/", None),
198 "torch": ("https://pytorch.org/docs/master/", None),
199 "funsor": ("http://funsor.pyro.ai/en/stable/", None),
200 "opt_einsum": ("https://optimized-einsum.readthedocs.io/en/stable/", None),
201 "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
202 "Bio": ("https://biopython.org/docs/latest/api/", None),
203 "horovod": ("https://horovod.readthedocs.io/en/stable/", None),
204 "graphviz": ("https://graphviz.readthedocs.io/en/stable/", None),
205 }
206
207 # document class constructors (__init__ methods):
208 """ comment out this functionality for now;
209 def skip(app, what, name, obj, skip, options):
210 if name == "__init__":
211 return False
212 return skip
213 """
214
215
216 def setup(app):
217 app.add_css_file("css/pyro.css")
218
219
220 # app.connect("autodoc-skip-member", skip)
221
222
223 # @jpchen's hack to get rtd builder to install latest pytorch
224 # See similar line in the install section of .travis.yml
225 if "READTHEDOCS" in os.environ:
226 os.system("pip install numpy")
227 os.system(
228 "pip install torch==1.11.0+cpu torchvision==0.12.0+cpu "
229 "-f https://download.pytorch.org/whl/torch_stable.html"
230 )
231
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -164,9 +164,7 @@
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- # Disabled pdf builds to unblock readthedocs failed builds;
- # see https://github.com/pyro-ppl/pyro/issues/3248
- # (master_doc, "Pyro.tex", u"Pyro Documentation", u"Uber AI Labs", "manual"),
+ (master_doc, "Pyro.tex", u"Pyro Documentation", u"Uber AI Labs", "manual"),
]
# -- Options for manual page output ---------------------------------------
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -164,9 +164,7 @@\n # (source start file, target name, title,\n # author, documentclass [howto, manual, or own class]).\n latex_documents = [\n- # Disabled pdf builds to unblock readthedocs failed builds;\n- # see https://github.com/pyro-ppl/pyro/issues/3248\n- # (master_doc, \"Pyro.tex\", u\"Pyro Documentation\", u\"Uber AI Labs\", \"manual\"),\n+ (master_doc, \"Pyro.tex\", u\"Pyro Documentation\", u\"Uber AI Labs\", \"manual\"),\n ]\n \n # -- Options for manual page output ---------------------------------------\n", "issue": "Docs builds are failing\nDocs builds seem to have been failing since 1.8.5 release. We should fix this before the 1.8.6 release.\r\n\r\nhttps://readthedocs.org/projects/pyro-ppl/builds/20847164/\r\n<img width=\"812\" alt=\"image\" src=\"https://github.com/pyro-ppl/pyro/assets/648532/45149fae-a72d-481a-aaf9-73262d50aa92\">\r\n\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport sys\n\nimport sphinx_rtd_theme\n\n# import pkg_resources\n\n# -*- coding: utf-8 -*-\n#\n# Pyro documentation build configuration file, created by\n# sphinx-quickstart on Thu Jun 15 17:16:14 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nsys.path.insert(0, os.path.abspath(\"../..\"))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.intersphinx\", #\n \"sphinx.ext.todo\", #\n \"sphinx.ext.mathjax\", #\n \"sphinx.ext.ifconfig\", #\n \"sphinx.ext.viewcode\", #\n \"sphinx.ext.githubpages\", #\n \"sphinx.ext.graphviz\", #\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n 'sphinx.ext.napoleon',\n]\n\n# Disable documentation inheritance so as to avoid inheriting\n# docstrings in a different format, e.g. 
when the parent class\n# is a PyTorch class.\n\nautodoc_inherit_docstrings = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = u\"Pyro\"\ncopyright = u\"2017-2018, Uber Technologies, Inc\"\nauthor = u\"Uber AI Labs\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n\nversion = \"\"\n\nif \"READTHEDOCS\" not in os.environ:\n # if developing locally, use pyro.__version__ as version\n from pyro import __version__ # noqaE402\n\n version = __version__\n\n# release version\nrelease = version\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"en\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# do not prepend module name to functions\nadd_module_names = False\n\n# -- Options for HTML output ----------------------------------------------\n\n# logo\nhtml_logo = \"_static/img/pyro_logo_wide.png\"\n\n# logo\nhtml_favicon = \"_static/img/favicon/favicon.ico\"\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n\nhtml_theme_options = {\n \"navigation_depth\": 3,\n \"logo_only\": True,\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\nhtml_style = \"css/pyro.css\"\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyrodoc\"\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n # Disabled pdf builds to unblock readthedocs failed builds;\n # see https://github.com/pyro-ppl/pyro/issues/3248\n # (master_doc, \"Pyro.tex\", u\"Pyro Documentation\", u\"Uber AI Labs\", \"manual\"),\n]\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"pyro\", u\"Pyro Documentation\", [author], 1)]\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"Pyro\",\n u\"Pyro Documentation\",\n author,\n \"Pyro\",\n \"Deep Universal Probabilistic Programming.\",\n \"Miscellaneous\",\n ),\n]\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"torch\": (\"https://pytorch.org/docs/master/\", None),\n \"funsor\": (\"http://funsor.pyro.ai/en/stable/\", None),\n \"opt_einsum\": (\"https://optimized-einsum.readthedocs.io/en/stable/\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy/reference/\", None),\n \"Bio\": (\"https://biopython.org/docs/latest/api/\", None),\n \"horovod\": (\"https://horovod.readthedocs.io/en/stable/\", None),\n \"graphviz\": (\"https://graphviz.readthedocs.io/en/stable/\", None),\n}\n\n# document class constructors (__init__ methods):\n\"\"\" comment out this functionality for now;\ndef skip(app, what, name, obj, skip, options):\n if name == \"__init__\":\n return False\n return skip\n\"\"\"\n\n\ndef setup(app):\n app.add_css_file(\"css/pyro.css\")\n\n\n# app.connect(\"autodoc-skip-member\", skip)\n\n\n# @jpchen's hack to get rtd builder to install latest pytorch\n# See similar line in the install section of .travis.yml\nif \"READTHEDOCS\" in os.environ:\n os.system(\"pip install numpy\")\n os.system(\n \"pip install torch==1.11.0+cpu torchvision==0.12.0+cpu \"\n \"-f https://download.pytorch.org/whl/torch_stable.html\"\n )\n", "path": "docs/source/conf.py"}]}
| 3,063 | 173 |
gh_patches_debug_8174 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1872 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fails to load on startup with "NameError: name 'CS_ARCH_RISCV' is not defined"
### Description
I just updated pwndbg and ran setup.sh. Afterwards gdb just fails to load pwndbg.
```text
GNU gdb (Ubuntu 12.0.90-0ubuntu1) 12.0.90
Copyright (C) 2022 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Type "show copying" and "show warranty" for details.
This GDB was configured as "x86_64-linux-gnu".
Type "show configuration" for configuration details.
For bug reporting instructions, please see:
<https://www.gnu.org/software/gdb/bugs/>.
Find the GDB manual and other documentation resources online at:
<http://www.gnu.org/software/gdb/documentation/>.
For help, type "help".
Type "apropos word" to search for commands related to "word".
Traceback (most recent call last):
File "/home/username/repositories/hacking/pwndbg/gdbinit.py", line 71, in <module>
import pwndbg # noqa: F401
File "/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/__init__.py", line 9, in <module>
import pwndbg.commands
File "/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/commands/__init__.py", line 17, in <module>
from pwndbg.heap.ptmalloc import DebugSymsHeap
File "/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/heap/ptmalloc.py", line 19, in <module>
import pwndbg.disasm
File "/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/disasm/__init__.py", line 40, in <module>
"rv32": CS_ARCH_RISCV,
NameError: name 'CS_ARCH_RISCV' is not defined. Did you mean: 'CS_ARCH_MIPS'?
```
### Steps to reproduce
1. run setup.sh
2. run gdb
3. ???
4. no profit!
### My setup
OS: Ubuntu Mate 22.04
pwndbg: 0fbe6cf47f9893af482626aa6fb30ea68f0c30d3
gdb: GNU gdb (Ubuntu 12.0.90-0ubuntu1) 12.0.90
~/.gdbinit
```text
# prevent history from being all over the filesystem
set history save on
set history filename ~/.gdb_history
set history size 8192
set history remove-duplicates unlimited
# leave history expansion off (! character)
# prevent brain from exploding
set disassembly-flavor intel
# show registers, stack and instruction pointer when stopping
# not required with gef/pwndbg
# define hook-stop
# info registers rax rbx rcx rdx rsi rdi rbp rsp rip eflags
# x /64wx $rsp
# x /3i $rip
# end
# load extensions
# source ~/repositories/hacking/peda/peda.py
source ~/repositories/hacking/exploitable/exploitable/exploitable.py
# source ~/repositories/hacking/gef/gef.py
source ~/repositories/hacking/pwndbg/gdbinit.py
```
</issue>
<code>
[start of gdbinit.py]
1 from __future__ import annotations
2
3 import cProfile
4 import glob
5 import locale
6 import os
7 import site
8 import sys
9 import time
10 from glob import glob
11 from os import environ
12 from os import path
13
14 _profiler = cProfile.Profile()
15
16 _start_time = None
17 if environ.get("PWNDBG_PROFILE") == "1":
18 _start_time = time.time()
19 _profiler.enable()
20
21 # Get virtualenv's site-packages path
22 venv_path = os.environ.get("PWNDBG_VENV_PATH")
23 if venv_path == "PWNDBG_PLEASE_SKIP_VENV":
24 pass
25 else:
26 directory, file = path.split(__file__)
27 directory = path.expanduser(directory)
28 directory = path.abspath(directory)
29
30 if not venv_path:
31 venv_path = os.path.join(directory, ".venv")
32
33 if not os.path.exists(venv_path):
34 print(f"Cannot find Pwndbg virtualenv directory: {venv_path}: please re-run setup.sh")
35 sys.exit(1)
36
37 site_pkgs_path = glob(os.path.join(venv_path, "lib/*/site-packages"))[0]
38
39 # add virtualenv's site-packages to sys.path and run .pth files
40 site.addsitedir(site_pkgs_path)
41
42 # remove existing, system-level site-packages from sys.path
43 for site_packages in site.getsitepackages():
44 if site_packages in sys.path:
45 sys.path.remove(site_packages)
46
47 # Set virtualenv's bin path (needed for utility tools like ropper, pwntools etc)
48 bin_path = os.path.join(venv_path, "bin")
49 os.environ["PATH"] = bin_path + os.pathsep + os.environ.get("PATH")
50
51 # Add gdb-pt-dump directory to sys.path so it can be imported
52 gdbpt = path.join(directory, "gdb-pt-dump")
53 sys.path.append(directory)
54 sys.path.append(gdbpt)
55
56 # warn if the user has different encoding than utf-8
57 encoding = locale.getpreferredencoding()
58
59 if encoding != "UTF-8":
60 print("******")
61 print(f"Your encoding ({encoding}) is different than UTF-8. pwndbg might not work properly.")
62 print("You might try launching GDB with:")
63 print(" LC_CTYPE=C.UTF-8 gdb")
64 print(
65 "If that does not work, make sure that en_US.UTF-8 is uncommented in /etc/locale.gen and that you called `locale-gen` command"
66 )
67 print("******")
68
69 environ["PWNLIB_NOTERM"] = "1"
70
71 import pwndbg # noqa: F401
72 import pwndbg.profiling
73
74 pwndbg.profiling.init(_profiler, _start_time)
75 if environ.get("PWNDBG_PROFILE") == "1":
76 pwndbg.profiling.profiler.stop("pwndbg-load.pstats")
77 pwndbg.profiling.profiler.start()
78
[end of gdbinit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gdbinit.py b/gdbinit.py
--- a/gdbinit.py
+++ b/gdbinit.py
@@ -50,8 +50,13 @@
# Add gdb-pt-dump directory to sys.path so it can be imported
gdbpt = path.join(directory, "gdb-pt-dump")
- sys.path.append(directory)
- sys.path.append(gdbpt)
+ sys.path.insert(0, directory)
+ sys.path.insert(1, gdbpt)
+
+ # Push virtualenv's site-packages to the front
+ sys.path.remove(site_pkgs_path)
+ sys.path.insert(2, site_pkgs_path)
+
# warn if the user has different encoding than utf-8
encoding = locale.getpreferredencoding()
|
{"golden_diff": "diff --git a/gdbinit.py b/gdbinit.py\n--- a/gdbinit.py\n+++ b/gdbinit.py\n@@ -50,8 +50,13 @@\n \n # Add gdb-pt-dump directory to sys.path so it can be imported\n gdbpt = path.join(directory, \"gdb-pt-dump\")\n- sys.path.append(directory)\n- sys.path.append(gdbpt)\n+ sys.path.insert(0, directory)\n+ sys.path.insert(1, gdbpt)\n+\n+ # Push virtualenv's site-packages to the front\n+ sys.path.remove(site_pkgs_path)\n+ sys.path.insert(2, site_pkgs_path)\n+\n \n # warn if the user has different encoding than utf-8\n encoding = locale.getpreferredencoding()\n", "issue": "fails to load on startup with \"NameError: name 'CS_ARCH_RISCV' is not defined\"\n### Description\r\nI just updated pwndbg and ran setup.sh. Afterwards gdb just fails to load pwndbg.\r\n\r\n```text\r\nGNU gdb (Ubuntu 12.0.90-0ubuntu1) 12.0.90\r\nCopyright (C) 2022 Free Software Foundation, Inc.\r\nLicense GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\r\nThis is free software: you are free to change and redistribute it.\r\nThere is NO WARRANTY, to the extent permitted by law.\r\nType \"show copying\" and \"show warranty\" for details.\r\nThis GDB was configured as \"x86_64-linux-gnu\".\r\nType \"show configuration\" for configuration details.\r\nFor bug reporting instructions, please see:\r\n<https://www.gnu.org/software/gdb/bugs/>.\r\nFind the GDB manual and other documentation resources online at:\r\n <http://www.gnu.org/software/gdb/documentation/>.\r\n\r\nFor help, type \"help\".\r\nType \"apropos word\" to search for commands related to \"word\".\r\nTraceback (most recent call last):\r\n File \"/home/username/repositories/hacking/pwndbg/gdbinit.py\", line 71, in <module>\r\n import pwndbg # noqa: F401\r\n File \"/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/__init__.py\", line 9, in <module>\r\n import pwndbg.commands\r\n File \"/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/commands/__init__.py\", line 17, in <module>\r\n from pwndbg.heap.ptmalloc import DebugSymsHeap\r\n File \"/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/heap/ptmalloc.py\", line 19, in <module>\r\n import pwndbg.disasm\r\n File \"/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/disasm/__init__.py\", line 40, in <module>\r\n \"rv32\": CS_ARCH_RISCV,\r\nNameError: name 'CS_ARCH_RISCV' is not defined. Did you mean: 'CS_ARCH_MIPS'?\r\n```\r\n\r\n### Steps to reproduce\r\n\r\n1. run setup.sh\r\n2. run gdb\r\n3. ???\r\n4. no profit!\r\n\r\n### My setup\r\n\r\nOS: Ubuntu Mate 22.04\r\npwndbg: 0fbe6cf47f9893af482626aa6fb30ea68f0c30d3\r\ngdb: GNU gdb (Ubuntu 12.0.90-0ubuntu1) 12.0.90\r\n\r\n~/.gdbinit\r\n```text\r\n# prevent history from being all over the filesystem\r\nset history save on\r\nset history filename ~/.gdb_history\r\nset history size 8192\r\nset history remove-duplicates unlimited\r\n# leave history expansion off (! 
character)\r\n\r\n# prevent brain from exploding\r\nset disassembly-flavor intel\r\n\r\n# show registers, stack and instruction pointer when stopping\r\n\r\n# not required with gef/pwndbg\r\n# define hook-stop\r\n# info registers rax rbx rcx rdx rsi rdi rbp rsp rip eflags\r\n# x /64wx $rsp\r\n# x /3i $rip\r\n# end\r\n\r\n# load extensions\r\n# source ~/repositories/hacking/peda/peda.py\r\nsource ~/repositories/hacking/exploitable/exploitable/exploitable.py\r\n# source ~/repositories/hacking/gef/gef.py\r\nsource ~/repositories/hacking/pwndbg/gdbinit.py\r\n```\nfails to load on startup with \"NameError: name 'CS_ARCH_RISCV' is not defined\"\n### Description\r\nI just updated pwndbg and ran setup.sh. Afterwards gdb just fails to load pwndbg.\r\n\r\n```text\r\nGNU gdb (Ubuntu 12.0.90-0ubuntu1) 12.0.90\r\nCopyright (C) 2022 Free Software Foundation, Inc.\r\nLicense GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\r\nThis is free software: you are free to change and redistribute it.\r\nThere is NO WARRANTY, to the extent permitted by law.\r\nType \"show copying\" and \"show warranty\" for details.\r\nThis GDB was configured as \"x86_64-linux-gnu\".\r\nType \"show configuration\" for configuration details.\r\nFor bug reporting instructions, please see:\r\n<https://www.gnu.org/software/gdb/bugs/>.\r\nFind the GDB manual and other documentation resources online at:\r\n <http://www.gnu.org/software/gdb/documentation/>.\r\n\r\nFor help, type \"help\".\r\nType \"apropos word\" to search for commands related to \"word\".\r\nTraceback (most recent call last):\r\n File \"/home/username/repositories/hacking/pwndbg/gdbinit.py\", line 71, in <module>\r\n import pwndbg # noqa: F401\r\n File \"/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/__init__.py\", line 9, in <module>\r\n import pwndbg.commands\r\n File \"/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/commands/__init__.py\", line 17, in <module>\r\n from pwndbg.heap.ptmalloc import DebugSymsHeap\r\n File \"/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/heap/ptmalloc.py\", line 19, in <module>\r\n import pwndbg.disasm\r\n File \"/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/disasm/__init__.py\", line 40, in <module>\r\n \"rv32\": CS_ARCH_RISCV,\r\nNameError: name 'CS_ARCH_RISCV' is not defined. Did you mean: 'CS_ARCH_MIPS'?\r\n```\r\n\r\n### Steps to reproduce\r\n\r\n1. run setup.sh\r\n2. run gdb\r\n3. ???\r\n4. no profit!\r\n\r\n### My setup\r\n\r\nOS: Ubuntu Mate 22.04\r\npwndbg: 0fbe6cf47f9893af482626aa6fb30ea68f0c30d3\r\ngdb: GNU gdb (Ubuntu 12.0.90-0ubuntu1) 12.0.90\r\n\r\n~/.gdbinit\r\n```text\r\n# prevent history from being all over the filesystem\r\nset history save on\r\nset history filename ~/.gdb_history\r\nset history size 8192\r\nset history remove-duplicates unlimited\r\n# leave history expansion off (! 
character)\r\n\r\n# prevent brain from exploding\r\nset disassembly-flavor intel\r\n\r\n# show registers, stack and instruction pointer when stopping\r\n\r\n# not required with gef/pwndbg\r\n# define hook-stop\r\n# info registers rax rbx rcx rdx rsi rdi rbp rsp rip eflags\r\n# x /64wx $rsp\r\n# x /3i $rip\r\n# end\r\n\r\n# load extensions\r\n# source ~/repositories/hacking/peda/peda.py\r\nsource ~/repositories/hacking/exploitable/exploitable/exploitable.py\r\n# source ~/repositories/hacking/gef/gef.py\r\nsource ~/repositories/hacking/pwndbg/gdbinit.py\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nimport cProfile\nimport glob\nimport locale\nimport os\nimport site\nimport sys\nimport time\nfrom glob import glob\nfrom os import environ\nfrom os import path\n\n_profiler = cProfile.Profile()\n\n_start_time = None\nif environ.get(\"PWNDBG_PROFILE\") == \"1\":\n _start_time = time.time()\n _profiler.enable()\n\n# Get virtualenv's site-packages path\nvenv_path = os.environ.get(\"PWNDBG_VENV_PATH\")\nif venv_path == \"PWNDBG_PLEASE_SKIP_VENV\":\n pass\nelse:\n directory, file = path.split(__file__)\n directory = path.expanduser(directory)\n directory = path.abspath(directory)\n\n if not venv_path:\n venv_path = os.path.join(directory, \".venv\")\n\n if not os.path.exists(venv_path):\n print(f\"Cannot find Pwndbg virtualenv directory: {venv_path}: please re-run setup.sh\")\n sys.exit(1)\n\n site_pkgs_path = glob(os.path.join(venv_path, \"lib/*/site-packages\"))[0]\n\n # add virtualenv's site-packages to sys.path and run .pth files\n site.addsitedir(site_pkgs_path)\n\n # remove existing, system-level site-packages from sys.path\n for site_packages in site.getsitepackages():\n if site_packages in sys.path:\n sys.path.remove(site_packages)\n\n # Set virtualenv's bin path (needed for utility tools like ropper, pwntools etc)\n bin_path = os.path.join(venv_path, \"bin\")\n os.environ[\"PATH\"] = bin_path + os.pathsep + os.environ.get(\"PATH\")\n\n # Add gdb-pt-dump directory to sys.path so it can be imported\n gdbpt = path.join(directory, \"gdb-pt-dump\")\n sys.path.append(directory)\n sys.path.append(gdbpt)\n\n# warn if the user has different encoding than utf-8\nencoding = locale.getpreferredencoding()\n\nif encoding != \"UTF-8\":\n print(\"******\")\n print(f\"Your encoding ({encoding}) is different than UTF-8. pwndbg might not work properly.\")\n print(\"You might try launching GDB with:\")\n print(\" LC_CTYPE=C.UTF-8 gdb\")\n print(\n \"If that does not work, make sure that en_US.UTF-8 is uncommented in /etc/locale.gen and that you called `locale-gen` command\"\n )\n print(\"******\")\n\nenviron[\"PWNLIB_NOTERM\"] = \"1\"\n\nimport pwndbg # noqa: F401\nimport pwndbg.profiling\n\npwndbg.profiling.init(_profiler, _start_time)\nif environ.get(\"PWNDBG_PROFILE\") == \"1\":\n pwndbg.profiling.profiler.stop(\"pwndbg-load.pstats\")\n pwndbg.profiling.profiler.start()\n", "path": "gdbinit.py"}]}
| 2,994 | 170 |
gh_patches_debug_4848 | rasdani/github-patches | git_diff | enthought__chaco-728 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeError when running stock prices examples
This error was discovered when running the `examples/demo/financial/stock_prices.py` example.
In order to reproduce, simply load the example and start zooming out. This was observed on python 3.6 with wx toolkit.
```
>python stock_prices.py
c:\users\rporuri\work\github\ets\enable\kiva\agg\plat_support.py:188: wxPyDeprecationWarning: Call to deprecated item. Use GetHandle instead.
self.draw(window_dc.GetHDC(), x, y, width, height)
Exception occurred in traits notification handler for event object: TraitChangeEvent(object=<chaco.data_range_1d.DataRange1D object at 0x000001C06339C518>, name='updated', old=<undefined>, new=(1614674109.0105073, 1657874109.0105073))
Traceback (most recent call last):
File "C:\Users\rporuri\.edm\envs\enable-test-3.6-wx\lib\site-packages\traits\observation\_trait_event_notifier.py", line 122, in __call__
self.dispatcher(handler, event)
File "C:\Users\rporuri\.edm\envs\enable-test-3.6-wx\lib\site-packages\traits\observation\observe.py", line 26, in dispatch_same
handler(event)
File "stock_prices.py", line 147, in _plot_range_handler
low, high = event
TypeError: 'TraitChangeEvent' object is not iterable
Exception occurred in traits notification handler for event object: TraitChangeEvent(object=<chaco.data_range_1d.DataRange1D object at 0x000001C06339C518>, name='updated', old=<undefined>, new=(1593074109.0105073, 1679474109.0105073))
Traceback (most recent call last):
File "C:\Users\rporuri\.edm\envs\enable-test-3.6-wx\lib\site-packages\traits\observation\_trait_event_notifier.py", line 122, in __call__
self.dispatcher(handler, event)
File "C:\Users\rporuri\.edm\envs\enable-test-3.6-wx\lib\site-packages\traits\observation\observe.py", line 26, in dispatch_same
handler(event)
File "stock_prices.py", line 147, in _plot_range_handler
low, high = event
TypeError: 'TraitChangeEvent' object is not iterable
```
</issue>
<code>
[start of examples/demo/financial/stock_prices.py]
1 """
2 Implementation of a standard financial plot visualization using Chaco
3 renderers and scales.
4
5 In the main price plot area, mouse wheel zooms and mouse drag pans (if
6 the plot is not at the edge of the time series data). In the bottom
7 overview plot area, right-click-drag selects a range of times to display
8 on the top two plots. Once a region is selected, it can be moved
9 around by left-dragging or resized by left-dragging one of its
10 edges.
11 """
12
13 # Major library imports
14 from numpy import abs, cumprod, linspace, random
15 import time
16
17 from enable.example_support import DemoFrame, demo_main
18
19 # Enthought library imports
20 from enable.api import Window
21
22 # Chaco imports
23 from chaco.api import (
24 ArrayDataSource,
25 BarPlot,
26 DataRange1D,
27 LinePlot,
28 LinearMapper,
29 VPlotContainer,
30 PlotAxis,
31 FilledLinePlot,
32 add_default_grids,
33 )
34 from chaco.tools.api import (
35 PanTool,
36 ZoomTool,
37 RangeSelection,
38 RangeSelectionOverlay,
39 )
40
41 from chaco.scales.api import CalendarScaleSystem
42 from chaco.scales_tick_generator import ScalesTickGenerator
43
44
45 def create_dates(numpoints, units="days"):
46 """Returns **numpoints** number of dates that evenly bracket the current
47 date and time. **units** should be one of "weeks", "days", "hours"
48 "minutes", or "seconds".
49 """
50 units_map = {
51 "weeks": 7 * 24 * 3600,
52 "days": 24 * 3600,
53 "hours": 3600,
54 "minutes": 60,
55 "seconds": 1,
56 }
57 now = time.time()
58 dt = units_map[units]
59 dates = linspace(now, now + numpoints * dt, numpoints)
60 return dates
61
62
63 class PlotFrame(DemoFrame):
64 def _create_price_plots(self, times, prices, mini_height=75):
65 """Creates the two plots of prices and returns them. One of the
66 plots can be zoomed and panned, and the other plot (smaller) always
67 shows the full data.
68
69 *dates* and *prices* are two data sources.
70 """
71
72 # Create the price plot
73 price_plot = FilledLinePlot(
74 index=times,
75 value=prices,
76 index_mapper=LinearMapper(range=DataRange1D(times)),
77 value_mapper=LinearMapper(range=DataRange1D(prices)),
78 edge_color="blue",
79 face_color="paleturquoise",
80 bgcolor="white",
81 border_visible=True,
82 )
83
84 # Add pan and zoom
85 price_plot.tools.append(
86 PanTool(
87 price_plot,
88 constrain=True,
89 constrain_direction="x",
90 restrict_to_data=True,
91 )
92 )
93 price_plot.overlays.append(
94 ZoomTool(
95 price_plot,
96 drag_button="right",
97 always_on=True,
98 tool_mode="range",
99 axis="index",
100 max_zoom_out_factor=1.0,
101 x_min_zoom_factor=float(1e-3),
102 )
103 )
104
105 # Create the miniplot
106 miniplot = LinePlot(
107 index=times,
108 value=prices,
109 index_mapper=LinearMapper(range=DataRange1D(times)),
110 value_mapper=LinearMapper(range=DataRange1D(prices)),
111 color="black",
112 border_visible=True,
113 bgcolor="white",
114 height=mini_height,
115 resizable="h",
116 )
117
118 # Add a range overlay to the miniplot that is hooked up to the range
119 # of the main price_plot
120 range_tool = RangeSelection(miniplot)
121 miniplot.tools.append(range_tool)
122 range_overlay = RangeSelectionOverlay(
123 miniplot, metadata_name="selections"
124 )
125 miniplot.overlays.append(range_overlay)
126 range_tool.observe(self._range_selection_handler, "selection")
127
128 # Attach a handler that sets the tool when the plot's index range changes
129 self.range_tool = range_tool
130 price_plot.index_range.observe(self._plot_range_handler, "updated")
131
132 return price_plot, miniplot
133
134 def _range_selection_handler(self, event):
135 range_selection_event = event.new
136 # The event obj should be a tuple (low, high) in data space
137 if range_selection_event is not None:
138 low, high = range_selection_event
139 self.price_plot.index_range.low = low
140 self.price_plot.index_range.high = high
141 else:
142 self.price_plot.index_range.set_bounds("auto", "auto")
143
144 def _plot_range_handler(self, event):
145 plot_range_event = event.new
146 if plot_range_event is not None:
147 low, high = event
148 if "auto" not in (low, high):
149 self.range_tool.selection = (low, high)
150
151 def _create_vol_plot(self, times, volumes, height=100):
152 "Creates and returns the volume plot"
153 index_range = self.price_plot.index_range
154 vol_plot = BarPlot(
155 index=times,
156 value=volumes,
157 index_mapper=LinearMapper(range=index_range),
158 value_mapper=LinearMapper(range=DataRange1D(volumes)),
159 line_color="transparent",
160 fill_color="black",
161 bar_width=1.0,
162 bar_width_type="screen",
163 antialias=False,
164 height=100,
165 resizable="h",
166 bgcolor="white",
167 border_visible=True,
168 )
169 vol_plot.tools.append(
170 PanTool(vol_plot, constrain=True, constrain_direction="x")
171 )
172 return vol_plot
173
174 def _create_component(self):
175
176 # Create the data and datasource objects
177 # In order for the date axis to work, the index data points need to
178 # be in units of seconds since the epoch. This is because we are using
179 # the CalendarScaleSystem, whose formatters interpret the numerical values
180 # as seconds since the epoch.
181 numpoints = 500
182 index = create_dates(numpoints)
183 returns = random.lognormal(0.01, 0.1, size=numpoints)
184 price = 100.0 * cumprod(returns)
185 volume = abs(random.normal(1000.0, 1500.0, size=numpoints) + 2000.0)
186
187 time_ds = ArrayDataSource(index)
188 vol_ds = ArrayDataSource(volume, sort_order="none")
189 price_ds = ArrayDataSource(price, sort_order="none")
190
191 # Create the price plots
192 price_plot, mini_plot = self._create_price_plots(time_ds, price_ds)
193 price_plot.index_mapper.domain_limits = (index[0], index[-1])
194 self.price_plot = price_plot
195 self.mini_plot = mini_plot
196
197 # Create the volume plot
198 vol_plot = self._create_vol_plot(time_ds, vol_ds)
199 vol_plot.index_mapper.domain_limits = (index[0], index[-1])
200
201 # Set the plot's bottom axis to use the Scales ticking system
202 ticker = ScalesTickGenerator(scale=CalendarScaleSystem())
203 for plot in price_plot, mini_plot, vol_plot:
204 bottom_axis = PlotAxis(
205 plot, orientation="bottom", tick_generator=ticker
206 )
207 plot.overlays.append(bottom_axis)
208 plot.overlays.append(PlotAxis(plot, orientation="left"))
209 hgrid, vgrid = add_default_grids(plot)
210 vgrid.tick_generator = bottom_axis.tick_generator
211
212 container = VPlotContainer(
213 bgcolor="lightgray", spacing=40, padding=50, fill_padding=False
214 )
215 container.add(mini_plot, vol_plot, price_plot)
216
217 return container
218
219
220 if __name__ == "__main__":
221 # Save demo so that it doesn't get garbage collected when run within
222 # existing event loop (i.e. from ipython).
223 demo = demo_main(
224 PlotFrame, size=(800, 600), title="Stock price and volume"
225 )
226
[end of examples/demo/financial/stock_prices.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/demo/financial/stock_prices.py b/examples/demo/financial/stock_prices.py
--- a/examples/demo/financial/stock_prices.py
+++ b/examples/demo/financial/stock_prices.py
@@ -144,7 +144,7 @@
def _plot_range_handler(self, event):
plot_range_event = event.new
if plot_range_event is not None:
- low, high = event
+ low, high = plot_range_event
if "auto" not in (low, high):
self.range_tool.selection = (low, high)
|
{"golden_diff": "diff --git a/examples/demo/financial/stock_prices.py b/examples/demo/financial/stock_prices.py\n--- a/examples/demo/financial/stock_prices.py\n+++ b/examples/demo/financial/stock_prices.py\n@@ -144,7 +144,7 @@\n def _plot_range_handler(self, event):\n plot_range_event = event.new\n if plot_range_event is not None:\n- low, high = event\n+ low, high = plot_range_event\n if \"auto\" not in (low, high):\n self.range_tool.selection = (low, high)\n", "issue": "TypeError when running stock prices examples\nThis error was discovered when running the `examples/demo/financial/stock_prices.py` example.\r\n\r\nIn order to reproduce, simply load the example and start zooming out. This was observed on python 3.6 with wx toolkit.\r\n\r\n```\r\n>python stock_prices.py\r\nc:\\users\\rporuri\\work\\github\\ets\\enable\\kiva\\agg\\plat_support.py:188: wxPyDeprecationWarning: Call to deprecated item. Use GetHandle instead.\r\n self.draw(window_dc.GetHDC(), x, y, width, height)\r\nException occurred in traits notification handler for event object: TraitChangeEvent(object=<chaco.data_range_1d.DataRange1D object at 0x000001C06339C518>, name='updated', old=<undefined>, new=(1614674109.0105073, 1657874109.0105073))\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\rporuri\\.edm\\envs\\enable-test-3.6-wx\\lib\\site-packages\\traits\\observation\\_trait_event_notifier.py\", line 122, in __call__\r\n self.dispatcher(handler, event)\r\n File \"C:\\Users\\rporuri\\.edm\\envs\\enable-test-3.6-wx\\lib\\site-packages\\traits\\observation\\observe.py\", line 26, in dispatch_same\r\n handler(event)\r\n File \"stock_prices.py\", line 147, in _plot_range_handler\r\n low, high = event\r\nTypeError: 'TraitChangeEvent' object is not iterable\r\nException occurred in traits notification handler for event object: TraitChangeEvent(object=<chaco.data_range_1d.DataRange1D object at 0x000001C06339C518>, name='updated', old=<undefined>, new=(1593074109.0105073, 1679474109.0105073))\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\rporuri\\.edm\\envs\\enable-test-3.6-wx\\lib\\site-packages\\traits\\observation\\_trait_event_notifier.py\", line 122, in __call__\r\n self.dispatcher(handler, event)\r\n File \"C:\\Users\\rporuri\\.edm\\envs\\enable-test-3.6-wx\\lib\\site-packages\\traits\\observation\\observe.py\", line 26, in dispatch_same\r\n handler(event)\r\n File \"stock_prices.py\", line 147, in _plot_range_handler\r\n low, high = event\r\nTypeError: 'TraitChangeEvent' object is not iterable\r\n```\n", "before_files": [{"content": "\"\"\"\nImplementation of a standard financial plot visualization using Chaco\nrenderers and scales.\n\nIn the main price plot area, mouse wheel zooms and mouse drag pans (if\nthe plot is not at the edge of the time series data). In the bottom\noverview plot area, right-click-drag selects a range of times to display\non the top two plots. 
Once a region is selected, it can be moved\naround by left-dragging or resized by left-dragging one of its\nedges.\n\"\"\"\n\n# Major library imports\nfrom numpy import abs, cumprod, linspace, random\nimport time\n\nfrom enable.example_support import DemoFrame, demo_main\n\n# Enthought library imports\nfrom enable.api import Window\n\n# Chaco imports\nfrom chaco.api import (\n ArrayDataSource,\n BarPlot,\n DataRange1D,\n LinePlot,\n LinearMapper,\n VPlotContainer,\n PlotAxis,\n FilledLinePlot,\n add_default_grids,\n)\nfrom chaco.tools.api import (\n PanTool,\n ZoomTool,\n RangeSelection,\n RangeSelectionOverlay,\n)\n\nfrom chaco.scales.api import CalendarScaleSystem\nfrom chaco.scales_tick_generator import ScalesTickGenerator\n\n\ndef create_dates(numpoints, units=\"days\"):\n \"\"\"Returns **numpoints** number of dates that evenly bracket the current\n date and time. **units** should be one of \"weeks\", \"days\", \"hours\"\n \"minutes\", or \"seconds\".\n \"\"\"\n units_map = {\n \"weeks\": 7 * 24 * 3600,\n \"days\": 24 * 3600,\n \"hours\": 3600,\n \"minutes\": 60,\n \"seconds\": 1,\n }\n now = time.time()\n dt = units_map[units]\n dates = linspace(now, now + numpoints * dt, numpoints)\n return dates\n\n\nclass PlotFrame(DemoFrame):\n def _create_price_plots(self, times, prices, mini_height=75):\n \"\"\"Creates the two plots of prices and returns them. One of the\n plots can be zoomed and panned, and the other plot (smaller) always\n shows the full data.\n\n *dates* and *prices* are two data sources.\n \"\"\"\n\n # Create the price plot\n price_plot = FilledLinePlot(\n index=times,\n value=prices,\n index_mapper=LinearMapper(range=DataRange1D(times)),\n value_mapper=LinearMapper(range=DataRange1D(prices)),\n edge_color=\"blue\",\n face_color=\"paleturquoise\",\n bgcolor=\"white\",\n border_visible=True,\n )\n\n # Add pan and zoom\n price_plot.tools.append(\n PanTool(\n price_plot,\n constrain=True,\n constrain_direction=\"x\",\n restrict_to_data=True,\n )\n )\n price_plot.overlays.append(\n ZoomTool(\n price_plot,\n drag_button=\"right\",\n always_on=True,\n tool_mode=\"range\",\n axis=\"index\",\n max_zoom_out_factor=1.0,\n x_min_zoom_factor=float(1e-3),\n )\n )\n\n # Create the miniplot\n miniplot = LinePlot(\n index=times,\n value=prices,\n index_mapper=LinearMapper(range=DataRange1D(times)),\n value_mapper=LinearMapper(range=DataRange1D(prices)),\n color=\"black\",\n border_visible=True,\n bgcolor=\"white\",\n height=mini_height,\n resizable=\"h\",\n )\n\n # Add a range overlay to the miniplot that is hooked up to the range\n # of the main price_plot\n range_tool = RangeSelection(miniplot)\n miniplot.tools.append(range_tool)\n range_overlay = RangeSelectionOverlay(\n miniplot, metadata_name=\"selections\"\n )\n miniplot.overlays.append(range_overlay)\n range_tool.observe(self._range_selection_handler, \"selection\")\n\n # Attach a handler that sets the tool when the plot's index range changes\n self.range_tool = range_tool\n price_plot.index_range.observe(self._plot_range_handler, \"updated\")\n\n return price_plot, miniplot\n\n def _range_selection_handler(self, event):\n range_selection_event = event.new\n # The event obj should be a tuple (low, high) in data space\n if range_selection_event is not None:\n low, high = range_selection_event\n self.price_plot.index_range.low = low\n self.price_plot.index_range.high = high\n else:\n self.price_plot.index_range.set_bounds(\"auto\", \"auto\")\n\n def _plot_range_handler(self, event):\n plot_range_event = event.new\n if plot_range_event 
is not None:\n low, high = event\n if \"auto\" not in (low, high):\n self.range_tool.selection = (low, high)\n\n def _create_vol_plot(self, times, volumes, height=100):\n \"Creates and returns the volume plot\"\n index_range = self.price_plot.index_range\n vol_plot = BarPlot(\n index=times,\n value=volumes,\n index_mapper=LinearMapper(range=index_range),\n value_mapper=LinearMapper(range=DataRange1D(volumes)),\n line_color=\"transparent\",\n fill_color=\"black\",\n bar_width=1.0,\n bar_width_type=\"screen\",\n antialias=False,\n height=100,\n resizable=\"h\",\n bgcolor=\"white\",\n border_visible=True,\n )\n vol_plot.tools.append(\n PanTool(vol_plot, constrain=True, constrain_direction=\"x\")\n )\n return vol_plot\n\n def _create_component(self):\n\n # Create the data and datasource objects\n # In order for the date axis to work, the index data points need to\n # be in units of seconds since the epoch. This is because we are using\n # the CalendarScaleSystem, whose formatters interpret the numerical values\n # as seconds since the epoch.\n numpoints = 500\n index = create_dates(numpoints)\n returns = random.lognormal(0.01, 0.1, size=numpoints)\n price = 100.0 * cumprod(returns)\n volume = abs(random.normal(1000.0, 1500.0, size=numpoints) + 2000.0)\n\n time_ds = ArrayDataSource(index)\n vol_ds = ArrayDataSource(volume, sort_order=\"none\")\n price_ds = ArrayDataSource(price, sort_order=\"none\")\n\n # Create the price plots\n price_plot, mini_plot = self._create_price_plots(time_ds, price_ds)\n price_plot.index_mapper.domain_limits = (index[0], index[-1])\n self.price_plot = price_plot\n self.mini_plot = mini_plot\n\n # Create the volume plot\n vol_plot = self._create_vol_plot(time_ds, vol_ds)\n vol_plot.index_mapper.domain_limits = (index[0], index[-1])\n\n # Set the plot's bottom axis to use the Scales ticking system\n ticker = ScalesTickGenerator(scale=CalendarScaleSystem())\n for plot in price_plot, mini_plot, vol_plot:\n bottom_axis = PlotAxis(\n plot, orientation=\"bottom\", tick_generator=ticker\n )\n plot.overlays.append(bottom_axis)\n plot.overlays.append(PlotAxis(plot, orientation=\"left\"))\n hgrid, vgrid = add_default_grids(plot)\n vgrid.tick_generator = bottom_axis.tick_generator\n\n container = VPlotContainer(\n bgcolor=\"lightgray\", spacing=40, padding=50, fill_padding=False\n )\n container.add(mini_plot, vol_plot, price_plot)\n\n return container\n\n\nif __name__ == \"__main__\":\n # Save demo so that it doesn't get garbage collected when run within\n # existing event loop (i.e. from ipython).\n demo = demo_main(\n PlotFrame, size=(800, 600), title=\"Stock price and volume\"\n )\n", "path": "examples/demo/financial/stock_prices.py"}]}
| 3,486 | 126 |
gh_patches_debug_4443 | rasdani/github-patches | git_diff | pytorch__text-145 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: 'list' object has no attribute 'rstrip'
Hi all, previously torchtext works for me when I'm running anaconda python. However, now, when i uninstalled my anaconda python. It stops working.
It gives me the following error:
```
File "/Library/Python/2.7/site-packages/torchtext/data/example.py", line 59, in fromlist
setattr(ex, name, field.preprocess(val.rstrip('\n')))
AttributeError: 'list' object has no attribute 'rstrip'
```
Thanks!
</issue>
<code>
[start of torchtext/data/example.py]
1 import csv
2 import json
3
4 import six
5
6
7 class Example(object):
8 """Defines a single training or test example.
9
10 Stores each column of the example as an attribute.
11 """
12
13 @classmethod
14 def fromJSON(cls, data, fields):
15 return cls.fromdict(json.loads(data), fields)
16
17 @classmethod
18 def fromdict(cls, data, fields):
19 ex = cls()
20 for key, vals in fields.items():
21 if key not in data:
22 raise ValueError("Specified key {} was not found in "
23 "the input data".format(key))
24 if vals is not None:
25 if not isinstance(vals, list):
26 vals = [vals]
27 for val in vals:
28 name, field = val
29 setattr(ex, name, field.preprocess(data[key]))
30 return ex
31
32 @classmethod
33 def fromTSV(cls, data, fields):
34 return cls.fromlist(data.split('\t'), fields)
35
36 @classmethod
37 def fromCSV(cls, data, fields):
38 data = data.rstrip("\n")
39 # If Python 2, encode to utf-8 since CSV doesn't take unicode input
40 if six.PY2:
41 data = data.encode('utf-8')
42 # Use Python CSV module to parse the CSV line
43 parsed_csv_lines = csv.reader([data])
44
45 # If Python 2, decode back to unicode (the original input format).
46 if six.PY2:
47 for line in parsed_csv_lines:
48 parsed_csv_line = [six.text_type(col, 'utf-8') for col in line]
49 break
50 else:
51 parsed_csv_line = list(parsed_csv_lines)[0]
52 return cls.fromlist(parsed_csv_line, fields)
53
54 @classmethod
55 def fromlist(cls, data, fields):
56 ex = cls()
57 for (name, field), val in zip(fields, data):
58 if field is not None:
59 setattr(ex, name, field.preprocess(val.rstrip('\n')))
60 return ex
61
62 @classmethod
63 def fromtree(cls, data, fields, subtrees=False):
64 try:
65 from nltk.tree import Tree
66 except ImportError:
67 print("Please install NLTK. "
68 "See the docs at http://nltk.org for more information.")
69 raise
70 tree = Tree.fromstring(data)
71 if subtrees:
72 return [cls.fromlist(
73 [' '.join(t.leaves()), t.label()], fields) for t in tree.subtrees()]
74 return cls.fromlist([' '.join(tree.leaves()), tree.label()], fields)
75
[end of torchtext/data/example.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torchtext/data/example.py b/torchtext/data/example.py
--- a/torchtext/data/example.py
+++ b/torchtext/data/example.py
@@ -56,7 +56,9 @@
ex = cls()
for (name, field), val in zip(fields, data):
if field is not None:
- setattr(ex, name, field.preprocess(val.rstrip('\n')))
+ if isinstance(val, six.string_types):
+ val = val.rstrip('\n')
+ setattr(ex, name, field.preprocess(val))
return ex
@classmethod
|
{"golden_diff": "diff --git a/torchtext/data/example.py b/torchtext/data/example.py\n--- a/torchtext/data/example.py\n+++ b/torchtext/data/example.py\n@@ -56,7 +56,9 @@\n ex = cls()\n for (name, field), val in zip(fields, data):\n if field is not None:\n- setattr(ex, name, field.preprocess(val.rstrip('\\n')))\n+ if isinstance(val, six.string_types):\n+ val = val.rstrip('\\n')\n+ setattr(ex, name, field.preprocess(val))\n return ex\n \n @classmethod\n", "issue": "AttributeError: 'list' object has no attribute 'rstrip'\nHi all, previously torchtext works for me when I'm running anaconda python. However, now, when i uninstalled my anaconda python. It stops working.\r\n\r\nIt gives me the following error: \r\n\r\n```\r\nFile \"/Library/Python/2.7/site-packages/torchtext/data/example.py\", line 59, in fromlist\r\n setattr(ex, name, field.preprocess(val.rstrip('\\n')))\r\nAttributeError: 'list' object has no attribute 'rstrip'\r\n\r\n```\r\n\r\nThanks!\n", "before_files": [{"content": "import csv\nimport json\n\nimport six\n\n\nclass Example(object):\n \"\"\"Defines a single training or test example.\n\n Stores each column of the example as an attribute.\n \"\"\"\n\n @classmethod\n def fromJSON(cls, data, fields):\n return cls.fromdict(json.loads(data), fields)\n\n @classmethod\n def fromdict(cls, data, fields):\n ex = cls()\n for key, vals in fields.items():\n if key not in data:\n raise ValueError(\"Specified key {} was not found in \"\n \"the input data\".format(key))\n if vals is not None:\n if not isinstance(vals, list):\n vals = [vals]\n for val in vals:\n name, field = val\n setattr(ex, name, field.preprocess(data[key]))\n return ex\n\n @classmethod\n def fromTSV(cls, data, fields):\n return cls.fromlist(data.split('\\t'), fields)\n\n @classmethod\n def fromCSV(cls, data, fields):\n data = data.rstrip(\"\\n\")\n # If Python 2, encode to utf-8 since CSV doesn't take unicode input\n if six.PY2:\n data = data.encode('utf-8')\n # Use Python CSV module to parse the CSV line\n parsed_csv_lines = csv.reader([data])\n\n # If Python 2, decode back to unicode (the original input format).\n if six.PY2:\n for line in parsed_csv_lines:\n parsed_csv_line = [six.text_type(col, 'utf-8') for col in line]\n break\n else:\n parsed_csv_line = list(parsed_csv_lines)[0]\n return cls.fromlist(parsed_csv_line, fields)\n\n @classmethod\n def fromlist(cls, data, fields):\n ex = cls()\n for (name, field), val in zip(fields, data):\n if field is not None:\n setattr(ex, name, field.preprocess(val.rstrip('\\n')))\n return ex\n\n @classmethod\n def fromtree(cls, data, fields, subtrees=False):\n try:\n from nltk.tree import Tree\n except ImportError:\n print(\"Please install NLTK. \"\n \"See the docs at http://nltk.org for more information.\")\n raise\n tree = Tree.fromstring(data)\n if subtrees:\n return [cls.fromlist(\n [' '.join(t.leaves()), t.label()], fields) for t in tree.subtrees()]\n return cls.fromlist([' '.join(tree.leaves()), tree.label()], fields)\n", "path": "torchtext/data/example.py"}]}
| 1,335 | 128 |
gh_patches_debug_12513 | rasdani/github-patches | git_diff | pypa__pip-11417 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Completion in ZSH doesn't understand that arguments follow certain options
* Pip version: 9.0.1
* Python version: 2.7.12
* Operating system: Ubuntu 16.04
### Description:
Completion in Zsh uses the older `compctl` builtin and returns completions for long options that take arguments with a trailing equals sign. But compctl/Zsh doesn't understand that as meaning that the option takes an argument and adds a space after the equals sign and also tries to complete the next argument if you remove the equals sign as yet another option.
No idea if this is fixable using the older compctl, might want to migrate to the newer compsys... With compsys you will probably have to modify the completion output from Pip or preprocess it in shell code so that it fits what something like `_arguments` expects.
https://github.com/pypa/pip/pull/4842 will make it complete file names by re-implementing that inside pip instead of letting the shell handle it which means certain stuff like colored file names won't work or `zstyle` related settings for file name completion. And it still won't fix the fact that Zsh will add a space after the equals sign...
### What I've run:
```sh
➜ pip install --requirem<tab>
➜ pip install --requirement= # With a space
➜ pip install --requirement=<tab> # Nothing
➜ pip install --requirement= <tab>
➜ pip install --requirement= -- # Sigh...
```
</issue>
<code>
[start of src/pip/_internal/commands/completion.py]
1 import sys
2 import textwrap
3 from optparse import Values
4 from typing import List
5
6 from pip._internal.cli.base_command import Command
7 from pip._internal.cli.status_codes import SUCCESS
8 from pip._internal.utils.misc import get_prog
9
10 BASE_COMPLETION = """
11 # pip {shell} completion start{script}# pip {shell} completion end
12 """
13
14 COMPLETION_SCRIPTS = {
15 "bash": """
16 _pip_completion()
17 {{
18 COMPREPLY=( $( COMP_WORDS="${{COMP_WORDS[*]}}" \\
19 COMP_CWORD=$COMP_CWORD \\
20 PIP_AUTO_COMPLETE=1 $1 2>/dev/null ) )
21 }}
22 complete -o default -F _pip_completion {prog}
23 """,
24 "zsh": """
25 function _pip_completion {{
26 local words cword
27 read -Ac words
28 read -cn cword
29 reply=( $( COMP_WORDS="$words[*]" \\
30 COMP_CWORD=$(( cword-1 )) \\
31 PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null ))
32 }}
33 compctl -K _pip_completion {prog}
34 """,
35 "fish": """
36 function __fish_complete_pip
37 set -lx COMP_WORDS (commandline -o) ""
38 set -lx COMP_CWORD ( \\
39 math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\
40 )
41 set -lx PIP_AUTO_COMPLETE 1
42 string split \\ -- (eval $COMP_WORDS[1])
43 end
44 complete -fa "(__fish_complete_pip)" -c {prog}
45 """,
46 "powershell": """
47 if ((Test-Path Function:\\TabExpansion) -and -not `
48 (Test-Path Function:\\_pip_completeBackup)) {{
49 Rename-Item Function:\\TabExpansion _pip_completeBackup
50 }}
51 function TabExpansion($line, $lastWord) {{
52 $lastBlock = [regex]::Split($line, '[|;]')[-1].TrimStart()
53 if ($lastBlock.StartsWith("{prog} ")) {{
54 $Env:COMP_WORDS=$lastBlock
55 $Env:COMP_CWORD=$lastBlock.Split().Length - 1
56 $Env:PIP_AUTO_COMPLETE=1
57 (& {prog}).Split()
58 Remove-Item Env:COMP_WORDS
59 Remove-Item Env:COMP_CWORD
60 Remove-Item Env:PIP_AUTO_COMPLETE
61 }}
62 elseif (Test-Path Function:\\_pip_completeBackup) {{
63 # Fall back on existing tab expansion
64 _pip_completeBackup $line $lastWord
65 }}
66 }}
67 """,
68 }
69
70
71 class CompletionCommand(Command):
72 """A helper command to be used for command completion."""
73
74 ignore_require_venv = True
75
76 def add_options(self) -> None:
77 self.cmd_opts.add_option(
78 "--bash",
79 "-b",
80 action="store_const",
81 const="bash",
82 dest="shell",
83 help="Emit completion code for bash",
84 )
85 self.cmd_opts.add_option(
86 "--zsh",
87 "-z",
88 action="store_const",
89 const="zsh",
90 dest="shell",
91 help="Emit completion code for zsh",
92 )
93 self.cmd_opts.add_option(
94 "--fish",
95 "-f",
96 action="store_const",
97 const="fish",
98 dest="shell",
99 help="Emit completion code for fish",
100 )
101 self.cmd_opts.add_option(
102 "--powershell",
103 "-p",
104 action="store_const",
105 const="powershell",
106 dest="shell",
107 help="Emit completion code for powershell",
108 )
109
110 self.parser.insert_option_group(0, self.cmd_opts)
111
112 def run(self, options: Values, args: List[str]) -> int:
113 """Prints the completion code of the given shell"""
114 shells = COMPLETION_SCRIPTS.keys()
115 shell_options = ["--" + shell for shell in sorted(shells)]
116 if options.shell in shells:
117 script = textwrap.dedent(
118 COMPLETION_SCRIPTS.get(options.shell, "").format(prog=get_prog())
119 )
120 print(BASE_COMPLETION.format(script=script, shell=options.shell))
121 return SUCCESS
122 else:
123 sys.stderr.write(
124 "ERROR: You must pass {}\n".format(" or ".join(shell_options))
125 )
126 return SUCCESS
127
[end of src/pip/_internal/commands/completion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/pip/_internal/commands/completion.py b/src/pip/_internal/commands/completion.py
--- a/src/pip/_internal/commands/completion.py
+++ b/src/pip/_internal/commands/completion.py
@@ -22,15 +22,10 @@
complete -o default -F _pip_completion {prog}
""",
"zsh": """
- function _pip_completion {{
- local words cword
- read -Ac words
- read -cn cword
- reply=( $( COMP_WORDS="$words[*]" \\
- COMP_CWORD=$(( cword-1 )) \\
- PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null ))
- }}
- compctl -K _pip_completion {prog}
+ #compdef -P pip[0-9.]#
+ compadd $( COMP_WORDS="$words[*]" \\
+ COMP_CWORD=$((CURRENT-1)) \\
+ PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )
""",
"fish": """
function __fish_complete_pip
|
{"golden_diff": "diff --git a/src/pip/_internal/commands/completion.py b/src/pip/_internal/commands/completion.py\n--- a/src/pip/_internal/commands/completion.py\n+++ b/src/pip/_internal/commands/completion.py\n@@ -22,15 +22,10 @@\n complete -o default -F _pip_completion {prog}\n \"\"\",\n \"zsh\": \"\"\"\n- function _pip_completion {{\n- local words cword\n- read -Ac words\n- read -cn cword\n- reply=( $( COMP_WORDS=\"$words[*]\" \\\\\n- COMP_CWORD=$(( cword-1 )) \\\\\n- PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null ))\n- }}\n- compctl -K _pip_completion {prog}\n+ #compdef -P pip[0-9.]#\n+ compadd $( COMP_WORDS=\"$words[*]\" \\\\\n+ COMP_CWORD=$((CURRENT-1)) \\\\\n+ PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )\n \"\"\",\n \"fish\": \"\"\"\n function __fish_complete_pip\n", "issue": "Completion in ZSH doesn't understand that arguments follow certain options\n* Pip version: 9.0.1\r\n* Python version: 2.7.12\r\n* Operating system: Ubuntu 16.04\r\n\r\n### Description:\r\n\r\nCompletion in Zsh uses the older `compctl` builtin and returns completions for long options that take arguments with a trailing equals sign. But compctl/Zsh doesn't understand that as meaning that the option takes an argument and adds a space after the equals sign and also tries to complete the next argument if you remove the equals sign as yet another option.\r\n\r\nNo idea if this is fixable using the older compctl, might want to migrate to the newer compsys... With compsys you will probably have to modify the completion output from Pip or preprocess it in shell code so that it fits what something like `_arguments` expects.\r\n\r\nhttps://github.com/pypa/pip/pull/4842 will make it complete file names by re-implementing that inside pip instead of letting the shell handle it which means certain stuff like colored file names won't work or `zstyle` related settings for file name completion. 
And it still won't fix the fact that Zsh will add a space after the equals sign...\r\n\r\n### What I've run:\r\n\r\n```sh\r\n\u279c pip install --requirem<tab>\r\n\u279c pip install --requirement= # With a space\r\n\u279c pip install --requirement=<tab> # Nothing\r\n\u279c pip install --requirement= <tab>\r\n\u279c pip install --requirement= -- # Sigh...\r\n```\r\n\n", "before_files": [{"content": "import sys\nimport textwrap\nfrom optparse import Values\nfrom typing import List\n\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import SUCCESS\nfrom pip._internal.utils.misc import get_prog\n\nBASE_COMPLETION = \"\"\"\n# pip {shell} completion start{script}# pip {shell} completion end\n\"\"\"\n\nCOMPLETION_SCRIPTS = {\n \"bash\": \"\"\"\n _pip_completion()\n {{\n COMPREPLY=( $( COMP_WORDS=\"${{COMP_WORDS[*]}}\" \\\\\n COMP_CWORD=$COMP_CWORD \\\\\n PIP_AUTO_COMPLETE=1 $1 2>/dev/null ) )\n }}\n complete -o default -F _pip_completion {prog}\n \"\"\",\n \"zsh\": \"\"\"\n function _pip_completion {{\n local words cword\n read -Ac words\n read -cn cword\n reply=( $( COMP_WORDS=\"$words[*]\" \\\\\n COMP_CWORD=$(( cword-1 )) \\\\\n PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null ))\n }}\n compctl -K _pip_completion {prog}\n \"\"\",\n \"fish\": \"\"\"\n function __fish_complete_pip\n set -lx COMP_WORDS (commandline -o) \"\"\n set -lx COMP_CWORD ( \\\\\n math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\\\\n )\n set -lx PIP_AUTO_COMPLETE 1\n string split \\\\ -- (eval $COMP_WORDS[1])\n end\n complete -fa \"(__fish_complete_pip)\" -c {prog}\n \"\"\",\n \"powershell\": \"\"\"\n if ((Test-Path Function:\\\\TabExpansion) -and -not `\n (Test-Path Function:\\\\_pip_completeBackup)) {{\n Rename-Item Function:\\\\TabExpansion _pip_completeBackup\n }}\n function TabExpansion($line, $lastWord) {{\n $lastBlock = [regex]::Split($line, '[|;]')[-1].TrimStart()\n if ($lastBlock.StartsWith(\"{prog} \")) {{\n $Env:COMP_WORDS=$lastBlock\n $Env:COMP_CWORD=$lastBlock.Split().Length - 1\n $Env:PIP_AUTO_COMPLETE=1\n (& {prog}).Split()\n Remove-Item Env:COMP_WORDS\n Remove-Item Env:COMP_CWORD\n Remove-Item Env:PIP_AUTO_COMPLETE\n }}\n elseif (Test-Path Function:\\\\_pip_completeBackup) {{\n # Fall back on existing tab expansion\n _pip_completeBackup $line $lastWord\n }}\n }}\n \"\"\",\n}\n\n\nclass CompletionCommand(Command):\n \"\"\"A helper command to be used for command completion.\"\"\"\n\n ignore_require_venv = True\n\n def add_options(self) -> None:\n self.cmd_opts.add_option(\n \"--bash\",\n \"-b\",\n action=\"store_const\",\n const=\"bash\",\n dest=\"shell\",\n help=\"Emit completion code for bash\",\n )\n self.cmd_opts.add_option(\n \"--zsh\",\n \"-z\",\n action=\"store_const\",\n const=\"zsh\",\n dest=\"shell\",\n help=\"Emit completion code for zsh\",\n )\n self.cmd_opts.add_option(\n \"--fish\",\n \"-f\",\n action=\"store_const\",\n const=\"fish\",\n dest=\"shell\",\n help=\"Emit completion code for fish\",\n )\n self.cmd_opts.add_option(\n \"--powershell\",\n \"-p\",\n action=\"store_const\",\n const=\"powershell\",\n dest=\"shell\",\n help=\"Emit completion code for powershell\",\n )\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options: Values, args: List[str]) -> int:\n \"\"\"Prints the completion code of the given shell\"\"\"\n shells = COMPLETION_SCRIPTS.keys()\n shell_options = [\"--\" + shell for shell in sorted(shells)]\n if options.shell in shells:\n script = textwrap.dedent(\n COMPLETION_SCRIPTS.get(options.shell, 
\"\").format(prog=get_prog())\n )\n print(BASE_COMPLETION.format(script=script, shell=options.shell))\n return SUCCESS\n else:\n sys.stderr.write(\n \"ERROR: You must pass {}\\n\".format(\" or \".join(shell_options))\n )\n return SUCCESS\n", "path": "src/pip/_internal/commands/completion.py"}]}
| 2,074 | 247 |
gh_patches_debug_27845 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-2001 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ClipScore errors on captions with more than 77 tokens
## 🐛 Bug
If you run [CLIPScore](https://torchmetrics.readthedocs.io/en/stable/multimodal/clip_score.html) between an image and a caption, where the caption has more than 77 tokens (longer than the max string than CLIP can process) -- the clip score errors.
### To Reproduce
Compute CLIPScore between a caption with 77+ tokens and an image.
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
<details>
<summary>Code sample</summary>
```
metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch32")
metric.to('cuda')
clip_score = metric(image_tensor, caption)
```
```
Traceback (most recent call last):
File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/home/user/scripts/compute_clip_scores.py", line 125, in <module>
compute_clip_scores(response=response,
File "/home/user/scripts/compute_clip_scores.py", line 87, in compute_clip_scores
clip_score = metric(image_tensor, caption)
File "/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/torchmetrics/metric.py", line 288, in forward
self._forward_cache = self._forward_full_state_update(*args, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/torchmetrics/metric.py", line 302, in _forward_full_state_update
self.update(*args, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/torchmetrics/metric.py", line 456, in wrapped_func
raise err
File "/home/user/.local/lib/python3.8/site-packages/torchmetrics/metric.py", line 446, in wrapped_func
update(*args, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/torchmetrics/multimodal/clip_score.py", line 123, in update
score, n_samples = _clip_score_update(images, text, self.model, self.processor)
File "/home/user/.local/lib/python3.8/site-packages/torchmetrics/functional/multimodal/clip_score.py", line 69, in _clip_score_update
txt_features = model.get_text_features(
File "/home/user/.local/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py", line 1017, in get_text_features
text_outputs = self.text_model(
File "/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py", line 730, in forward
hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
File "/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py", line 230, in forward
embeddings = inputs_embeds + position_embeddings
RuntimeError: The size of tensor a (138) must match the size of tensor b (77) at non-singleton dimension 1
```
<!-- Ideally attach a minimal code sample to reproduce the decried issue.
Minimal means having the shortest code but still preserving the bug. -->
</details>
### Expected behavior
Present a warning to the user and truncate the caption so that the metric can be computed on the first 77 tokens of the provided caption
### Environment
- TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source): **1.0.3, pip**
- Python & PyTorch Version (e.g., 1.0): **Python 3.8.10, PyTorch 2.0.1+cu118**
- Any other relevant information such as OS (e.g., Linux): **Linux**
</issue>
<code>
[start of src/torchmetrics/functional/multimodal/clip_score.py]
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import List, Tuple, Union
15
16 import torch
17 from torch import Tensor
18 from typing_extensions import Literal
19
20 from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout
21 from torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_10
22
23 if _TRANSFORMERS_GREATER_EQUAL_4_10:
24 from transformers import CLIPModel as _CLIPModel
25 from transformers import CLIPProcessor as _CLIPProcessor
26
27 def _download_clip() -> None:
28 _CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
29 _CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
30
31 if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_clip):
32 __doctest_skip__ = ["clip_score"]
33
34 else:
35 __doctest_skip__ = ["clip_score"]
36 _CLIPModel = None
37 _CLIPProcessor = None
38
39
40 def _clip_score_update(
41 images: Union[Tensor, List[Tensor]],
42 text: Union[str, List[str]],
43 model: _CLIPModel,
44 processor: _CLIPProcessor,
45 ) -> Tuple[Tensor, int]:
46 if not isinstance(images, list):
47 if images.ndim == 3:
48 images = [images]
49 else: # unwrap into list
50 images = list(images)
51
52 if not all(i.ndim == 3 for i in images):
53 raise ValueError("Expected all images to be 3d but found image that has either more or less")
54
55 if not isinstance(text, list):
56 text = [text]
57
58 if len(text) != len(images):
59 raise ValueError(
60 f"Expected the number of images and text examples to be the same but got {len(images)} and {len(text)}"
61 )
62 device = images[0].device
63 processed_input = processor(text=text, images=[i.cpu() for i in images], return_tensors="pt", padding=True)
64
65 img_features = model.get_image_features(processed_input["pixel_values"].to(device))
66 img_features = img_features / img_features.norm(p=2, dim=-1, keepdim=True)
67
68 txt_features = model.get_text_features(
69 processed_input["input_ids"].to(device), processed_input["attention_mask"].to(device)
70 )
71 txt_features = txt_features / txt_features.norm(p=2, dim=-1, keepdim=True)
72
73 # cosine similarity between feature vectors
74 score = 100 * (img_features * txt_features).sum(axis=-1)
75 return score, len(text)
76
77
78 def _get_model_and_processor(
79 model_name_or_path: Literal[
80 "openai/clip-vit-base-patch16",
81 "openai/clip-vit-base-patch32",
82 "openai/clip-vit-large-patch14-336",
83 "openai/clip-vit-large-patch14",
84 ] = "openai/clip-vit-large-patch14",
85 ) -> Tuple[_CLIPModel, _CLIPProcessor]:
86 if _TRANSFORMERS_GREATER_EQUAL_4_10:
87 model = _CLIPModel.from_pretrained(model_name_or_path)
88 processor = _CLIPProcessor.from_pretrained(model_name_or_path)
89 return model, processor
90
91 raise ModuleNotFoundError(
92 "`clip_score` metric requires `transformers` package be installed."
93 " Either install with `pip install transformers>=4.10.0` or `pip install torchmetrics[multimodal]`."
94 )
95
96
97 def clip_score(
98 images: Union[Tensor, List[Tensor]],
99 text: Union[str, List[str]],
100 model_name_or_path: Literal[
101 "openai/clip-vit-base-patch16",
102 "openai/clip-vit-base-patch32",
103 "openai/clip-vit-large-patch14-336",
104 "openai/clip-vit-large-patch14",
105 ] = "openai/clip-vit-large-patch14",
106 ) -> Tensor:
107 r"""Calculate `CLIP Score`_ which is a text-to-image similarity metric.
108
109 CLIP is a reference free metric that can be used to evaluate the correlation between a generated caption for an
110 image and the actual content of the image. It has been found to be highly correlated with human judgement. The
111 metric is defined as:
112
113 .. math::
114 \text{CLIPScore(I, C)} = max(100 * cos(E_I, E_C), 0)
115
116 which corresponds to the cosine similarity between visual CLIP embedding :math:`E_i` for an image :math:`i` and
117 textual CLIP embedding :math:`E_C` for an caption :math:`C`. The score is bound between 0 and 100 and the closer
118 to 100 the better.
119
120 .. note:: Metric is not scriptable
121
122 Args:
123 images: Either a single [N, C, H, W] tensor or a list of [C, H, W] tensors
124 text: Either a single caption or a list of captions
125 model_name_or_path: string indicating the version of the CLIP model to use. Available models are
126 `"openai/clip-vit-base-patch16"`, `"openai/clip-vit-base-patch32"`, `"openai/clip-vit-large-patch14-336"`
127 and `"openai/clip-vit-large-patch14"`,
128
129 Raises:
130 ModuleNotFoundError:
131 If transformers package is not installed or version is lower than 4.10.0
132 ValueError:
133 If not all images have format [C, H, W]
134 ValueError:
135 If the number of images and captions do not match
136
137 Example:
138 >>> import torch
139 >>> _ = torch.manual_seed(42)
140 >>> from torchmetrics.functional.multimodal import clip_score
141 >>> score = clip_score(torch.randint(255, (3, 224, 224)), "a photo of a cat", "openai/clip-vit-base-patch16")
142 >>> print(score.detach())
143 tensor(24.4255)
144
145 """
146 model, processor = _get_model_and_processor(model_name_or_path)
147 device = images.device if isinstance(images, Tensor) else images[0].device
148 score, _ = _clip_score_update(images, text, model.to(device), processor)
149 score = score.mean(0)
150 return torch.max(score, torch.zeros_like(score))
151
[end of src/torchmetrics/functional/multimodal/clip_score.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/torchmetrics/functional/multimodal/clip_score.py b/src/torchmetrics/functional/multimodal/clip_score.py
--- a/src/torchmetrics/functional/multimodal/clip_score.py
+++ b/src/torchmetrics/functional/multimodal/clip_score.py
@@ -17,6 +17,7 @@
from torch import Tensor
from typing_extensions import Literal
+from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout
from torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_10
@@ -65,6 +66,17 @@
img_features = model.get_image_features(processed_input["pixel_values"].to(device))
img_features = img_features / img_features.norm(p=2, dim=-1, keepdim=True)
+ max_position_embeddings = model.config.text_config.max_position_embeddings
+ if processed_input["attention_mask"].shape[-1] > max_position_embeddings:
+ rank_zero_warn(
+ f"Encountered caption longer than {max_position_embeddings=}. Will truncate captions to this length."
+ "If longer captions are needed, initialize argument `model_name_or_path` with a model that supports"
+ "longer sequences",
+ UserWarning,
+ )
+ processed_input["attention_mask"] = processed_input["attention_mask"][..., :max_position_embeddings]
+ processed_input["input_ids"] = processed_input["input_ids"][..., :max_position_embeddings]
+
txt_features = model.get_text_features(
processed_input["input_ids"].to(device), processed_input["attention_mask"].to(device)
)
|
{"golden_diff": "diff --git a/src/torchmetrics/functional/multimodal/clip_score.py b/src/torchmetrics/functional/multimodal/clip_score.py\n--- a/src/torchmetrics/functional/multimodal/clip_score.py\n+++ b/src/torchmetrics/functional/multimodal/clip_score.py\n@@ -17,6 +17,7 @@\n from torch import Tensor\n from typing_extensions import Literal\n \n+from torchmetrics.utilities import rank_zero_warn\n from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout\n from torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_10\n \n@@ -65,6 +66,17 @@\n img_features = model.get_image_features(processed_input[\"pixel_values\"].to(device))\n img_features = img_features / img_features.norm(p=2, dim=-1, keepdim=True)\n \n+ max_position_embeddings = model.config.text_config.max_position_embeddings\n+ if processed_input[\"attention_mask\"].shape[-1] > max_position_embeddings:\n+ rank_zero_warn(\n+ f\"Encountered caption longer than {max_position_embeddings=}. Will truncate captions to this length.\"\n+ \"If longer captions are needed, initialize argument `model_name_or_path` with a model that supports\"\n+ \"longer sequences\",\n+ UserWarning,\n+ )\n+ processed_input[\"attention_mask\"] = processed_input[\"attention_mask\"][..., :max_position_embeddings]\n+ processed_input[\"input_ids\"] = processed_input[\"input_ids\"][..., :max_position_embeddings]\n+\n txt_features = model.get_text_features(\n processed_input[\"input_ids\"].to(device), processed_input[\"attention_mask\"].to(device)\n )\n", "issue": "ClipScore errors on captions with more than 77 tokens\n## \ud83d\udc1b Bug\r\n\r\nIf you run [CLIPScore](https://torchmetrics.readthedocs.io/en/stable/multimodal/clip_score.html) between an image and a caption, where the caption has more than 77 tokens (longer than the max string than CLIP can process) -- the clip score errors.\r\n\r\n### To Reproduce\r\nCompute CLIPScore between a caption with 77+ tokens and an image.\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n<details>\r\n <summary>Code sample</summary>\r\n\r\n```\r\nmetric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch32\")\r\nmetric.to('cuda')\r\nclip_score = metric(image_tensor, caption)\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.8/runpy.py\", line 194, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/usr/lib/python3.8/runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/user/scripts/compute_clip_scores.py\", line 125, in <module>\r\n compute_clip_scores(response=response,\r\n File \"/home/user/scripts/compute_clip_scores.py\", line 87, in compute_clip_scores\r\n clip_score = metric(image_tensor, caption)\r\n File \"/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py\", line 1501, in _call_impl\r\n return forward_call(*args, **kwargs)\r\n File \"/home/user/.local/lib/python3.8/site-packages/torchmetrics/metric.py\", line 288, in forward\r\n self._forward_cache = self._forward_full_state_update(*args, **kwargs)\r\n File \"/home/user/.local/lib/python3.8/site-packages/torchmetrics/metric.py\", line 302, in _forward_full_state_update\r\n self.update(*args, **kwargs)\r\n File \"/home/user/.local/lib/python3.8/site-packages/torchmetrics/metric.py\", line 456, in wrapped_func\r\n raise err\r\n File \"/home/user/.local/lib/python3.8/site-packages/torchmetrics/metric.py\", line 446, in wrapped_func\r\n update(*args, 
**kwargs)\r\n File \"/home/user/.local/lib/python3.8/site-packages/torchmetrics/multimodal/clip_score.py\", line 123, in update\r\n score, n_samples = _clip_score_update(images, text, self.model, self.processor)\r\n File \"/home/user/.local/lib/python3.8/site-packages/torchmetrics/functional/multimodal/clip_score.py\", line 69, in _clip_score_update\r\n txt_features = model.get_text_features(\r\n File \"/home/user/.local/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py\", line 1017, in get_text_features\r\n text_outputs = self.text_model(\r\n File \"/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py\", line 1501, in _call_impl\r\n return forward_call(*args, **kwargs)\r\n File \"/home/user/.local/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py\", line 730, in forward\r\n hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)\r\n File \"/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py\", line 1501, in _call_impl\r\n return forward_call(*args, **kwargs)\r\n File \"/home/user/.local/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py\", line 230, in forward\r\n embeddings = inputs_embeds + position_embeddings\r\nRuntimeError: The size of tensor a (138) must match the size of tensor b (77) at non-singleton dimension 1\r\n```\r\n\r\n\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n</details>\r\n\r\n### Expected behavior\r\n\r\nPresent a warning to the user and truncate the caption so that the metric can be computed on the first 77 tokens of the provided caption\r\n\r\n### Environment\r\n\r\n- TorchMetrics version (and how you installed TM, e.g. 
`conda`, `pip`, build from source): **1.0.3, pip**\r\n- Python & PyTorch Version (e.g., 1.0): **Python 3.8.10, PyTorch 2.0.1+cu118**\r\n- Any other relevant information such as OS (e.g., Linux): **Linux**\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import List, Tuple, Union\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout\nfrom torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_10\n\nif _TRANSFORMERS_GREATER_EQUAL_4_10:\n from transformers import CLIPModel as _CLIPModel\n from transformers import CLIPProcessor as _CLIPProcessor\n\n def _download_clip() -> None:\n _CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n _CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\")\n\n if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_clip):\n __doctest_skip__ = [\"clip_score\"]\n\nelse:\n __doctest_skip__ = [\"clip_score\"]\n _CLIPModel = None\n _CLIPProcessor = None\n\n\ndef _clip_score_update(\n images: Union[Tensor, List[Tensor]],\n text: Union[str, List[str]],\n model: _CLIPModel,\n processor: _CLIPProcessor,\n) -> Tuple[Tensor, int]:\n if not isinstance(images, list):\n if images.ndim == 3:\n images = [images]\n else: # unwrap into list\n images = list(images)\n\n if not all(i.ndim == 3 for i in images):\n raise ValueError(\"Expected all images to be 3d but found image that has either more or less\")\n\n if not isinstance(text, list):\n text = [text]\n\n if len(text) != len(images):\n raise ValueError(\n f\"Expected the number of images and text examples to be the same but got {len(images)} and {len(text)}\"\n )\n device = images[0].device\n processed_input = processor(text=text, images=[i.cpu() for i in images], return_tensors=\"pt\", padding=True)\n\n img_features = model.get_image_features(processed_input[\"pixel_values\"].to(device))\n img_features = img_features / img_features.norm(p=2, dim=-1, keepdim=True)\n\n txt_features = model.get_text_features(\n processed_input[\"input_ids\"].to(device), processed_input[\"attention_mask\"].to(device)\n )\n txt_features = txt_features / txt_features.norm(p=2, dim=-1, keepdim=True)\n\n # cosine similarity between feature vectors\n score = 100 * (img_features * txt_features).sum(axis=-1)\n return score, len(text)\n\n\ndef _get_model_and_processor(\n model_name_or_path: Literal[\n \"openai/clip-vit-base-patch16\",\n \"openai/clip-vit-base-patch32\",\n \"openai/clip-vit-large-patch14-336\",\n \"openai/clip-vit-large-patch14\",\n ] = \"openai/clip-vit-large-patch14\",\n) -> Tuple[_CLIPModel, _CLIPProcessor]:\n if _TRANSFORMERS_GREATER_EQUAL_4_10:\n model = _CLIPModel.from_pretrained(model_name_or_path)\n processor = _CLIPProcessor.from_pretrained(model_name_or_path)\n return model, processor\n\n raise ModuleNotFoundError(\n \"`clip_score` metric requires `transformers` package be installed.\"\n 
\" Either install with `pip install transformers>=4.10.0` or `pip install torchmetrics[multimodal]`.\"\n )\n\n\ndef clip_score(\n images: Union[Tensor, List[Tensor]],\n text: Union[str, List[str]],\n model_name_or_path: Literal[\n \"openai/clip-vit-base-patch16\",\n \"openai/clip-vit-base-patch32\",\n \"openai/clip-vit-large-patch14-336\",\n \"openai/clip-vit-large-patch14\",\n ] = \"openai/clip-vit-large-patch14\",\n) -> Tensor:\n r\"\"\"Calculate `CLIP Score`_ which is a text-to-image similarity metric.\n\n CLIP is a reference free metric that can be used to evaluate the correlation between a generated caption for an\n image and the actual content of the image. It has been found to be highly correlated with human judgement. The\n metric is defined as:\n\n .. math::\n \\text{CLIPScore(I, C)} = max(100 * cos(E_I, E_C), 0)\n\n which corresponds to the cosine similarity between visual CLIP embedding :math:`E_i` for an image :math:`i` and\n textual CLIP embedding :math:`E_C` for an caption :math:`C`. The score is bound between 0 and 100 and the closer\n to 100 the better.\n\n .. note:: Metric is not scriptable\n\n Args:\n images: Either a single [N, C, H, W] tensor or a list of [C, H, W] tensors\n text: Either a single caption or a list of captions\n model_name_or_path: string indicating the version of the CLIP model to use. Available models are\n `\"openai/clip-vit-base-patch16\"`, `\"openai/clip-vit-base-patch32\"`, `\"openai/clip-vit-large-patch14-336\"`\n and `\"openai/clip-vit-large-patch14\"`,\n\n Raises:\n ModuleNotFoundError:\n If transformers package is not installed or version is lower than 4.10.0\n ValueError:\n If not all images have format [C, H, W]\n ValueError:\n If the number of images and captions do not match\n\n Example:\n >>> import torch\n >>> _ = torch.manual_seed(42)\n >>> from torchmetrics.functional.multimodal import clip_score\n >>> score = clip_score(torch.randint(255, (3, 224, 224)), \"a photo of a cat\", \"openai/clip-vit-base-patch16\")\n >>> print(score.detach())\n tensor(24.4255)\n\n \"\"\"\n model, processor = _get_model_and_processor(model_name_or_path)\n device = images.device if isinstance(images, Tensor) else images[0].device\n score, _ = _clip_score_update(images, text, model.to(device), processor)\n score = score.mean(0)\n return torch.max(score, torch.zeros_like(score))\n", "path": "src/torchmetrics/functional/multimodal/clip_score.py"}]}
| 3,521 | 369 |
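
For reference, the truncation guard in the torchmetrics patch above can be read as a small standalone helper. This is a sketch only: the helper name is made up here, `processed_input` is assumed to be the `return_tensors="pt"` dict produced by `CLIPProcessor`, and the limit is assumed to come from `model.config.text_config.max_position_embeddings` as in the diff.

```python
import torch

def truncate_text_inputs(processed_input: dict, max_position_embeddings: int) -> dict:
    """Clamp tokenized captions to the text encoder's positional window.

    Captions longer than the window (77 tokens for the stock OpenAI CLIP
    checkpoints) are cut down instead of triggering the tensor-size mismatch
    inside the embedding layer.
    """
    if processed_input["attention_mask"].shape[-1] > max_position_embeddings:
        processed_input["attention_mask"] = processed_input["attention_mask"][..., :max_position_embeddings]
        processed_input["input_ids"] = processed_input["input_ids"][..., :max_position_embeddings]
    return processed_input

# Dummy check with a 138-token caption (the length from the traceback) and the 77-token window:
dummy = {
    "input_ids": torch.ones(1, 138, dtype=torch.long),
    "attention_mask": torch.ones(1, 138, dtype=torch.long),
}
assert truncate_text_inputs(dummy, 77)["input_ids"].shape[-1] == 77
```
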
gh_patches_debug_11566 | rasdani/github-patches | git_diff | DDMAL__CantusDB-759 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hide "Edit chants (Fulltext & Volpiano editor)" link from my sources sidebar on flatpages
To prevent encountering a 404 error, we should hide the link from the "My Sources" sidebar on flatpages when the corresponding source has no chants.
</issue>
<code>
[start of django/cantusdb_project/main_app/templatetags/helper_tags.py]
1 import calendar
2 from typing import Union, Optional
3 from django import template
4 from main_app.models import Source
5 from articles.models import Article
6 from django.utils.safestring import mark_safe
7 from django.urls import reverse
8 from django.core.paginator import Paginator
9
10
11 register = template.Library()
12
13
14 @register.simple_tag(takes_context=False)
15 def recent_articles():
16 """
17 Generates a html unordered list of recent articles for display on the homepage
18
19 Used in:
20 templates/flatpages/default.html
21 """
22 articles = Article.objects.order_by("-date_created")[:5]
23 list_item_template = '<li style="padding-bottom: 0.5em;"><a href="{url}">{title}</a><br><small>{date}</small></li>'
24 list_items = [
25 list_item_template.format(
26 url=a.get_absolute_url(),
27 title=a.title,
28 date=a.date_created.strftime("%A %B %-d, %Y"),
29 )
30 for a in articles
31 ]
32 list_items_string = "".join(list_items)
33 recent_articles_string = "<ul>{lis}</ul>".format(lis=list_items_string)
34 return mark_safe(recent_articles_string)
35
36
37 @register.simple_tag(takes_context=False)
38 def my_sources(user):
39 """
40 Generates a html unordered list of sources the currently logged-in user has access to edit, for display on the homepage
41
42 Used in:
43 templates/flatpages/default.html
44 """
45
46 def make_source_detail_link_with_siglum(source):
47 id = source.id
48 siglum = source.rism_siglum
49 url = reverse("source-detail", args=[id])
50 link = '<a href="{}">{}</a>'.format(url, siglum)
51 return link
52
53 def make_source_detail_link_with_title(source):
54 id = source.id
55 title = source.title
56 url = reverse("source-detail", args=[id])
57 link = '<a href="{}">{}</a>'.format(url, title)
58 return link
59
60 def make_add_new_chants_link(source):
61 id = source.id
62 url = reverse("chant-create", args=[id])
63 link = '<a href="{}">+ Add new chant</a>'.format(url)
64 return link
65
66 def make_edit_chants_link(source):
67 id = source.id
68 url = reverse("source-edit-chants", args=[id])
69 link = '<a href="{}">Edit chants (Fulltext & Volpiano editor)</a>'.format(url)
70 return link
71
72 def make_links_for_source(source):
73 link_with_siglum = make_source_detail_link_with_siglum(source)
74 link_with_title = make_source_detail_link_with_title(source)
75 add_new_chants_link = make_add_new_chants_link(source)
76 edit_chants_link = make_edit_chants_link(source)
77 template = """{sigl}<br>
78 <small>
79 <b>{title}</b><br>
80 {add}<br>
81 {edit}<br>
82 </small>
83 """
84 links_string = template.format(
85 sigl=link_with_siglum,
86 title=link_with_title,
87 add=add_new_chants_link,
88 edit=edit_chants_link,
89 )
90 return links_string
91
92 MAX_SOURCES_TO_DISPLAY = 6
93 sources = list(user.sources_user_can_edit.all())[:MAX_SOURCES_TO_DISPLAY]
94 source_links = [make_links_for_source(source) for source in sources]
95 list_items = ["<li>{}</li>".format(link) for link in source_links]
96 joined_list_items = "".join(list_items)
97 links_ul = "<ul>{}</ul>".format(joined_list_items)
98 return mark_safe(links_ul)
99
100
101 @register.filter(name="month_to_string")
102 def month_to_string(value: Optional[Union[str, int]]) -> Optional[Union[str, int]]:
103 """
104 Converts month number to textual representation, 3 letters (Jan, Mar, etc)
105
106 used in:
107 main_app/templates/feast_detail.html
108 main_app/templates/feast_list.html
109 """
110 if type(value) == int and value in range(1, 13):
111 return calendar.month_abbr[value]
112 else:
113 return value
114
115
116 @register.simple_tag(takes_context=True)
117 def url_add_get_params(context, **kwargs):
118 """
119 accounts for the situations where there may be two paginations in one page
120
121 Used in:
122 main_app/templates/pagination.html
123 main_app/templates/user_source_list.html
124 """
125 query = context["request"].GET.copy()
126 if "page" in kwargs:
127 query.pop("page", None)
128 if "page2" in kwargs:
129 query.pop("page2", None)
130 query.update(kwargs)
131 return query.urlencode()
132
133
134 @register.simple_tag(takes_context=False)
135 def source_links():
136 """
137 Generates a series of html option tags linking to sources in Cantus Dabase, for display on the homepage
138
139 Used in:
140 templates/flatpages/default.html
141 """
142 sources = (
143 Source.objects.filter(published=True, segment__id=4063)
144 .exclude(siglum=None)
145 .values("siglum", "id")
146 .order_by("siglum")
147 )
148 options = ""
149 for source in sources:
150 option_str = (
151 f"<option value=source/{source['id']}>{source['siglum']}</option>\n"
152 )
153 options += option_str
154
155 return mark_safe(options)
156
157
158 @register.filter
159 def classname(obj):
160 """
161 Returns the name of the object's class
162 A use-case is: {% if object|classname == "Notation" %}
163
164 Used in:
165 main_app/templates/content_overview.html
166 """
167 return obj.__class__.__name__
168
169
170 @register.filter
171 def admin_url_name(class_name, action):
172 """
173 Accepts the name of a class in "main_app", and an action (either "change" or "delete") as arguments.
174 Returns the name of the URL for changing/deleting an object in the admin interface.
175
176 Used in:
177 main_app/templates/content_overview.html
178 """
179 class_name = class_name.lower()
180 action = action.lower()
181
182 return f"admin:main_app_{class_name}_{action}"
183
184
185 @register.filter(name="has_group")
186 def has_group(user, group_name):
187 """
188 Used in:
189 templates/base.html
190 """
191 return user.groups.filter(name=group_name).exists()
192
193
194 @register.simple_tag(takes_context=True)
195 def get_user_source_pagination(context):
196 user_created_sources = (
197 Source.objects.filter(created_by=context["user"])
198 .order_by("-date_created")
199 .distinct()
200 )
201 paginator = Paginator(user_created_sources, 10)
202 page_number = context["request"].GET.get("page")
203 page_obj = paginator.get_page(page_number)
204 return page_obj
205
[end of django/cantusdb_project/main_app/templatetags/helper_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/django/cantusdb_project/main_app/templatetags/helper_tags.py b/django/cantusdb_project/main_app/templatetags/helper_tags.py
--- a/django/cantusdb_project/main_app/templatetags/helper_tags.py
+++ b/django/cantusdb_project/main_app/templatetags/helper_tags.py
@@ -73,7 +73,10 @@
link_with_siglum = make_source_detail_link_with_siglum(source)
link_with_title = make_source_detail_link_with_title(source)
add_new_chants_link = make_add_new_chants_link(source)
- edit_chants_link = make_edit_chants_link(source)
+ if source.chant_set.exists():
+ edit_chants_link = make_edit_chants_link(source)
+ else:
+ edit_chants_link = ""
template = """{sigl}<br>
<small>
<b>{title}</b><br>
|
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/templatetags/helper_tags.py b/django/cantusdb_project/main_app/templatetags/helper_tags.py\n--- a/django/cantusdb_project/main_app/templatetags/helper_tags.py\n+++ b/django/cantusdb_project/main_app/templatetags/helper_tags.py\n@@ -73,7 +73,10 @@\n link_with_siglum = make_source_detail_link_with_siglum(source)\n link_with_title = make_source_detail_link_with_title(source)\n add_new_chants_link = make_add_new_chants_link(source)\n- edit_chants_link = make_edit_chants_link(source)\n+ if source.chant_set.exists():\n+ edit_chants_link = make_edit_chants_link(source)\n+ else:\n+ edit_chants_link = \"\"\n template = \"\"\"{sigl}<br>\n <small>\n <b>{title}</b><br>\n", "issue": "Hide \"Edit chants (Fulltext & Volpiano editor)\" link from my sources sidebar on flatpages\nTo prevent encountering a 404 error, we should hide the link from the \"My Sources\" sidebar on flatpages when the corresponding source has no chants.\n", "before_files": [{"content": "import calendar\nfrom typing import Union, Optional\nfrom django import template\nfrom main_app.models import Source\nfrom articles.models import Article\nfrom django.utils.safestring import mark_safe\nfrom django.urls import reverse\nfrom django.core.paginator import Paginator\n\n\nregister = template.Library()\n\n\[email protected]_tag(takes_context=False)\ndef recent_articles():\n \"\"\"\n Generates a html unordered list of recent articles for display on the homepage\n\n Used in:\n templates/flatpages/default.html\n \"\"\"\n articles = Article.objects.order_by(\"-date_created\")[:5]\n list_item_template = '<li style=\"padding-bottom: 0.5em;\"><a href=\"{url}\">{title}</a><br><small>{date}</small></li>'\n list_items = [\n list_item_template.format(\n url=a.get_absolute_url(),\n title=a.title,\n date=a.date_created.strftime(\"%A %B %-d, %Y\"),\n )\n for a in articles\n ]\n list_items_string = \"\".join(list_items)\n recent_articles_string = \"<ul>{lis}</ul>\".format(lis=list_items_string)\n return mark_safe(recent_articles_string)\n\n\[email protected]_tag(takes_context=False)\ndef my_sources(user):\n \"\"\"\n Generates a html unordered list of sources the currently logged-in user has access to edit, for display on the homepage\n\n Used in:\n templates/flatpages/default.html\n \"\"\"\n\n def make_source_detail_link_with_siglum(source):\n id = source.id\n siglum = source.rism_siglum\n url = reverse(\"source-detail\", args=[id])\n link = '<a href=\"{}\">{}</a>'.format(url, siglum)\n return link\n\n def make_source_detail_link_with_title(source):\n id = source.id\n title = source.title\n url = reverse(\"source-detail\", args=[id])\n link = '<a href=\"{}\">{}</a>'.format(url, title)\n return link\n\n def make_add_new_chants_link(source):\n id = source.id\n url = reverse(\"chant-create\", args=[id])\n link = '<a href=\"{}\">+ Add new chant</a>'.format(url)\n return link\n\n def make_edit_chants_link(source):\n id = source.id\n url = reverse(\"source-edit-chants\", args=[id])\n link = '<a href=\"{}\">Edit chants (Fulltext & Volpiano editor)</a>'.format(url)\n return link\n\n def make_links_for_source(source):\n link_with_siglum = make_source_detail_link_with_siglum(source)\n link_with_title = make_source_detail_link_with_title(source)\n add_new_chants_link = make_add_new_chants_link(source)\n edit_chants_link = make_edit_chants_link(source)\n template = \"\"\"{sigl}<br>\n <small>\n <b>{title}</b><br>\n {add}<br>\n {edit}<br>\n </small>\n \"\"\"\n links_string = template.format(\n 
sigl=link_with_siglum,\n title=link_with_title,\n add=add_new_chants_link,\n edit=edit_chants_link,\n )\n return links_string\n\n MAX_SOURCES_TO_DISPLAY = 6\n sources = list(user.sources_user_can_edit.all())[:MAX_SOURCES_TO_DISPLAY]\n source_links = [make_links_for_source(source) for source in sources]\n list_items = [\"<li>{}</li>\".format(link) for link in source_links]\n joined_list_items = \"\".join(list_items)\n links_ul = \"<ul>{}</ul>\".format(joined_list_items)\n return mark_safe(links_ul)\n\n\[email protected](name=\"month_to_string\")\ndef month_to_string(value: Optional[Union[str, int]]) -> Optional[Union[str, int]]:\n \"\"\"\n Converts month number to textual representation, 3 letters (Jan, Mar, etc)\n\n used in:\n main_app/templates/feast_detail.html\n main_app/templates/feast_list.html\n \"\"\"\n if type(value) == int and value in range(1, 13):\n return calendar.month_abbr[value]\n else:\n return value\n\n\[email protected]_tag(takes_context=True)\ndef url_add_get_params(context, **kwargs):\n \"\"\"\n accounts for the situations where there may be two paginations in one page\n\n Used in:\n main_app/templates/pagination.html\n main_app/templates/user_source_list.html\n \"\"\"\n query = context[\"request\"].GET.copy()\n if \"page\" in kwargs:\n query.pop(\"page\", None)\n if \"page2\" in kwargs:\n query.pop(\"page2\", None)\n query.update(kwargs)\n return query.urlencode()\n\n\[email protected]_tag(takes_context=False)\ndef source_links():\n \"\"\"\n Generates a series of html option tags linking to sources in Cantus Dabase, for display on the homepage\n\n Used in:\n templates/flatpages/default.html\n \"\"\"\n sources = (\n Source.objects.filter(published=True, segment__id=4063)\n .exclude(siglum=None)\n .values(\"siglum\", \"id\")\n .order_by(\"siglum\")\n )\n options = \"\"\n for source in sources:\n option_str = (\n f\"<option value=source/{source['id']}>{source['siglum']}</option>\\n\"\n )\n options += option_str\n\n return mark_safe(options)\n\n\[email protected]\ndef classname(obj):\n \"\"\"\n Returns the name of the object's class\n A use-case is: {% if object|classname == \"Notation\" %}\n\n Used in:\n main_app/templates/content_overview.html\n \"\"\"\n return obj.__class__.__name__\n\n\[email protected]\ndef admin_url_name(class_name, action):\n \"\"\"\n Accepts the name of a class in \"main_app\", and an action (either \"change\" or \"delete\") as arguments.\n Returns the name of the URL for changing/deleting an object in the admin interface.\n\n Used in:\n main_app/templates/content_overview.html\n \"\"\"\n class_name = class_name.lower()\n action = action.lower()\n\n return f\"admin:main_app_{class_name}_{action}\"\n\n\[email protected](name=\"has_group\")\ndef has_group(user, group_name):\n \"\"\"\n Used in:\n templates/base.html\n \"\"\"\n return user.groups.filter(name=group_name).exists()\n\n\[email protected]_tag(takes_context=True)\ndef get_user_source_pagination(context):\n user_created_sources = (\n Source.objects.filter(created_by=context[\"user\"])\n .order_by(\"-date_created\")\n .distinct()\n )\n paginator = Paginator(user_created_sources, 10)\n page_number = context[\"request\"].GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n return page_obj\n", "path": "django/cantusdb_project/main_app/templatetags/helper_tags.py"}]}
| 2,599 | 211 |
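
For reference, the guard added by the CantusDB patch above, pulled out as a standalone sketch. The helper name is hypothetical; `source` is assumed to be a `main_app` Source instance whose chants are reachable via the reverse relation `chant_set`, as in the diff.

```python
from django.urls import reverse

def edit_chants_link_or_empty(source) -> str:
    """Return the 'Edit chants' sidebar link only when the source has chants."""
    if not source.chant_set.exists():
        return ""  # no chants -> hide the link so users never land on the 404
    url = reverse("source-edit-chants", args=[source.id])
    return f'<a href="{url}">Edit chants (Fulltext & Volpiano editor)</a>'
```
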
gh_patches_debug_37072 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-4719 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unfragmented WebSocket messages getting fragmented
https://github.com/mitmproxy/mitmproxy/blob/e270399a3eba7b3212bf7325beaa50e159a7ca0a/mitmproxy/proxy/layers/websocket.py#L153-L175
While handling WebSocket events, mitmproxy doesn't distinguish between `message_finished` and `frame_finished` ([Message class](https://python-hyper.org/projects/wsproto/en/stable/api.html#wsproto.events.Message)) which in my case led to continuation frames being sent while there were none in the initial WebSocket message.
This is because the wsproto API doesn't always emit complete frames (I guess this is caused by the TCP fragmentation?), they could be chunked while the original WebSocket message has no fragmentation and I think even WebSocket messages using fragmentation with large continuation frames could be emitted as chunks themselves?
To avoid this behavior each `frame_buf` entry must be a complete frame, here is my fix suggestion:
```python
for ws_event in src_ws.events():
if isinstance(ws_event, wsproto.events.Message):
is_text = isinstance(ws_event.data, str)
# Add the data variable to avoid multiple conditions
if is_text:
typ = Opcode.TEXT
data = ws_event.data.encode()
else:
typ = Opcode.BINARY
data = ws_event.data
# Make each frame one entry to frame_buf, append if empty to avoid IndexError
if src_ws.frame_buf:
src_ws.frame_buf[-1] += data
else:
src_ws.frame_buf.append(data)
if ws_event.message_finished:
content = b"".join(src_ws.frame_buf)
fragmentizer = Fragmentizer(src_ws.frame_buf, is_text)
src_ws.frame_buf.clear()
message = websocket.WebSocketMessage(typ, from_client, content)
self.flow.websocket.messages.append(message)
yield WebsocketMessageHook(self.flow)
if not message.dropped:
for msg in fragmentizer(message.content):
yield dst_ws.send2(msg)
# Initialize next frame entry
elif ws_event.frame_finished:
src_ws.frame_buf.append(b"")
```
It works for me but I didn't test it with in an environment using WebSocket continuation frames. Also this only works for unmodified WebSocket messages, injected or modified messages are still concerned by the issue because the `Fragmentizer` class compares the lengths so they will always fall into the else condition (unless you made the modified message keep its original length):
https://github.com/mitmproxy/mitmproxy/blob/e270399a3eba7b3212bf7325beaa50e159a7ca0a/mitmproxy/proxy/layers/websocket.py#L229-L243
For my use case I didn't make a proper fix for this, I just made the first condition always true, maybe a boolean variable can be added to the `WebSocketMessage` and `Fragmentizer` classes or something like that?
</issue>
<code>
[start of mitmproxy/proxy/layers/websocket.py]
1 import time
2 from dataclasses import dataclass
3 from typing import Iterator, List
4
5 import wsproto
6 import wsproto.extensions
7 import wsproto.frame_protocol
8 import wsproto.utilities
9 from mitmproxy import connection, http, websocket
10 from mitmproxy.proxy import commands, events, layer
11 from mitmproxy.proxy.commands import StartHook
12 from mitmproxy.proxy.context import Context
13 from mitmproxy.proxy.events import MessageInjected
14 from mitmproxy.proxy.utils import expect
15 from wsproto import ConnectionState
16 from wsproto.frame_protocol import Opcode
17
18
19 @dataclass
20 class WebsocketStartHook(StartHook):
21 """
22 A WebSocket connection has commenced.
23 """
24 flow: http.HTTPFlow
25
26
27 @dataclass
28 class WebsocketMessageHook(StartHook):
29 """
30 Called when a WebSocket message is received from the client or
31 server. The most recent message will be flow.messages[-1]. The
32 message is user-modifiable. Currently there are two types of
33 messages, corresponding to the BINARY and TEXT frame types.
34 """
35 flow: http.HTTPFlow
36
37
38 @dataclass
39 class WebsocketEndHook(StartHook):
40 """
41 A WebSocket connection has ended.
42 You can check `flow.websocket.close_code` to determine why it ended.
43 """
44
45 flow: http.HTTPFlow
46
47
48 class WebSocketMessageInjected(MessageInjected[websocket.WebSocketMessage]):
49 """
50 The user has injected a custom WebSocket message.
51 """
52
53
54 class WebsocketConnection(wsproto.Connection):
55 """
56 A very thin wrapper around wsproto.Connection:
57
58 - we keep the underlying connection as an attribute for easy access.
59 - we add a framebuffer for incomplete messages
60 - we wrap .send() so that we can directly yield it.
61 """
62 conn: connection.Connection
63 frame_buf: List[bytes]
64
65 def __init__(self, *args, conn: connection.Connection, **kwargs):
66 super(WebsocketConnection, self).__init__(*args, **kwargs)
67 self.conn = conn
68 self.frame_buf = []
69
70 def send2(self, event: wsproto.events.Event) -> commands.SendData:
71 data = self.send(event)
72 return commands.SendData(self.conn, data)
73
74 def __repr__(self):
75 return f"WebsocketConnection<{self.state.name}, {self.conn}>"
76
77
78 class WebsocketLayer(layer.Layer):
79 """
80 WebSocket layer that intercepts and relays messages.
81 """
82 flow: http.HTTPFlow
83 client_ws: WebsocketConnection
84 server_ws: WebsocketConnection
85
86 def __init__(self, context: Context, flow: http.HTTPFlow):
87 super().__init__(context)
88 self.flow = flow
89 assert context.server.connected
90
91 @expect(events.Start)
92 def start(self, _) -> layer.CommandGenerator[None]:
93
94 client_extensions = []
95 server_extensions = []
96
97 # Parse extension headers. We only support deflate at the moment and ignore everything else.
98 assert self.flow.response # satisfy type checker
99 ext_header = self.flow.response.headers.get("Sec-WebSocket-Extensions", "")
100 if ext_header:
101 for ext in wsproto.utilities.split_comma_header(ext_header.encode("ascii", "replace")):
102 ext_name = ext.split(";", 1)[0].strip()
103 if ext_name == wsproto.extensions.PerMessageDeflate.name:
104 client_deflate = wsproto.extensions.PerMessageDeflate()
105 client_deflate.finalize(ext)
106 client_extensions.append(client_deflate)
107 server_deflate = wsproto.extensions.PerMessageDeflate()
108 server_deflate.finalize(ext)
109 server_extensions.append(server_deflate)
110 else:
111 yield commands.Log(f"Ignoring unknown WebSocket extension {ext_name!r}.")
112
113 self.client_ws = WebsocketConnection(wsproto.ConnectionType.SERVER, client_extensions, conn=self.context.client)
114 self.server_ws = WebsocketConnection(wsproto.ConnectionType.CLIENT, server_extensions, conn=self.context.server)
115
116 yield WebsocketStartHook(self.flow)
117
118 self._handle_event = self.relay_messages
119
120 _handle_event = start
121
122 @expect(events.DataReceived, events.ConnectionClosed, WebSocketMessageInjected)
123 def relay_messages(self, event: events.Event) -> layer.CommandGenerator[None]:
124 assert self.flow.websocket # satisfy type checker
125
126 if isinstance(event, events.ConnectionEvent):
127 from_client = event.connection == self.context.client
128 elif isinstance(event, WebSocketMessageInjected):
129 from_client = event.message.from_client
130 else:
131 raise AssertionError(f"Unexpected event: {event}")
132
133 from_str = 'client' if from_client else 'server'
134 if from_client:
135 src_ws = self.client_ws
136 dst_ws = self.server_ws
137 else:
138 src_ws = self.server_ws
139 dst_ws = self.client_ws
140
141 if isinstance(event, events.DataReceived):
142 src_ws.receive_data(event.data)
143 elif isinstance(event, events.ConnectionClosed):
144 src_ws.receive_data(None)
145 elif isinstance(event, WebSocketMessageInjected):
146 fragmentizer = Fragmentizer([], event.message.type == Opcode.TEXT)
147 src_ws._events.extend(
148 fragmentizer(event.message.content)
149 )
150 else: # pragma: no cover
151 raise AssertionError(f"Unexpected event: {event}")
152
153 for ws_event in src_ws.events():
154 if isinstance(ws_event, wsproto.events.Message):
155 is_text = isinstance(ws_event.data, str)
156 if is_text:
157 typ = Opcode.TEXT
158 src_ws.frame_buf.append(ws_event.data.encode())
159 else:
160 typ = Opcode.BINARY
161 src_ws.frame_buf.append(ws_event.data)
162
163 if ws_event.message_finished:
164 content = b"".join(src_ws.frame_buf)
165
166 fragmentizer = Fragmentizer(src_ws.frame_buf, is_text)
167 src_ws.frame_buf.clear()
168
169 message = websocket.WebSocketMessage(typ, from_client, content)
170 self.flow.websocket.messages.append(message)
171 yield WebsocketMessageHook(self.flow)
172
173 if not message.dropped:
174 for msg in fragmentizer(message.content):
175 yield dst_ws.send2(msg)
176
177 elif isinstance(ws_event, (wsproto.events.Ping, wsproto.events.Pong)):
178 yield commands.Log(
179 f"Received WebSocket {ws_event.__class__.__name__.lower()} from {from_str} "
180 f"(payload: {bytes(ws_event.payload)!r})"
181 )
182 yield dst_ws.send2(ws_event)
183 elif isinstance(ws_event, wsproto.events.CloseConnection):
184 self.flow.websocket.timestamp_end = time.time()
185 self.flow.websocket.closed_by_client = from_client
186 self.flow.websocket.close_code = ws_event.code
187 self.flow.websocket.close_reason = ws_event.reason
188
189 for ws in [self.server_ws, self.client_ws]:
190 if ws.state in {ConnectionState.OPEN, ConnectionState.REMOTE_CLOSING}:
191 # response == original event, so no need to differentiate here.
192 yield ws.send2(ws_event)
193 yield commands.CloseConnection(ws.conn)
194 yield WebsocketEndHook(self.flow)
195 self._handle_event = self.done
196 else: # pragma: no cover
197 raise AssertionError(f"Unexpected WebSocket event: {ws_event}")
198
199 @expect(events.DataReceived, events.ConnectionClosed, WebSocketMessageInjected)
200 def done(self, _) -> layer.CommandGenerator[None]:
201 yield from ()
202
203
204 class Fragmentizer:
205 """
206 Theory (RFC 6455):
207 Unless specified otherwise by an extension, frames have no semantic
208 meaning. An intermediary might coalesce and/or split frames, [...]
209
210 Practice:
211 Some WebSocket servers reject large payload sizes.
212
213 As a workaround, we either retain the original chunking or, if the payload has been modified, use ~4kB chunks.
214 """
215 # A bit less than 4kb to accommodate for headers.
216 FRAGMENT_SIZE = 4000
217
218 def __init__(self, fragments: List[bytes], is_text: bool):
219 self.fragment_lengths = [len(x) for x in fragments]
220 self.is_text = is_text
221
222 def msg(self, data: bytes, message_finished: bool):
223 if self.is_text:
224 data_str = data.decode(errors="replace")
225 return wsproto.events.TextMessage(data_str, message_finished=message_finished)
226 else:
227 return wsproto.events.BytesMessage(data, message_finished=message_finished)
228
229 def __call__(self, content: bytes) -> Iterator[wsproto.events.Message]:
230 if len(content) == sum(self.fragment_lengths):
231 # message has the same length, we can reuse the same sizes
232 offset = 0
233 for fl in self.fragment_lengths[:-1]:
234 yield self.msg(content[offset:offset + fl], False)
235 offset += fl
236 yield self.msg(content[offset:], True)
237 else:
238 offset = 0
239 total = len(content) - self.FRAGMENT_SIZE
240 while offset < total:
241 yield self.msg(content[offset:offset + self.FRAGMENT_SIZE], False)
242 offset += self.FRAGMENT_SIZE
243 yield self.msg(content[offset:], True)
244
[end of mitmproxy/proxy/layers/websocket.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mitmproxy/proxy/layers/websocket.py b/mitmproxy/proxy/layers/websocket.py
--- a/mitmproxy/proxy/layers/websocket.py
+++ b/mitmproxy/proxy/layers/websocket.py
@@ -65,7 +65,7 @@
def __init__(self, *args, conn: connection.Connection, **kwargs):
super(WebsocketConnection, self).__init__(*args, **kwargs)
self.conn = conn
- self.frame_buf = []
+ self.frame_buf = [b""]
def send2(self, event: wsproto.events.Event) -> commands.SendData:
data = self.send(event)
@@ -155,10 +155,10 @@
is_text = isinstance(ws_event.data, str)
if is_text:
typ = Opcode.TEXT
- src_ws.frame_buf.append(ws_event.data.encode())
+ src_ws.frame_buf[-1] += ws_event.data.encode()
else:
typ = Opcode.BINARY
- src_ws.frame_buf.append(ws_event.data)
+ src_ws.frame_buf[-1] += ws_event.data
if ws_event.message_finished:
content = b"".join(src_ws.frame_buf)
@@ -174,6 +174,9 @@
for msg in fragmentizer(message.content):
yield dst_ws.send2(msg)
+ elif ws_event.frame_finished:
+ src_ws.frame_buf.append(b"")
+
elif isinstance(ws_event, (wsproto.events.Ping, wsproto.events.Pong)):
yield commands.Log(
f"Received WebSocket {ws_event.__class__.__name__.lower()} from {from_str} "
@@ -209,8 +212,11 @@
Practice:
Some WebSocket servers reject large payload sizes.
+ Other WebSocket servers reject CONTINUATION frames.
As a workaround, we either retain the original chunking or, if the payload has been modified, use ~4kB chunks.
+ If one deals with web servers that do not support CONTINUATION frames, addons need to monkeypatch FRAGMENT_SIZE
+ if they need to modify the message.
"""
# A bit less than 4kb to accommodate for headers.
FRAGMENT_SIZE = 4000
|
{"golden_diff": "diff --git a/mitmproxy/proxy/layers/websocket.py b/mitmproxy/proxy/layers/websocket.py\n--- a/mitmproxy/proxy/layers/websocket.py\n+++ b/mitmproxy/proxy/layers/websocket.py\n@@ -65,7 +65,7 @@\n def __init__(self, *args, conn: connection.Connection, **kwargs):\n super(WebsocketConnection, self).__init__(*args, **kwargs)\n self.conn = conn\n- self.frame_buf = []\n+ self.frame_buf = [b\"\"]\n \n def send2(self, event: wsproto.events.Event) -> commands.SendData:\n data = self.send(event)\n@@ -155,10 +155,10 @@\n is_text = isinstance(ws_event.data, str)\n if is_text:\n typ = Opcode.TEXT\n- src_ws.frame_buf.append(ws_event.data.encode())\n+ src_ws.frame_buf[-1] += ws_event.data.encode()\n else:\n typ = Opcode.BINARY\n- src_ws.frame_buf.append(ws_event.data)\n+ src_ws.frame_buf[-1] += ws_event.data\n \n if ws_event.message_finished:\n content = b\"\".join(src_ws.frame_buf)\n@@ -174,6 +174,9 @@\n for msg in fragmentizer(message.content):\n yield dst_ws.send2(msg)\n \n+ elif ws_event.frame_finished:\n+ src_ws.frame_buf.append(b\"\")\n+\n elif isinstance(ws_event, (wsproto.events.Ping, wsproto.events.Pong)):\n yield commands.Log(\n f\"Received WebSocket {ws_event.__class__.__name__.lower()} from {from_str} \"\n@@ -209,8 +212,11 @@\n \n Practice:\n Some WebSocket servers reject large payload sizes.\n+ Other WebSocket servers reject CONTINUATION frames.\n \n As a workaround, we either retain the original chunking or, if the payload has been modified, use ~4kB chunks.\n+ If one deals with web servers that do not support CONTINUATION frames, addons need to monkeypatch FRAGMENT_SIZE\n+ if they need to modify the message.\n \"\"\"\n # A bit less than 4kb to accommodate for headers.\n FRAGMENT_SIZE = 4000\n", "issue": "Unfragmented WebSocket messages getting fragmented\nhttps://github.com/mitmproxy/mitmproxy/blob/e270399a3eba7b3212bf7325beaa50e159a7ca0a/mitmproxy/proxy/layers/websocket.py#L153-L175\r\n\r\nWhile handling WebSocket events, mitmproxy doesn't distinguish between `message_finished` and `frame_finished` ([Message class](https://python-hyper.org/projects/wsproto/en/stable/api.html#wsproto.events.Message)) which in my case led to continuation frames being sent while there were none in the initial WebSocket message.\r\n\r\nThis is because the wsproto API doesn't always emit complete frames (I guess this is caused by the TCP fragmentation?), they could be chunked while the original WebSocket message has no fragmentation and I think even WebSocket messages using fragmentation with large continuation frames could be emitted as chunks themselves?\r\n\r\nTo avoid this behavior each `frame_buf` entry must be a complete frame, here is my fix suggestion:\r\n\r\n```python\r\n for ws_event in src_ws.events():\r\n if isinstance(ws_event, wsproto.events.Message):\r\n is_text = isinstance(ws_event.data, str)\r\n\r\n # Add the data variable to avoid multiple conditions\r\n if is_text:\r\n typ = Opcode.TEXT\r\n data = ws_event.data.encode()\r\n else:\r\n typ = Opcode.BINARY\r\n data = ws_event.data\r\n\r\n # Make each frame one entry to frame_buf, append if empty to avoid IndexError\r\n if src_ws.frame_buf:\r\n src_ws.frame_buf[-1] += data\r\n else:\r\n src_ws.frame_buf.append(data)\r\n\r\n if ws_event.message_finished:\r\n content = b\"\".join(src_ws.frame_buf)\r\n\r\n fragmentizer = Fragmentizer(src_ws.frame_buf, is_text)\r\n src_ws.frame_buf.clear()\r\n\r\n message = websocket.WebSocketMessage(typ, from_client, content)\r\n self.flow.websocket.messages.append(message)\r\n yield 
WebsocketMessageHook(self.flow)\r\n\r\n if not message.dropped:\r\n for msg in fragmentizer(message.content):\r\n yield dst_ws.send2(msg)\r\n\r\n # Initialize next frame entry\r\n elif ws_event.frame_finished:\r\n src_ws.frame_buf.append(b\"\")\r\n```\r\n\r\nIt works for me but I didn't test it with in an environment using WebSocket continuation frames. Also this only works for unmodified WebSocket messages, injected or modified messages are still concerned by the issue because the `Fragmentizer` class compares the lengths so they will always fall into the else condition (unless you made the modified message keep its original length):\r\n\r\nhttps://github.com/mitmproxy/mitmproxy/blob/e270399a3eba7b3212bf7325beaa50e159a7ca0a/mitmproxy/proxy/layers/websocket.py#L229-L243\r\n\r\nFor my use case I didn't make a proper fix for this, I just made the first condition always true, maybe a boolean variable can be added to the `WebSocketMessage` and `Fragmentizer` classes or something like that?\n", "before_files": [{"content": "import time\nfrom dataclasses import dataclass\nfrom typing import Iterator, List\n\nimport wsproto\nimport wsproto.extensions\nimport wsproto.frame_protocol\nimport wsproto.utilities\nfrom mitmproxy import connection, http, websocket\nfrom mitmproxy.proxy import commands, events, layer\nfrom mitmproxy.proxy.commands import StartHook\nfrom mitmproxy.proxy.context import Context\nfrom mitmproxy.proxy.events import MessageInjected\nfrom mitmproxy.proxy.utils import expect\nfrom wsproto import ConnectionState\nfrom wsproto.frame_protocol import Opcode\n\n\n@dataclass\nclass WebsocketStartHook(StartHook):\n \"\"\"\n A WebSocket connection has commenced.\n \"\"\"\n flow: http.HTTPFlow\n\n\n@dataclass\nclass WebsocketMessageHook(StartHook):\n \"\"\"\n Called when a WebSocket message is received from the client or\n server. The most recent message will be flow.messages[-1]. The\n message is user-modifiable. 
Currently there are two types of\n messages, corresponding to the BINARY and TEXT frame types.\n \"\"\"\n flow: http.HTTPFlow\n\n\n@dataclass\nclass WebsocketEndHook(StartHook):\n \"\"\"\n A WebSocket connection has ended.\n You can check `flow.websocket.close_code` to determine why it ended.\n \"\"\"\n\n flow: http.HTTPFlow\n\n\nclass WebSocketMessageInjected(MessageInjected[websocket.WebSocketMessage]):\n \"\"\"\n The user has injected a custom WebSocket message.\n \"\"\"\n\n\nclass WebsocketConnection(wsproto.Connection):\n \"\"\"\n A very thin wrapper around wsproto.Connection:\n\n - we keep the underlying connection as an attribute for easy access.\n - we add a framebuffer for incomplete messages\n - we wrap .send() so that we can directly yield it.\n \"\"\"\n conn: connection.Connection\n frame_buf: List[bytes]\n\n def __init__(self, *args, conn: connection.Connection, **kwargs):\n super(WebsocketConnection, self).__init__(*args, **kwargs)\n self.conn = conn\n self.frame_buf = []\n\n def send2(self, event: wsproto.events.Event) -> commands.SendData:\n data = self.send(event)\n return commands.SendData(self.conn, data)\n\n def __repr__(self):\n return f\"WebsocketConnection<{self.state.name}, {self.conn}>\"\n\n\nclass WebsocketLayer(layer.Layer):\n \"\"\"\n WebSocket layer that intercepts and relays messages.\n \"\"\"\n flow: http.HTTPFlow\n client_ws: WebsocketConnection\n server_ws: WebsocketConnection\n\n def __init__(self, context: Context, flow: http.HTTPFlow):\n super().__init__(context)\n self.flow = flow\n assert context.server.connected\n\n @expect(events.Start)\n def start(self, _) -> layer.CommandGenerator[None]:\n\n client_extensions = []\n server_extensions = []\n\n # Parse extension headers. We only support deflate at the moment and ignore everything else.\n assert self.flow.response # satisfy type checker\n ext_header = self.flow.response.headers.get(\"Sec-WebSocket-Extensions\", \"\")\n if ext_header:\n for ext in wsproto.utilities.split_comma_header(ext_header.encode(\"ascii\", \"replace\")):\n ext_name = ext.split(\";\", 1)[0].strip()\n if ext_name == wsproto.extensions.PerMessageDeflate.name:\n client_deflate = wsproto.extensions.PerMessageDeflate()\n client_deflate.finalize(ext)\n client_extensions.append(client_deflate)\n server_deflate = wsproto.extensions.PerMessageDeflate()\n server_deflate.finalize(ext)\n server_extensions.append(server_deflate)\n else:\n yield commands.Log(f\"Ignoring unknown WebSocket extension {ext_name!r}.\")\n\n self.client_ws = WebsocketConnection(wsproto.ConnectionType.SERVER, client_extensions, conn=self.context.client)\n self.server_ws = WebsocketConnection(wsproto.ConnectionType.CLIENT, server_extensions, conn=self.context.server)\n\n yield WebsocketStartHook(self.flow)\n\n self._handle_event = self.relay_messages\n\n _handle_event = start\n\n @expect(events.DataReceived, events.ConnectionClosed, WebSocketMessageInjected)\n def relay_messages(self, event: events.Event) -> layer.CommandGenerator[None]:\n assert self.flow.websocket # satisfy type checker\n\n if isinstance(event, events.ConnectionEvent):\n from_client = event.connection == self.context.client\n elif isinstance(event, WebSocketMessageInjected):\n from_client = event.message.from_client\n else:\n raise AssertionError(f\"Unexpected event: {event}\")\n\n from_str = 'client' if from_client else 'server'\n if from_client:\n src_ws = self.client_ws\n dst_ws = self.server_ws\n else:\n src_ws = self.server_ws\n dst_ws = self.client_ws\n\n if isinstance(event, 
events.DataReceived):\n src_ws.receive_data(event.data)\n elif isinstance(event, events.ConnectionClosed):\n src_ws.receive_data(None)\n elif isinstance(event, WebSocketMessageInjected):\n fragmentizer = Fragmentizer([], event.message.type == Opcode.TEXT)\n src_ws._events.extend(\n fragmentizer(event.message.content)\n )\n else: # pragma: no cover\n raise AssertionError(f\"Unexpected event: {event}\")\n\n for ws_event in src_ws.events():\n if isinstance(ws_event, wsproto.events.Message):\n is_text = isinstance(ws_event.data, str)\n if is_text:\n typ = Opcode.TEXT\n src_ws.frame_buf.append(ws_event.data.encode())\n else:\n typ = Opcode.BINARY\n src_ws.frame_buf.append(ws_event.data)\n\n if ws_event.message_finished:\n content = b\"\".join(src_ws.frame_buf)\n\n fragmentizer = Fragmentizer(src_ws.frame_buf, is_text)\n src_ws.frame_buf.clear()\n\n message = websocket.WebSocketMessage(typ, from_client, content)\n self.flow.websocket.messages.append(message)\n yield WebsocketMessageHook(self.flow)\n\n if not message.dropped:\n for msg in fragmentizer(message.content):\n yield dst_ws.send2(msg)\n\n elif isinstance(ws_event, (wsproto.events.Ping, wsproto.events.Pong)):\n yield commands.Log(\n f\"Received WebSocket {ws_event.__class__.__name__.lower()} from {from_str} \"\n f\"(payload: {bytes(ws_event.payload)!r})\"\n )\n yield dst_ws.send2(ws_event)\n elif isinstance(ws_event, wsproto.events.CloseConnection):\n self.flow.websocket.timestamp_end = time.time()\n self.flow.websocket.closed_by_client = from_client\n self.flow.websocket.close_code = ws_event.code\n self.flow.websocket.close_reason = ws_event.reason\n\n for ws in [self.server_ws, self.client_ws]:\n if ws.state in {ConnectionState.OPEN, ConnectionState.REMOTE_CLOSING}:\n # response == original event, so no need to differentiate here.\n yield ws.send2(ws_event)\n yield commands.CloseConnection(ws.conn)\n yield WebsocketEndHook(self.flow)\n self._handle_event = self.done\n else: # pragma: no cover\n raise AssertionError(f\"Unexpected WebSocket event: {ws_event}\")\n\n @expect(events.DataReceived, events.ConnectionClosed, WebSocketMessageInjected)\n def done(self, _) -> layer.CommandGenerator[None]:\n yield from ()\n\n\nclass Fragmentizer:\n \"\"\"\n Theory (RFC 6455):\n Unless specified otherwise by an extension, frames have no semantic\n meaning. 
An intermediary might coalesce and/or split frames, [...]\n\n Practice:\n Some WebSocket servers reject large payload sizes.\n\n As a workaround, we either retain the original chunking or, if the payload has been modified, use ~4kB chunks.\n \"\"\"\n # A bit less than 4kb to accommodate for headers.\n FRAGMENT_SIZE = 4000\n\n def __init__(self, fragments: List[bytes], is_text: bool):\n self.fragment_lengths = [len(x) for x in fragments]\n self.is_text = is_text\n\n def msg(self, data: bytes, message_finished: bool):\n if self.is_text:\n data_str = data.decode(errors=\"replace\")\n return wsproto.events.TextMessage(data_str, message_finished=message_finished)\n else:\n return wsproto.events.BytesMessage(data, message_finished=message_finished)\n\n def __call__(self, content: bytes) -> Iterator[wsproto.events.Message]:\n if len(content) == sum(self.fragment_lengths):\n # message has the same length, we can reuse the same sizes\n offset = 0\n for fl in self.fragment_lengths[:-1]:\n yield self.msg(content[offset:offset + fl], False)\n offset += fl\n yield self.msg(content[offset:], True)\n else:\n offset = 0\n total = len(content) - self.FRAGMENT_SIZE\n while offset < total:\n yield self.msg(content[offset:offset + self.FRAGMENT_SIZE], False)\n offset += self.FRAGMENT_SIZE\n yield self.msg(content[offset:], True)\n", "path": "mitmproxy/proxy/layers/websocket.py"}]}
| 3,725 | 490 |
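
For reference, a minimal sketch of the buffering scheme behind the mitmproxy patch above. The helper name and the exact reset step are illustrative, not the project's API: the point is that `frame_buf` keeps one entry per frame (starting as `[b""]`), chunks are concatenated into the last entry, and a new entry is only opened once wsproto reports `frame_finished`.

```python
from typing import List, Optional

def buffer_chunk(frame_buf: List[bytes], data: bytes,
                 frame_finished: bool, message_finished: bool) -> Optional[bytes]:
    """Reassemble wsproto chunks into frames without inventing extra fragmentation."""
    frame_buf[-1] += data
    if message_finished:
        content = b"".join(frame_buf)
        frame_buf[:] = [b""]          # reset for the next message
        return content
    if frame_finished:
        frame_buf.append(b"")         # this frame is complete; start collecting the next
    return None
```
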
gh_patches_debug_13914 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2083 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
store window: district tile announces more results than there are if I click on them
store window tile shows ALL project of district, if I click and get to project overview, the default filter takes out all old projects and plans without beteiligung.
can we only count running projects with participation?
</issue>
<code>
[start of meinberlin/apps/cms/models/storefronts.py]
1 import random
2
3 from django.db import models
4 from django.utils.functional import cached_property
5 from modelcluster.fields import ParentalKey
6 from modelcluster.models import ClusterableModel
7 from wagtail.admin import edit_handlers
8 from wagtail.admin.edit_handlers import FieldPanel
9 from wagtail.images.edit_handlers import ImageChooserPanel
10 from wagtail.snippets.models import register_snippet
11
12 from adhocracy4.comments.models import Comment
13 from adhocracy4.modules.models import Item
14 from adhocracy4.projects.models import Project
15 from meinberlin.apps.projects import get_project_type
16
17
18 class StorefrontItem(models.Model):
19 district = models.ForeignKey(
20 'a4administrative_districts.AdministrativeDistrict',
21 related_name='+',
22 null=True,
23 blank=True
24 )
25 project = models.ForeignKey(
26 'a4projects.Project',
27 related_name='+',
28 null=True,
29 blank=True
30 )
31 quote = models.TextField(
32 blank=True,
33 max_length=150
34 )
35
36 def __str__(self):
37 return str(self.pk)
38
39 @cached_property
40 def item_type(self):
41 if get_project_type(self.project) in ('external', 'bplan'):
42 return 'external'
43 return 'project'
44
45 @cached_property
46 def project_url(self):
47 if self.item_type == 'external':
48 return self.project.externalproject.url
49 return self.project.get_absolute_url()
50
51 @cached_property
52 def district_project_count(self):
53 return Project.objects\
54 .filter(administrative_district=self.district,
55 is_draft=False,
56 is_public=True,
57 is_archived=False
58 ).count()
59
60 panels = [
61 FieldPanel('district'),
62 FieldPanel('project'),
63 FieldPanel('quote'),
64 ]
65
66
67 @register_snippet
68 class Storefront(ClusterableModel):
69 title = models.CharField(max_length=255, null=False, blank=False)
70 image = models.ForeignKey(
71 'meinberlin_cms.CustomImage',
72 null=True,
73 blank=True,
74 on_delete=models.SET_NULL,
75 related_name='+'
76 )
77 teaser = models.CharField(max_length=100)
78
79 def __str__(self):
80 return self.title
81
82 @cached_property
83 def num_entries(self):
84 num_comments = Comment.objects.all().count()
85 num_items = Item.objects.all().count()
86 return num_comments + num_items
87
88 @cached_property
89 def num_projects(self):
90 projects = Project.objects.all()\
91 .filter(is_draft=False, is_archived=False, is_public=True)
92 active_project_count = 0
93 for project in projects:
94 if project.active_phase or project.future_phases:
95 active_project_count += 1
96 return active_project_count
97
98 @cached_property
99 def random_items(self):
100 items = self.items.all()
101 if items.count() > 3:
102 items_list = items.values_list('id', flat=True)
103 random_items = random.sample(list(items_list), 3)
104 return StorefrontItem.objects.filter(id__in=random_items)
105 else:
106 return items
107
108 title_panel = [
109 edit_handlers.FieldPanel('title')
110 ]
111
112 image_tile_panel = [
113 ImageChooserPanel('image'),
114 edit_handlers.FieldPanel('teaser')
115 ]
116
117 project_tiles_panel = [
118 edit_handlers.InlinePanel('items', min_num=3)
119 ]
120
121 edit_handler = edit_handlers.TabbedInterface([
122 edit_handlers.ObjectList(title_panel, heading='Title'),
123 edit_handlers.ObjectList(image_tile_panel, heading='Image Tile'),
124 edit_handlers.ObjectList(project_tiles_panel, heading='Project Tiles')
125 ])
126
127
128 class StorefrontCollection(StorefrontItem):
129 parent = ParentalKey('meinberlin_cms.Storefront', related_name='items')
130
[end of meinberlin/apps/cms/models/storefronts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/meinberlin/apps/cms/models/storefronts.py b/meinberlin/apps/cms/models/storefronts.py
--- a/meinberlin/apps/cms/models/storefronts.py
+++ b/meinberlin/apps/cms/models/storefronts.py
@@ -50,12 +50,17 @@
@cached_property
def district_project_count(self):
- return Project.objects\
+ projects = Project.objects\
.filter(administrative_district=self.district,
is_draft=False,
is_public=True,
is_archived=False
- ).count()
+ )
+ active_project_count = 0
+ for project in projects:
+ if project.active_phase or project.future_phases:
+ active_project_count += 1
+ return active_project_count
panels = [
FieldPanel('district'),
|
{"golden_diff": "diff --git a/meinberlin/apps/cms/models/storefronts.py b/meinberlin/apps/cms/models/storefronts.py\n--- a/meinberlin/apps/cms/models/storefronts.py\n+++ b/meinberlin/apps/cms/models/storefronts.py\n@@ -50,12 +50,17 @@\n \n @cached_property\n def district_project_count(self):\n- return Project.objects\\\n+ projects = Project.objects\\\n .filter(administrative_district=self.district,\n is_draft=False,\n is_public=True,\n is_archived=False\n- ).count()\n+ )\n+ active_project_count = 0\n+ for project in projects:\n+ if project.active_phase or project.future_phases:\n+ active_project_count += 1\n+ return active_project_count\n \n panels = [\n FieldPanel('district'),\n", "issue": "store window: district tile announces more results than there are if I click on them\nstore window tile shows ALL project of district, if I click and get to project overview, the default filter takes out all old projects and plans without beteiligung.\r\n\r\ncan we only count running projects with participation?\n", "before_files": [{"content": "import random\n\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.admin import edit_handlers\nfrom wagtail.admin.edit_handlers import FieldPanel\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.snippets.models import register_snippet\n\nfrom adhocracy4.comments.models import Comment\nfrom adhocracy4.modules.models import Item\nfrom adhocracy4.projects.models import Project\nfrom meinberlin.apps.projects import get_project_type\n\n\nclass StorefrontItem(models.Model):\n district = models.ForeignKey(\n 'a4administrative_districts.AdministrativeDistrict',\n related_name='+',\n null=True,\n blank=True\n )\n project = models.ForeignKey(\n 'a4projects.Project',\n related_name='+',\n null=True,\n blank=True\n )\n quote = models.TextField(\n blank=True,\n max_length=150\n )\n\n def __str__(self):\n return str(self.pk)\n\n @cached_property\n def item_type(self):\n if get_project_type(self.project) in ('external', 'bplan'):\n return 'external'\n return 'project'\n\n @cached_property\n def project_url(self):\n if self.item_type == 'external':\n return self.project.externalproject.url\n return self.project.get_absolute_url()\n\n @cached_property\n def district_project_count(self):\n return Project.objects\\\n .filter(administrative_district=self.district,\n is_draft=False,\n is_public=True,\n is_archived=False\n ).count()\n\n panels = [\n FieldPanel('district'),\n FieldPanel('project'),\n FieldPanel('quote'),\n ]\n\n\n@register_snippet\nclass Storefront(ClusterableModel):\n title = models.CharField(max_length=255, null=False, blank=False)\n image = models.ForeignKey(\n 'meinberlin_cms.CustomImage',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n teaser = models.CharField(max_length=100)\n\n def __str__(self):\n return self.title\n\n @cached_property\n def num_entries(self):\n num_comments = Comment.objects.all().count()\n num_items = Item.objects.all().count()\n return num_comments + num_items\n\n @cached_property\n def num_projects(self):\n projects = Project.objects.all()\\\n .filter(is_draft=False, is_archived=False, is_public=True)\n active_project_count = 0\n for project in projects:\n if project.active_phase or project.future_phases:\n active_project_count += 1\n return active_project_count\n\n @cached_property\n def random_items(self):\n items = self.items.all()\n if 
items.count() > 3:\n items_list = items.values_list('id', flat=True)\n random_items = random.sample(list(items_list), 3)\n return StorefrontItem.objects.filter(id__in=random_items)\n else:\n return items\n\n title_panel = [\n edit_handlers.FieldPanel('title')\n ]\n\n image_tile_panel = [\n ImageChooserPanel('image'),\n edit_handlers.FieldPanel('teaser')\n ]\n\n project_tiles_panel = [\n edit_handlers.InlinePanel('items', min_num=3)\n ]\n\n edit_handler = edit_handlers.TabbedInterface([\n edit_handlers.ObjectList(title_panel, heading='Title'),\n edit_handlers.ObjectList(image_tile_panel, heading='Image Tile'),\n edit_handlers.ObjectList(project_tiles_panel, heading='Project Tiles')\n ])\n\n\nclass StorefrontCollection(StorefrontItem):\n parent = ParentalKey('meinberlin_cms.Storefront', related_name='items')\n", "path": "meinberlin/apps/cms/models/storefronts.py"}]}
| 1,693 | 185 |
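For clarity, here is a minimal plain-Python sketch of the counting rule introduced by the `district_project_count` patch in the row above; `FakeProject`, `count_active_projects`, and the sample data are illustrative stand-ins, not part of the meinberlin code base, and the real property filters Django querysets rather than plain lists.

```python
from dataclasses import dataclass, field

@dataclass
class FakeProject:
    # Truthy active_phase means a participation phase is currently running;
    # a non-empty future_phases list means participation is still upcoming.
    active_phase: object = None
    future_phases: list = field(default_factory=list)

def count_active_projects(projects):
    # Mirrors the patched property: count only projects whose participation
    # is running or still ahead, instead of every project in the district.
    return sum(1 for p in projects if p.active_phase or p.future_phases)

sample = [
    FakeProject(active_phase="in participation"),    # counted
    FakeProject(),                                    # finished project, skipped
    FakeProject(future_phases=["starts next week"]),  # counted
]
assert count_active_projects(sample) == 2
```

The same loop already backs the `num_projects` property shown in this row's `before_files`, so the patch simply brings the district tile count in line with it.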
gh_patches_debug_40807
|
rasdani/github-patches
|
git_diff
|
microsoft__ptvsd-909
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Changes in CLI arguments between versions
## Environment data
- PTVSD version: 4.1.3
- Using VS Code or Visual Studio: N/A
## Actual behavior
* CLI args for starting apps for remote debugging have changed.
* Now the arg requires `--server-host`; this wasn't the case in the past.
* This is a breaking change.
## Expected behavior
* We need to revert if possible as existing users will not be able to use PTVSD for remote debugging scenarios.
Reported here https://github.com/Microsoft/vscode-python/issues/2833#issuecomment-428422616
</issue>
<code>
[start of ptvsd/__main__.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import argparse
6 import os.path
7 import sys
8
9 from ptvsd._attach import attach_main
10 from ptvsd._local import debug_main, run_main
11 from ptvsd.socket import Address
12 from ptvsd.version import __version__, __author__ # noqa
13
14
15 ##################################
16 # the script
17
18 """
19 For the PyDevd CLI handling see:
20
21 https://github.com/fabioz/PyDev.Debugger/blob/master/_pydevd_bundle/pydevd_command_line_handling.py
22 https://github.com/fabioz/PyDev.Debugger/blob/master/pydevd.py#L1450 (main func)
23 """ # noqa
24
25 PYDEVD_OPTS = {
26 '--file',
27 '--client',
28 #'--port',
29 '--vm_type',
30 }
31
32 PYDEVD_FLAGS = {
33 '--DEBUG',
34 '--DEBUG_RECORD_SOCKET_READS',
35 '--cmd-line',
36 '--module',
37 '--multiproc',
38 '--multiprocess',
39 '--print-in-debugger-startup',
40 '--save-signatures',
41 '--save-threading',
42 '--save-asyncio',
43 '--server',
44 '--qt-support=auto',
45 }
46
47 USAGE = """
48 {0} [-h] [-V] [--nodebug] [--host HOST | --server-host HOST] --port PORT -m MODULE [arg ...]
49 {0} [-h] [-V] [--nodebug] [--host HOST | --server-host HOST] --port PORT FILENAME [arg ...]
50 {0} [-h] [-V] --host HOST --port PORT --pid PROCESS_ID
51 """ # noqa
52
53
54 def parse_args(argv=None):
55 """Return the parsed args to use in main()."""
56 if argv is None:
57 argv = sys.argv
58 prog = argv[0]
59 if prog == __file__:
60 prog = '{} -m ptvsd'.format(os.path.basename(sys.executable))
61 else:
62 prog = argv[0]
63 argv = argv[1:]
64
65 supported, pydevd, script = _group_args(argv)
66 args = _parse_args(prog, supported)
67 # '--' is used in _run_args to extract pydevd specific args
68 extra = pydevd + ['--']
69 if script:
70 extra += script
71 return args, extra
72
73
74 def _group_args(argv):
75 supported = []
76 pydevd = []
77 script = []
78
79 try:
80 pos = argv.index('--')
81 except ValueError:
82 script = []
83 else:
84 script = argv[pos + 1:]
85 argv = argv[:pos]
86
87 for arg in argv:
88 if arg == '-h' or arg == '--help':
89 return argv, [], script
90
91 gottarget = False
92 skip = 0
93 for i in range(len(argv)):
94 if skip:
95 skip -= 1
96 continue
97
98 arg = argv[i]
99 try:
100 nextarg = argv[i + 1]
101 except IndexError:
102 nextarg = None
103
104 # TODO: Deprecate the PyDevd arg support.
105 # PyDevd support
106 if gottarget:
107 script = argv[i:] + script
108 break
109 if arg == '--client':
110 arg = '--host'
111 elif arg == '--file':
112 if nextarg is None: # The filename is missing...
113 pydevd.append(arg)
114 continue # This will get handled later.
115 if nextarg.endswith(':') and '--module' in pydevd:
116 pydevd.remove('--module')
117 arg = '-m'
118 argv[i + 1] = nextarg = nextarg[:-1]
119 else:
120 arg = nextarg
121 skip += 1
122
123 if arg in PYDEVD_OPTS:
124 pydevd.append(arg)
125 if nextarg is not None:
126 pydevd.append(nextarg)
127 skip += 1
128 elif arg in PYDEVD_FLAGS:
129 pydevd.append(arg)
130 elif arg == '--nodebug':
131 supported.append(arg)
132
133 # ptvsd support
134 elif arg in ('--host', '--server-host', '--port', '--pid', '-m'):
135 if arg == '-m' or arg == '--pid':
136 gottarget = True
137 supported.append(arg)
138 if nextarg is not None:
139 supported.append(nextarg)
140 skip += 1
141 elif arg in ('--single-session', '--wait'):
142 supported.append(arg)
143 elif not arg.startswith('-'):
144 supported.append(arg)
145 gottarget = True
146
147 # unsupported arg
148 else:
149 supported.append(arg)
150 break
151
152 return supported, pydevd, script
153
154
155 def _parse_args(prog, argv):
156 parser = argparse.ArgumentParser(
157 prog=prog,
158 usage=USAGE.format(prog),
159 )
160
161 parser.add_argument('--nodebug', action='store_true')
162
163 host = parser.add_mutually_exclusive_group()
164 host.add_argument('--host')
165 host.add_argument('--server-host')
166 parser.add_argument('--port', type=int, required=True)
167
168 target = parser.add_mutually_exclusive_group(required=True)
169 target.add_argument('-m', dest='module')
170 target.add_argument('--pid', type=int)
171 target.add_argument('filename', nargs='?')
172
173 parser.add_argument('--single-session', action='store_true')
174 parser.add_argument('--wait', action='store_true')
175
176 parser.add_argument('-V', '--version', action='version')
177 parser.version = __version__
178
179 args = parser.parse_args(argv)
180 ns = vars(args)
181
182 serverhost = ns.pop('server_host', None)
183 clienthost = ns.pop('host', None)
184 if serverhost:
185 args.address = Address.as_server(serverhost, ns.pop('port'))
186 elif not clienthost:
187 if args.nodebug:
188 args.address = Address.as_client(clienthost, ns.pop('port'))
189 else:
190 args.address = Address.as_server(clienthost, ns.pop('port'))
191 else:
192 args.address = Address.as_client(clienthost, ns.pop('port'))
193
194 pid = ns.pop('pid')
195 module = ns.pop('module')
196 filename = ns.pop('filename')
197 if pid is not None:
198 args.name = pid
199 args.kind = 'pid'
200 elif module is not None:
201 args.name = module
202 args.kind = 'module'
203 else:
204 args.name = filename
205 args.kind = 'script'
206
207 return args
208
209
210 def handle_args(addr, name, kind, extra=(), nodebug=False, **kwargs):
211 if kind == 'pid':
212 attach_main(addr, name, *extra, **kwargs)
213 elif nodebug:
214 run_main(addr, name, kind, *extra, **kwargs)
215 else:
216 debug_main(addr, name, kind, *extra, **kwargs)
217
218
219 def main(argv=None):
220 args, extra = parse_args(argv)
221 handle_args(args.address, args.name, args.kind, extra,
222 nodebug=args.nodebug, singlesession=args.single_session,
223 wait=args.wait)
224
225
226 if __name__ == '__main__':
227 main()
228
[end of ptvsd/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ptvsd/__main__.py b/ptvsd/__main__.py
--- a/ptvsd/__main__.py
+++ b/ptvsd/__main__.py
@@ -24,8 +24,6 @@
PYDEVD_OPTS = {
'--file',
- '--client',
- #'--port',
'--vm_type',
}
@@ -45,8 +43,8 @@
}
USAGE = """
- {0} [-h] [-V] [--nodebug] [--host HOST | --server-host HOST] --port PORT -m MODULE [arg ...]
- {0} [-h] [-V] [--nodebug] [--host HOST | --server-host HOST] --port PORT FILENAME [arg ...]
+ {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT -m MODULE [arg ...]
+ {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT FILENAME [arg ...]
{0} [-h] [-V] --host HOST --port PORT --pid PROCESS_ID
""" # noqa
@@ -106,9 +104,7 @@
if gottarget:
script = argv[i:] + script
break
- if arg == '--client':
- arg = '--host'
- elif arg == '--file':
+ if arg == '--file':
if nextarg is None: # The filename is missing...
pydevd.append(arg)
continue # This will get handled later.
@@ -131,14 +127,14 @@
supported.append(arg)
# ptvsd support
- elif arg in ('--host', '--server-host', '--port', '--pid', '-m'):
+ elif arg in ('--host', '--port', '--pid', '-m'):
if arg == '-m' or arg == '--pid':
gottarget = True
supported.append(arg)
if nextarg is not None:
supported.append(nextarg)
skip += 1
- elif arg in ('--single-session', '--wait'):
+ elif arg in ('--single-session', '--wait', '--client'):
supported.append(arg)
elif not arg.startswith('-'):
supported.append(arg)
@@ -159,10 +155,9 @@
)
parser.add_argument('--nodebug', action='store_true')
+ parser.add_argument('--client', action='store_true')
- host = parser.add_mutually_exclusive_group()
- host.add_argument('--host')
- host.add_argument('--server-host')
+ parser.add_argument('--host')
parser.add_argument('--port', type=int, required=True)
target = parser.add_mutually_exclusive_group(required=True)
@@ -179,17 +174,10 @@
args = parser.parse_args(argv)
ns = vars(args)
- serverhost = ns.pop('server_host', None)
- clienthost = ns.pop('host', None)
- if serverhost:
- args.address = Address.as_server(serverhost, ns.pop('port'))
- elif not clienthost:
- if args.nodebug:
- args.address = Address.as_client(clienthost, ns.pop('port'))
- else:
- args.address = Address.as_server(clienthost, ns.pop('port'))
- else:
- args.address = Address.as_client(clienthost, ns.pop('port'))
+ host = ns.pop('host', None)
+ port = ns.pop('port')
+ client = ns.pop('client')
+ args.address = (Address.as_client if client else Address.as_server)(host, port) # noqa
pid = ns.pop('pid')
module = ns.pop('module')
|
{"golden_diff": "diff --git a/ptvsd/__main__.py b/ptvsd/__main__.py\n--- a/ptvsd/__main__.py\n+++ b/ptvsd/__main__.py\n@@ -24,8 +24,6 @@\n \n PYDEVD_OPTS = {\n '--file',\n- '--client',\n- #'--port',\n '--vm_type',\n }\n \n@@ -45,8 +43,8 @@\n }\n \n USAGE = \"\"\"\n- {0} [-h] [-V] [--nodebug] [--host HOST | --server-host HOST] --port PORT -m MODULE [arg ...]\n- {0} [-h] [-V] [--nodebug] [--host HOST | --server-host HOST] --port PORT FILENAME [arg ...]\n+ {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT -m MODULE [arg ...]\n+ {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT FILENAME [arg ...]\n {0} [-h] [-V] --host HOST --port PORT --pid PROCESS_ID\n \"\"\" # noqa\n \n@@ -106,9 +104,7 @@\n if gottarget:\n script = argv[i:] + script\n break\n- if arg == '--client':\n- arg = '--host'\n- elif arg == '--file':\n+ if arg == '--file':\n if nextarg is None: # The filename is missing...\n pydevd.append(arg)\n continue # This will get handled later.\n@@ -131,14 +127,14 @@\n supported.append(arg)\n \n # ptvsd support\n- elif arg in ('--host', '--server-host', '--port', '--pid', '-m'):\n+ elif arg in ('--host', '--port', '--pid', '-m'):\n if arg == '-m' or arg == '--pid':\n gottarget = True\n supported.append(arg)\n if nextarg is not None:\n supported.append(nextarg)\n skip += 1\n- elif arg in ('--single-session', '--wait'):\n+ elif arg in ('--single-session', '--wait', '--client'):\n supported.append(arg)\n elif not arg.startswith('-'):\n supported.append(arg)\n@@ -159,10 +155,9 @@\n )\n \n parser.add_argument('--nodebug', action='store_true')\n+ parser.add_argument('--client', action='store_true')\n \n- host = parser.add_mutually_exclusive_group()\n- host.add_argument('--host')\n- host.add_argument('--server-host')\n+ parser.add_argument('--host')\n parser.add_argument('--port', type=int, required=True)\n \n target = parser.add_mutually_exclusive_group(required=True)\n@@ -179,17 +174,10 @@\n args = parser.parse_args(argv)\n ns = vars(args)\n \n- serverhost = ns.pop('server_host', None)\n- clienthost = ns.pop('host', None)\n- if serverhost:\n- args.address = Address.as_server(serverhost, ns.pop('port'))\n- elif not clienthost:\n- if args.nodebug:\n- args.address = Address.as_client(clienthost, ns.pop('port'))\n- else:\n- args.address = Address.as_server(clienthost, ns.pop('port'))\n- else:\n- args.address = Address.as_client(clienthost, ns.pop('port'))\n+ host = ns.pop('host', None)\n+ port = ns.pop('port')\n+ client = ns.pop('client')\n+ args.address = (Address.as_client if client else Address.as_server)(host, port) # noqa\n \n pid = ns.pop('pid')\n module = ns.pop('module')\n", "issue": "Changes in CLI arguments between versions\n## Environment data\r\n\r\n- PTVSD version: 4.1.3\r\n- Using VS Code or Visual Studio: N/A\r\n\r\n## Actual behavior\r\n\r\n* CLI args for starting apps for remote debugging has changed \r\n* Now the arg requires `--server-host`, this wasn't the case in the past.\r\n* This is a breaking change.\r\n\r\n## Expected behavior\r\n\r\n* We need to revert if possible as existing users will not be able to use PTSVD for remote debugging scenarios.\r\n\r\nReported here https://github.com/Microsoft/vscode-python/issues/2833#issuecomment-428422616\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See LICENSE in the project root\n# for license information.\n\nimport argparse\nimport os.path\nimport sys\n\nfrom ptvsd._attach import attach_main\nfrom ptvsd._local import debug_main, run_main\nfrom ptvsd.socket import Address\nfrom ptvsd.version import __version__, __author__ # noqa\n\n\n##################################\n# the script\n\n\"\"\"\nFor the PyDevd CLI handling see:\n\n https://github.com/fabioz/PyDev.Debugger/blob/master/_pydevd_bundle/pydevd_command_line_handling.py\n https://github.com/fabioz/PyDev.Debugger/blob/master/pydevd.py#L1450 (main func)\n\"\"\" # noqa\n\nPYDEVD_OPTS = {\n '--file',\n '--client',\n #'--port',\n '--vm_type',\n}\n\nPYDEVD_FLAGS = {\n '--DEBUG',\n '--DEBUG_RECORD_SOCKET_READS',\n '--cmd-line',\n '--module',\n '--multiproc',\n '--multiprocess',\n '--print-in-debugger-startup',\n '--save-signatures',\n '--save-threading',\n '--save-asyncio',\n '--server',\n '--qt-support=auto',\n}\n\nUSAGE = \"\"\"\n {0} [-h] [-V] [--nodebug] [--host HOST | --server-host HOST] --port PORT -m MODULE [arg ...]\n {0} [-h] [-V] [--nodebug] [--host HOST | --server-host HOST] --port PORT FILENAME [arg ...]\n {0} [-h] [-V] --host HOST --port PORT --pid PROCESS_ID\n\"\"\" # noqa\n\n\ndef parse_args(argv=None):\n \"\"\"Return the parsed args to use in main().\"\"\"\n if argv is None:\n argv = sys.argv\n prog = argv[0]\n if prog == __file__:\n prog = '{} -m ptvsd'.format(os.path.basename(sys.executable))\n else:\n prog = argv[0]\n argv = argv[1:]\n\n supported, pydevd, script = _group_args(argv)\n args = _parse_args(prog, supported)\n # '--' is used in _run_args to extract pydevd specific args\n extra = pydevd + ['--']\n if script:\n extra += script\n return args, extra\n\n\ndef _group_args(argv):\n supported = []\n pydevd = []\n script = []\n\n try:\n pos = argv.index('--')\n except ValueError:\n script = []\n else:\n script = argv[pos + 1:]\n argv = argv[:pos]\n\n for arg in argv:\n if arg == '-h' or arg == '--help':\n return argv, [], script\n\n gottarget = False\n skip = 0\n for i in range(len(argv)):\n if skip:\n skip -= 1\n continue\n\n arg = argv[i]\n try:\n nextarg = argv[i + 1]\n except IndexError:\n nextarg = None\n\n # TODO: Deprecate the PyDevd arg support.\n # PyDevd support\n if gottarget:\n script = argv[i:] + script\n break\n if arg == '--client':\n arg = '--host'\n elif arg == '--file':\n if nextarg is None: # The filename is missing...\n pydevd.append(arg)\n continue # This will get handled later.\n if nextarg.endswith(':') and '--module' in pydevd:\n pydevd.remove('--module')\n arg = '-m'\n argv[i + 1] = nextarg = nextarg[:-1]\n else:\n arg = nextarg\n skip += 1\n\n if arg in PYDEVD_OPTS:\n pydevd.append(arg)\n if nextarg is not None:\n pydevd.append(nextarg)\n skip += 1\n elif arg in PYDEVD_FLAGS:\n pydevd.append(arg)\n elif arg == '--nodebug':\n supported.append(arg)\n\n # ptvsd support\n elif arg in ('--host', '--server-host', '--port', '--pid', '-m'):\n if arg == '-m' or arg == '--pid':\n gottarget = True\n supported.append(arg)\n if nextarg is not None:\n supported.append(nextarg)\n skip += 1\n elif arg in ('--single-session', '--wait'):\n supported.append(arg)\n elif not arg.startswith('-'):\n supported.append(arg)\n gottarget = True\n\n # unsupported arg\n else:\n supported.append(arg)\n break\n\n return supported, pydevd, script\n\n\ndef _parse_args(prog, argv):\n parser = argparse.ArgumentParser(\n prog=prog,\n usage=USAGE.format(prog),\n )\n\n parser.add_argument('--nodebug', action='store_true')\n\n host = parser.add_mutually_exclusive_group()\n 
host.add_argument('--host')\n host.add_argument('--server-host')\n parser.add_argument('--port', type=int, required=True)\n\n target = parser.add_mutually_exclusive_group(required=True)\n target.add_argument('-m', dest='module')\n target.add_argument('--pid', type=int)\n target.add_argument('filename', nargs='?')\n\n parser.add_argument('--single-session', action='store_true')\n parser.add_argument('--wait', action='store_true')\n\n parser.add_argument('-V', '--version', action='version')\n parser.version = __version__\n\n args = parser.parse_args(argv)\n ns = vars(args)\n\n serverhost = ns.pop('server_host', None)\n clienthost = ns.pop('host', None)\n if serverhost:\n args.address = Address.as_server(serverhost, ns.pop('port'))\n elif not clienthost:\n if args.nodebug:\n args.address = Address.as_client(clienthost, ns.pop('port'))\n else:\n args.address = Address.as_server(clienthost, ns.pop('port'))\n else:\n args.address = Address.as_client(clienthost, ns.pop('port'))\n\n pid = ns.pop('pid')\n module = ns.pop('module')\n filename = ns.pop('filename')\n if pid is not None:\n args.name = pid\n args.kind = 'pid'\n elif module is not None:\n args.name = module\n args.kind = 'module'\n else:\n args.name = filename\n args.kind = 'script'\n\n return args\n\n\ndef handle_args(addr, name, kind, extra=(), nodebug=False, **kwargs):\n if kind == 'pid':\n attach_main(addr, name, *extra, **kwargs)\n elif nodebug:\n run_main(addr, name, kind, *extra, **kwargs)\n else:\n debug_main(addr, name, kind, *extra, **kwargs)\n\n\ndef main(argv=None):\n args, extra = parse_args(argv)\n handle_args(args.address, args.name, args.kind, extra,\n nodebug=args.nodebug, singlesession=args.single_session,\n wait=args.wait)\n\n\nif __name__ == '__main__':\n main()\n", "path": "ptvsd/__main__.py"}]}
| 2,812 | 837 |
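As a usage-level illustration of the behaviour restored by the ptvsd patch above, here is a hedged sketch of the address selection in the patched `_parse_args`; the `Address` namedtuple and the helper functions below are simplified stand-ins for `ptvsd.socket.Address` (not reproduced here), and `resolve_address` is a hypothetical name for the single patched line.

```python
from collections import namedtuple

# Simplified stand-in for ptvsd.socket.Address; only the pieces needed here.
Address = namedtuple("Address", "host port kind")

def as_server(host, port):
    # ptvsd listens on host:port and waits for the IDE to attach.
    return Address(host, port, "server")

def as_client(host, port):
    # ptvsd connects out to an already-listening IDE.
    return Address(host, port, "client")

def resolve_address(host, port, client=False):
    # Mirrors the patched line in _parse_args: the --client flag alone
    # decides the direction; --server-host is gone.
    return (as_client if client else as_server)(host, port)

assert resolve_address(None, 5678).kind == "server"                     # plain --port 5678
assert resolve_address("10.0.0.5", 5678, client=True).kind == "client"  # --client --host 10.0.0.5
```

The design point is that `--host` no longer implies a direction on its own: the explicit `--client` flag selects connect-out mode, and without it ptvsd listens as a server, so existing remote-debugging invocations that never passed `--server-host` keep working.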
gh_patches_debug_17675
|
rasdani/github-patches
|
git_diff
|
nvaccess__nvda-11453
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Say all on Desktop raises an error
### Steps to reproduce:
1. Focus the desktop.
2. Invoke caret say all
### Actual behavior:
The following error is raised:
```
ERROR - scriptHandler.executeScript (15:54:57.769):
error executing script: <bound method GlobalCommands.script_sayAll of <globalCommands.GlobalCommands object at 0x05875770>> with gesture 'NVDA+a'
Traceback (most recent call last):
File "scriptHandler.pyc", line 190, in executeScript
File "globalCommands.pyc", line 1334, in script_sayAll
File "sayAllHandler.pyc", line 79, in readText
File "sayAllHandler.pyc", line 119, in nextLine
AttributeError: '_TextReader' object has no attribute 'reader'
ERROR - stderr (15:54:57.779):
Exception ignored in:
ERROR - stderr (15:54:57.790):
<function _TextReader.__del__ at 0x0462F390>
ERROR - stderr (15:54:57.803):
Traceback (most recent call last):
ERROR - stderr (15:54:57.815):
File "sayAllHandler.pyc", line 213, in __del__
ERROR - stderr (15:54:57.827):
File "sayAllHandler.pyc", line 206, in stop
ERROR - stderr (15:54:57.839):
AttributeError
ERROR - stderr (15:54:57.851):
:
ERROR - stderr (15:54:57.863):
'_TextReader' object has no attribute 'reader'
```
### Expected behavior:
NO error
### System configuration
#### NVDA installed/portable/running from source:
Installed
#### NVDA version:
threshold-18069
#### Windows version:
Windows 10 1903 build 18362.239
</issue>
<code>
[start of source/sayAllHandler.py]
1 # A part of NonVisual Desktop Access (NVDA)
2 # Copyright (C) 2006-2017 NV Access Limited
3 # This file may be used under the terms of the GNU General Public License, version 2 or later.
4 # For more details see: https://www.gnu.org/licenses/gpl-2.0.html
5
6 import weakref
7 import speech
8 import synthDriverHandler
9 from logHandler import log
10 import config
11 import controlTypes
12 import api
13 import textInfos
14 import queueHandler
15 import winKernel
16
17 CURSOR_CARET = 0
18 CURSOR_REVIEW = 1
19
20 lastSayAllMode = None
21 #: The active say all manager.
22 #: This is a weakref because the manager should be allowed to die once say all is complete.
23 _activeSayAll = lambda: None # Return None when called like a dead weakref.
24
25 def stop():
26 active = _activeSayAll()
27 if active:
28 active.stop()
29
30 def isRunning():
31 """Determine whether say all is currently running.
32 @return: C{True} if say all is currently running, C{False} if not.
33 @rtype: bool
34 """
35 return bool(_activeSayAll())
36
37 def readObjects(obj):
38 global _activeSayAll
39 reader = _ObjectsReader(obj)
40 _activeSayAll = weakref.ref(reader)
41 reader.next()
42
43 class _ObjectsReader(object):
44
45 def __init__(self, root):
46 self.walker = self.walk(root)
47 self.prevObj = None
48
49 def walk(self, obj):
50 yield obj
51 child=obj.simpleFirstChild
52 while child:
53 for descendant in self.walk(child):
54 yield descendant
55 child=child.simpleNext
56
57 def next(self):
58 if not self.walker:
59 # We were stopped.
60 return
61 if self.prevObj:
62 # We just started speaking this object, so move the navigator to it.
63 api.setNavigatorObject(self.prevObj, isFocus=lastSayAllMode==CURSOR_CARET)
64 winKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)
65 # Move onto the next object.
66 self.prevObj = obj = next(self.walker, None)
67 if not obj:
68 return
69 # Call this method again when we start speaking this object.
70 callbackCommand = speech.CallbackCommand(self.next, name="say-all:next")
71 speech.speakObject(obj, reason=controlTypes.REASON_SAYALL, _prefixSpeechCommand=callbackCommand)
72
73 def stop(self):
74 self.walker = None
75
76 def readText(cursor):
77 global lastSayAllMode, _activeSayAll
78 lastSayAllMode=cursor
79 try:
80 reader = _TextReader(cursor)
81 except NotImplementedError:
82 log.debugWarning("Unable to make reader", exc_info=True)
83 return
84 _activeSayAll = weakref.ref(reader)
85 reader.nextLine()
86
87 class _TextReader(object):
88 """Manages continuous reading of text.
89 This is intended for internal use only.
90
91 The high level flow of control is as follows:
92 1. The constructor sets things up.
93 2. L{nextLine} is called to read the first line.
94 3. When it speaks a line, L{nextLine} request that L{lineReached} be called
95 when we start speaking this line, providing the position and state at this point.
96 4. When we start speaking a line, L{lineReached} is called
97 and moves the cursor to that line.
98 5. L{lineReached} calls L{nextLine}.
99 6. If there are more lines, L{nextLine} works as per steps 3 and 4.
100 7. Otherwise, if the object doesn't support page turns, we're finished.
101 8. If the object does support page turns,
102 we request that L{turnPage} be called when speech is finished.
103 9. L{turnPage} tries to turn the page.
104 10. If there are no more pages, we're finished.
105 11. If there is another page, L{turnPage} calls L{nextLine}.
106 """
107 MAX_BUFFERED_LINES = 10
108
109 def __init__(self, cursor):
110 self.cursor = cursor
111 self.trigger = SayAllProfileTrigger()
112 self.trigger.enter()
113 # Start at the cursor.
114 if cursor == CURSOR_CARET:
115 try:
116 self.reader = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)
117 except (NotImplementedError, RuntimeError) as e:
118 raise NotImplementedError("Unable to make TextInfo: " + str(e))
119 else:
120 self.reader = api.getReviewPosition()
121 self.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)
122 self.numBufferedLines = 0
123
124 def nextLine(self):
125 if not self.reader:
126 log.debug("no self.reader")
127 # We were stopped.
128 return
129 if not self.reader.obj:
130 log.debug("no self.reader.obj")
131 # The object died, so we should too.
132 self.finish()
133 return
134 bookmark = self.reader.bookmark
135 # Expand to the current line.
136 # We use move end rather than expand
137 # because the user might start in the middle of a line
138 # and we don't want to read from the start of the line in that case.
139 # For lines after the first, it's also more efficient because
140 # we're already at the start of the line, so there's no need to search backwards.
141 delta = self.reader.move(textInfos.UNIT_READINGCHUNK, 1, endPoint="end")
142 if delta <= 0:
143 # No more text.
144 if isinstance(self.reader.obj, textInfos.DocumentWithPageTurns):
145 # Once the last line finishes reading, try turning the page.
146 cb = speech.CallbackCommand(self.turnPage, name="say-all:turnPage")
147 speech.speakWithoutPauses([cb, speech.EndUtteranceCommand()])
148 else:
149 self.finish()
150 return
151
152 # Copy the speakTextInfoState so that speak callbackCommand
153 # and its associated callback are using a copy isolated to this specific line.
154 state = self.speakTextInfoState.copy()
155 # Call lineReached when we start speaking this line.
156 # lineReached will move the cursor and trigger reading of the next line.
157
158 def _onLineReached(obj=self.reader.obj, state=state):
159 self.lineReached(obj, bookmark, state)
160
161 cb = speech.CallbackCommand(
162 _onLineReached,
163 name="say-all:lineReached"
164 )
165
166 # Generate the speech sequence for the reader textInfo
167 # and insert the lineReached callback at the very beginning of the sequence.
168 # _linePrefix on speakTextInfo cannot be used here
169 # As it would be inserted in the sequence after all initial control starts which is too late.
170 speechGen = speech.getTextInfoSpeech(
171 self.reader,
172 unit=textInfos.UNIT_READINGCHUNK,
173 reason=controlTypes.REASON_SAYALL,
174 useCache=state
175 )
176 seq = list(speech._flattenNestedSequences(speechGen))
177 seq.insert(0, cb)
178 # Speak the speech sequence.
179 spoke = speech.speakWithoutPauses(seq)
180 # Update the textInfo state ready for when speaking the next line.
181 self.speakTextInfoState = state.copy()
182
183 # Collapse to the end of this line, ready to read the next.
184 try:
185 self.reader.collapse(end=True)
186 except RuntimeError:
187 # This occurs in Microsoft Word when the range covers the end of the document.
188 # without this exception to indicate that further collapsing is not possible, say all could enter an infinite loop.
189 self.finish()
190 return
191 if not spoke:
192 # This line didn't include a natural pause, so nothing was spoken.
193 self.numBufferedLines += 1
194 if self.numBufferedLines < self.MAX_BUFFERED_LINES:
195 # Move on to the next line.
196 # We queue this to allow the user a chance to stop say all.
197 queueHandler.queueFunction(queueHandler.eventQueue, self.nextLine)
198 else:
199 # We don't want to buffer too much.
200 # Force speech. lineReached will resume things when speech catches up.
201 speech.speakWithoutPauses(None)
202 # The first buffered line has now started speaking.
203 self.numBufferedLines -= 1
204
205 def lineReached(self, obj, bookmark, state):
206 # We've just started speaking this line, so move the cursor there.
207 state.updateObj()
208 updater = obj.makeTextInfo(bookmark)
209 if self.cursor == CURSOR_CARET:
210 updater.updateCaret()
211 if self.cursor != CURSOR_CARET or config.conf["reviewCursor"]["followCaret"]:
212 api.setReviewPosition(updater, isCaret=self.cursor==CURSOR_CARET)
213 winKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)
214 if self.numBufferedLines == 0:
215 # This was the last line spoken, so move on.
216 self.nextLine()
217 else:
218 self.numBufferedLines -= 1
219
220 def turnPage(self):
221 try:
222 self.reader.obj.turnPage()
223 except RuntimeError:
224 log.debug("No more pages")
225 # No more pages.
226 self.stop()
227 return
228 self.reader = self.reader.obj.makeTextInfo(textInfos.POSITION_FIRST)
229 self.nextLine()
230
231 def finish(self):
232 # There is no more text.
233 # Call stop to clean up, but only after speech completely finishes.
234 # Otherwise, if a different synth is being used for say all,
235 # we might switch synths too early and truncate the final speech.
236 # We do this by putting a CallbackCommand at the start of a new utterance.
237 cb = speech.CallbackCommand(self.stop, name="say-all:stop")
238 speech.speakWithoutPauses([
239 speech.EndUtteranceCommand(),
240 cb,
241 speech.EndUtteranceCommand()
242 ])
243
244 def stop(self):
245 if not self.reader:
246 return
247 self.reader = None
248 self.trigger.exit()
249 self.trigger = None
250
251 def __del__(self):
252 self.stop()
253
254 class SayAllProfileTrigger(config.ProfileTrigger):
255 """A configuration profile trigger for when say all is in progress.
256 """
257 spec = "sayAll"
258
[end of source/sayAllHandler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/source/sayAllHandler.py b/source/sayAllHandler.py
--- a/source/sayAllHandler.py
+++ b/source/sayAllHandler.py
@@ -109,7 +109,7 @@
def __init__(self, cursor):
self.cursor = cursor
self.trigger = SayAllProfileTrigger()
- self.trigger.enter()
+ self.reader = None
# Start at the cursor.
if cursor == CURSOR_CARET:
try:
@@ -118,6 +118,8 @@
raise NotImplementedError("Unable to make TextInfo: " + str(e))
else:
self.reader = api.getReviewPosition()
+ # #10899: SayAll profile can't be activated earlier because they may not be anything to read
+ self.trigger.enter()
self.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)
self.numBufferedLines = 0
|
{"golden_diff": "diff --git a/source/sayAllHandler.py b/source/sayAllHandler.py\n--- a/source/sayAllHandler.py\n+++ b/source/sayAllHandler.py\n@@ -109,7 +109,7 @@\n \tdef __init__(self, cursor):\r\n \t\tself.cursor = cursor\r\n \t\tself.trigger = SayAllProfileTrigger()\r\n-\t\tself.trigger.enter()\r\n+\t\tself.reader = None\r\n \t\t# Start at the cursor.\r\n \t\tif cursor == CURSOR_CARET:\r\n \t\t\ttry:\r\n@@ -118,6 +118,8 @@\n \t\t\t\traise NotImplementedError(\"Unable to make TextInfo: \" + str(e))\r\n \t\telse:\r\n \t\t\tself.reader = api.getReviewPosition()\r\n+\t\t# #10899: SayAll profile can't be activated earlier because they may not be anything to read\r\n+\t\tself.trigger.enter()\r\n \t\tself.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)\r\n \t\tself.numBufferedLines = 0\n", "issue": "Say all on Desktop raises an error\n### Steps to reproduce:\r\n1. Focus the desktop.\r\n2. Invoke caret say all\r\n\r\n### Actual behavior:\r\nThe following error is raised:\r\n\r\n```\r\nERROR - scriptHandler.executeScript (15:54:57.769):\r\nerror executing script: <bound method GlobalCommands.script_sayAll of <globalCommands.GlobalCommands object at 0x05875770>> with gesture 'NVDA+a'\r\nTraceback (most recent call last):\r\n File \"scriptHandler.pyc\", line 190, in executeScript\r\n File \"globalCommands.pyc\", line 1334, in script_sayAll\r\n File \"sayAllHandler.pyc\", line 79, in readText\r\n File \"sayAllHandler.pyc\", line 119, in nextLine\r\nAttributeError: '_TextReader' object has no attribute 'reader'\r\nERROR - stderr (15:54:57.779):\r\nException ignored in:\r\nERROR - stderr (15:54:57.790):\r\n<function _TextReader.__del__ at 0x0462F390>\r\nERROR - stderr (15:54:57.803):\r\nTraceback (most recent call last):\r\nERROR - stderr (15:54:57.815):\r\n File \"sayAllHandler.pyc\", line 213, in __del__\r\nERROR - stderr (15:54:57.827):\r\n File \"sayAllHandler.pyc\", line 206, in stop\r\nERROR - stderr (15:54:57.839):\r\nAttributeError\r\nERROR - stderr (15:54:57.851):\r\n:\r\nERROR - stderr (15:54:57.863):\r\n'_TextReader' object has no attribute 'reader'\r\n```\r\n\r\n### Expected behavior:\r\nNO error\r\n\r\n### System configuration\r\n#### NVDA installed/portable/running from source:\r\nInstalled\r\n\r\n#### NVDA version:\r\nthreshold-18069\r\n\r\n#### Windows version:\r\nWindows 10 1903 build 18362.239\r\n\n", "before_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2017 NV Access Limited\r\n# This file may be used under the terms of the GNU General Public License, version 2 or later.\r\n# For more details see: https://www.gnu.org/licenses/gpl-2.0.html\r\n\r\nimport weakref\r\nimport speech\r\nimport synthDriverHandler\r\nfrom logHandler import log\r\nimport config\r\nimport controlTypes\r\nimport api\r\nimport textInfos\r\nimport queueHandler\r\nimport winKernel\r\n\r\nCURSOR_CARET = 0\r\nCURSOR_REVIEW = 1\r\n\r\nlastSayAllMode = None\r\n#: The active say all manager.\r\n#: This is a weakref because the manager should be allowed to die once say all is complete.\r\n_activeSayAll = lambda: None # Return None when called like a dead weakref.\r\n\r\ndef stop():\r\n\tactive = _activeSayAll()\r\n\tif active:\r\n\t\tactive.stop()\r\n\r\ndef isRunning():\r\n\t\"\"\"Determine whether say all is currently running.\r\n\t@return: C{True} if say all is currently running, C{False} if not.\r\n\t@rtype: bool\r\n\t\"\"\"\r\n\treturn bool(_activeSayAll())\r\n\r\ndef readObjects(obj):\r\n\tglobal _activeSayAll\r\n\treader = 
_ObjectsReader(obj)\r\n\t_activeSayAll = weakref.ref(reader)\r\n\treader.next()\r\n\r\nclass _ObjectsReader(object):\r\n\r\n\tdef __init__(self, root):\r\n\t\tself.walker = self.walk(root)\r\n\t\tself.prevObj = None\r\n\r\n\tdef walk(self, obj):\r\n\t\tyield obj\r\n\t\tchild=obj.simpleFirstChild\r\n\t\twhile child:\r\n\t\t\tfor descendant in self.walk(child):\r\n\t\t\t\tyield descendant\r\n\t\t\tchild=child.simpleNext\r\n\r\n\tdef next(self):\r\n\t\tif not self.walker:\r\n\t\t\t# We were stopped.\r\n\t\t\treturn\r\n\t\tif self.prevObj:\r\n\t\t\t# We just started speaking this object, so move the navigator to it.\r\n\t\t\tapi.setNavigatorObject(self.prevObj, isFocus=lastSayAllMode==CURSOR_CARET)\r\n\t\t\twinKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)\r\n\t\t# Move onto the next object.\r\n\t\tself.prevObj = obj = next(self.walker, None)\r\n\t\tif not obj:\r\n\t\t\treturn\r\n\t\t# Call this method again when we start speaking this object.\r\n\t\tcallbackCommand = speech.CallbackCommand(self.next, name=\"say-all:next\")\r\n\t\tspeech.speakObject(obj, reason=controlTypes.REASON_SAYALL, _prefixSpeechCommand=callbackCommand)\r\n\r\n\tdef stop(self):\r\n\t\tself.walker = None\r\n\r\ndef readText(cursor):\r\n\tglobal lastSayAllMode, _activeSayAll\r\n\tlastSayAllMode=cursor\r\n\ttry:\r\n\t\treader = _TextReader(cursor)\r\n\texcept NotImplementedError:\r\n\t\tlog.debugWarning(\"Unable to make reader\", exc_info=True)\r\n\t\treturn\r\n\t_activeSayAll = weakref.ref(reader)\r\n\treader.nextLine()\r\n\r\nclass _TextReader(object):\r\n\t\"\"\"Manages continuous reading of text.\r\n\tThis is intended for internal use only.\r\n\r\n\tThe high level flow of control is as follows:\r\n\t1. The constructor sets things up.\r\n\t2. L{nextLine} is called to read the first line.\r\n\t3. When it speaks a line, L{nextLine} request that L{lineReached} be called\r\n\t\twhen we start speaking this line, providing the position and state at this point.\r\n\t4. When we start speaking a line, L{lineReached} is called\r\n\t\tand moves the cursor to that line.\r\n\t5. L{lineReached} calls L{nextLine}.\r\n\t6. If there are more lines, L{nextLine} works as per steps 3 and 4.\r\n\t7. Otherwise, if the object doesn't support page turns, we're finished.\r\n\t8. If the object does support page turns,\r\n\t\twe request that L{turnPage} be called when speech is finished.\r\n\t9. L{turnPage} tries to turn the page.\r\n\t10. If there are no more pages, we're finished.\r\n\t11. 
If there is another page, L{turnPage} calls L{nextLine}.\r\n\t\"\"\"\r\n\tMAX_BUFFERED_LINES = 10\r\n\r\n\tdef __init__(self, cursor):\r\n\t\tself.cursor = cursor\r\n\t\tself.trigger = SayAllProfileTrigger()\r\n\t\tself.trigger.enter()\r\n\t\t# Start at the cursor.\r\n\t\tif cursor == CURSOR_CARET:\r\n\t\t\ttry:\r\n\t\t\t\tself.reader = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)\r\n\t\t\texcept (NotImplementedError, RuntimeError) as e:\r\n\t\t\t\traise NotImplementedError(\"Unable to make TextInfo: \" + str(e))\r\n\t\telse:\r\n\t\t\tself.reader = api.getReviewPosition()\r\n\t\tself.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)\r\n\t\tself.numBufferedLines = 0\r\n\r\n\tdef nextLine(self):\r\n\t\tif not self.reader:\r\n\t\t\tlog.debug(\"no self.reader\")\r\n\t\t\t# We were stopped.\r\n\t\t\treturn\r\n\t\tif not self.reader.obj:\r\n\t\t\tlog.debug(\"no self.reader.obj\")\r\n\t\t\t# The object died, so we should too.\r\n\t\t\tself.finish()\r\n\t\t\treturn\r\n\t\tbookmark = self.reader.bookmark\r\n\t\t# Expand to the current line.\r\n\t\t# We use move end rather than expand\r\n\t\t# because the user might start in the middle of a line\r\n\t\t# and we don't want to read from the start of the line in that case.\r\n\t\t# For lines after the first, it's also more efficient because\r\n\t\t# we're already at the start of the line, so there's no need to search backwards.\r\n\t\tdelta = self.reader.move(textInfos.UNIT_READINGCHUNK, 1, endPoint=\"end\")\r\n\t\tif delta <= 0:\r\n\t\t\t# No more text.\r\n\t\t\tif isinstance(self.reader.obj, textInfos.DocumentWithPageTurns):\r\n\t\t\t\t# Once the last line finishes reading, try turning the page.\r\n\t\t\t\tcb = speech.CallbackCommand(self.turnPage, name=\"say-all:turnPage\")\r\n\t\t\t\tspeech.speakWithoutPauses([cb, speech.EndUtteranceCommand()])\r\n\t\t\telse:\r\n\t\t\t\tself.finish()\r\n\t\t\treturn\r\n\r\n\t\t# Copy the speakTextInfoState so that speak callbackCommand\r\n\t\t# and its associated callback are using a copy isolated to this specific line.\r\n\t\tstate = self.speakTextInfoState.copy()\r\n\t\t# Call lineReached when we start speaking this line.\r\n\t\t# lineReached will move the cursor and trigger reading of the next line.\r\n\r\n\t\tdef _onLineReached(obj=self.reader.obj, state=state):\r\n\t\t\tself.lineReached(obj, bookmark, state)\r\n\r\n\t\tcb = speech.CallbackCommand(\r\n\t\t\t_onLineReached,\r\n\t\t\tname=\"say-all:lineReached\"\r\n\t\t)\r\n\r\n\t\t# Generate the speech sequence for the reader textInfo\r\n\t\t# and insert the lineReached callback at the very beginning of the sequence.\r\n\t\t# _linePrefix on speakTextInfo cannot be used here\r\n\t\t# As it would be inserted in the sequence after all initial control starts which is too late.\r\n\t\tspeechGen = speech.getTextInfoSpeech(\r\n\t\t\tself.reader,\r\n\t\t\tunit=textInfos.UNIT_READINGCHUNK,\r\n\t\t\treason=controlTypes.REASON_SAYALL,\r\n\t\t\tuseCache=state\r\n\t\t)\r\n\t\tseq = list(speech._flattenNestedSequences(speechGen))\r\n\t\tseq.insert(0, cb)\r\n\t\t# Speak the speech sequence.\r\n\t\tspoke = speech.speakWithoutPauses(seq)\r\n\t\t# Update the textInfo state ready for when speaking the next line.\r\n\t\tself.speakTextInfoState = state.copy()\r\n\r\n\t\t# Collapse to the end of this line, ready to read the next.\r\n\t\ttry:\r\n\t\t\tself.reader.collapse(end=True)\r\n\t\texcept RuntimeError:\r\n\t\t\t# This occurs in Microsoft Word when the range covers the end of the document.\r\n\t\t\t# without this exception to indicate that further 
collapsing is not possible, say all could enter an infinite loop.\r\n\t\t\tself.finish()\r\n\t\t\treturn\r\n\t\tif not spoke:\r\n\t\t\t# This line didn't include a natural pause, so nothing was spoken.\r\n\t\t\tself.numBufferedLines += 1\r\n\t\t\tif self.numBufferedLines < self.MAX_BUFFERED_LINES:\r\n\t\t\t\t# Move on to the next line.\r\n\t\t\t\t# We queue this to allow the user a chance to stop say all.\r\n\t\t\t\tqueueHandler.queueFunction(queueHandler.eventQueue, self.nextLine)\r\n\t\t\telse:\r\n\t\t\t\t# We don't want to buffer too much.\r\n\t\t\t\t# Force speech. lineReached will resume things when speech catches up.\r\n\t\t\t\tspeech.speakWithoutPauses(None)\r\n\t\t\t\t# The first buffered line has now started speaking.\r\n\t\t\t\tself.numBufferedLines -= 1\r\n\r\n\tdef lineReached(self, obj, bookmark, state):\r\n\t\t# We've just started speaking this line, so move the cursor there.\r\n\t\tstate.updateObj()\r\n\t\tupdater = obj.makeTextInfo(bookmark)\r\n\t\tif self.cursor == CURSOR_CARET:\r\n\t\t\tupdater.updateCaret()\r\n\t\tif self.cursor != CURSOR_CARET or config.conf[\"reviewCursor\"][\"followCaret\"]:\r\n\t\t\tapi.setReviewPosition(updater, isCaret=self.cursor==CURSOR_CARET)\r\n\t\twinKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)\r\n\t\tif self.numBufferedLines == 0:\r\n\t\t\t# This was the last line spoken, so move on.\r\n\t\t\tself.nextLine()\r\n\t\telse:\r\n\t\t\tself.numBufferedLines -= 1\r\n\r\n\tdef turnPage(self):\r\n\t\ttry:\r\n\t\t\tself.reader.obj.turnPage()\r\n\t\texcept RuntimeError:\r\n\t\t\tlog.debug(\"No more pages\")\r\n\t\t\t# No more pages.\r\n\t\t\tself.stop()\r\n\t\t\treturn\r\n\t\tself.reader = self.reader.obj.makeTextInfo(textInfos.POSITION_FIRST)\r\n\t\tself.nextLine()\r\n\r\n\tdef finish(self):\r\n\t\t# There is no more text.\r\n\t\t# Call stop to clean up, but only after speech completely finishes.\r\n\t\t# Otherwise, if a different synth is being used for say all,\r\n\t\t# we might switch synths too early and truncate the final speech.\r\n\t\t# We do this by putting a CallbackCommand at the start of a new utterance.\r\n\t\tcb = speech.CallbackCommand(self.stop, name=\"say-all:stop\")\r\n\t\tspeech.speakWithoutPauses([\r\n\t\t\tspeech.EndUtteranceCommand(),\r\n\t\t\tcb,\r\n\t\t\tspeech.EndUtteranceCommand()\r\n\t\t])\r\n\r\n\tdef stop(self):\r\n\t\tif not self.reader:\r\n\t\t\treturn\r\n\t\tself.reader = None\r\n\t\tself.trigger.exit()\r\n\t\tself.trigger = None\r\n\r\n\tdef __del__(self):\r\n\t\tself.stop()\r\n\r\nclass SayAllProfileTrigger(config.ProfileTrigger):\r\n\t\"\"\"A configuration profile trigger for when say all is in progress.\r\n\t\"\"\"\r\n\tspec = \"sayAll\"\r\n", "path": "source/sayAllHandler.py"}]}
| 3,998 | 211 |
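The crux of the NVDA fix above is initialisation order, sketched below with a stripped-down stand-in class; `Reader`, `broken_factory`, and the factory callable are hypothetical, and the real constructor additionally moves `trigger.enter()` after the reader is created so the SayAll profile is only entered when there is something to read.

```python
class Reader:
    """Stripped-down stand-in for sayAllHandler._TextReader."""

    def __init__(self, make_text_info):
        self.reader = None              # safe default first -- the actual fix
        self.reader = make_text_info()  # may raise NotImplementedError (e.g. on the desktop)

    def stop(self):
        if not self.reader:             # no AttributeError even if __init__ failed part-way
            return
        self.reader = None

    def __del__(self):
        self.stop()

def broken_factory():
    raise NotImplementedError("Unable to make TextInfo")

try:
    Reader(broken_factory)
except NotImplementedError:
    pass  # the half-built object is collected quietly; __del__ no longer raises
```

Without the `self.reader = None` default, the constructor's exception leaves a half-initialised object whose `__del__` calls `stop()`, which is exactly the `AttributeError` shown in the issue's traceback.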
gh_patches_debug_15530
|
rasdani/github-patches
|
git_diff
|
ibis-project__ibis-6950
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug(bigquery): memtable and string literals not escaping `\n` or `\` which results in invalid syntax
### What happened?
Code:
```python
import ibis
ibis_client = ibis.bigquery.connect()
table = ibis.memtable(
{
"col1": ["a\tb\nc", "d e f", "g'e\"h"],
}
)
print(ibis_client.compile(table))
```
Output:
```
SELECT t0.*
FROM UNNEST(ARRAY<STRUCT<col1 STRING>>[STRUCT('a b
c' AS col1), STRUCT('d e f' AS col1), STRUCT('g\'e"h' AS col1)]) t0
```
Note, the following SQL works as expected:
```
SELECT t0.*
FROM UNNEST(ARRAY<STRUCT<col1 STRING>>[STRUCT('a b\nc' AS col1), STRUCT('d e f' AS col1), STRUCT('g\'e"h' AS col1)]) t0
```
Therefore, we should really be escaping `\n` in addition to `'`. Though, perhaps there are other characters that could break BigQuery syntax? See: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#string_and_bytes_literals
Alternatively, using triple-quoted strings allows for newline characters in the string literal itself.
### What version of ibis are you using?
6.1.0
also tested on latest commit: 15f8d9575
### What backend(s) are you using, if any?
BigQuery
### Relevant log output
```sh
BigQuery API: Syntax error: Unclosed string literal at [2:47]
```
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
</issue>
<code>
[start of ibis/backends/base/sql/registry/literal.py]
1 from __future__ import annotations
2
3 import datetime
4 import math
5
6 import ibis.expr.types as ir
7
8
9 def _set_literal_format(translator, expr):
10 value_type = expr.type().value_type
11
12 formatted = [
13 translator.translate(ir.literal(x, type=value_type)) for x in expr.op().value
14 ]
15
16 return "(" + ", ".join(formatted) + ")"
17
18
19 def _boolean_literal_format(translator, op):
20 return "TRUE" if op.value else "FALSE"
21
22
23 def _string_literal_format(translator, op):
24 return "'{}'".format(op.value.replace("'", "\\'"))
25
26
27 def _number_literal_format(translator, op):
28 if math.isfinite(op.value):
29 formatted = repr(op.value)
30 else:
31 if math.isnan(op.value):
32 formatted_val = "NaN"
33 elif math.isinf(op.value):
34 if op.value > 0:
35 formatted_val = "Infinity"
36 else:
37 formatted_val = "-Infinity"
38 formatted = f"CAST({formatted_val!r} AS DOUBLE)"
39
40 return formatted
41
42
43 def _interval_literal_format(translator, op):
44 return f"INTERVAL {op.value} {op.dtype.resolution.upper()}"
45
46
47 def _date_literal_format(translator, op):
48 value = op.value
49 if isinstance(value, datetime.date):
50 value = value.strftime("%Y-%m-%d")
51
52 return repr(value)
53
54
55 def _timestamp_literal_format(translator, op):
56 value = op.value
57 if isinstance(value, datetime.datetime):
58 value = value.isoformat()
59
60 return repr(value)
61
62
63 literal_formatters = {
64 "boolean": _boolean_literal_format,
65 "number": _number_literal_format,
66 "string": _string_literal_format,
67 "interval": _interval_literal_format,
68 "timestamp": _timestamp_literal_format,
69 "date": _date_literal_format,
70 "set": _set_literal_format,
71 }
72
73
74 def literal(translator, op):
75 """Return the expression as its literal value."""
76
77 dtype = op.dtype
78
79 if op.value is None:
80 return "NULL"
81
82 if dtype.is_boolean():
83 typeclass = "boolean"
84 elif dtype.is_string():
85 typeclass = "string"
86 elif dtype.is_date():
87 typeclass = "date"
88 elif dtype.is_numeric():
89 typeclass = "number"
90 elif dtype.is_timestamp():
91 typeclass = "timestamp"
92 elif dtype.is_interval():
93 typeclass = "interval"
94 else:
95 raise NotImplementedError(f"Unsupported type: {dtype!r}")
96
97 return literal_formatters[typeclass](translator, op)
98
[end of ibis/backends/base/sql/registry/literal.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ibis/backends/base/sql/registry/literal.py b/ibis/backends/base/sql/registry/literal.py
--- a/ibis/backends/base/sql/registry/literal.py
+++ b/ibis/backends/base/sql/registry/literal.py
@@ -21,7 +21,22 @@
def _string_literal_format(translator, op):
- return "'{}'".format(op.value.replace("'", "\\'"))
+ return "'{}'".format(
+ op.value
+ # Escape \ first so we don't double escape other characters.
+ .replace("\\", "\\\\")
+ # Escape ' since we're using those for the string literal.
+ .replace("'", "\\'")
+ # ASCII escape sequences that are recognized in Python:
+ # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
+ .replace("\a", "\\a") # Bell
+ .replace("\b", "\\b") # Backspace
+ .replace("\f", "\\f") # Formfeed
+ .replace("\n", "\\n") # Newline / Linefeed
+ .replace("\r", "\\r") # Carriage return
+ .replace("\t", "\\t") # Tab
+ .replace("\v", "\\v") # Vertical tab
+ )
def _number_literal_format(translator, op):
|
{"golden_diff": "diff --git a/ibis/backends/base/sql/registry/literal.py b/ibis/backends/base/sql/registry/literal.py\n--- a/ibis/backends/base/sql/registry/literal.py\n+++ b/ibis/backends/base/sql/registry/literal.py\n@@ -21,7 +21,22 @@\n \n \n def _string_literal_format(translator, op):\n- return \"'{}'\".format(op.value.replace(\"'\", \"\\\\'\"))\n+ return \"'{}'\".format(\n+ op.value\n+ # Escape \\ first so we don't double escape other characters.\n+ .replace(\"\\\\\", \"\\\\\\\\\")\n+ # Escape ' since we're using those for the string literal.\n+ .replace(\"'\", \"\\\\'\")\n+ # ASCII escape sequences that are recognized in Python:\n+ # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals\n+ .replace(\"\\a\", \"\\\\a\") # Bell\n+ .replace(\"\\b\", \"\\\\b\") # Backspace\n+ .replace(\"\\f\", \"\\\\f\") # Formfeed\n+ .replace(\"\\n\", \"\\\\n\") # Newline / Linefeed\n+ .replace(\"\\r\", \"\\\\r\") # Carriage return\n+ .replace(\"\\t\", \"\\\\t\") # Tab\n+ .replace(\"\\v\", \"\\\\v\") # Vertical tab\n+ )\n \n \n def _number_literal_format(translator, op):\n", "issue": "bug(bigquery): memtable and string literals not escaping `\\n` or `\\` which results in invalid syntax\n### What happened?\n\nCode:\r\n\r\n```python\r\nimport ibis\r\n\r\nibis_client = ibis.bigquery.connect()\r\ntable = ibis.memtable(\r\n {\r\n \"col1\": [\"a\\tb\\nc\", \"d e f\", \"g'e\\\"h\"],\r\n }\r\n)\r\nprint(ibis_client.compile(table))\r\n```\r\n\r\nOutput:\r\n\r\n```\r\nSELECT t0.*\r\nFROM UNNEST(ARRAY<STRUCT<col1 STRING>>[STRUCT('a b\r\nc' AS col1), STRUCT('d e f' AS col1), STRUCT('g\\'e\"h' AS col1)]) t0\r\n```\r\n\r\nNote, the following SQL works as expected:\r\n\r\n```\r\nSELECT t0.*\r\nFROM UNNEST(ARRAY<STRUCT<col1 STRING>>[STRUCT('a b\\nc' AS col1), STRUCT('d e f' AS col1), STRUCT('g\\'e\"h' AS col1)]) t0\r\n```\r\n\r\nTherefore, we should really be escaping `\\n` in addition to `'`. Though, perhaps there are other characters that could break BigQuery syntax? 
See: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#string_and_bytes_literals\r\n\r\nAlternatively, using triple-quoted strings allows for newline characters in the string literal itself.\n\n### What version of ibis are you using?\n\n6.1.0\r\n\r\nalso tested on latest commit: 15f8d9575\n\n### What backend(s) are you using, if any?\n\nBigQuery\n\n### Relevant log output\n\n```sh\nBigQuery API: Syntax error: Unclosed string literal at [2:47]\n```\n\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "from __future__ import annotations\n\nimport datetime\nimport math\n\nimport ibis.expr.types as ir\n\n\ndef _set_literal_format(translator, expr):\n value_type = expr.type().value_type\n\n formatted = [\n translator.translate(ir.literal(x, type=value_type)) for x in expr.op().value\n ]\n\n return \"(\" + \", \".join(formatted) + \")\"\n\n\ndef _boolean_literal_format(translator, op):\n return \"TRUE\" if op.value else \"FALSE\"\n\n\ndef _string_literal_format(translator, op):\n return \"'{}'\".format(op.value.replace(\"'\", \"\\\\'\"))\n\n\ndef _number_literal_format(translator, op):\n if math.isfinite(op.value):\n formatted = repr(op.value)\n else:\n if math.isnan(op.value):\n formatted_val = \"NaN\"\n elif math.isinf(op.value):\n if op.value > 0:\n formatted_val = \"Infinity\"\n else:\n formatted_val = \"-Infinity\"\n formatted = f\"CAST({formatted_val!r} AS DOUBLE)\"\n\n return formatted\n\n\ndef _interval_literal_format(translator, op):\n return f\"INTERVAL {op.value} {op.dtype.resolution.upper()}\"\n\n\ndef _date_literal_format(translator, op):\n value = op.value\n if isinstance(value, datetime.date):\n value = value.strftime(\"%Y-%m-%d\")\n\n return repr(value)\n\n\ndef _timestamp_literal_format(translator, op):\n value = op.value\n if isinstance(value, datetime.datetime):\n value = value.isoformat()\n\n return repr(value)\n\n\nliteral_formatters = {\n \"boolean\": _boolean_literal_format,\n \"number\": _number_literal_format,\n \"string\": _string_literal_format,\n \"interval\": _interval_literal_format,\n \"timestamp\": _timestamp_literal_format,\n \"date\": _date_literal_format,\n \"set\": _set_literal_format,\n}\n\n\ndef literal(translator, op):\n \"\"\"Return the expression as its literal value.\"\"\"\n\n dtype = op.dtype\n\n if op.value is None:\n return \"NULL\"\n\n if dtype.is_boolean():\n typeclass = \"boolean\"\n elif dtype.is_string():\n typeclass = \"string\"\n elif dtype.is_date():\n typeclass = \"date\"\n elif dtype.is_numeric():\n typeclass = \"number\"\n elif dtype.is_timestamp():\n typeclass = \"timestamp\"\n elif dtype.is_interval():\n typeclass = \"interval\"\n else:\n raise NotImplementedError(f\"Unsupported type: {dtype!r}\")\n\n return literal_formatters[typeclass](translator, op)\n", "path": "ibis/backends/base/sql/registry/literal.py"}]}
| 1,679 | 316 |
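A self-contained sketch of the escaping rule the ibis patch above applies, with the same replacement chain pulled out of the translator for illustration; `escape_sql_string` is a hypothetical helper, not part of the ibis API.

```python
def escape_sql_string(value: str) -> str:
    # Same chain as the patched _string_literal_format.
    return "'{}'".format(
        value.replace("\\", "\\\\")   # escape backslash first to avoid double-escaping
             .replace("'", "\\'")     # the literal itself is single-quoted
             .replace("\a", "\\a")    # bell
             .replace("\b", "\\b")    # backspace
             .replace("\f", "\\f")    # formfeed
             .replace("\n", "\\n")    # the newline that produced the unclosed literal
             .replace("\r", "\\r")    # carriage return
             .replace("\t", "\\t")    # tab
             .replace("\v", "\\v")    # vertical tab
    )

assert escape_sql_string("a\tb\nc") == r"'a\tb\nc'"
assert escape_sql_string("g'e\"h") == "'g\\'e\"h'"
```

Escaping the backslash first matters: doing it after the other replacements would double-escape the backslashes they introduce.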
gh_patches_debug_9401
|
rasdani/github-patches
|
git_diff
|
vyperlang__vyper-1595
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add docs for assert_modifiable
### What's your issue about?
`assert_modifiable` was added in #1480; add docs for it.
### How can it be fixed?
`^.^`
</issue>
<code>
[start of docs/conf.py]
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # Vyper documentation build configuration file, created by
5 # sphinx-quickstart on Wed Jul 26 11:18:29 2017.
6 #
7 # This file is execfile()d with the current directory set to its
8 # containing dir.
9 #
10 # Note that not all possible configuration values are present in this
11 # autogenerated file.
12 #
13 # All configuration values have a default; values that are commented out
14 # serve to show the default.
15
16 # If extensions (or modules to document with autodoc) are in another directory,
17 # add these directories to sys.path here. If the directory is relative to the
18 # documentation root, use os.path.abspath to make it absolute, like shown here.
19 #
20 # import os
21 # import sys
22 # sys.path.insert(0, os.path.abspath('.'))
23 from recommonmark.parser import CommonMarkParser
24
25 # TO DO - Create and Implement Vyper Lexer
26 # def setup(sphinx):
27 # sys.path.insert(0, os.path.abspath('./utils'))
28 # from SolidityLexer import SolidityLexer
29 # sphinx.add_lexer('Python', SolidityLexer())
30
31
32 # -- General configuration ------------------------------------------------
33
34 # If your documentation needs a minimal Sphinx version, state it here.
35 #
36 # needs_sphinx = '1.0'
37
38 # Add any Sphinx extension module names here, as strings. They can be
39 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
40 # ones.
41 extensions = [
42 'sphinx.ext.autodoc'
43 ]
44
45 # Add any paths that contain templates here, relative to this directory.
46 templates_path = ['_templates']
47
48 # The suffix(es) of source filenames.
49 # You can specify multiple suffix as a list of string:
50 #
51 # source_suffix = ['.rst', '.md']
52 source_suffix = '.rst'
53
54 # The master toctree document.
55 master_doc = 'index'
56
57 # General information about the project.
58 project = 'Vyper'
59 copyright = '2017, Vitalik Buterin'
60 author = 'Vitalik Buterin'
61
62 # The version info for the project you're documenting, acts as replacement for
63 # |version| and |release|, also used in various other places throughout the
64 # built documents.
65 #
66 # The short X.Y version.
67 version = ''
68 # The full version, including alpha/beta/rc tags.
69 release = ''
70
71 # The language for content autogenerated by Sphinx. Refer to documentation
72 # for a list of supported languages.
73 #
74 # This is also used if you do content translation via gettext catalogs.
75 # Usually you set "language" from the command line for these cases.
76 language = 'python'
77
78 # List of patterns, relative to source directory, that match files and
79 # directories to ignore when looking for source files.
80 # This patterns also effect to html_static_path and html_extra_path
81 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
82
83 # The name of the Pygments (syntax highlighting) style to use.
84 pygments_style = 'sphinx'
85
86 # If true, `todo` and `todoList` produce output, else they produce nothing.
87 todo_include_todos = False
88
89
90 # -- Options for HTML output ----------------------------------------------
91
92 # The theme to use for HTML and HTML Help pages. See the documentation for
93 # a list of builtin themes.
94 #
95 html_theme = "sphinx_rtd_theme"
96
97 # Theme options are theme-specific and customize the look and feel of a theme
98 # further. For a list of options available for each theme, see the
99 # documentation.
100 #
101 # html_theme_options = {}
102
103 # Add any paths that contain custom static files (such as style sheets) here,
104 # relative to this directory. They are copied after the builtin static files,
105 # so a file named "default.css" will overwrite the builtin "default.css".
106 html_static_path = ['_static']
107
108 # Custom sidebar templates, must be a dictionary that maps document names
109 # to template names.
110 #
111 # This is required for the alabaster theme
112 # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
113 html_sidebars = {
114 '**': [
115 'about.html',
116 'navigation.html',
117 'relations.html', # needs 'show_related': True theme option to display
118 'searchbox.html',
119 'donate.html',
120 ]
121 }
122
123
124 # -- Options for HTMLHelp output ------------------------------------------
125
126 # Output file base name for HTML help builder.
127 htmlhelp_basename = 'Vyperdoc'
128
129
130 # -- Options for LaTeX output ---------------------------------------------
131
132 latex_elements = {
133 # The paper size ('letterpaper' or 'a4paper').
134 #
135 # 'papersize': 'letterpaper',
136
137 # The font size ('10pt', '11pt' or '12pt').
138 #
139 # 'pointsize': '10pt',
140
141 # Additional stuff for the LaTeX preamble.
142 #
143 # 'preamble': '',
144
145 # Latex figure (float) alignment
146 #
147 # 'figure_align': 'htbp',
148 }
149
150 # Grouping the document tree into LaTeX files. List of tuples
151 # (source start file, target name, title,
152 # author, documentclass [howto, manual, or own class]).
153 latex_documents = [
154 (master_doc, 'Vyper.tex', 'Vyper Documentation',
155 'Vitalik Buterin', 'manual'),
156 ]
157
158
159 # -- Options for manual page output ---------------------------------------
160
161 # One entry per manual page. List of tuples
162 # (source start file, name, description, authors, manual section).
163 man_pages = [
164 (master_doc, 'vyper', 'Vyper Documentation',
165 [author], 1)
166 ]
167
168
169 # -- Options for Texinfo output -------------------------------------------
170
171 # Grouping the document tree into Texinfo files. List of tuples
172 # (source start file, target name, title, author,
173 # dir menu entry, description, category)
174 texinfo_documents = [
175 (master_doc, 'Vyper', 'Vyper Documentation',
176 author, 'Vyper', 'One line description of project.',
177 'Miscellaneous'),
178 ]
179
180 source_parsers = {
181 '.md': CommonMarkParser,
182 }
183
184 source_suffix = ['.rst', '.md']
185
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -103,7 +103,7 @@
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -103,7 +103,7 @@\n # Add any paths that contain custom static files (such as style sheets) here,\n # relative to this directory. They are copied after the builtin static files,\n # so a file named \"default.css\" will overwrite the builtin \"default.css\".\n-html_static_path = ['_static']\n+# html_static_path = ['_static']\n \n # Custom sidebar templates, must be a dictionary that maps document names\n # to template names.\n", "issue": "Add docs for assert_modifiable\n\r\n### What's your issue about?\r\n\r\n`assert_modifiable` was added in #1480 add docs for it.\r\n\r\n### How can it be fixed?\r\n\r\n`^.^`\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Vyper documentation build configuration file, created by\n# sphinx-quickstart on Wed Jul 26 11:18:29 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\nfrom recommonmark.parser import CommonMarkParser\n\n# TO DO - Create and Implement Vyper Lexer\n# def setup(sphinx):\n# sys.path.insert(0, os.path.abspath('./utils'))\n# from SolidityLexer import SolidityLexer\n# sphinx.add_lexer('Python', SolidityLexer())\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Vyper'\ncopyright = '2017, Vitalik Buterin'\nauthor = 'Vitalik Buterin'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = ''\n# The full version, including alpha/beta/rc tags.\nrelease = ''\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = 'python'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'about.html',\n 'navigation.html',\n 'relations.html', # needs 'show_related': True theme option to display\n 'searchbox.html',\n 'donate.html',\n ]\n}\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Vyperdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Vyper.tex', 'Vyper Documentation',\n 'Vitalik Buterin', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'vyper', 'Vyper Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Vyper', 'Vyper Documentation',\n author, 'Vyper', 'One line description of project.',\n 'Miscellaneous'),\n]\n\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nsource_suffix = ['.rst', '.md']\n", "path": "docs/conf.py"}]}
| 2,365 | 124 |
gh_patches_debug_33289
|
rasdani/github-patches
|
git_diff
|
optuna__optuna-2634
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Review constraint scikit-learn<0.23
scikit-optimize 0.8.1 has been released, which works together with scikit-learn >= 0.23
https://github.com/scikit-optimize/scikit-optimize/releases/tag/v0.8.1
</issue>
<code>
[start of setup.py]
1 import os
2 from typing import Dict
3 from typing import List
4 from typing import Optional
5
6 import pkg_resources
7 from setuptools import find_packages
8 from setuptools import setup
9
10
11 def get_version() -> str:
12
13 version_filepath = os.path.join(os.path.dirname(__file__), "optuna", "version.py")
14 with open(version_filepath) as f:
15 for line in f:
16 if line.startswith("__version__"):
17 return line.strip().split()[-1][1:-1]
18 assert False
19
20
21 def get_long_description() -> str:
22
23 readme_filepath = os.path.join(os.path.dirname(__file__), "README.md")
24 with open(readme_filepath) as f:
25 return f.read()
26
27
28 def get_install_requires() -> List[str]:
29
30 requirements = [
31 "alembic",
32 "cliff",
33 "cmaes>=0.8.2",
34 "colorlog",
35 "numpy",
36 "packaging>=20.0",
37 "scipy!=1.4.0",
38 "sqlalchemy>=1.1.0",
39 "tqdm",
40 ]
41 return requirements
42
43
44 def get_tests_require() -> List[str]:
45
46 return get_extras_require()["testing"]
47
48
49 def get_extras_require() -> Dict[str, List[str]]:
50
51 requirements = {
52 # TODO(HideakiImamura) Unpin mypy version after fixing "Duplicate modules" error in
53 # examples and tutorials.
54 "checking": ["black", "hacking", "isort", "mypy==0.790", "blackdoc"],
55 "codecov": ["codecov", "pytest-cov"],
56 "doctest": [
57 "cma",
58 "matplotlib>=3.0.0",
59 "pandas",
60 "plotly>=4.0.0",
61 "scikit-learn>=0.19.0,<0.23.0",
62 "scikit-optimize",
63 "mlflow",
64 ],
65 "document": [
66 # TODO(nzw): Remove the version constraint after resolving the issue
67 # https://github.com/optuna/optuna/issues/2658.
68 "sphinx<4.0.0",
69 "sphinx_rtd_theme",
70 "sphinx-copybutton",
71 "sphinx-gallery",
72 "sphinx-plotly-directive",
73 "pillow",
74 "matplotlib",
75 "scikit-learn",
76 "plotly>=4.0.0", # optuna/visualization.
77 "pandas",
78 "lightgbm",
79 "torch==1.8.0",
80 "torchvision==0.9.0",
81 "torchaudio==0.8.0",
82 "thop",
83 ],
84 "example": [
85 "nbval",
86 "scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'",
87 # optuna/visualization/param_importances.py.
88 "thop",
89 "torch==1.8.0 ; sys_platform=='darwin'",
90 "torch==1.8.0+cpu ; sys_platform!='darwin'",
91 "torchvision==0.9.0 ; sys_platform=='darwin'",
92 "torchvision==0.9.0+cpu ; sys_platform!='darwin'",
93 "torchaudio==0.8.0",
94 "botorch>=0.4.0 ; python_version>'3.6'",
95 "pandas",
96 "plotly",
97 "requests",
98 ],
99 "experimental": ["redis"],
100 "testing": [
101 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
102 # https://github.com/optuna/optuna/issues/1000.
103 "bokeh<2.0.0",
104 "chainer>=5.0.0",
105 "cma",
106 "fakeredis",
107 "lightgbm",
108 "matplotlib>=3.0.0",
109 "mlflow",
110 "mpi4py",
111 "mxnet",
112 "pandas",
113 "plotly>=4.0.0",
114 "pytest",
115 "scikit-learn>=0.19.0,<0.23.0",
116 "scikit-optimize",
117 "xgboost",
118 "keras",
119 # TODO(HideakiImamura): Remove the version constraint after resolving the issue
120 # https://github.com/keras-team/keras/issues/14632
121 "tensorflow<2.5.0 ; python_version<'3.9'",
122 "tensorflow-datasets",
123 "pytorch-ignite",
124 "pytorch-lightning>=1.0.2",
125 "skorch",
126 "catalyst>=21.3",
127 "torch==1.8.0 ; sys_platform=='darwin'",
128 "torch==1.8.0+cpu ; sys_platform!='darwin'",
129 "torchvision==0.9.0 ; sys_platform=='darwin'",
130 "torchvision==0.9.0+cpu ; sys_platform!='darwin'",
131 "torchaudio==0.8.0",
132 "allennlp>=2.2.0",
133 "botorch>=0.4.0 ; python_version>'3.6'",
134 "fastai",
135 ],
136 "tests": [
137 "fakeredis",
138 "pytest",
139 ],
140 "optional": [
141 "bokeh<2.0.0", # optuna/cli.py, optuna/dashboard.py.
142 "matplotlib>=3.0.0", # optuna/visualization/matplotlib
143 "pandas", # optuna/study.py
144 "plotly>=4.0.0", # optuna/visualization.
145 "redis", # optuna/storages/redis.py.
146 "scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'",
147 # optuna/visualization/param_importances.py.
148 ],
149 "integration": [
150 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
151 # https://github.com/optuna/optuna/issues/1000.
152 "chainer>=5.0.0",
153 "cma",
154 "lightgbm",
155 "mlflow",
156 "mpi4py",
157 "mxnet",
158 "pandas",
159 "scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'",
160 "scikit-optimize",
161 "xgboost",
162 "keras ; python_version<'3.9'",
163 # TODO(HideakiImamura): Remove the version constraint after resolving the issue
164 # https://github.com/keras-team/keras/issues/14632
165 "tensorflow<2.5.0 ; python_version<'3.9'",
166 "tensorflow-datasets ; python_version<'3.9'",
167 "pytorch-ignite",
168 "pytorch-lightning>=1.0.2",
169 "skorch",
170 "catalyst>=21.3",
171 "torch==1.8.0 ; sys_platform=='darwin'",
172 "torch==1.8.0+cpu ; sys_platform!='darwin'",
173 "torchvision==0.9.0 ; sys_platform=='darwin'",
174 "torchvision==0.9.0+cpu ; sys_platform!='darwin'",
175 "torchaudio==0.8.0",
176 "allennlp>=2.2.0",
177 "botorch>=0.4.0 ; python_version>'3.6'",
178 "fastai",
179 ],
180 }
181
182 return requirements
183
184
185 def find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:
186
187 for pkg in pkgs:
188 try:
189 return pkg_resources.get_distribution(pkg)
190 except pkg_resources.DistributionNotFound:
191 pass
192 return None
193
194
195 setup(
196 name="optuna",
197 version=get_version(),
198 description="A hyperparameter optimization framework",
199 long_description=get_long_description(),
200 long_description_content_type="text/markdown",
201 author="Takuya Akiba",
202 author_email="[email protected]",
203 url="https://optuna.org/",
204 packages=find_packages(exclude=("tests", "tests.*")),
205 package_data={
206 "optuna": [
207 "storages/_rdb/alembic.ini",
208 "storages/_rdb/alembic/*.*",
209 "storages/_rdb/alembic/versions/*.*",
210 "py.typed",
211 ]
212 },
213 python_requires=">=3.6",
214 install_requires=get_install_requires(),
215 tests_require=get_tests_require(),
216 extras_require=get_extras_require(),
217 entry_points={
218 "console_scripts": ["optuna = optuna.cli:main"],
219 "optuna.command": [
220 "create-study = optuna.cli:_CreateStudy",
221 "delete-study = optuna.cli:_DeleteStudy",
222 "study set-user-attr = optuna.cli:_StudySetUserAttribute",
223 "studies = optuna.cli:_Studies",
224 "dashboard = optuna.cli:_Dashboard",
225 "study optimize = optuna.cli:_StudyOptimize",
226 "storage upgrade = optuna.cli:_StorageUpgrade",
227 ],
228 },
229 classifiers=[
230 "Development Status :: 5 - Production/Stable",
231 "Intended Audience :: Science/Research",
232 "Intended Audience :: Developers",
233 "License :: OSI Approved :: MIT License",
234 "Programming Language :: Python :: 3",
235 "Programming Language :: Python :: 3.6",
236 "Programming Language :: Python :: 3.7",
237 "Programming Language :: Python :: 3.8",
238 "Programming Language :: Python :: 3.9",
239 "Programming Language :: Python :: 3 :: Only",
240 "Topic :: Scientific/Engineering",
241 "Topic :: Scientific/Engineering :: Mathematics",
242 "Topic :: Scientific/Engineering :: Artificial Intelligence",
243 "Topic :: Software Development",
244 "Topic :: Software Development :: Libraries",
245 "Topic :: Software Development :: Libraries :: Python Modules",
246 ],
247 )
248
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -58,7 +58,7 @@
"matplotlib>=3.0.0",
"pandas",
"plotly>=4.0.0",
- "scikit-learn>=0.19.0,<0.23.0",
+ "scikit-learn>=0.24.2",
"scikit-optimize",
"mlflow",
],
@@ -83,7 +83,7 @@
],
"example": [
"nbval",
- "scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'",
+ "scikit-learn>=0.24.2",
# optuna/visualization/param_importances.py.
"thop",
"torch==1.8.0 ; sys_platform=='darwin'",
@@ -112,7 +112,7 @@
"pandas",
"plotly>=4.0.0",
"pytest",
- "scikit-learn>=0.19.0,<0.23.0",
+ "scikit-learn>=0.24.2",
"scikit-optimize",
"xgboost",
"keras",
@@ -143,7 +143,7 @@
"pandas", # optuna/study.py
"plotly>=4.0.0", # optuna/visualization.
"redis", # optuna/storages/redis.py.
- "scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'",
+ "scikit-learn>=0.24.2",
# optuna/visualization/param_importances.py.
],
"integration": [
@@ -156,7 +156,7 @@
"mpi4py",
"mxnet",
"pandas",
- "scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'",
+ "scikit-learn>=0.24.2",
"scikit-optimize",
"xgboost",
"keras ; python_version<'3.9'",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -58,7 +58,7 @@\n \"matplotlib>=3.0.0\",\n \"pandas\",\n \"plotly>=4.0.0\",\n- \"scikit-learn>=0.19.0,<0.23.0\",\n+ \"scikit-learn>=0.24.2\",\n \"scikit-optimize\",\n \"mlflow\",\n ],\n@@ -83,7 +83,7 @@\n ],\n \"example\": [\n \"nbval\",\n- \"scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'\",\n+ \"scikit-learn>=0.24.2\",\n # optuna/visualization/param_importances.py.\n \"thop\",\n \"torch==1.8.0 ; sys_platform=='darwin'\",\n@@ -112,7 +112,7 @@\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n- \"scikit-learn>=0.19.0,<0.23.0\",\n+ \"scikit-learn>=0.24.2\",\n \"scikit-optimize\",\n \"xgboost\",\n \"keras\",\n@@ -143,7 +143,7 @@\n \"pandas\", # optuna/study.py\n \"plotly>=4.0.0\", # optuna/visualization.\n \"redis\", # optuna/storages/redis.py.\n- \"scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'\",\n+ \"scikit-learn>=0.24.2\",\n # optuna/visualization/param_importances.py.\n ],\n \"integration\": [\n@@ -156,7 +156,7 @@\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n- \"scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'\",\n+ \"scikit-learn>=0.24.2\",\n \"scikit-optimize\",\n \"xgboost\",\n \"keras ; python_version<'3.9'\",\n", "issue": "Review constrain scikit-learn< 0.23\nscikit-optimize 0.8.1 has been released which works together with scikit-learn >= 0.23\r\n\r\nhttps://github.com/scikit-optimize/scikit-optimize/releases/tag/v0.8.1\n", "before_files": [{"content": "import os\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optuna\", \"version.py\")\n with open(version_filepath) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), \"README.md\")\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n requirements = [\n \"alembic\",\n \"cliff\",\n \"cmaes>=0.8.2\",\n \"colorlog\",\n \"numpy\",\n \"packaging>=20.0\",\n \"scipy!=1.4.0\",\n \"sqlalchemy>=1.1.0\",\n \"tqdm\",\n ]\n return requirements\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()[\"testing\"]\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n # TODO(HideakiImamura) Unpin mypy version after fixing \"Duplicate modules\" error in\n # examples and tutorials.\n \"checking\": [\"black\", \"hacking\", \"isort\", \"mypy==0.790\", \"blackdoc\"],\n \"codecov\": [\"codecov\", \"pytest-cov\"],\n \"doctest\": [\n \"cma\",\n \"matplotlib>=3.0.0\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"scikit-learn>=0.19.0,<0.23.0\",\n \"scikit-optimize\",\n \"mlflow\",\n ],\n \"document\": [\n # TODO(nzw): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/2658.\n \"sphinx<4.0.0\",\n \"sphinx_rtd_theme\",\n \"sphinx-copybutton\",\n \"sphinx-gallery\",\n \"sphinx-plotly-directive\",\n \"pillow\",\n \"matplotlib\",\n \"scikit-learn\",\n \"plotly>=4.0.0\", # optuna/visualization.\n \"pandas\",\n \"lightgbm\",\n \"torch==1.8.0\",\n \"torchvision==0.9.0\",\n \"torchaudio==0.8.0\",\n \"thop\",\n ],\n \"example\": [\n \"nbval\",\n \"scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'\",\n # optuna/visualization/param_importances.py.\n \"thop\",\n 
\"torch==1.8.0 ; sys_platform=='darwin'\",\n \"torch==1.8.0+cpu ; sys_platform!='darwin'\",\n \"torchvision==0.9.0 ; sys_platform=='darwin'\",\n \"torchvision==0.9.0+cpu ; sys_platform!='darwin'\",\n \"torchaudio==0.8.0\",\n \"botorch>=0.4.0 ; python_version>'3.6'\",\n \"pandas\",\n \"plotly\",\n \"requests\",\n ],\n \"experimental\": [\"redis\"],\n \"testing\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"bokeh<2.0.0\",\n \"chainer>=5.0.0\",\n \"cma\",\n \"fakeredis\",\n \"lightgbm\",\n \"matplotlib>=3.0.0\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n \"scikit-learn>=0.19.0,<0.23.0\",\n \"scikit-optimize\",\n \"xgboost\",\n \"keras\",\n # TODO(HideakiImamura): Remove the version constraint after resolving the issue\n # https://github.com/keras-team/keras/issues/14632\n \"tensorflow<2.5.0 ; python_version<'3.9'\",\n \"tensorflow-datasets\",\n \"pytorch-ignite\",\n \"pytorch-lightning>=1.0.2\",\n \"skorch\",\n \"catalyst>=21.3\",\n \"torch==1.8.0 ; sys_platform=='darwin'\",\n \"torch==1.8.0+cpu ; sys_platform!='darwin'\",\n \"torchvision==0.9.0 ; sys_platform=='darwin'\",\n \"torchvision==0.9.0+cpu ; sys_platform!='darwin'\",\n \"torchaudio==0.8.0\",\n \"allennlp>=2.2.0\",\n \"botorch>=0.4.0 ; python_version>'3.6'\",\n \"fastai\",\n ],\n \"tests\": [\n \"fakeredis\",\n \"pytest\",\n ],\n \"optional\": [\n \"bokeh<2.0.0\", # optuna/cli.py, optuna/dashboard.py.\n \"matplotlib>=3.0.0\", # optuna/visualization/matplotlib\n \"pandas\", # optuna/study.py\n \"plotly>=4.0.0\", # optuna/visualization.\n \"redis\", # optuna/storages/redis.py.\n \"scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'\",\n # optuna/visualization/param_importances.py.\n ],\n \"integration\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"chainer>=5.0.0\",\n \"cma\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'\",\n \"scikit-optimize\",\n \"xgboost\",\n \"keras ; python_version<'3.9'\",\n # TODO(HideakiImamura): Remove the version constraint after resolving the issue\n # https://github.com/keras-team/keras/issues/14632\n \"tensorflow<2.5.0 ; python_version<'3.9'\",\n \"tensorflow-datasets ; python_version<'3.9'\",\n \"pytorch-ignite\",\n \"pytorch-lightning>=1.0.2\",\n \"skorch\",\n \"catalyst>=21.3\",\n \"torch==1.8.0 ; sys_platform=='darwin'\",\n \"torch==1.8.0+cpu ; sys_platform!='darwin'\",\n \"torchvision==0.9.0 ; sys_platform=='darwin'\",\n \"torchvision==0.9.0+cpu ; sys_platform!='darwin'\",\n \"torchaudio==0.8.0\",\n \"allennlp>=2.2.0\",\n \"botorch>=0.4.0 ; python_version>'3.6'\",\n \"fastai\",\n ],\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\nsetup(\n name=\"optuna\",\n version=get_version(),\n description=\"A hyperparameter optimization framework\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Takuya Akiba\",\n author_email=\"[email protected]\",\n url=\"https://optuna.org/\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n package_data={\n \"optuna\": [\n \"storages/_rdb/alembic.ini\",\n 
\"storages/_rdb/alembic/*.*\",\n \"storages/_rdb/alembic/versions/*.*\",\n \"py.typed\",\n ]\n },\n python_requires=\">=3.6\",\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n entry_points={\n \"console_scripts\": [\"optuna = optuna.cli:main\"],\n \"optuna.command\": [\n \"create-study = optuna.cli:_CreateStudy\",\n \"delete-study = optuna.cli:_DeleteStudy\",\n \"study set-user-attr = optuna.cli:_StudySetUserAttribute\",\n \"studies = optuna.cli:_Studies\",\n \"dashboard = optuna.cli:_Dashboard\",\n \"study optimize = optuna.cli:_StudyOptimize\",\n \"storage upgrade = optuna.cli:_StorageUpgrade\",\n ],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}]}
| 3,407 | 520 |
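The record above turns on a version specifier (relaxing `scikit-learn>=0.19.0,<0.23.0` to `scikit-learn>=0.24.2` in `setup.py`). As a side illustration only, the sketch below shows one way such a specifier can be checked against an installed distribution using the `packaging` library that the quoted `setup.py` already lists in `install_requires`; the `satisfies` helper and the Python 3.8+ `importlib.metadata` import are assumptions made for this example and are not part of the record.

```python
# Illustrative sketch, not part of the dataset record above.
from importlib.metadata import PackageNotFoundError, version

from packaging.specifiers import SpecifierSet
from packaging.version import Version


def satisfies(dist_name, specifier):
    """Return True if dist_name is installed and its version matches specifier."""
    try:
        installed = Version(version(dist_name))
    except PackageNotFoundError:
        return False
    return installed in SpecifierSet(specifier)


if __name__ == "__main__":
    # True for any installed scikit-learn >= 0.24.2; False for the old <0.23 pins.
    print(satisfies("scikit-learn", ">=0.24.2"))
```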
gh_patches_debug_18800
|
rasdani/github-patches
|
git_diff
|
DistrictDataLabs__yellowbrick-407
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ClassificationScoreVisualizers should return accuracy
See #358 and #213 -- classification score visualizers should return accuracy when `score()` is called. If F1 or accuracy is not in the figure it should also be included in the figure.
</issue>
<code>
[start of yellowbrick/classifier/base.py]
1 # yellowbrick.classifier.base
2 # API for classification visualizer hierarchy.
3 #
4 # Author: Rebecca Bilbro <[email protected]>
5 # Author: Benjamin Bengfort <[email protected]>
6 # Author: Neal Humphrey
7 # Created: Wed May 18 12:39:40 2016 -0400
8 #
9 # Copyright (C) 2016 District Data Labs
10 # For license information, see LICENSE.txt
11 #
12 # ID: base.py [5388065] [email protected] $
13
14 """
15 API for classification visualizer hierarchy.
16 """
17
18 ##########################################################################
19 ## Imports
20 ##########################################################################
21
22 import numpy as np
23
24 from ..utils import isclassifier
25 from ..base import ScoreVisualizer
26 from ..style.palettes import color_palette
27 from ..exceptions import YellowbrickTypeError
28
29
30 ##########################################################################
31 ## Base Classification Visualizer
32 ##########################################################################
33
34 class ClassificationScoreVisualizer(ScoreVisualizer):
35
36 def __init__(self, model, ax=None, classes=None, **kwargs):
37 """
38 Check to see if model is an instance of a classifer.
39 Should return an error if it isn't.
40
41 .. todo:: document this class.
42 .. tood:: accept as input classes as all visualizers need this.
43 """
44 # A bit of type checking
45 if not isclassifier(model):
46 raise YellowbrickTypeError(
47 "This estimator is not a classifier; "
48 "try a regression or clustering score visualizer instead!"
49 )
50
51 # Initialize the super method.
52 super(ClassificationScoreVisualizer, self).__init__(model, ax=ax, **kwargs)
53
54 # Convert to array if necessary to match estimator.classes_
55 if classes is not None:
56 classes = np.array(classes)
57
58 # Set up classifier score visualization properties
59 if classes is not None:
60 n_colors = len(classes)
61 else:
62 n_colors = None
63
64 self.colors = color_palette(kwargs.pop('colors', None), n_colors)
65 self.classes_ = classes
66
67 @property
68 def classes_(self):
69 """
70 Proxy property to smartly access the classes from the estimator or
71 stored locally on the score visualizer for visualization.
72 """
73 if self.__classes is None:
74 try:
75 return self.estimator.classes_
76 except AttributeError:
77 return None
78 return self.__classes
79
80 @classes_.setter
81 def classes_(self, value):
82 self.__classes = value
83
84 def fit(self, X, y=None, **kwargs):
85 """
86 Parameters
87 ----------
88
89 X : ndarray or DataFrame of shape n x m
90 A matrix of n instances with m features
91
92 y : ndarray or Series of length n
93 An array or series of target or class values
94
95 kwargs: keyword arguments passed to Scikit-Learn API.
96
97 Returns
98 -------
99 self : instance
100 Returns the instance of the classification score visualizer
101
102 """
103 # Fit the inner estimator
104 self.estimator.fit(X, y)
105
106 # Extract the classes from the estimator
107 if self.classes_ is None:
108 self.classes_ = self.estimator.classes_
109
110 # Always return self from fit
111 return self
112
113 #TODO during refactoring this can be used to generalize ClassBalance
114 def class_counts(self, y):
115 unique, counts = np.unique(y, return_counts=True)
116 return dict(zip(unique, counts))
117
[end of yellowbrick/classifier/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/yellowbrick/classifier/base.py b/yellowbrick/classifier/base.py
--- a/yellowbrick/classifier/base.py
+++ b/yellowbrick/classifier/base.py
@@ -110,6 +110,28 @@
# Always return self from fit
return self
+
+ def score(self, X, y, **kwargs):
+ """
+ The score function is the hook for visual interaction. Pass in test
+ data and the visualizer will create predictions on the data and
+ evaluate them with respect to the test values. The evaluation will
+ then be passed to draw() and the result of the estimator score will
+ be returned.
+ Parameters
+ ----------
+ X : array-like
+ X (also X_test) are the dependent variables of test set to predict
+ y : array-like
+ y (also y_test) is the independent actual variables to score against
+ Returns
+ -------
+ score : float
+ """
+ self.score_ = self.estimator.score(X, y, **kwargs)
+
+ return self.score_
+
#TODO during refactoring this can be used to generalize ClassBalance
def class_counts(self, y):
unique, counts = np.unique(y, return_counts=True)
|
{"golden_diff": "diff --git a/yellowbrick/classifier/base.py b/yellowbrick/classifier/base.py\n--- a/yellowbrick/classifier/base.py\n+++ b/yellowbrick/classifier/base.py\n@@ -110,6 +110,28 @@\n # Always return self from fit\n return self\n \n+\n+ def score(self, X, y, **kwargs):\n+ \"\"\"\n+ The score function is the hook for visual interaction. Pass in test\n+ data and the visualizer will create predictions on the data and\n+ evaluate them with respect to the test values. The evaluation will\n+ then be passed to draw() and the result of the estimator score will\n+ be returned.\n+ Parameters\n+ ----------\n+ X : array-like\n+ X (also X_test) are the dependent variables of test set to predict\n+ y : array-like\n+ y (also y_test) is the independent actual variables to score against\n+ Returns\n+ -------\n+ score : float\n+ \"\"\"\n+ self.score_ = self.estimator.score(X, y, **kwargs)\n+\n+ return self.score_\n+\n #TODO during refactoring this can be used to generalize ClassBalance\n def class_counts(self, y):\n unique, counts = np.unique(y, return_counts=True)\n", "issue": "ClassificationScoreVisualizers should return accuracy\nSee #358 and #213 -- classification score visualizers should return accuracy when `score()` is called. If F1 or accuracy is not in the figure it should also be included in the figure. \n", "before_files": [{"content": "# yellowbrick.classifier.base\n# API for classification visualizer hierarchy.\n#\n# Author: Rebecca Bilbro <[email protected]>\n# Author: Benjamin Bengfort <[email protected]>\n# Author: Neal Humphrey\n# Created: Wed May 18 12:39:40 2016 -0400\n#\n# Copyright (C) 2016 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: base.py [5388065] [email protected] $\n\n\"\"\"\nAPI for classification visualizer hierarchy.\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport numpy as np\n\nfrom ..utils import isclassifier\nfrom ..base import ScoreVisualizer\nfrom ..style.palettes import color_palette\nfrom ..exceptions import YellowbrickTypeError\n\n\n##########################################################################\n## Base Classification Visualizer\n##########################################################################\n\nclass ClassificationScoreVisualizer(ScoreVisualizer):\n\n def __init__(self, model, ax=None, classes=None, **kwargs):\n \"\"\"\n Check to see if model is an instance of a classifer.\n Should return an error if it isn't.\n\n .. todo:: document this class.\n .. 
tood:: accept as input classes as all visualizers need this.\n \"\"\"\n # A bit of type checking\n if not isclassifier(model):\n raise YellowbrickTypeError(\n \"This estimator is not a classifier; \"\n \"try a regression or clustering score visualizer instead!\"\n )\n\n # Initialize the super method.\n super(ClassificationScoreVisualizer, self).__init__(model, ax=ax, **kwargs)\n\n # Convert to array if necessary to match estimator.classes_\n if classes is not None:\n classes = np.array(classes)\n\n # Set up classifier score visualization properties\n if classes is not None:\n n_colors = len(classes)\n else:\n n_colors = None\n\n self.colors = color_palette(kwargs.pop('colors', None), n_colors)\n self.classes_ = classes\n\n @property\n def classes_(self):\n \"\"\"\n Proxy property to smartly access the classes from the estimator or\n stored locally on the score visualizer for visualization.\n \"\"\"\n if self.__classes is None:\n try:\n return self.estimator.classes_\n except AttributeError:\n return None\n return self.__classes\n\n @classes_.setter\n def classes_(self, value):\n self.__classes = value\n\n def fit(self, X, y=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n\n X : ndarray or DataFrame of shape n x m\n A matrix of n instances with m features\n\n y : ndarray or Series of length n\n An array or series of target or class values\n\n kwargs: keyword arguments passed to Scikit-Learn API.\n\n Returns\n -------\n self : instance\n Returns the instance of the classification score visualizer\n\n \"\"\"\n # Fit the inner estimator\n self.estimator.fit(X, y)\n\n # Extract the classes from the estimator\n if self.classes_ is None:\n self.classes_ = self.estimator.classes_\n\n # Always return self from fit\n return self\n\n #TODO during refactoring this can be used to generalize ClassBalance\n def class_counts(self, y):\n unique, counts = np.unique(y, return_counts=True)\n return dict(zip(unique, counts))\n", "path": "yellowbrick/classifier/base.py"}]}
| 1,580 | 286 |
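The golden diff above makes `score()` delegate to the wrapped estimator and cache the result in `score_`, so callers get accuracy back for classifiers. The sketch below is a minimal, self-contained illustration of that pattern using scikit-learn directly; `ScoringWrapper`, the iris data, and `LogisticRegression` are stand-ins chosen for the example, not yellowbrick's actual `ClassificationScoreVisualizer`.

```python
# Simplified stand-in showing the delegate-and-cache score() pattern.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split


class ScoringWrapper:
    def __init__(self, estimator):
        self.estimator = estimator
        self.score_ = None

    def fit(self, X, y):
        self.estimator.fit(X, y)
        return self

    def score(self, X, y):
        # Delegate to the wrapped estimator; for classifiers this is accuracy.
        self.score_ = self.estimator.score(X, y)
        return self.score_


if __name__ == "__main__":
    X, y = load_iris(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    viz = ScoringWrapper(LogisticRegression(max_iter=1000))
    viz.fit(X_train, y_train)
    print(viz.score(X_test, y_test))  # held-out accuracy, e.g. ~0.97
```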
gh_patches_debug_23280
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmaction2-723
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error in --validate while training.
Hi,
My dataset is multiclass `VideoDataset` type and looks something like this.
```
some/path/000.mp4 1 3 5
some/path/001.mp4 1 2
some/path/002.mp4 2 6 10
```
Given video data and multi-class labels, I adapted my configs to test something on a small dataset from `tsn_r50_video_1x1x8_100e_kinetics400_rgb.py` and `tsn_r101_1x1x5_50e_mmit_rgb.py`
It looks something like this
```
# model settings
model = dict(
type='Recognizer2D',
backbone=dict(
type='ResNet',
pretrained='torchvision://resnet50',
depth=50,
norm_eval=False),
cls_head=dict(
type='TSNHead',
num_classes=14,
in_channels=2048,
spatial_type='avg',
consensus=dict(type='AvgConsensus', dim=1),
loss_cls=dict(type='BCELossWithLogits', loss_weight=160.0),
dropout_ratio=0.5,
init_std=0.01,
multi_class=True,
label_smooth_eps=0))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'VideoDataset'
data_root = '/home/rajawat/Desktop/videos'
data_root_val = '/home/rajawat/Desktop/val_videos'
ann_file_train = '/home/rajawat/Desktop/labels/train.txt'
ann_file_val = '/home/rajawat/Desktop/labels/val.txt'
ann_file_test = '/home/rajawat/Desktop/labels/test.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='DecordInit'),
dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),
dict(type='DecordDecode'),
dict(type='RandomResizedCrop'),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='DecordInit'),
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=3,
test_mode=True),
dict(type='DecordDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(type='DecordInit'),
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=25,
test_mode=True),
dict(type='DecordDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='TenCrop', crop_size=224),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=2,
workers_per_gpu=0,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline,
multi_class=True,
num_classes=14),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline,
multi_class=True,
num_classes=14),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix=data_root_val,
pipeline=test_pipeline,
multi_class=True,
num_classes=14))
# optimizer
optimizer = dict(
type='SGD',
constructor='TSMOptimizerConstructor',
paramwise_cfg=dict(fc_lr5=True),
lr=0.01, # this lr is used for 8 gpus
momentum=0.9,
weight_decay=0.0001,
)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 10
checkpoint_config = dict(interval=5)
evaluation = dict(interval=2, metrics=['mean_average_precision'])
# yapf:disable
log_config = dict(
interval=2,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r101_1x1x5_50e_mmit_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
```
While training, `python tools/train.py configs/recognition/tsn/tsn_r50_1x1x5_50e_cater_rgb.py --gpus 1` works well, but with `--validate` it breaks down.
```
Traceback (most recent call last):
File "tools/train.py", line 178, in <module>
main()
File "tools/train.py", line 174, in main
meta=meta)
File "/home/rajawat/Desktop/mmaction/apis/train.py", line 156, in train_model
runner.run(data_loaders, cfg.workflow, cfg.total_epochs, **runner_kwargs)
File "/home/rajawat/anaconda3/envs/slowfast/lib/python3.7/site-packages/mmcv/runner/epoch_based_runner.py", line 125, in run
epoch_runner(data_loaders[i], **kwargs)
File "/home/rajawat/anaconda3/envs/slowfast/lib/python3.7/site-packages/mmcv/runner/epoch_based_runner.py", line 54, in train
self.call_hook('after_train_epoch')
File "/home/rajawat/anaconda3/envs/slowfast/lib/python3.7/site-packages/mmcv/runner/base_runner.py", line 308, in call_hook
getattr(hook, fn_name)(self)
File "/home/rajawat/Desktop/mmaction/core/evaluation/eval_hooks.py", line 152, in after_train_epoch
key_score = self.evaluate(runner, results)
File "/home/rajawat/Desktop/mmaction/core/evaluation/eval_hooks.py", line 170, in evaluate
results, logger=runner.logger, **self.eval_kwargs)
File "/home/rajawat/Desktop/mmaction/datasets/base.py", line 215, in evaluate
for label in gt_labels
File "/home/rajawat/Desktop/mmaction/datasets/base.py", line 215, in <listcomp>
for label in gt_labels
File "/home/rajawat/Desktop/mmaction/datasets/base.py", line 121, in label2array
arr[label] = 1.
IndexError: only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices
```
Am I missing something in the config?
Thanks in advance.
</issue>
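The traceback above fails inside `label2array` at `arr[label] = 1.`, which only works when `label` is a plain list of integer class indices rather than an already one-hot tensor. The snippet below is an illustrative reconstruction of that conversion for integer index lists (as in `some/path/000.mp4 1 3 5`); it is a sketch for clarity, not mmaction's actual implementation.

```python
# Illustrative multi-hot encoding of a multi-label annotation line.
import numpy as np


def label2array(num_classes, label):
    """Multi-hot encode a list of integer class indices."""
    arr = np.zeros(num_classes, dtype=np.float32)
    arr[label] = 1.0  # fancy indexing needs integer indices, not one-hot tensors
    return arr


if __name__ == "__main__":
    # "some/path/000.mp4 1 3 5" -> class indices [1, 3, 5] out of 14 classes
    print(label2array(14, [1, 3, 5]))
```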
<code>
[start of mmaction/datasets/video_dataset.py]
1 import os.path as osp
2
3 import torch
4
5 from .base import BaseDataset
6 from .registry import DATASETS
7
8
9 @DATASETS.register_module()
10 class VideoDataset(BaseDataset):
11 """Video dataset for action recognition.
12
13 The dataset loads raw videos and apply specified transforms to return a
14 dict containing the frame tensors and other information.
15
16 The ann_file is a text file with multiple lines, and each line indicates
17 a sample video with the filepath and label, which are split with a
18 whitespace. Example of a annotation file:
19
20 .. code-block:: txt
21
22 some/path/000.mp4 1
23 some/path/001.mp4 1
24 some/path/002.mp4 2
25 some/path/003.mp4 2
26 some/path/004.mp4 3
27 some/path/005.mp4 3
28
29
30 Args:
31 ann_file (str): Path to the annotation file.
32 pipeline (list[dict | callable]): A sequence of data transforms.
33 start_index (int): Specify a start index for frames in consideration of
34 different filename format. However, when taking videos as input,
35 it should be set to 0, since frames loaded from videos count
36 from 0. Default: 0.
37 **kwargs: Keyword arguments for ``BaseDataset``.
38 """
39
40 def __init__(self, ann_file, pipeline, start_index=0, **kwargs):
41 super().__init__(ann_file, pipeline, start_index=start_index, **kwargs)
42
43 def load_annotations(self):
44 """Load annotation file to get video information."""
45 if self.ann_file.endswith('.json'):
46 return self.load_json_annotations()
47
48 video_infos = []
49 with open(self.ann_file, 'r') as fin:
50 for line in fin:
51 line_split = line.strip().split()
52 if self.multi_class:
53 assert self.num_classes is not None
54 filename, label = line_split[0], line_split[1:]
55 label = list(map(int, label))
56 onehot = torch.zeros(self.num_classes)
57 onehot[label] = 1.0
58 else:
59 filename, label = line_split
60 label = int(label)
61 if self.data_prefix is not None:
62 filename = osp.join(self.data_prefix, filename)
63 video_infos.append(
64 dict(
65 filename=filename,
66 label=onehot if self.multi_class else label))
67 return video_infos
68
[end of mmaction/datasets/video_dataset.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mmaction/datasets/video_dataset.py b/mmaction/datasets/video_dataset.py
--- a/mmaction/datasets/video_dataset.py
+++ b/mmaction/datasets/video_dataset.py
@@ -1,7 +1,5 @@
import os.path as osp
-import torch
-
from .base import BaseDataset
from .registry import DATASETS
@@ -53,15 +51,10 @@
assert self.num_classes is not None
filename, label = line_split[0], line_split[1:]
label = list(map(int, label))
- onehot = torch.zeros(self.num_classes)
- onehot[label] = 1.0
else:
filename, label = line_split
label = int(label)
if self.data_prefix is not None:
filename = osp.join(self.data_prefix, filename)
- video_infos.append(
- dict(
- filename=filename,
- label=onehot if self.multi_class else label))
+ video_infos.append(dict(filename=filename, label=label))
return video_infos
|
{"golden_diff": "diff --git a/mmaction/datasets/video_dataset.py b/mmaction/datasets/video_dataset.py\n--- a/mmaction/datasets/video_dataset.py\n+++ b/mmaction/datasets/video_dataset.py\n@@ -1,7 +1,5 @@\n import os.path as osp\n \n-import torch\n-\n from .base import BaseDataset\n from .registry import DATASETS\n \n@@ -53,15 +51,10 @@\n assert self.num_classes is not None\n filename, label = line_split[0], line_split[1:]\n label = list(map(int, label))\n- onehot = torch.zeros(self.num_classes)\n- onehot[label] = 1.0\n else:\n filename, label = line_split\n label = int(label)\n if self.data_prefix is not None:\n filename = osp.join(self.data_prefix, filename)\n- video_infos.append(\n- dict(\n- filename=filename,\n- label=onehot if self.multi_class else label))\n+ video_infos.append(dict(filename=filename, label=label))\n return video_infos\n", "issue": "Error in --validate while training.\nHi,\r\n\r\nMy dataset is multiclass `VideoDataset` type and looks something like this.\r\n```\r\n\r\n some/path/000.mp4 1 3 5\r\n some/path/001.mp4 1 2\r\n some/path/002.mp4 2 6 10\r\n\r\n```\r\nGiven video data and multi-class, I adopted my configs to test something on a small dataset from `tsn_r50_video_1x1x8_100e_kinetics400_rgb.py` and `tsn_r101_1x1x5_50e_mmit_rgb.py` \r\n\r\nIt looks something like this\r\n```\r\n# model settings\r\nmodel = dict(\r\n type='Recognizer2D',\r\n backbone=dict(\r\n type='ResNet',\r\n pretrained='torchvision://resnet50',\r\n depth=50,\r\n norm_eval=False),\r\n cls_head=dict(\r\n type='TSNHead',\r\n num_classes=14,\r\n in_channels=2048,\r\n spatial_type='avg',\r\n consensus=dict(type='AvgConsensus', dim=1),\r\n loss_cls=dict(type='BCELossWithLogits', loss_weight=160.0),\r\n dropout_ratio=0.5,\r\n init_std=0.01,\r\n multi_class=True,\r\n label_smooth_eps=0))\r\n# model training and testing settings\r\ntrain_cfg = None\r\ntest_cfg = dict(average_clips=None)\r\n# dataset settings\r\ndataset_type = 'VideoDataset'\r\ndata_root = '/home/rajawat/Desktop/videos'\r\ndata_root_val = '/home/rajawat/Desktop/val_videos'\r\nann_file_train = '/home/rajawat/Desktop/labels/train.txt'\r\nann_file_val = '/home/rajawat/Desktop/labels/val.txt'\r\nann_file_test = '/home/rajawat/Desktop/labels/test.txt'\r\nimg_norm_cfg = dict(\r\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)\r\ntrain_pipeline = [\r\n dict(type='DecordInit'),\r\n dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),\r\n dict(type='DecordDecode'),\r\n dict(type='RandomResizedCrop'),\r\n dict(type='Resize', scale=(224, 224), keep_ratio=False),\r\n dict(type='Flip', flip_ratio=0.5),\r\n dict(type='Normalize', **img_norm_cfg),\r\n dict(type='FormatShape', input_format='NCHW'),\r\n dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),\r\n dict(type='ToTensor', keys=['imgs', 'label'])\r\n]\r\nval_pipeline = [\r\n dict(type='DecordInit'),\r\n dict(\r\n type='SampleFrames',\r\n clip_len=1,\r\n frame_interval=1,\r\n num_clips=3,\r\n test_mode=True),\r\n dict(type='DecordDecode'),\r\n dict(type='Resize', scale=(-1, 256)),\r\n dict(type='CenterCrop', crop_size=224),\r\n dict(type='Flip', flip_ratio=0),\r\n dict(type='Normalize', **img_norm_cfg),\r\n dict(type='FormatShape', input_format='NCHW'),\r\n dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),\r\n dict(type='ToTensor', keys=['imgs'])\r\n]\r\ntest_pipeline = [\r\n dict(type='DecordInit'),\r\n dict(\r\n type='SampleFrames',\r\n clip_len=1,\r\n frame_interval=1,\r\n num_clips=25,\r\n test_mode=True),\r\n 
dict(type='DecordDecode'),\r\n dict(type='Resize', scale=(-1, 256)),\r\n dict(type='TenCrop', crop_size=224),\r\n dict(type='Flip', flip_ratio=0),\r\n dict(type='Normalize', **img_norm_cfg),\r\n dict(type='FormatShape', input_format='NCHW'),\r\n dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),\r\n dict(type='ToTensor', keys=['imgs'])\r\n]\r\n\r\ndata = dict(\r\n videos_per_gpu=2,\r\n workers_per_gpu=0,\r\n train=dict(\r\n type=dataset_type,\r\n ann_file=ann_file_train,\r\n data_prefix=data_root,\r\n pipeline=train_pipeline,\r\n multi_class=True,\r\n num_classes=14),\r\n val=dict(\r\n type=dataset_type,\r\n ann_file=ann_file_val,\r\n data_prefix=data_root_val,\r\n pipeline=val_pipeline,\r\n multi_class=True,\r\n num_classes=14),\r\n test=dict(\r\n type=dataset_type,\r\n ann_file=ann_file_test,\r\n data_prefix=data_root_val,\r\n pipeline=test_pipeline,\r\n multi_class=True,\r\n num_classes=14))\r\n# optimizer\r\noptimizer = dict(\r\n type='SGD',\r\n constructor='TSMOptimizerConstructor',\r\n paramwise_cfg=dict(fc_lr5=True),\r\n lr=0.01, # this lr is used for 8 gpus\r\n momentum=0.9,\r\n weight_decay=0.0001,\r\n)\r\noptimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))\r\n# learning policy\r\nlr_config = dict(policy='step', step=[20, 40])\r\ntotal_epochs = 10\r\ncheckpoint_config = dict(interval=5)\r\nevaluation = dict(interval=2, metrics=['mean_average_precision'])\r\n# yapf:disable\r\nlog_config = dict(\r\n interval=2,\r\n hooks=[\r\n dict(type='TextLoggerHook'),\r\n # dict(type='TensorboardLoggerHook'),\r\n ])\r\n# runtime settings\r\ndist_params = dict(backend='nccl')\r\nlog_level = 'INFO'\r\nwork_dir = './work_dirs/tsn_r101_1x1x5_50e_mmit_rgb/'\r\nload_from = None\r\nresume_from = None\r\nworkflow = [('train', 1)]\r\n\r\n```\r\n\r\nwhile training \r\n`python tools/train.py configs/recognition/tsn/tsn_r50_1x1x5_50e_cater_rgb.py --gpus 1` works well , but with `--validate` it breaks down. 
\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"tools/train.py\", line 178, in <module>\r\n main()\r\n File \"tools/train.py\", line 174, in main\r\n meta=meta)\r\n File \"/home/rajawat/Desktop/mmaction/apis/train.py\", line 156, in train_model\r\n runner.run(data_loaders, cfg.workflow, cfg.total_epochs, **runner_kwargs)\r\n File \"/home/rajawat/anaconda3/envs/slowfast/lib/python3.7/site-packages/mmcv/runner/epoch_based_runner.py\", line 125, in run\r\n epoch_runner(data_loaders[i], **kwargs)\r\n File \"/home/rajawat/anaconda3/envs/slowfast/lib/python3.7/site-packages/mmcv/runner/epoch_based_runner.py\", line 54, in train\r\n self.call_hook('after_train_epoch')\r\n File \"/home/rajawat/anaconda3/envs/slowfast/lib/python3.7/site-packages/mmcv/runner/base_runner.py\", line 308, in call_hook\r\n getattr(hook, fn_name)(self)\r\n File \"/home/rajawat/Desktop/mmaction/core/evaluation/eval_hooks.py\", line 152, in after_train_epoch\r\n key_score = self.evaluate(runner, results)\r\n File \"/home/rajawat/Desktop/mmaction/core/evaluation/eval_hooks.py\", line 170, in evaluate\r\n results, logger=runner.logger, **self.eval_kwargs)\r\n File \"/home/rajawat/Desktop/mmaction/datasets/base.py\", line 215, in evaluate\r\n for label in gt_labels\r\n File \"/home/rajawat/Desktop/mmaction/datasets/base.py\", line 215, in <listcomp>\r\n for label in gt_labels\r\n File \"/home/rajawat/Desktop/mmaction/datasets/base.py\", line 121, in label2array\r\n arr[label] = 1.\r\nIndexError: only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices\r\n\r\n```\r\nAm I missing something in the config?\r\nThanks in advance.\n", "before_files": [{"content": "import os.path as osp\n\nimport torch\n\nfrom .base import BaseDataset\nfrom .registry import DATASETS\n\n\[email protected]_module()\nclass VideoDataset(BaseDataset):\n \"\"\"Video dataset for action recognition.\n\n The dataset loads raw videos and apply specified transforms to return a\n dict containing the frame tensors and other information.\n\n The ann_file is a text file with multiple lines, and each line indicates\n a sample video with the filepath and label, which are split with a\n whitespace. Example of a annotation file:\n\n .. code-block:: txt\n\n some/path/000.mp4 1\n some/path/001.mp4 1\n some/path/002.mp4 2\n some/path/003.mp4 2\n some/path/004.mp4 3\n some/path/005.mp4 3\n\n\n Args:\n ann_file (str): Path to the annotation file.\n pipeline (list[dict | callable]): A sequence of data transforms.\n start_index (int): Specify a start index for frames in consideration of\n different filename format. However, when taking videos as input,\n it should be set to 0, since frames loaded from videos count\n from 0. 
Default: 0.\n **kwargs: Keyword arguments for ``BaseDataset``.\n \"\"\"\n\n def __init__(self, ann_file, pipeline, start_index=0, **kwargs):\n super().__init__(ann_file, pipeline, start_index=start_index, **kwargs)\n\n def load_annotations(self):\n \"\"\"Load annotation file to get video information.\"\"\"\n if self.ann_file.endswith('.json'):\n return self.load_json_annotations()\n\n video_infos = []\n with open(self.ann_file, 'r') as fin:\n for line in fin:\n line_split = line.strip().split()\n if self.multi_class:\n assert self.num_classes is not None\n filename, label = line_split[0], line_split[1:]\n label = list(map(int, label))\n onehot = torch.zeros(self.num_classes)\n onehot[label] = 1.0\n else:\n filename, label = line_split\n label = int(label)\n if self.data_prefix is not None:\n filename = osp.join(self.data_prefix, filename)\n video_infos.append(\n dict(\n filename=filename,\n label=onehot if self.multi_class else label))\n return video_infos\n", "path": "mmaction/datasets/video_dataset.py"}]}
| 3,030 | 229 |
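The patch above keeps each sample's multi-class annotation as a plain list of class indices and leaves one-hot conversion to downstream helpers such as `label2array`. A minimal sketch of that conversion (illustrative helper name, assuming NumPy):

```python
import numpy as np


def label_to_onehot(label, num_classes):
    """Turn an int or a list of ints into a one-hot / multi-hot float vector."""
    arr = np.zeros(num_classes, dtype=np.float32)
    # NumPy fancy indexing accepts both a single int and a list of ints,
    # so single-label and multi-label samples go through the same path.
    arr[label] = 1.0
    return arr


print(label_to_onehot([1, 3, 5], num_classes=14))
```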
gh_patches_debug_32057
|
rasdani/github-patches
|
git_diff
|
doccano__doccano-1985
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Feature Request] Allowed to add more metadata for a project
Feature description
---------
Currently we have many annotation projects in doccano.
However, it is not easy to find the right project, because the only information shown for a project is its name.
- It would be good if the user could add more metadata to a project, such as the creation date, creating user, and a description. All of that metadata could be shown on the project list page to help the user find the project.
- The metadata for a project should be editable. For example, if we created a project with a bad name such as "DocumentationClassification-1", we currently cannot change the name.
- Some way to search, sort, or filter the projects would also help. For example, sorting projects by creation date, or showing only the projects created by a given user.
</issue>
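For context, Django REST Framework already ships the building blocks this request maps onto: `SearchFilter` for free-text search and `OrderingFilter` for sortable columns, both driven by query parameters. A generic sketch of the mechanism (illustrative view, not doccano's actual code, and it assumes a configured Django/DRF project):

```python
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters, generics


class ExampleProjectList(generics.ListAPIView):
    # queryset and serializer_class omitted; any model with these fields works
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    search_fields = ("name", "description")     # GET /projects?search=classification
    ordering_fields = ("name", "created_at")    # GET /projects?ordering=-created_at
    ordering = ("-created_at",)                 # default: newest projects first
```

The accepted fix below takes this route for the project list endpoint and additionally exposes the creation date and author on the serializer so they can be displayed and sorted on.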
<code>
[start of backend/projects/serializers.py]
1 from rest_framework import serializers
2 from rest_polymorphic.serializers import PolymorphicSerializer
3
4 from .models import (
5 BoundingBoxProject,
6 ImageCaptioningProject,
7 ImageClassificationProject,
8 IntentDetectionAndSlotFillingProject,
9 Member,
10 Project,
11 SegmentationProject,
12 Seq2seqProject,
13 SequenceLabelingProject,
14 Speech2textProject,
15 Tag,
16 TextClassificationProject,
17 )
18
19
20 class MemberSerializer(serializers.ModelSerializer):
21 username = serializers.SerializerMethodField()
22 rolename = serializers.SerializerMethodField()
23
24 @classmethod
25 def get_username(cls, instance):
26 user = instance.user
27 return user.username if user else None
28
29 @classmethod
30 def get_rolename(cls, instance):
31 role = instance.role
32 return role.name if role else None
33
34 class Meta:
35 model = Member
36 fields = ("id", "user", "role", "username", "rolename")
37
38
39 class TagSerializer(serializers.ModelSerializer):
40 class Meta:
41 model = Tag
42 fields = (
43 "id",
44 "project",
45 "text",
46 )
47 read_only_fields = ("id", "project")
48
49
50 class ProjectSerializer(serializers.ModelSerializer):
51 tags = TagSerializer(many=True, required=False)
52
53 class Meta:
54 model = Project
55 fields = [
56 "id",
57 "name",
58 "description",
59 "guideline",
60 "project_type",
61 "updated_at",
62 "random_order",
63 "created_by",
64 "collaborative_annotation",
65 "single_class_classification",
66 "is_text_project",
67 "can_define_label",
68 "can_define_relation",
69 "can_define_category",
70 "can_define_span",
71 "tags",
72 ]
73 read_only_fields = (
74 "updated_at",
75 "is_text_project",
76 "can_define_label",
77 "can_define_relation",
78 "can_define_category",
79 "can_define_span",
80 )
81
82 def create(self, validated_data):
83 tags = TagSerializer(data=validated_data.pop("tags", []), many=True)
84 project = self.Meta.model.objects.create(**validated_data)
85 tags.is_valid()
86 tags.save(project=project)
87 return project
88
89 def update(self, instance, validated_data):
90 # Don't update tags. Please use TagAPI.
91 validated_data.pop("tags", None)
92 return super().update(instance, validated_data)
93
94
95 class TextClassificationProjectSerializer(ProjectSerializer):
96 class Meta(ProjectSerializer.Meta):
97 model = TextClassificationProject
98
99
100 class SequenceLabelingProjectSerializer(ProjectSerializer):
101 class Meta(ProjectSerializer.Meta):
102 model = SequenceLabelingProject
103 fields = ProjectSerializer.Meta.fields + ["allow_overlapping", "grapheme_mode", "use_relation"]
104
105
106 class Seq2seqProjectSerializer(ProjectSerializer):
107 class Meta(ProjectSerializer.Meta):
108 model = Seq2seqProject
109
110
111 class IntentDetectionAndSlotFillingProjectSerializer(ProjectSerializer):
112 class Meta(ProjectSerializer.Meta):
113 model = IntentDetectionAndSlotFillingProject
114
115
116 class Speech2textProjectSerializer(ProjectSerializer):
117 class Meta(ProjectSerializer.Meta):
118 model = Speech2textProject
119
120
121 class ImageClassificationProjectSerializer(ProjectSerializer):
122 class Meta(ProjectSerializer.Meta):
123 model = ImageClassificationProject
124
125
126 class BoundingBoxProjectSerializer(ProjectSerializer):
127 class Meta(ProjectSerializer.Meta):
128 model = BoundingBoxProject
129
130
131 class SegmentationProjectSerializer(ProjectSerializer):
132 class Meta(ProjectSerializer.Meta):
133 model = SegmentationProject
134
135
136 class ImageCaptioningProjectSerializer(ProjectSerializer):
137 class Meta(ProjectSerializer.Meta):
138 model = ImageCaptioningProject
139
140
141 class ProjectPolymorphicSerializer(PolymorphicSerializer):
142 model_serializer_mapping = {
143 Project: ProjectSerializer,
144 **{cls.Meta.model: cls for cls in ProjectSerializer.__subclasses__()},
145 }
146
[end of backend/projects/serializers.py]
[start of backend/projects/views/project.py]
1 from django.conf import settings
2 from django_filters.rest_framework import DjangoFilterBackend
3 from rest_framework import filters, generics, status
4 from rest_framework.permissions import IsAdminUser, IsAuthenticated
5 from rest_framework.response import Response
6
7 from projects.models import Project
8 from projects.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly
9 from projects.serializers import ProjectPolymorphicSerializer
10
11
12 class ProjectList(generics.ListCreateAPIView):
13 serializer_class = ProjectPolymorphicSerializer
14 filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
15 search_fields = ("name", "description")
16
17 def get_permissions(self):
18 if self.request.method == "GET":
19 self.permission_classes = [
20 IsAuthenticated,
21 ]
22 else:
23 self.permission_classes = [IsAuthenticated & IsAdminUser]
24 return super().get_permissions()
25
26 def get_queryset(self):
27 return Project.objects.filter(role_mappings__user=self.request.user)
28
29 def perform_create(self, serializer):
30 project = serializer.save(created_by=self.request.user)
31 project.add_admin()
32
33 def delete(self, request, *args, **kwargs):
34 delete_ids = request.data["ids"]
35 projects = Project.objects.filter(
36 role_mappings__user=self.request.user,
37 role_mappings__role__name=settings.ROLE_PROJECT_ADMIN,
38 pk__in=delete_ids,
39 )
40 # Todo: I want to use bulk delete.
41 # But it causes the constraint error.
42 # See https://github.com/django-polymorphic/django-polymorphic/issues/229
43 for project in projects:
44 project.delete()
45 return Response(status=status.HTTP_204_NO_CONTENT)
46
47
48 class ProjectDetail(generics.RetrieveUpdateDestroyAPIView):
49 queryset = Project.objects.all()
50 serializer_class = ProjectPolymorphicSerializer
51 lookup_url_kwarg = "project_id"
52 permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]
53
[end of backend/projects/views/project.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/backend/projects/serializers.py b/backend/projects/serializers.py
--- a/backend/projects/serializers.py
+++ b/backend/projects/serializers.py
@@ -49,6 +49,13 @@
class ProjectSerializer(serializers.ModelSerializer):
tags = TagSerializer(many=True, required=False)
+ author = serializers.SerializerMethodField()
+
+ @classmethod
+ def get_author(cls, instance):
+ if instance.created_by:
+ return instance.created_by.username
+ return ""
class Meta:
model = Project
@@ -58,9 +65,10 @@
"description",
"guideline",
"project_type",
+ "created_at",
"updated_at",
"random_order",
- "created_by",
+ "author",
"collaborative_annotation",
"single_class_classification",
"is_text_project",
@@ -71,7 +79,9 @@
"tags",
]
read_only_fields = (
+ "created_at",
"updated_at",
+ "author",
"is_text_project",
"can_define_label",
"can_define_relation",
diff --git a/backend/projects/views/project.py b/backend/projects/views/project.py
--- a/backend/projects/views/project.py
+++ b/backend/projects/views/project.py
@@ -13,6 +13,8 @@
serializer_class = ProjectPolymorphicSerializer
filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
search_fields = ("name", "description")
+ ordering_fields = ["name", "created_at", "created_by", "project_type"]
+ ordering = ["-created_at"]
def get_permissions(self):
if self.request.method == "GET":
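The `author` field added above is a `SerializerMethodField`, which is read-only and resolves by calling `get_<field_name>` on the serializer. A stripped-down illustration of that behaviour (hypothetical serializer, not the project's):

```python
from rest_framework import serializers


class AuthorOnlySerializer(serializers.Serializer):
    author = serializers.SerializerMethodField()  # read-only; DRF calls get_author()

    def get_author(self, instance):
        # created_by can be NULL (e.g. the creator account was removed),
        # so fall back to an empty string instead of raising.
        return instance.created_by.username if instance.created_by else ""
```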
|
{"golden_diff": "diff --git a/backend/projects/serializers.py b/backend/projects/serializers.py\n--- a/backend/projects/serializers.py\n+++ b/backend/projects/serializers.py\n@@ -49,6 +49,13 @@\n \n class ProjectSerializer(serializers.ModelSerializer):\n tags = TagSerializer(many=True, required=False)\n+ author = serializers.SerializerMethodField()\n+\n+ @classmethod\n+ def get_author(cls, instance):\n+ if instance.created_by:\n+ return instance.created_by.username\n+ return \"\"\n \n class Meta:\n model = Project\n@@ -58,9 +65,10 @@\n \"description\",\n \"guideline\",\n \"project_type\",\n+ \"created_at\",\n \"updated_at\",\n \"random_order\",\n- \"created_by\",\n+ \"author\",\n \"collaborative_annotation\",\n \"single_class_classification\",\n \"is_text_project\",\n@@ -71,7 +79,9 @@\n \"tags\",\n ]\n read_only_fields = (\n+ \"created_at\",\n \"updated_at\",\n+ \"author\",\n \"is_text_project\",\n \"can_define_label\",\n \"can_define_relation\",\ndiff --git a/backend/projects/views/project.py b/backend/projects/views/project.py\n--- a/backend/projects/views/project.py\n+++ b/backend/projects/views/project.py\n@@ -13,6 +13,8 @@\n serializer_class = ProjectPolymorphicSerializer\n filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)\n search_fields = (\"name\", \"description\")\n+ ordering_fields = [\"name\", \"created_at\", \"created_by\", \"project_type\"]\n+ ordering = [\"-created_at\"]\n \n def get_permissions(self):\n if self.request.method == \"GET\":\n", "issue": "[Feature Request] Allowed to add more metadata for a project\nFeature description\r\n---------\r\ncurrently we have many annotation projects in doccano.\r\nHowever, it is not easy to find the the right project. Because the information for a project is only its name.\r\n- If the user could add more metadata for a project will be good. Such as the created data, created user, description. And all those metadata could be shown in project list page to help the user find the project.\r\n- the metadata for a project could be modified. For example, we created the project in a bad name such as \"DocumentationClassification-1\". And we can't change the name.\r\n- some way to search the project or sort the project or filter the project? 
For example, sort the project by creation date or only shown the project created by a user.\r\n\n", "before_files": [{"content": "from rest_framework import serializers\nfrom rest_polymorphic.serializers import PolymorphicSerializer\n\nfrom .models import (\n BoundingBoxProject,\n ImageCaptioningProject,\n ImageClassificationProject,\n IntentDetectionAndSlotFillingProject,\n Member,\n Project,\n SegmentationProject,\n Seq2seqProject,\n SequenceLabelingProject,\n Speech2textProject,\n Tag,\n TextClassificationProject,\n)\n\n\nclass MemberSerializer(serializers.ModelSerializer):\n username = serializers.SerializerMethodField()\n rolename = serializers.SerializerMethodField()\n\n @classmethod\n def get_username(cls, instance):\n user = instance.user\n return user.username if user else None\n\n @classmethod\n def get_rolename(cls, instance):\n role = instance.role\n return role.name if role else None\n\n class Meta:\n model = Member\n fields = (\"id\", \"user\", \"role\", \"username\", \"rolename\")\n\n\nclass TagSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tag\n fields = (\n \"id\",\n \"project\",\n \"text\",\n )\n read_only_fields = (\"id\", \"project\")\n\n\nclass ProjectSerializer(serializers.ModelSerializer):\n tags = TagSerializer(many=True, required=False)\n\n class Meta:\n model = Project\n fields = [\n \"id\",\n \"name\",\n \"description\",\n \"guideline\",\n \"project_type\",\n \"updated_at\",\n \"random_order\",\n \"created_by\",\n \"collaborative_annotation\",\n \"single_class_classification\",\n \"is_text_project\",\n \"can_define_label\",\n \"can_define_relation\",\n \"can_define_category\",\n \"can_define_span\",\n \"tags\",\n ]\n read_only_fields = (\n \"updated_at\",\n \"is_text_project\",\n \"can_define_label\",\n \"can_define_relation\",\n \"can_define_category\",\n \"can_define_span\",\n )\n\n def create(self, validated_data):\n tags = TagSerializer(data=validated_data.pop(\"tags\", []), many=True)\n project = self.Meta.model.objects.create(**validated_data)\n tags.is_valid()\n tags.save(project=project)\n return project\n\n def update(self, instance, validated_data):\n # Don't update tags. 
Please use TagAPI.\n validated_data.pop(\"tags\", None)\n return super().update(instance, validated_data)\n\n\nclass TextClassificationProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = TextClassificationProject\n\n\nclass SequenceLabelingProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = SequenceLabelingProject\n fields = ProjectSerializer.Meta.fields + [\"allow_overlapping\", \"grapheme_mode\", \"use_relation\"]\n\n\nclass Seq2seqProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = Seq2seqProject\n\n\nclass IntentDetectionAndSlotFillingProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = IntentDetectionAndSlotFillingProject\n\n\nclass Speech2textProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = Speech2textProject\n\n\nclass ImageClassificationProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = ImageClassificationProject\n\n\nclass BoundingBoxProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = BoundingBoxProject\n\n\nclass SegmentationProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = SegmentationProject\n\n\nclass ImageCaptioningProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = ImageCaptioningProject\n\n\nclass ProjectPolymorphicSerializer(PolymorphicSerializer):\n model_serializer_mapping = {\n Project: ProjectSerializer,\n **{cls.Meta.model: cls for cls in ProjectSerializer.__subclasses__()},\n }\n", "path": "backend/projects/serializers.py"}, {"content": "from django.conf import settings\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import filters, generics, status\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom projects.models import Project\nfrom projects.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly\nfrom projects.serializers import ProjectPolymorphicSerializer\n\n\nclass ProjectList(generics.ListCreateAPIView):\n serializer_class = ProjectPolymorphicSerializer\n filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)\n search_fields = (\"name\", \"description\")\n\n def get_permissions(self):\n if self.request.method == \"GET\":\n self.permission_classes = [\n IsAuthenticated,\n ]\n else:\n self.permission_classes = [IsAuthenticated & IsAdminUser]\n return super().get_permissions()\n\n def get_queryset(self):\n return Project.objects.filter(role_mappings__user=self.request.user)\n\n def perform_create(self, serializer):\n project = serializer.save(created_by=self.request.user)\n project.add_admin()\n\n def delete(self, request, *args, **kwargs):\n delete_ids = request.data[\"ids\"]\n projects = Project.objects.filter(\n role_mappings__user=self.request.user,\n role_mappings__role__name=settings.ROLE_PROJECT_ADMIN,\n pk__in=delete_ids,\n )\n # Todo: I want to use bulk delete.\n # But it causes the constraint error.\n # See https://github.com/django-polymorphic/django-polymorphic/issues/229\n for project in projects:\n project.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass ProjectDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Project.objects.all()\n serializer_class = ProjectPolymorphicSerializer\n lookup_url_kwarg = \"project_id\"\n permission_classes = [IsAuthenticated & 
(IsProjectAdmin | IsProjectStaffAndReadOnly)]\n", "path": "backend/projects/views/project.py"}]}
| 2,374 | 381 |
gh_patches_debug_8883
|
rasdani/github-patches
|
git_diff
|
python-pillow__Pillow-906
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot from PIL import ImageGrab
Does ImageGrab in Pillow 2.5.3 still support only Windows?
If so, why can't it be used on other operating systems?
---
/Library/Python/2.7/site-packages/Pillow-2.5.3-py2.7-macosx-10.9-intel.egg/PIL/__init__.py

Python 2.7.5 (default, Mar 9 2014, 22:15:05)
[GCC 4.2.1 Compatible Apple LLVM 5.0 (clang-500.0.68)] on darwin
Type "help", "copyright", "credits" or "license" for more information.

>>> from PIL import ImageGrab
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "build/bdist.macosx-10.9-intel/egg/PIL/ImageGrab.py", line 26, in <module>
ImportError: No module named _grabscreen
</issue>
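At the time of this report `PIL.ImageGrab` depended on a Windows-only screen-grab driver, so the import fails everywhere else. A common caller-side guard until cross-platform support exists (illustrative code, not part of Pillow):

```python
import sys

try:
    from PIL import ImageGrab  # Windows-only in Pillow 2.x
except ImportError:
    ImageGrab = None


def grab_screen():
    if ImageGrab is None:
        raise RuntimeError(
            "Screen capture is not available on %r with this Pillow version" % sys.platform
        )
    return ImageGrab.grab()
```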
<code>
[start of PIL/ImageGrab.py]
1 #
2 # The Python Imaging Library
3 # $Id$
4 #
5 # screen grabber (windows only)
6 #
7 # History:
8 # 2001-04-26 fl created
9 # 2001-09-17 fl use builtin driver, if present
10 # 2002-11-19 fl added grabclipboard support
11 #
12 # Copyright (c) 2001-2002 by Secret Labs AB
13 # Copyright (c) 2001-2002 by Fredrik Lundh
14 #
15 # See the README file for information on usage and redistribution.
16 #
17
18 from PIL import Image
19
20
21 try:
22 # built-in driver (1.1.3 and later)
23 grabber = Image.core.grabscreen
24 except AttributeError:
25 # stand-alone driver (pil plus)
26 import _grabscreen
27 grabber = _grabscreen.grab
28
29
30 def grab(bbox=None):
31 size, data = grabber()
32 im = Image.frombytes(
33 "RGB", size, data,
34 # RGB, 32-bit line padding, origo in lower left corner
35 "raw", "BGR", (size[0]*3 + 3) & -4, -1
36 )
37 if bbox:
38 im = im.crop(bbox)
39 return im
40
41
42 def grabclipboard():
43 debug = 0 # temporary interface
44 data = Image.core.grabclipboard(debug)
45 if isinstance(data, bytes):
46 from PIL import BmpImagePlugin
47 import io
48 return BmpImagePlugin.DibImageFile(io.BytesIO(data))
49 return data
50
[end of PIL/ImageGrab.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/PIL/ImageGrab.py b/PIL/ImageGrab.py
--- a/PIL/ImageGrab.py
+++ b/PIL/ImageGrab.py
@@ -17,6 +17,9 @@
from PIL import Image
+import sys
+if sys.platform != "win32":
+ raise ImportError("ImageGrab is Windows only")
try:
# built-in driver (1.1.3 and later)
@@ -40,7 +43,7 @@
def grabclipboard():
- debug = 0 # temporary interface
+ debug = 0 # temporary interface
data = Image.core.grabclipboard(debug)
if isinstance(data, bytes):
from PIL import BmpImagePlugin
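With this patch the failure on macOS or Linux becomes an explicit `ImportError` raised at import time, rather than a confusing complaint about a private `_grabscreen` module. What callers can expect on a non-Windows interpreter (sketch):

```python
try:
    from PIL import ImageGrab
except ImportError as exc:
    # With the patch applied this now reads "ImageGrab is Windows only"
    print("screen capture unavailable:", exc)
```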
|
{"golden_diff": "diff --git a/PIL/ImageGrab.py b/PIL/ImageGrab.py\n--- a/PIL/ImageGrab.py\n+++ b/PIL/ImageGrab.py\n@@ -17,6 +17,9 @@\n \n from PIL import Image\n \n+import sys\n+if sys.platform != \"win32\":\n+ raise ImportError(\"ImageGrab is Windows only\")\n \n try:\n # built-in driver (1.1.3 and later)\n@@ -40,7 +43,7 @@\n \n \n def grabclipboard():\n- debug = 0 # temporary interface\n+ debug = 0 # temporary interface\n data = Image.core.grabclipboard(debug)\n if isinstance(data, bytes):\n from PIL import BmpImagePlugin\n", "issue": "Cannot from PIL import ImageGrab\nDoes Pillow2.5.3 ImageGrab still not support other OS except windows?\nIf not, why we cannot do that?\n\n---\n\n/Library/Python/2.7/site-packages/Pillow-2.5.3-py2.7-macosx-10.9-intel.egg/PIL/**init**.py\n\nPython 2.7.5 (default, Mar 9 2014, 22:15:05)\n[GCC 4.2.1 Compatible Apple LLVM 5.0 (clang-500.0.68)] on darwin\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n\n> > > from PIL import ImageGrab\n> > > Traceback (most recent call last):\n> > > File \"<stdin>\", line 1, in <module>\n> > > File \"build/bdist.macosx-10.9-intel/egg/PIL/ImageGrab.py\", line 26, in <module>\n> > > ImportError: No module named _grabscreen\n\n", "before_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# screen grabber (windows only)\n#\n# History:\n# 2001-04-26 fl created\n# 2001-09-17 fl use builtin driver, if present\n# 2002-11-19 fl added grabclipboard support\n#\n# Copyright (c) 2001-2002 by Secret Labs AB\n# Copyright (c) 2001-2002 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nfrom PIL import Image\n\n\ntry:\n # built-in driver (1.1.3 and later)\n grabber = Image.core.grabscreen\nexcept AttributeError:\n # stand-alone driver (pil plus)\n import _grabscreen\n grabber = _grabscreen.grab\n\n\ndef grab(bbox=None):\n size, data = grabber()\n im = Image.frombytes(\n \"RGB\", size, data,\n # RGB, 32-bit line padding, origo in lower left corner\n \"raw\", \"BGR\", (size[0]*3 + 3) & -4, -1\n )\n if bbox:\n im = im.crop(bbox)\n return im\n\n\ndef grabclipboard():\n debug = 0 # temporary interface\n data = Image.core.grabclipboard(debug)\n if isinstance(data, bytes):\n from PIL import BmpImagePlugin\n import io\n return BmpImagePlugin.DibImageFile(io.BytesIO(data))\n return data\n", "path": "PIL/ImageGrab.py"}]}
| 1,219 | 156 |
gh_patches_debug_23519
|
rasdani/github-patches
|
git_diff
|
jazzband__pip-tools-927
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sync raises IncompatibleRequirements even when environment markers indicate the incompatible requirements to be irrelevant for the current platform
IMO this is simply more of #206, and I've already demonstrated the problem there. But it's been a month and no one has re-opened, so I'm opening this more specific issue to address the problem.
Here's an example `dev-requirements.txt` from the `plumbum` project:
```python
pytest
pytest-cov
pytest-mock
idna<2.8 ; python_version < '2.7'
pycparser<2.18 ; python_version < '2.7'
paramiko<2.4 ; python_version < '2.7'
paramiko ; python_version >= '2.7'
setuptools
wheel ; python_version >= '2.7'
psutil
```
```bash
pip-sync dev-requirements.txt
```
Identical output whether in a Python `3.7.4` or `2.7.16` env:
```
Incompatible requirements found: paramiko (from -r dev-requirements.txt (line 7)) and paramiko<2.4 (from -r dev-requirements.txt (line 6))
```
No packages end up installed aside from `pip-tools` and its deps.
#### Environment Versions
1. Arch Linux
1. Python version: `3.7.4`
1. pip version: `19.2.3`
1. pip-tools version: `4.1.0`
#### Steps to replicate
```bash
echo "paramiko==2.4.0 ; python_version < '2.7'" > mark.txt
echo "paramiko==2.6.0 ; python_version >= '2.7'" >> mark.txt
pip-sync mark.txt
```
Note that this works:
```bash
pip install --no-deps -r mark.txt
```
#### Expected result
`pip-sync` should ignore non-matching requirements when environment markers are present.
#### Actual result
`pip-sync` checks for conflicts as if it wants to install requirements for all platforms.
#### Further notes
```bash
mv mark.txt mark.in
pip-compile --no-header mark.in
```
```python
asn1crypto==1.0.1 # via cryptography
bcrypt==3.1.7 # via paramiko
cffi==1.12.3 # via bcrypt, cryptography, pynacl
cryptography==2.7 # via paramiko
paramiko==2.6.0 ; python_version >= "2.7"
pycparser==2.19 # via cffi
pynacl==1.3.0 # via paramiko
six==1.12.0 # via bcrypt, cryptography, pynacl
```
Currently, compiling such an in-file will only include the compile-time platform's matching reqs. This hides the issue under discussion, and arguably means it's not a bug. But I believe it is generally desired for pip-sync to honor environment markers, as evidenced by the contents of #206 (closed, but not solved), #600 (merged), #459 (replaced), #460 (merged), #518 (open 2yrs), #563 (open 2yrs), #585 (open 2yrs), #896 (open), etc.
This is probably even more relevant for working with a single python version across different platforms.
</issue>
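The behaviour requested here hinges on PEP 508 environment markers, which can be evaluated against the running interpreter independently of pip-tools. A minimal illustration using the `packaging` library:

```python
from packaging.markers import Marker

legacy_pin = Marker("python_version < '2.7'")
current_pin = Marker("python_version >= '2.7'")

# On any modern interpreter only one of the two paramiko pins applies,
# so they can never genuinely conflict in the same environment.
print(legacy_pin.evaluate())   # False on Python 3
print(current_pin.evaluate())  # True on Python 3
```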
<code>
[start of piptools/sync.py]
1 import collections
2 import os
3 import sys
4 import tempfile
5 from subprocess import check_call # nosec
6
7 from pip._internal.commands.freeze import DEV_PKGS
8 from pip._internal.utils.compat import stdlib_pkgs
9
10 from . import click
11 from .exceptions import IncompatibleRequirements
12 from .utils import (
13 flat_map,
14 format_requirement,
15 get_hashes_from_ireq,
16 is_url_requirement,
17 key_from_ireq,
18 key_from_req,
19 )
20
21 PACKAGES_TO_IGNORE = (
22 ["-markerlib", "pip", "pip-tools", "pip-review", "pkg-resources"]
23 + list(stdlib_pkgs)
24 + list(DEV_PKGS)
25 )
26
27
28 def dependency_tree(installed_keys, root_key):
29 """
30 Calculate the dependency tree for the package `root_key` and return
31 a collection of all its dependencies. Uses a DFS traversal algorithm.
32
33 `installed_keys` should be a {key: requirement} mapping, e.g.
34 {'django': from_line('django==1.8')}
35 `root_key` should be the key to return the dependency tree for.
36 """
37 dependencies = set()
38 queue = collections.deque()
39
40 if root_key in installed_keys:
41 dep = installed_keys[root_key]
42 queue.append(dep)
43
44 while queue:
45 v = queue.popleft()
46 key = key_from_req(v)
47 if key in dependencies:
48 continue
49
50 dependencies.add(key)
51
52 for dep_specifier in v.requires():
53 dep_name = key_from_req(dep_specifier)
54 if dep_name in installed_keys:
55 dep = installed_keys[dep_name]
56
57 if dep_specifier.specifier.contains(dep.version):
58 queue.append(dep)
59
60 return dependencies
61
62
63 def get_dists_to_ignore(installed):
64 """
65 Returns a collection of package names to ignore when performing pip-sync,
66 based on the currently installed environment. For example, when pip-tools
67 is installed in the local environment, it should be ignored, including all
68 of its dependencies (e.g. click). When pip-tools is not installed
69 locally, click should also be installed/uninstalled depending on the given
70 requirements.
71 """
72 installed_keys = {key_from_req(r): r for r in installed}
73 return list(
74 flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE)
75 )
76
77
78 def merge(requirements, ignore_conflicts):
79 by_key = {}
80
81 for ireq in requirements:
82 # Limitation: URL requirements are merged by precise string match, so
83 # "file:///example.zip#egg=example", "file:///example.zip", and
84 # "example==1.0" will not merge with each other
85 key = key_from_ireq(ireq)
86
87 if not ignore_conflicts:
88 existing_ireq = by_key.get(key)
89 if existing_ireq:
90 # NOTE: We check equality here since we can assume that the
91 # requirements are all pinned
92 if ireq.specifier != existing_ireq.specifier:
93 raise IncompatibleRequirements(ireq, existing_ireq)
94
95 # TODO: Always pick the largest specifier in case of a conflict
96 by_key[key] = ireq
97 return by_key.values()
98
99
100 def diff_key_from_ireq(ireq):
101 """
102 Calculate a key for comparing a compiled requirement with installed modules.
103 For URL requirements, only provide a useful key if the url includes
104 #egg=name==version, which will set ireq.req.name and ireq.specifier.
105 Otherwise return ireq.link so the key will not match and the package will
106 reinstall. Reinstall is necessary to ensure that packages will reinstall
107 if the URL is changed but the version is not.
108 """
109 if is_url_requirement(ireq):
110 if (
111 ireq.req
112 and (getattr(ireq.req, "key", None) or getattr(ireq.req, "name", None))
113 and ireq.specifier
114 ):
115 return key_from_ireq(ireq)
116 return str(ireq.link)
117 return key_from_ireq(ireq)
118
119
120 def diff(compiled_requirements, installed_dists):
121 """
122 Calculate which packages should be installed or uninstalled, given a set
123 of compiled requirements and a list of currently installed modules.
124 """
125 requirements_lut = {diff_key_from_ireq(r): r for r in compiled_requirements}
126
127 satisfied = set() # holds keys
128 to_install = set() # holds InstallRequirement objects
129 to_uninstall = set() # holds keys
130
131 pkgs_to_ignore = get_dists_to_ignore(installed_dists)
132 for dist in installed_dists:
133 key = key_from_req(dist)
134 if key not in requirements_lut or not requirements_lut[key].match_markers():
135 to_uninstall.add(key)
136 elif requirements_lut[key].specifier.contains(dist.version):
137 satisfied.add(key)
138
139 for key, requirement in requirements_lut.items():
140 if key not in satisfied and requirement.match_markers():
141 to_install.add(requirement)
142
143 # Make sure to not uninstall any packages that should be ignored
144 to_uninstall -= set(pkgs_to_ignore)
145
146 return (to_install, to_uninstall)
147
148
149 def sync(
150 to_install,
151 to_uninstall,
152 verbose=False,
153 dry_run=False,
154 install_flags=None,
155 ask=False,
156 ):
157 """
158 Install and uninstalls the given sets of modules.
159 """
160 if not to_uninstall and not to_install:
161 if verbose:
162 click.echo("Everything up-to-date")
163 return 0
164
165 pip_flags = []
166 if not verbose:
167 pip_flags += ["-q"]
168
169 if ask:
170 dry_run = True
171
172 if dry_run:
173 if to_uninstall:
174 click.echo("Would uninstall:")
175 for pkg in to_uninstall:
176 click.echo(" {}".format(pkg))
177
178 if to_install:
179 click.echo("Would install:")
180 for ireq in to_install:
181 click.echo(" {}".format(format_requirement(ireq)))
182
183 if ask and click.confirm("Would you like to proceed with these changes?"):
184 dry_run = False
185
186 if not dry_run:
187 if to_uninstall:
188 check_call( # nosec
189 [sys.executable, "-m", "pip", "uninstall", "-y"]
190 + pip_flags
191 + sorted(to_uninstall)
192 )
193
194 if to_install:
195 if install_flags is None:
196 install_flags = []
197 # prepare requirement lines
198 req_lines = []
199 for ireq in sorted(to_install, key=key_from_ireq):
200 ireq_hashes = get_hashes_from_ireq(ireq)
201 req_lines.append(format_requirement(ireq, hashes=ireq_hashes))
202
203 # save requirement lines to a temporary file
204 tmp_req_file = tempfile.NamedTemporaryFile(mode="wt", delete=False)
205 tmp_req_file.write("\n".join(req_lines))
206 tmp_req_file.close()
207
208 try:
209 check_call( # nosec
210 [sys.executable, "-m", "pip", "install", "-r", tmp_req_file.name]
211 + pip_flags
212 + install_flags
213 )
214 finally:
215 os.unlink(tmp_req_file.name)
216
217 return 0
218
[end of piptools/sync.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/piptools/sync.py b/piptools/sync.py
--- a/piptools/sync.py
+++ b/piptools/sync.py
@@ -82,18 +82,19 @@
# Limitation: URL requirements are merged by precise string match, so
# "file:///example.zip#egg=example", "file:///example.zip", and
# "example==1.0" will not merge with each other
- key = key_from_ireq(ireq)
-
- if not ignore_conflicts:
- existing_ireq = by_key.get(key)
- if existing_ireq:
- # NOTE: We check equality here since we can assume that the
- # requirements are all pinned
- if ireq.specifier != existing_ireq.specifier:
- raise IncompatibleRequirements(ireq, existing_ireq)
-
- # TODO: Always pick the largest specifier in case of a conflict
- by_key[key] = ireq
+ if ireq.match_markers():
+ key = key_from_ireq(ireq)
+
+ if not ignore_conflicts:
+ existing_ireq = by_key.get(key)
+ if existing_ireq:
+ # NOTE: We check equality here since we can assume that the
+ # requirements are all pinned
+ if ireq.specifier != existing_ireq.specifier:
+ raise IncompatibleRequirements(ireq, existing_ireq)
+
+ # TODO: Always pick the largest specifier in case of a conflict
+ by_key[key] = ireq
return by_key.values()
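The change above skips any pinned requirement whose marker does not match the current environment before conflict detection runs. The same idea in a self-contained form, using plain tuples instead of pip's `InstallRequirement` objects (a sketch, not pip-tools code):

```python
from packaging.markers import Marker


def merge_pins(pins):
    """pins: iterable of (name, version, marker_string_or_None)."""
    by_key = {}
    for name, version, marker in pins:
        if marker is not None and not Marker(marker).evaluate():
            continue  # irrelevant on this platform, so it cannot conflict
        if name in by_key and by_key[name] != version:
            raise ValueError(f"Incompatible pins for {name}: {by_key[name]} vs {version}")
        by_key[name] = version
    return by_key


print(merge_pins([
    ("paramiko", "2.4.0", "python_version < '2.7'"),
    ("paramiko", "2.6.0", "python_version >= '2.7'"),
]))  # on Python 3 this yields {'paramiko': '2.6.0'} with no conflict raised
```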
|
{"golden_diff": "diff --git a/piptools/sync.py b/piptools/sync.py\n--- a/piptools/sync.py\n+++ b/piptools/sync.py\n@@ -82,18 +82,19 @@\n # Limitation: URL requirements are merged by precise string match, so\n # \"file:///example.zip#egg=example\", \"file:///example.zip\", and\n # \"example==1.0\" will not merge with each other\n- key = key_from_ireq(ireq)\n-\n- if not ignore_conflicts:\n- existing_ireq = by_key.get(key)\n- if existing_ireq:\n- # NOTE: We check equality here since we can assume that the\n- # requirements are all pinned\n- if ireq.specifier != existing_ireq.specifier:\n- raise IncompatibleRequirements(ireq, existing_ireq)\n-\n- # TODO: Always pick the largest specifier in case of a conflict\n- by_key[key] = ireq\n+ if ireq.match_markers():\n+ key = key_from_ireq(ireq)\n+\n+ if not ignore_conflicts:\n+ existing_ireq = by_key.get(key)\n+ if existing_ireq:\n+ # NOTE: We check equality here since we can assume that the\n+ # requirements are all pinned\n+ if ireq.specifier != existing_ireq.specifier:\n+ raise IncompatibleRequirements(ireq, existing_ireq)\n+\n+ # TODO: Always pick the largest specifier in case of a conflict\n+ by_key[key] = ireq\n return by_key.values()\n", "issue": "Sync raises IncompatibleRequirements even when environment markers indicate the incompatible requirements to be irrelevant for the current platform\nIMO this is simply more of #206, and I've already demonstrated the problem there. But it's been a month and no one has re-opened, so I'm opening this more specific issue to address the problem.\r\n\r\nHere's an example `dev-requirements.txt` from the `plumbum` project:\r\n\r\n```python\r\npytest\r\npytest-cov\r\npytest-mock\r\nidna<2.8 ; python_version < '2.7'\r\npycparser<2.18 ; python_version < '2.7'\r\nparamiko<2.4 ; python_version < '2.7'\r\nparamiko ; python_version >= '2.7'\r\nsetuptools\r\nwheel ; python_version >= '2.7'\r\npsutil\r\n```\r\n\r\n```bash\r\npip-sync dev-requirements.txt\r\n```\r\n\r\nIdentical output whether in a Python `3.7.4` or `2.7.16` env:\r\n\r\n```\r\nIncompatible requirements found: paramiko (from -r dev-requirements.txt (line 7)) and paramiko<2.4 (from -r dev-requirements.txt (line 6))\r\n```\r\n\r\nNo packages end up installed aside from `pip-tools` and its deps.\r\n\r\n#### Environment Versions\r\n\r\n1. Arch Linux\r\n1. Python version: `3.7.4`\r\n1. pip version: `19.2.3`\r\n1. pip-tools version: `4.1.0`\r\n\r\n#### Steps to replicate\r\n\r\n```bash\r\necho \"paramiko==2.4.0 ; python_version < '2.7'\" > mark.txt\r\necho \"paramiko==2.6.0 ; python_version >= '2.7'\" >> mark.txt\r\npip-sync mark.txt\r\n```\r\n\r\nNote that this works:\r\n\r\n```bash\r\npip install --no-deps -r mark.txt\r\n```\r\n\r\n#### Expected result\r\n\r\n`pip-sync` should ignore non-matching requirements when environment markers are present.\r\n\r\n#### Actual result\r\n\r\n`pip-sync` checks for conflicts as if it wants to install requirements for all platforms.\r\n\r\n#### Further notes\r\n\r\n```bash\r\nmv mark.txt mark.in\r\npip-compile --no-header mark.in\r\n```\r\n\r\n```python\r\nasn1crypto==1.0.1 # via cryptography\r\nbcrypt==3.1.7 # via paramiko\r\ncffi==1.12.3 # via bcrypt, cryptography, pynacl\r\ncryptography==2.7 # via paramiko\r\nparamiko==2.6.0 ; python_version >= \"2.7\"\r\npycparser==2.19 # via cffi\r\npynacl==1.3.0 # via paramiko\r\nsix==1.12.0 # via bcrypt, cryptography, pynacl\r\n```\r\n\r\nCurrently, compiling such an in-file will only include the compile-time platform's matching reqs. 
This hides the issue under discussion, and arguably means it's not a bug. But I believe it is generally desired for pip-sync to honor environment markers, as evidenced by the contents of #206 (closed, but not solved), #600 (merged), #459 (replaced), #460 (merged), #518 (open 2yrs), #563 (open 2yrs), #585 (open 2yrs), #896 (open), etc.\r\n\r\nThis is probably even more relevant for working with a single python version across different platforms.\n", "before_files": [{"content": "import collections\nimport os\nimport sys\nimport tempfile\nfrom subprocess import check_call # nosec\n\nfrom pip._internal.commands.freeze import DEV_PKGS\nfrom pip._internal.utils.compat import stdlib_pkgs\n\nfrom . import click\nfrom .exceptions import IncompatibleRequirements\nfrom .utils import (\n flat_map,\n format_requirement,\n get_hashes_from_ireq,\n is_url_requirement,\n key_from_ireq,\n key_from_req,\n)\n\nPACKAGES_TO_IGNORE = (\n [\"-markerlib\", \"pip\", \"pip-tools\", \"pip-review\", \"pkg-resources\"]\n + list(stdlib_pkgs)\n + list(DEV_PKGS)\n)\n\n\ndef dependency_tree(installed_keys, root_key):\n \"\"\"\n Calculate the dependency tree for the package `root_key` and return\n a collection of all its dependencies. Uses a DFS traversal algorithm.\n\n `installed_keys` should be a {key: requirement} mapping, e.g.\n {'django': from_line('django==1.8')}\n `root_key` should be the key to return the dependency tree for.\n \"\"\"\n dependencies = set()\n queue = collections.deque()\n\n if root_key in installed_keys:\n dep = installed_keys[root_key]\n queue.append(dep)\n\n while queue:\n v = queue.popleft()\n key = key_from_req(v)\n if key in dependencies:\n continue\n\n dependencies.add(key)\n\n for dep_specifier in v.requires():\n dep_name = key_from_req(dep_specifier)\n if dep_name in installed_keys:\n dep = installed_keys[dep_name]\n\n if dep_specifier.specifier.contains(dep.version):\n queue.append(dep)\n\n return dependencies\n\n\ndef get_dists_to_ignore(installed):\n \"\"\"\n Returns a collection of package names to ignore when performing pip-sync,\n based on the currently installed environment. For example, when pip-tools\n is installed in the local environment, it should be ignored, including all\n of its dependencies (e.g. click). 
When pip-tools is not installed\n locally, click should also be installed/uninstalled depending on the given\n requirements.\n \"\"\"\n installed_keys = {key_from_req(r): r for r in installed}\n return list(\n flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE)\n )\n\n\ndef merge(requirements, ignore_conflicts):\n by_key = {}\n\n for ireq in requirements:\n # Limitation: URL requirements are merged by precise string match, so\n # \"file:///example.zip#egg=example\", \"file:///example.zip\", and\n # \"example==1.0\" will not merge with each other\n key = key_from_ireq(ireq)\n\n if not ignore_conflicts:\n existing_ireq = by_key.get(key)\n if existing_ireq:\n # NOTE: We check equality here since we can assume that the\n # requirements are all pinned\n if ireq.specifier != existing_ireq.specifier:\n raise IncompatibleRequirements(ireq, existing_ireq)\n\n # TODO: Always pick the largest specifier in case of a conflict\n by_key[key] = ireq\n return by_key.values()\n\n\ndef diff_key_from_ireq(ireq):\n \"\"\"\n Calculate a key for comparing a compiled requirement with installed modules.\n For URL requirements, only provide a useful key if the url includes\n #egg=name==version, which will set ireq.req.name and ireq.specifier.\n Otherwise return ireq.link so the key will not match and the package will\n reinstall. Reinstall is necessary to ensure that packages will reinstall\n if the URL is changed but the version is not.\n \"\"\"\n if is_url_requirement(ireq):\n if (\n ireq.req\n and (getattr(ireq.req, \"key\", None) or getattr(ireq.req, \"name\", None))\n and ireq.specifier\n ):\n return key_from_ireq(ireq)\n return str(ireq.link)\n return key_from_ireq(ireq)\n\n\ndef diff(compiled_requirements, installed_dists):\n \"\"\"\n Calculate which packages should be installed or uninstalled, given a set\n of compiled requirements and a list of currently installed modules.\n \"\"\"\n requirements_lut = {diff_key_from_ireq(r): r for r in compiled_requirements}\n\n satisfied = set() # holds keys\n to_install = set() # holds InstallRequirement objects\n to_uninstall = set() # holds keys\n\n pkgs_to_ignore = get_dists_to_ignore(installed_dists)\n for dist in installed_dists:\n key = key_from_req(dist)\n if key not in requirements_lut or not requirements_lut[key].match_markers():\n to_uninstall.add(key)\n elif requirements_lut[key].specifier.contains(dist.version):\n satisfied.add(key)\n\n for key, requirement in requirements_lut.items():\n if key not in satisfied and requirement.match_markers():\n to_install.add(requirement)\n\n # Make sure to not uninstall any packages that should be ignored\n to_uninstall -= set(pkgs_to_ignore)\n\n return (to_install, to_uninstall)\n\n\ndef sync(\n to_install,\n to_uninstall,\n verbose=False,\n dry_run=False,\n install_flags=None,\n ask=False,\n):\n \"\"\"\n Install and uninstalls the given sets of modules.\n \"\"\"\n if not to_uninstall and not to_install:\n if verbose:\n click.echo(\"Everything up-to-date\")\n return 0\n\n pip_flags = []\n if not verbose:\n pip_flags += [\"-q\"]\n\n if ask:\n dry_run = True\n\n if dry_run:\n if to_uninstall:\n click.echo(\"Would uninstall:\")\n for pkg in to_uninstall:\n click.echo(\" {}\".format(pkg))\n\n if to_install:\n click.echo(\"Would install:\")\n for ireq in to_install:\n click.echo(\" {}\".format(format_requirement(ireq)))\n\n if ask and click.confirm(\"Would you like to proceed with these changes?\"):\n dry_run = False\n\n if not dry_run:\n if to_uninstall:\n check_call( # nosec\n [sys.executable, 
\"-m\", \"pip\", \"uninstall\", \"-y\"]\n + pip_flags\n + sorted(to_uninstall)\n )\n\n if to_install:\n if install_flags is None:\n install_flags = []\n # prepare requirement lines\n req_lines = []\n for ireq in sorted(to_install, key=key_from_ireq):\n ireq_hashes = get_hashes_from_ireq(ireq)\n req_lines.append(format_requirement(ireq, hashes=ireq_hashes))\n\n # save requirement lines to a temporary file\n tmp_req_file = tempfile.NamedTemporaryFile(mode=\"wt\", delete=False)\n tmp_req_file.write(\"\\n\".join(req_lines))\n tmp_req_file.close()\n\n try:\n check_call( # nosec\n [sys.executable, \"-m\", \"pip\", \"install\", \"-r\", tmp_req_file.name]\n + pip_flags\n + install_flags\n )\n finally:\n os.unlink(tmp_req_file.name)\n\n return 0\n", "path": "piptools/sync.py"}]}
| 3,409 | 362 |
gh_patches_debug_1942
|
rasdani/github-patches
|
git_diff
|
ocf__ocfweb-72
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add "edit this page" link on docs?
It would link to the GitHub editor page.
</issue>
<code>
[start of ocfweb/docs/doc.py]
1 from collections import namedtuple
2
3
4 class Document(namedtuple('Document', ['name', 'title', 'render'])):
5
6 @property
7 def category(self):
8 """Return full category path of the document.
9
10 For example, "/" or "/staff/backend/".
11 """
12 return self.name.rsplit('/', 1)[0] + '/'
13
14 @property
15 def category_for_sidebar(self):
16 """Return the category to show similar pages for in the sidebar.
17
18 If this page isn't at the root category, we just return this page's
19 category.
20
21 If this page is at the root category, we return the category rooted at
22 this page (which may or may not have any pages in it).
23 """
24 if self.category == '/':
25 return self.name + '/'
26 else:
27 return self.category
28
[end of ocfweb/docs/doc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ocfweb/docs/doc.py b/ocfweb/docs/doc.py
--- a/ocfweb/docs/doc.py
+++ b/ocfweb/docs/doc.py
@@ -25,3 +25,12 @@
return self.name + '/'
else:
return self.category
+
+ @property
+ def edit_url(self):
+ """Return a GitHub edit URL for this page."""
+ return (
+ 'https://github.com/ocf/ocfweb/edit/master/ocfweb/docs/docs' +
+ self.name +
+ '.md'
+ )
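Usage of the new property, assuming the patched `Document` class from `ocfweb/docs/doc.py` is importable (the document name here is illustrative):

```python
from ocfweb.docs.doc import Document

doc = Document(name='/staff/backend/backups', title='Backups', render=None)
print(doc.edit_url)
# https://github.com/ocf/ocfweb/edit/master/ocfweb/docs/docs/staff/backend/backups.md
```

A docs template can then render this URL as an "edit this page" anchor pointing at GitHub's editor for the Markdown source.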
|
{"golden_diff": "diff --git a/ocfweb/docs/doc.py b/ocfweb/docs/doc.py\n--- a/ocfweb/docs/doc.py\n+++ b/ocfweb/docs/doc.py\n@@ -25,3 +25,12 @@\n return self.name + '/'\n else:\n return self.category\n+\n+ @property\n+ def edit_url(self):\n+ \"\"\"Return a GitHub edit URL for this page.\"\"\"\n+ return (\n+ 'https://github.com/ocf/ocfweb/edit/master/ocfweb/docs/docs' +\n+ self.name +\n+ '.md'\n+ )\n", "issue": "Add \"edit this page\" link on docs?\nIt would link to the GitHub editor page.\n\n", "before_files": [{"content": "from collections import namedtuple\n\n\nclass Document(namedtuple('Document', ['name', 'title', 'render'])):\n\n @property\n def category(self):\n \"\"\"Return full category path of the document.\n\n For example, \"/\" or \"/staff/backend/\".\n \"\"\"\n return self.name.rsplit('/', 1)[0] + '/'\n\n @property\n def category_for_sidebar(self):\n \"\"\"Return the category to show similar pages for in the sidebar.\n\n If this page isn't at the root category, we just return this page's\n category.\n\n If this page is at the root category, we return the category rooted at\n this page (which may or may not have any pages in it).\n \"\"\"\n if self.category == '/':\n return self.name + '/'\n else:\n return self.category\n", "path": "ocfweb/docs/doc.py"}]}
| 779 | 133 |
gh_patches_debug_2911
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-5067
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dvc version: does not follow symlinks
# Bug Report
## Description
This is the `dvc version` output, where it says the cache directory is `nfs4 on storage:/home` and cache type is `symlink`.
```
DVC version: 1.10.2 (pip)
---------------------------------
Platform: Python 3.8.3 on Linux-5.4.0-54-generic-x86_64-with-glibc2.10
Supports: All remotes
Cache types: symlink
Cache directory: nfs4 on storage:/home
Caches: local
Remotes: s3
Workspace directory: nfs4 on storage:/home
Repo: dvc, git
```
However, I do have a `~/.config/dvc/config` file that overrides this:
```
[core]
experiments = true
[cache]
type = "reflink,symlink,copy"
protected = true
dir = /home/jc/ssd_cache/dvc_cache
[feature]
parametrization = true
```
And the actual cache dir is `/home/jc/ssd_cache/dvc_cache`, exactly as specified, yet `dvc version` reports `nfs4 on storage:/home`.
</issue>
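The mismatch comes from reporting the filesystem of the symlinked path itself rather than of its target; `pathlib` makes the difference easy to see. A small, self-contained illustration (it creates a throwaway symlink, so it needs a POSIX filesystem):

```python
import os
import pathlib
import tempfile

tmp = pathlib.Path(tempfile.mkdtemp())
real_cache = tmp / "real_cache"
real_cache.mkdir()
link = tmp / "cache_link"
os.symlink(real_cache, link)

print(link)            # .../cache_link   -- the symlink as configured
print(link.resolve())  # .../real_cache   -- where the cache actually lives
```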
<code>
[start of dvc/info.py]
1 import itertools
2 import os
3 import pathlib
4 import platform
5 import uuid
6
7 from dvc.exceptions import DvcException, NotDvcRepoError
8 from dvc.repo import Repo
9 from dvc.scm.base import SCMError
10 from dvc.system import System
11 from dvc.tree import TREES, get_tree_cls, get_tree_config
12 from dvc.utils import error_link
13 from dvc.utils.pkg import PKG
14 from dvc.version import __version__
15
16 try:
17 import psutil
18 except ImportError:
19 psutil = None
20
21 if PKG is None:
22 package = ""
23 else:
24 package = f"({PKG})"
25
26
27 def get_dvc_info():
28 info = [
29 f"DVC version: {__version__} {package}",
30 "---------------------------------",
31 f"Platform: Python {platform.python_version()} on "
32 f"{platform.platform()}",
33 f"Supports: {_get_supported_remotes()}",
34 ]
35
36 try:
37 repo = Repo()
38
39 # cache_dir might not exist yet (e.g. after `dvc init`), and we
40 # can't auto-create it, as it might cause issues if the user
41 # later decides to enable shared cache mode with
42 # `dvc config cache.shared group`.
43 if os.path.exists(repo.cache.local.cache_dir):
44 info.append(
45 "Cache types: {}".format(_get_linktype_support_info(repo))
46 )
47 if psutil:
48 fs_type = get_fs_type(repo.cache.local.cache_dir)
49 info.append(f"Cache directory: {fs_type}")
50 else:
51 info.append("Cache types: " + error_link("no-dvc-cache"))
52
53 info.append(f"Caches: {_get_caches(repo.cache)}")
54
55 info.append(f"Remotes: {_get_remotes(repo.config)}")
56
57 except NotDvcRepoError:
58 pass
59 except SCMError:
60 info.append("Repo: dvc, git (broken)")
61 else:
62 root_directory = repo.root_dir
63 if psutil:
64 fs_root = get_fs_type(os.path.abspath(root_directory))
65 info.append(f"Workspace directory: {fs_root}")
66 info.append("Repo: {}".format(_get_dvc_repo_info(repo)))
67 return "\n".join(info)
68
69
70 def _get_caches(cache):
71 caches = (
72 cache_type
73 for cache_type, cache_instance in cache.by_scheme()
74 if cache_instance
75 )
76
77 # Caches will be always non-empty including the local cache
78 return ", ".join(caches)
79
80
81 def _get_remotes(config):
82 schemes = (
83 get_tree_cls(get_tree_config(config, name=remote)).scheme
84 for remote in config["remote"]
85 )
86
87 return ", ".join(schemes) or "None"
88
89
90 def _get_linktype_support_info(repo):
91
92 links = {
93 "reflink": (System.reflink, None),
94 "hardlink": (System.hardlink, System.is_hardlink),
95 "symlink": (System.symlink, System.is_symlink),
96 }
97
98 fname = "." + str(uuid.uuid4())
99 src = os.path.join(repo.cache.local.cache_dir, fname)
100 open(src, "w").close()
101 dst = os.path.join(repo.root_dir, fname)
102
103 cache = []
104
105 for name, (link, is_link) in links.items():
106 try:
107 link(src, dst)
108 status = "supported"
109 if is_link and not is_link(dst):
110 status = "broken"
111 os.unlink(dst)
112 except DvcException:
113 status = "not supported"
114
115 if status == "supported":
116 cache.append(name)
117 os.remove(src)
118
119 return ", ".join(cache)
120
121
122 def _get_supported_remotes():
123
124 supported_remotes = []
125 for tree_cls in TREES:
126 if not tree_cls.get_missing_deps():
127 supported_remotes.append(tree_cls.scheme)
128
129 if len(supported_remotes) == len(TREES):
130 return "All remotes"
131
132 if len(supported_remotes) == 1:
133 return supported_remotes
134
135 return ", ".join(supported_remotes)
136
137
138 def get_fs_type(path):
139
140 partition = {
141 pathlib.Path(part.mountpoint): (part.fstype + " on " + part.device)
142 for part in psutil.disk_partitions(all=True)
143 }
144
145 path = pathlib.Path(path)
146
147 for parent in itertools.chain([path], path.parents):
148 if parent in partition:
149 return partition[parent]
150 return ("unknown", "none")
151
152
153 def _get_dvc_repo_info(self):
154 if self.config.get("core", {}).get("no_scm", False):
155 return "dvc (no_scm)"
156
157 if self.root_dir != self.scm.root_dir:
158 return "dvc (subdir), git"
159
160 return "dvc, git"
161
[end of dvc/info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dvc/info.py b/dvc/info.py
--- a/dvc/info.py
+++ b/dvc/info.py
@@ -142,7 +142,8 @@
for part in psutil.disk_partitions(all=True)
}
- path = pathlib.Path(path)
+ # need to follow the symlink: https://github.com/iterative/dvc/issues/5065
+ path = pathlib.Path(path).resolve()
for parent in itertools.chain([path], path.parents):
if parent in partition:
|
{"golden_diff": "diff --git a/dvc/info.py b/dvc/info.py\n--- a/dvc/info.py\n+++ b/dvc/info.py\n@@ -142,7 +142,8 @@\n for part in psutil.disk_partitions(all=True)\n }\n \n- path = pathlib.Path(path)\n+ # need to follow the symlink: https://github.com/iterative/dvc/issues/5065\n+ path = pathlib.Path(path).resolve()\n \n for parent in itertools.chain([path], path.parents):\n if parent in partition:\n", "issue": "dvc version: does not follow symlinks\n# Bug Report\r\n\r\n## Description\r\n\r\nThis is the `dvc version` output, where it says the cache directory is `nfs4 on storage:/home` and cache type is `symlink`.\r\n\r\n```\r\nDVC version: 1.10.2 (pip)\r\n---------------------------------\r\nPlatform: Python 3.8.3 on Linux-5.4.0-54-generic-x86_64-with-glibc2.10\r\nSupports: All remotes\r\nCache types: symlink\r\nCache directory: nfs4 on storage:/home\r\nCaches: local\r\nRemotes: s3\r\nWorkspace directory: nfs4 on storage:/home\r\nRepo: dvc, git\r\n```\r\n\r\nHowever, I do have a `~/.config/dvc/config` file that overrides this:\r\n\r\n```\r\n[core]\r\n experiments = true\r\n[cache]\r\n type = \"reflink,symlink,copy\"\r\n protected = true\r\n dir = /home/jc/ssd_cache/dvc_cache\r\n[feature]\r\n parametrization = true\r\n```\r\n\r\nAnd the actual cache dir is `/home/jc/ssd_cache/dvc_cache` as I've specified instead of `nfs4 on storage:/home`\n", "before_files": [{"content": "import itertools\nimport os\nimport pathlib\nimport platform\nimport uuid\n\nfrom dvc.exceptions import DvcException, NotDvcRepoError\nfrom dvc.repo import Repo\nfrom dvc.scm.base import SCMError\nfrom dvc.system import System\nfrom dvc.tree import TREES, get_tree_cls, get_tree_config\nfrom dvc.utils import error_link\nfrom dvc.utils.pkg import PKG\nfrom dvc.version import __version__\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nif PKG is None:\n package = \"\"\nelse:\n package = f\"({PKG})\"\n\n\ndef get_dvc_info():\n info = [\n f\"DVC version: {__version__} {package}\",\n \"---------------------------------\",\n f\"Platform: Python {platform.python_version()} on \"\n f\"{platform.platform()}\",\n f\"Supports: {_get_supported_remotes()}\",\n ]\n\n try:\n repo = Repo()\n\n # cache_dir might not exist yet (e.g. 
after `dvc init`), and we\n # can't auto-create it, as it might cause issues if the user\n # later decides to enable shared cache mode with\n # `dvc config cache.shared group`.\n if os.path.exists(repo.cache.local.cache_dir):\n info.append(\n \"Cache types: {}\".format(_get_linktype_support_info(repo))\n )\n if psutil:\n fs_type = get_fs_type(repo.cache.local.cache_dir)\n info.append(f\"Cache directory: {fs_type}\")\n else:\n info.append(\"Cache types: \" + error_link(\"no-dvc-cache\"))\n\n info.append(f\"Caches: {_get_caches(repo.cache)}\")\n\n info.append(f\"Remotes: {_get_remotes(repo.config)}\")\n\n except NotDvcRepoError:\n pass\n except SCMError:\n info.append(\"Repo: dvc, git (broken)\")\n else:\n root_directory = repo.root_dir\n if psutil:\n fs_root = get_fs_type(os.path.abspath(root_directory))\n info.append(f\"Workspace directory: {fs_root}\")\n info.append(\"Repo: {}\".format(_get_dvc_repo_info(repo)))\n return \"\\n\".join(info)\n\n\ndef _get_caches(cache):\n caches = (\n cache_type\n for cache_type, cache_instance in cache.by_scheme()\n if cache_instance\n )\n\n # Caches will be always non-empty including the local cache\n return \", \".join(caches)\n\n\ndef _get_remotes(config):\n schemes = (\n get_tree_cls(get_tree_config(config, name=remote)).scheme\n for remote in config[\"remote\"]\n )\n\n return \", \".join(schemes) or \"None\"\n\n\ndef _get_linktype_support_info(repo):\n\n links = {\n \"reflink\": (System.reflink, None),\n \"hardlink\": (System.hardlink, System.is_hardlink),\n \"symlink\": (System.symlink, System.is_symlink),\n }\n\n fname = \".\" + str(uuid.uuid4())\n src = os.path.join(repo.cache.local.cache_dir, fname)\n open(src, \"w\").close()\n dst = os.path.join(repo.root_dir, fname)\n\n cache = []\n\n for name, (link, is_link) in links.items():\n try:\n link(src, dst)\n status = \"supported\"\n if is_link and not is_link(dst):\n status = \"broken\"\n os.unlink(dst)\n except DvcException:\n status = \"not supported\"\n\n if status == \"supported\":\n cache.append(name)\n os.remove(src)\n\n return \", \".join(cache)\n\n\ndef _get_supported_remotes():\n\n supported_remotes = []\n for tree_cls in TREES:\n if not tree_cls.get_missing_deps():\n supported_remotes.append(tree_cls.scheme)\n\n if len(supported_remotes) == len(TREES):\n return \"All remotes\"\n\n if len(supported_remotes) == 1:\n return supported_remotes\n\n return \", \".join(supported_remotes)\n\n\ndef get_fs_type(path):\n\n partition = {\n pathlib.Path(part.mountpoint): (part.fstype + \" on \" + part.device)\n for part in psutil.disk_partitions(all=True)\n }\n\n path = pathlib.Path(path)\n\n for parent in itertools.chain([path], path.parents):\n if parent in partition:\n return partition[parent]\n return (\"unknown\", \"none\")\n\n\ndef _get_dvc_repo_info(self):\n if self.config.get(\"core\", {}).get(\"no_scm\", False):\n return \"dvc (no_scm)\"\n\n if self.root_dir != self.scm.root_dir:\n return \"dvc (subdir), git\"\n\n return \"dvc, git\"\n", "path": "dvc/info.py"}]}
| 2,214 | 118 |
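The fix in the record above comes down to resolving symlinks before walking `path.parents`. A minimal sketch of the difference, assuming a POSIX system and using throwaway temp paths rather than the real DVC cache layout:

```python
import pathlib
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    real_cache = pathlib.Path(tmp, "ssd_cache")
    real_cache.mkdir()
    link = pathlib.Path(tmp, "home_link")
    link.symlink_to(real_cache)  # home_link -> ssd_cache, like a symlinked cache dir

    unresolved = pathlib.Path(link, "dvc_cache")
    resolved = unresolved.resolve()  # follows the symlink, as the patched code does

    # A {mountpoint: fstype} lookup keyed on real directories only matches the
    # resolved chain; the unresolved chain still goes through the symlink name.
    print(unresolved.parent.name)  # home_link
    print(resolved.parent.name)    # ssd_cache
```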
gh_patches_debug_15501
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-8708
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Revision model rename breaks Page revision foreign key on SQLite
### Issue Summary
#8441 renamed the PageRevision model to Revision, which included a migration with [a `RenameModel` step](https://github.com/wagtail/wagtail/blob/1f43d8ef51e455b92e42447fdc190d5ec83ec53c/wagtail/migrations/0070_rename_pagerevision_revision.py#L15-L18).
On my local machine, running against SQLite in the default configuration, this renamed the table but didn't update the foreign key from the Page model. Looking at the SQL for the migration starts with:
```
% ./manage.py sqlmigrate wagtailcore 0070
BEGIN;
--
-- Rename model PageRevision to Revision
--
ALTER TABLE "wagtailcore_pagerevision" RENAME TO "wagtailcore_revision";
...
```
But if I then check the `live_revision_id` foreign key on the Page model in SQLite, it hasn't been updated, and still points to the now-renamed `wagtailcore_pagerevision` table.
```
% sqlite3 db.sqlite3
SQLite version 3.32.3 2020-06-18 14:16:19
Enter ".help" for usage hints.
sqlite> PRAGMA foreign_key_list('wagtailcore_page');
...
3|0|wagtailcore_pagerevision|live_revision_id|id|NO ACTION|NO ACTION|NONE
...
```
It looks like I'm getting hit by the `ALTER TABLE RENAME` issue described in the SQLite docs [here](https://www.sqlite.org/draft/lang_altertable.html#alter_table_rename):
> Beginning with version 3.26.0, FOREIGN KEY constraints are always converted when a table is renamed, unless the [PRAGMA legacy_alter_table=ON](https://www.sqlite.org/draft/pragma.html#pragma_legacy_alter_table) setting is engaged.
My `PRAGMA`s are defined thusly (the default values, apparently, for this version of SQLite on MacOS):
```
sqlite> PRAGMA legacy_alter_table;
1
sqlite> PRAGMA foreign_keys;
0
```
I note [this commit](https://github.com/django/django/commit/063cf98d3a6839f40c423cbd845def429c5cf0ce) that just went into Django (the dev version for 4.1) that explicitly disables `legacy_alter_table`; I wonder if I am hitting some edge case that this would fix.
Wagtail seems to have only [one other instance of `RenameModel`](https://github.com/wagtail/wagtail/blob/716bf92c2dc2da2aca5e8f5aa6768b5b087cd4b0/wagtail/contrib/search_promotions/migrations/0001_initial.py#L65) for the SearchPromotion model, but I don't think we have any foreign keys pointing to that. So this might be the first time this combination has hit Wagtail.
### Steps to Reproduce
1. Start a new project with `wagtail start myproject`
2. `cd myproject`
3. `./manage.py migrate`
4. `./manage.py createsuperuser` and create an admin user.
5. `./manage.py runserver`
6. Visit http://localhost:8000/admin/pages/add/home/homepage/3/ to create a new page (you'll be asked to log in first)
7. Fill in the title field with something like "Test". Click "Save Draft". You'll get an error: "OperationalError at /admin/pages/add/home/homepage/3/, no such table: main.wagtailcore_pagerevision"
- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes
### Technical details
- Mac OS version: Big Sur 11.6.5
- SQLite3 version: 3.32.3 2020-06-18 14:16:19 02c344aceaea0d177dd42e62c8541e3cab4a26c757ba33b3a31a43ccc7d4aapl
- Python version: 3.10.4
- Django version: 4.0.5, also happens on 3.2
- Wagtail version: main (1f43d8ef51e455b92e42447fdc190d5ec83ec53c)
</issue>
<code>
[start of wagtail/migrations/0070_rename_pagerevision_revision.py]
1 # Generated by Django 4.0.3 on 2022-04-26 12:31
2
3 from django.conf import settings
4 from django.db import migrations, models
5
6
7 class Migration(migrations.Migration):
8
9 dependencies = [
10 migrations.swappable_dependency(settings.AUTH_USER_MODEL),
11 ("wagtailcore", "0069_log_entry_jsonfield"),
12 ]
13
14 operations = [
15 migrations.RenameModel(
16 old_name="PageRevision",
17 new_name="Revision",
18 ),
19 migrations.AlterModelOptions(
20 name="revision",
21 options={"verbose_name": "revision", "verbose_name_plural": "revisions"},
22 ),
23 migrations.AlterField(
24 model_name="revision",
25 name="page",
26 field=models.CharField(max_length=255, verbose_name="object id"),
27 ),
28 migrations.RenameField(
29 model_name="revision",
30 old_name="page",
31 new_name="object_id",
32 ),
33 migrations.AddField(
34 model_name="revision",
35 name="content_type",
36 field=models.ForeignKey(
37 null=True,
38 on_delete=models.CASCADE,
39 related_name="+",
40 to="contenttypes.contenttype",
41 ),
42 ),
43 migrations.AddField(
44 model_name="revision",
45 name="base_content_type",
46 field=models.ForeignKey(
47 null=True,
48 on_delete=models.CASCADE,
49 related_name="+",
50 to="contenttypes.contenttype",
51 ),
52 ),
53 migrations.AddIndex(
54 model_name="revision",
55 index=models.Index(
56 fields=["content_type", "object_id"],
57 name="content_object_idx",
58 ),
59 ),
60 migrations.AddIndex(
61 model_name="revision",
62 index=models.Index(
63 fields=["base_content_type", "object_id"],
64 name="base_content_object_idx",
65 ),
66 ),
67 ]
68
[end of wagtail/migrations/0070_rename_pagerevision_revision.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/wagtail/migrations/0070_rename_pagerevision_revision.py b/wagtail/migrations/0070_rename_pagerevision_revision.py
--- a/wagtail/migrations/0070_rename_pagerevision_revision.py
+++ b/wagtail/migrations/0070_rename_pagerevision_revision.py
@@ -4,6 +4,12 @@
from django.db import migrations, models
+def disable_sqlite_legacy_alter_table(apps, schema_editor):
+ # Fix for https://github.com/wagtail/wagtail/issues/8635
+ if schema_editor.connection.vendor == "sqlite":
+ schema_editor.execute("PRAGMA legacy_alter_table = OFF")
+
+
class Migration(migrations.Migration):
dependencies = [
@@ -12,6 +18,10 @@
]
operations = [
+ migrations.RunPython(
+ disable_sqlite_legacy_alter_table,
+ migrations.RunPython.noop,
+ ),
migrations.RenameModel(
old_name="PageRevision",
new_name="Revision",
|
{"golden_diff": "diff --git a/wagtail/migrations/0070_rename_pagerevision_revision.py b/wagtail/migrations/0070_rename_pagerevision_revision.py\n--- a/wagtail/migrations/0070_rename_pagerevision_revision.py\n+++ b/wagtail/migrations/0070_rename_pagerevision_revision.py\n@@ -4,6 +4,12 @@\n from django.db import migrations, models\n \n \n+def disable_sqlite_legacy_alter_table(apps, schema_editor):\n+ # Fix for https://github.com/wagtail/wagtail/issues/8635\n+ if schema_editor.connection.vendor == \"sqlite\":\n+ schema_editor.execute(\"PRAGMA legacy_alter_table = OFF\")\n+\n+\n class Migration(migrations.Migration):\n \n dependencies = [\n@@ -12,6 +18,10 @@\n ]\n \n operations = [\n+ migrations.RunPython(\n+ disable_sqlite_legacy_alter_table,\n+ migrations.RunPython.noop,\n+ ),\n migrations.RenameModel(\n old_name=\"PageRevision\",\n new_name=\"Revision\",\n", "issue": "Revision model rename breaks Page revision foreign key on SQLite\n### Issue Summary\r\n\r\n#8441 renamed the PageRevision model to Revision, which included a migration with [a `RenameModel` step](https://github.com/wagtail/wagtail/blob/1f43d8ef51e455b92e42447fdc190d5ec83ec53c/wagtail/migrations/0070_rename_pagerevision_revision.py#L15-L18).\r\n\r\nOn my local machine, running against SQLite in the default configuration, this renamed the table but didn't update the foreign key from the Page model. Looking at the SQL for the migration starts with:\r\n\r\n```\r\n% ./manage.py sqlmigrate wagtailcore 0070\r\nBEGIN;\r\n--\r\n-- Rename model PageRevision to Revision\r\n--\r\nALTER TABLE \"wagtailcore_pagerevision\" RENAME TO \"wagtailcore_revision\";\r\n...\r\n```\r\n\r\nBut if I then check the `live_revision_id` foreign key on the Page model in SQLite, it hasn't been updated, and still points to the now-renamed `wagtailcore_pagerevision` table.\r\n\r\n```\r\n% sqlite3 db.sqlite3 \r\nSQLite version 3.32.3 2020-06-18 14:16:19\r\nEnter \".help\" for usage hints.\r\nsqlite> PRAGMA foreign_key_list('wagtailcore_page');\r\n...\r\n3|0|wagtailcore_pagerevision|live_revision_id|id|NO ACTION|NO ACTION|NONE\r\n...\r\n```\r\n\r\nIt looks like I'm getting hit by the `ALTER TABLE RENAME` issue described in the SQLite docs [here](https://www.sqlite.org/draft/lang_altertable.html#alter_table_rename):\r\n\r\n> Beginning with version 3.26.0, FOREIGN KEY constraints are always converted when a table is renamed, unless the [PRAGMA legacy_alter_table=ON](https://www.sqlite.org/draft/pragma.html#pragma_legacy_alter_table) setting is engaged.\r\n\r\nMy `PRAGMA`s are defined thusly (the default values, apparently, for this version of SQLite on MacOS):\r\n\r\n```\r\nsqlite> PRAGMA legacy_alter_table;\r\n1\r\nsqlite> PRAGMA foreign_keys;\r\n0\r\n```\r\n\r\nI note [this commit](https://github.com/django/django/commit/063cf98d3a6839f40c423cbd845def429c5cf0ce) that just went into Django (the dev version for 4.1) that explicitly disables `legacy_alter_table`; I wonder if I am hitting some edge case that this would fix.\r\n\r\nWagtail seems to have only [one other instance of `RenameModel`](https://github.com/wagtail/wagtail/blob/716bf92c2dc2da2aca5e8f5aa6768b5b087cd4b0/wagtail/contrib/search_promotions/migrations/0001_initial.py#L65) for the SearchPromotion model, but I don't think we have any foreign keys pointing to that. So this might be the first time this combination has hit Wagtail.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Start a new project with `wagtail start myproject`\r\n2. `cd myproject`\r\n3. `./manage.py migrate`\r\n4. 
`./manage.py createsuperuser` and create an admin user.\r\n5. `./manage.py runserver`\r\n6. Vist http://localhost:8000/admin/pages/add/home/homepage/3/ to create a new page (you'll be asked to log in first)\r\n7. Fill in the title field with something like \"Test\". Click \"Save Draft\". You'll get an error: \"OperationalError at /admin/pages/add/home/homepage/3/, no such table: main.wagtailcore_pagerevision\"\r\n\r\n- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes\r\n\r\n### Technical details\r\n\r\n- Mac OS version: Big Sur 11.6.5\r\n- SQLite3 version: 3.32.3 2020-06-18 14:16:19 02c344aceaea0d177dd42e62c8541e3cab4a26c757ba33b3a31a43ccc7d4aapl\r\n- Python version: 3.10.4\r\n- Django version: 4.0.5, also happens on 3.2\r\n- Wagtail version: main (1f43d8ef51e455b92e42447fdc190d5ec83ec53c)\r\n\n", "before_files": [{"content": "# Generated by Django 4.0.3 on 2022-04-26 12:31\n\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n (\"wagtailcore\", \"0069_log_entry_jsonfield\"),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name=\"PageRevision\",\n new_name=\"Revision\",\n ),\n migrations.AlterModelOptions(\n name=\"revision\",\n options={\"verbose_name\": \"revision\", \"verbose_name_plural\": \"revisions\"},\n ),\n migrations.AlterField(\n model_name=\"revision\",\n name=\"page\",\n field=models.CharField(max_length=255, verbose_name=\"object id\"),\n ),\n migrations.RenameField(\n model_name=\"revision\",\n old_name=\"page\",\n new_name=\"object_id\",\n ),\n migrations.AddField(\n model_name=\"revision\",\n name=\"content_type\",\n field=models.ForeignKey(\n null=True,\n on_delete=models.CASCADE,\n related_name=\"+\",\n to=\"contenttypes.contenttype\",\n ),\n ),\n migrations.AddField(\n model_name=\"revision\",\n name=\"base_content_type\",\n field=models.ForeignKey(\n null=True,\n on_delete=models.CASCADE,\n related_name=\"+\",\n to=\"contenttypes.contenttype\",\n ),\n ),\n migrations.AddIndex(\n model_name=\"revision\",\n index=models.Index(\n fields=[\"content_type\", \"object_id\"],\n name=\"content_object_idx\",\n ),\n ),\n migrations.AddIndex(\n model_name=\"revision\",\n index=models.Index(\n fields=[\"base_content_type\", \"object_id\"],\n name=\"base_content_object_idx\",\n ),\n ),\n ]\n", "path": "wagtail/migrations/0070_rename_pagerevision_revision.py"}]}
| 2,112 | 239 |
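The migration fix above hinges on SQLite's `legacy_alter_table` pragma. A minimal sketch of what that pragma changes, using an in-memory database and made-up table names rather than the Wagtail schema (the `ON` result depends on the SQLite version linked into Python):

```python
import sqlite3

def renamed_fk_target(legacy_on: bool) -> str:
    con = sqlite3.connect(":memory:")
    con.execute(f"PRAGMA legacy_alter_table = {'ON' if legacy_on else 'OFF'}")
    con.execute("CREATE TABLE revision (id INTEGER PRIMARY KEY)")
    con.execute(
        "CREATE TABLE page (id INTEGER PRIMARY KEY,"
        " live_revision_id INTEGER REFERENCES revision(id))"
    )
    con.execute("ALTER TABLE revision RENAME TO revision2")
    # Column 2 of foreign_key_list is the table the constraint points at.
    return con.execute("PRAGMA foreign_key_list('page')").fetchone()[2]

print(renamed_fk_target(legacy_on=True))   # may still print 'revision' (stale FK)
print(renamed_fk_target(legacy_on=False))  # prints 'revision2' (FK rewritten)
```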
gh_patches_debug_11462
|
rasdani/github-patches
|
git_diff
|
sublimelsp__LSP-772
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Completion inputs label instead of textEdit newText
scalameta/metals#1031 was recently merged, which adds an "implement all members" completion option. However, in Sublime it doesn't seem to show up in the same order in the completions as in the other editors; it seems to be triggered by `e` for some reason. Apart from that, if you do decide to use that completion, it inserts the `label` instead of the `newText`.
I'm on MacOS using the [Metals Language Server](https://github.com/scalameta/metals) with this SNAPSHOT `0.7.6+224-b3ea857f-SNAPSHOT`
Here is a gif illustrating what I'm talking about

And here is the snippet of lsp json that shows the completion item
```
[Trace - 08:54:53 AM] Received request 'completionItem/resolve - (30)'
Params: {
"label": "Implement all members",
"kind": 12,
"sortText": "00002",
"filterText": "e",
"insertTextFormat": 2,
"textEdit": {
"range": {
"start": {
"line": 9,
"character": 3
},
"end": {
"line": 9,
"character": 4
}
},
"newText": "def foo: Int \u003d ${0:???}\n def boo: Int \u003d ${0:???}"
},
"data": {
"target": "file:/Users/ckipp/Documents/scala-workspace/test-project/?id\u003droot",
"symbol": "local6"
}
}
[Trace - 08:54:53 AM] Sending response 'completionItem/resolve - (30)'. Processing request took 1ms
Result: {
"label": "Implement all members",
"kind": 12,
"sortText": "00002",
"filterText": "e",
"insertTextFormat": 2,
"textEdit": {
"range": {
"start": {
"line": 9,
"character": 3
},
"end": {
"line": 9,
"character": 4
}
},
"newText": "def foo: Int \u003d ${0:???}\n def boo: Int \u003d ${0:???}"
},
"data": {
"target": "file:/Users/ckipp/Documents/scala-workspace/test-project/?id\u003droot",
"symbol": "local6"
}
}
```
If I can provide any more details, just let me know!
</issue>
<code>
[start of plugin/core/completion.py]
1 from .protocol import CompletionItemKind, Range
2 from .types import Settings
3 from .logging import debug
4 try:
5 from typing import Tuple, Optional, Dict, List, Union
6 assert Tuple and Optional and Dict and List and Union and Settings
7 except ImportError:
8 pass
9
10
11 completion_item_kind_names = {v: k for k, v in CompletionItemKind.__dict__.items()}
12
13
14 def get_completion_hint(item: dict, settings: 'Settings') -> 'Optional[str]':
15 # choose hint based on availability and user preference
16 hint = None
17 if settings.completion_hint_type == "auto":
18 hint = item.get("detail")
19 if not hint:
20 kind = item.get("kind")
21 if kind:
22 hint = completion_item_kind_names[kind]
23 elif settings.completion_hint_type == "detail":
24 hint = item.get("detail")
25 elif settings.completion_hint_type == "kind":
26 kind = item.get("kind")
27 if kind:
28 hint = completion_item_kind_names.get(kind)
29 return hint
30
31
32 def format_completion(item: dict, word_col: int, settings: 'Settings') -> 'Tuple[str, str]':
33 # Sublime handles snippets automatically, so we don't have to care about insertTextFormat.
34 if settings.prefer_label_over_filter_text:
35 trigger = item["label"]
36 else:
37 trigger = item.get("filterText") or item["label"]
38
39 hint = get_completion_hint(item, settings)
40
41 # label is an alternative for insertText if neither textEdit nor insertText is provided
42 replacement = text_edit_text(item, word_col) or item.get("insertText") or trigger
43
44 if replacement[0] != trigger[0]:
45 # fix some common cases when server sends different start on label and replacement.
46 if replacement[0] == '$':
47 trigger = '$' + trigger # add missing $
48 elif replacement[0] == '-':
49 trigger = '-' + trigger # add missing -
50 elif trigger[0] == ':':
51 replacement = ':' + replacement # add missing :
52 elif trigger[0] == '$':
53 trigger = trigger[1:] # remove leading $
54 elif trigger[0] == ' ' or trigger[0] == '•':
55 trigger = trigger[1:] # remove clangd insertion indicator
56 else:
57 debug("replacement prefix does not match trigger!")
58 replacement = item.get("insertText") or trigger
59
60 if len(replacement) > 0 and replacement[0] == '$': # sublime needs leading '$' escaped.
61 replacement = '\\$' + replacement[1:]
62 # only return trigger with a hint if available
63 return "\t ".join((trigger, hint)) if hint else trigger, replacement
64
65
66 def text_edit_text(item: dict, word_col: int) -> 'Optional[str]':
67 text_edit = item.get('textEdit')
68 if text_edit:
69 edit_range, edit_text = text_edit.get("range"), text_edit.get("newText")
70 if edit_range and edit_text:
71 edit_range = Range.from_lsp(edit_range)
72
73 # debug('textEdit from col {}, {} applied at col {}'.format(
74 # edit_range.start.col, edit_range.end.col, word_col))
75
76 if edit_range.start.col <= word_col:
77 # if edit starts at current word, we can use it.
78 # if edit starts before current word, use the whole thing and we'll fix it up later.
79 return edit_text
80
81 return None
82
83
84 def parse_completion_response(response: 'Optional[Union[Dict,List]]') -> 'Tuple[List[Dict], bool]':
85 items = [] # type: List[Dict]
86 is_incomplete = False
87 if isinstance(response, dict):
88 items = response["items"] or []
89 is_incomplete = response.get("isIncomplete", False)
90 elif isinstance(response, list):
91 items = response
92 items = sorted(items, key=lambda item: item.get("sortText") or item["label"])
93 return items, is_incomplete
94
[end of plugin/core/completion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plugin/core/completion.py b/plugin/core/completion.py
--- a/plugin/core/completion.py
+++ b/plugin/core/completion.py
@@ -54,8 +54,7 @@
elif trigger[0] == ' ' or trigger[0] == '•':
trigger = trigger[1:] # remove clangd insertion indicator
else:
- debug("replacement prefix does not match trigger!")
- replacement = item.get("insertText") or trigger
+ debug("WARNING: Replacement prefix does not match trigger '{}'".format(trigger))
if len(replacement) > 0 and replacement[0] == '$': # sublime needs leading '$' escaped.
replacement = '\\$' + replacement[1:]
|
{"golden_diff": "diff --git a/plugin/core/completion.py b/plugin/core/completion.py\n--- a/plugin/core/completion.py\n+++ b/plugin/core/completion.py\n@@ -54,8 +54,7 @@\n elif trigger[0] == ' ' or trigger[0] == '\u2022':\n trigger = trigger[1:] # remove clangd insertion indicator\n else:\n- debug(\"replacement prefix does not match trigger!\")\n- replacement = item.get(\"insertText\") or trigger\n+ debug(\"WARNING: Replacement prefix does not match trigger '{}'\".format(trigger))\n \n if len(replacement) > 0 and replacement[0] == '$': # sublime needs leading '$' escaped.\n replacement = '\\\\$' + replacement[1:]\n", "issue": "Completion inputs label instead of textEdit newText\nscalameta/metals#1031 was recently merged which adds an \"implement all members\" completion option. However, in Sublime it seems to not show up in the same order in the completions as the other editors. It seems to be triggered by e for some reason. Apart from that, if you do decide to use that completion, the completion seems to instead of implementing the `newText` it implements the `label`.\r\n\r\nI'm on MacOS using the [Metals Language Server](https://github.com/scalameta/metals) with this SNAPSHOT `0.7.6+224-b3ea857f-SNAPSHOT`\r\n\r\nHere is a gif illustrating what I'm talking about\r\n\r\n\r\n\r\nAnd here is the snippet of lsp json that shows the completion item\r\n\r\n```\r\n[Trace - 08:54:53 AM] Received request 'completionItem/resolve - (30)'\r\nParams: {\r\n \"label\": \"Implement all members\",\r\n \"kind\": 12,\r\n \"sortText\": \"00002\",\r\n \"filterText\": \"e\",\r\n \"insertTextFormat\": 2,\r\n \"textEdit\": {\r\n \"range\": {\r\n \"start\": {\r\n \"line\": 9,\r\n \"character\": 3\r\n },\r\n \"end\": {\r\n \"line\": 9,\r\n \"character\": 4\r\n }\r\n },\r\n \"newText\": \"def foo: Int \\u003d ${0:???}\\n def boo: Int \\u003d ${0:???}\"\r\n },\r\n \"data\": {\r\n \"target\": \"file:/Users/ckipp/Documents/scala-workspace/test-project/?id\\u003droot\",\r\n \"symbol\": \"local6\"\r\n }\r\n}\r\n\r\n\r\n[Trace - 08:54:53 AM] Sending response 'completionItem/resolve - (30)'. 
Processing request took 1ms\r\nResult: {\r\n \"label\": \"Implement all members\",\r\n \"kind\": 12,\r\n \"sortText\": \"00002\",\r\n \"filterText\": \"e\",\r\n \"insertTextFormat\": 2,\r\n \"textEdit\": {\r\n \"range\": {\r\n \"start\": {\r\n \"line\": 9,\r\n \"character\": 3\r\n },\r\n \"end\": {\r\n \"line\": 9,\r\n \"character\": 4\r\n }\r\n },\r\n \"newText\": \"def foo: Int \\u003d ${0:???}\\n def boo: Int \\u003d ${0:???}\"\r\n },\r\n \"data\": {\r\n \"target\": \"file:/Users/ckipp/Documents/scala-workspace/test-project/?id\\u003droot\",\r\n \"symbol\": \"local6\"\r\n }\r\n}\r\n```\r\n\r\nIf I can provide any more details, just let me know!\n", "before_files": [{"content": "from .protocol import CompletionItemKind, Range\nfrom .types import Settings\nfrom .logging import debug\ntry:\n from typing import Tuple, Optional, Dict, List, Union\n assert Tuple and Optional and Dict and List and Union and Settings\nexcept ImportError:\n pass\n\n\ncompletion_item_kind_names = {v: k for k, v in CompletionItemKind.__dict__.items()}\n\n\ndef get_completion_hint(item: dict, settings: 'Settings') -> 'Optional[str]':\n # choose hint based on availability and user preference\n hint = None\n if settings.completion_hint_type == \"auto\":\n hint = item.get(\"detail\")\n if not hint:\n kind = item.get(\"kind\")\n if kind:\n hint = completion_item_kind_names[kind]\n elif settings.completion_hint_type == \"detail\":\n hint = item.get(\"detail\")\n elif settings.completion_hint_type == \"kind\":\n kind = item.get(\"kind\")\n if kind:\n hint = completion_item_kind_names.get(kind)\n return hint\n\n\ndef format_completion(item: dict, word_col: int, settings: 'Settings') -> 'Tuple[str, str]':\n # Sublime handles snippets automatically, so we don't have to care about insertTextFormat.\n if settings.prefer_label_over_filter_text:\n trigger = item[\"label\"]\n else:\n trigger = item.get(\"filterText\") or item[\"label\"]\n\n hint = get_completion_hint(item, settings)\n\n # label is an alternative for insertText if neither textEdit nor insertText is provided\n replacement = text_edit_text(item, word_col) or item.get(\"insertText\") or trigger\n\n if replacement[0] != trigger[0]:\n # fix some common cases when server sends different start on label and replacement.\n if replacement[0] == '$':\n trigger = '$' + trigger # add missing $\n elif replacement[0] == '-':\n trigger = '-' + trigger # add missing -\n elif trigger[0] == ':':\n replacement = ':' + replacement # add missing :\n elif trigger[0] == '$':\n trigger = trigger[1:] # remove leading $\n elif trigger[0] == ' ' or trigger[0] == '\u2022':\n trigger = trigger[1:] # remove clangd insertion indicator\n else:\n debug(\"replacement prefix does not match trigger!\")\n replacement = item.get(\"insertText\") or trigger\n\n if len(replacement) > 0 and replacement[0] == '$': # sublime needs leading '$' escaped.\n replacement = '\\\\$' + replacement[1:]\n # only return trigger with a hint if available\n return \"\\t \".join((trigger, hint)) if hint else trigger, replacement\n\n\ndef text_edit_text(item: dict, word_col: int) -> 'Optional[str]':\n text_edit = item.get('textEdit')\n if text_edit:\n edit_range, edit_text = text_edit.get(\"range\"), text_edit.get(\"newText\")\n if edit_range and edit_text:\n edit_range = Range.from_lsp(edit_range)\n\n # debug('textEdit from col {}, {} applied at col {}'.format(\n # edit_range.start.col, edit_range.end.col, word_col))\n\n if edit_range.start.col <= word_col:\n # if edit starts at current word, we can use it.\n # if edit 
starts before current word, use the whole thing and we'll fix it up later.\n return edit_text\n\n return None\n\n\ndef parse_completion_response(response: 'Optional[Union[Dict,List]]') -> 'Tuple[List[Dict], bool]':\n items = [] # type: List[Dict]\n is_incomplete = False\n if isinstance(response, dict):\n items = response[\"items\"] or []\n is_incomplete = response.get(\"isIncomplete\", False)\n elif isinstance(response, list):\n items = response\n items = sorted(items, key=lambda item: item.get(\"sortText\") or item[\"label\"])\n return items, is_incomplete\n", "path": "plugin/core/completion.py"}]}
| 2,274 | 159 |
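The one-line removal above is easier to see with the completion item from the issue. A minimal sketch with plain dicts (no Sublime or LSP APIs, and the old code's special-prefix fixups omitted), assuming the `prefer_label_over_filter_text` setting so the trigger is the label:

```python
item = {
    "label": "Implement all members",
    "filterText": "e",
    "insertTextFormat": 2,
    "textEdit": {"newText": "def foo: Int = ${0:???}\n  def boo: Int = ${0:???}"},
}

trigger = item["label"]
new_text = item["textEdit"]["newText"]

# Old behaviour: 'd' != 'I', no special prefix case applies, so the fallback
# threw away the server-provided edit text and inserted the label instead.
old_replacement = (
    new_text if new_text[0] == trigger[0]
    else (item.get("insertText") or trigger)
)
# Fixed behaviour: warn about the mismatch but keep the textEdit newText.
fixed_replacement = new_text

print(repr(old_replacement))    # 'Implement all members'  <- what users saw inserted
print(repr(fixed_replacement))  # 'def foo: Int = ${0:???}\n  def boo: Int = ${0:???}'
```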
gh_patches_debug_29846
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-5569
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
google.cloud.logging.handlers: send messages to stderr
Would it be ok if we do the print at https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/logging/google/cloud/logging/handlers/transports/background_thread.py#L222
to stderr instead? So it doesn't disturb the output of the software?
Thanks
</issue>
<code>
[start of logging/google/cloud/logging/handlers/transports/background_thread.py]
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Transport for Python logging handler
16
17 Uses a background worker to log to Stackdriver Logging asynchronously.
18 """
19
20 from __future__ import print_function
21
22 import atexit
23 import logging
24 import threading
25 import time
26
27 from six.moves import range
28 from six.moves import queue
29
30 from google.cloud.logging.handlers.transports.base import Transport
31
32 _DEFAULT_GRACE_PERIOD = 5.0 # Seconds
33 _DEFAULT_MAX_BATCH_SIZE = 10
34 _DEFAULT_MAX_LATENCY = 0 # Seconds
35 _WORKER_THREAD_NAME = 'google.cloud.logging.Worker'
36 _WORKER_TERMINATOR = object()
37 _LOGGER = logging.getLogger(__name__)
38
39
40 def _get_many(queue_, max_items=None, max_latency=0):
41 """Get multiple items from a Queue.
42
43 Gets at least one (blocking) and at most ``max_items`` items
44 (non-blocking) from a given Queue. Does not mark the items as done.
45
46 :type queue_: :class:`~queue.Queue`
47 :param queue_: The Queue to get items from.
48
49 :type max_items: int
50 :param max_items: The maximum number of items to get. If ``None``, then all
51 available items in the queue are returned.
52
53 :type max_latency: float
54 :param max_latency: The maximum number of seconds to wait for more than one
55 item from a queue. This number includes the time required to retrieve
56 the first item.
57
58 :rtype: Sequence
59 :returns: A sequence of items retrieved from the queue.
60 """
61 start = time.time()
62 # Always return at least one item.
63 items = [queue_.get()]
64 while max_items is None or len(items) < max_items:
65 try:
66 elapsed = time.time() - start
67 timeout = max(0, max_latency - elapsed)
68 items.append(queue_.get(timeout=timeout))
69 except queue.Empty:
70 break
71 return items
72
73
74 class _Worker(object):
75 """A background thread that writes batches of log entries.
76
77 :type cloud_logger: :class:`~google.cloud.logging.logger.Logger`
78 :param cloud_logger: The logger to send entries to.
79
80 :type grace_period: float
81 :param grace_period: The amount of time to wait for pending logs to
82 be submitted when the process is shutting down.
83
84 :type max_batch_size: int
85 :param max_batch_size: The maximum number of items to send at a time
86 in the background thread.
87
88 :type max_latency: float
89 :param max_latency: The amount of time to wait for new logs before
90 sending a new batch. It is strongly recommended to keep this smaller
91 than the grace_period. This means this is effectively the longest
92 amount of time the background thread will hold onto log entries
93 before sending them to the server.
94 """
95
96 def __init__(self, cloud_logger, grace_period=_DEFAULT_GRACE_PERIOD,
97 max_batch_size=_DEFAULT_MAX_BATCH_SIZE,
98 max_latency=_DEFAULT_MAX_LATENCY):
99 self._cloud_logger = cloud_logger
100 self._grace_period = grace_period
101 self._max_batch_size = max_batch_size
102 self._max_latency = max_latency
103 self._queue = queue.Queue(0)
104 self._operational_lock = threading.Lock()
105 self._thread = None
106
107 @property
108 def is_alive(self):
109 """Returns True is the background thread is running."""
110 return self._thread is not None and self._thread.is_alive()
111
112 def _safely_commit_batch(self, batch):
113 total_logs = len(batch.entries)
114
115 try:
116 if total_logs > 0:
117 batch.commit()
118 _LOGGER.debug('Submitted %d logs', total_logs)
119 except Exception:
120 _LOGGER.error(
121 'Failed to submit %d logs.', total_logs, exc_info=True)
122
123 def _thread_main(self):
124 """The entry point for the worker thread.
125
126 Pulls pending log entries off the queue and writes them in batches to
127 the Cloud Logger.
128 """
129 _LOGGER.debug('Background thread started.')
130
131 quit_ = False
132 while True:
133 batch = self._cloud_logger.batch()
134 items = _get_many(
135 self._queue, max_items=self._max_batch_size,
136 max_latency=self._max_latency)
137
138 for item in items:
139 if item is _WORKER_TERMINATOR:
140 quit_ = True
141 # Continue processing items, don't break, try to process
142 # all items we got back before quitting.
143 else:
144 batch.log_struct(**item)
145
146 self._safely_commit_batch(batch)
147
148 for _ in range(len(items)):
149 self._queue.task_done()
150
151 if quit_:
152 break
153
154 _LOGGER.debug('Background thread exited gracefully.')
155
156 def start(self):
157 """Starts the background thread.
158
159 Additionally, this registers a handler for process exit to attempt
160 to send any pending log entries before shutdown.
161 """
162 with self._operational_lock:
163 if self.is_alive:
164 return
165
166 self._thread = threading.Thread(
167 target=self._thread_main,
168 name=_WORKER_THREAD_NAME)
169 self._thread.daemon = True
170 self._thread.start()
171 atexit.register(self._main_thread_terminated)
172
173 def stop(self, grace_period=None):
174 """Signals the background thread to stop.
175
176 This does not terminate the background thread. It simply queues the
177 stop signal. If the main process exits before the background thread
178 processes the stop signal, it will be terminated without finishing
179 work. The ``grace_period`` parameter will give the background
180 thread some time to finish processing before this function returns.
181
182 :type grace_period: float
183 :param grace_period: If specified, this method will block up to this
184 many seconds to allow the background thread to finish work before
185 returning.
186
187 :rtype: bool
188 :returns: True if the thread terminated. False if the thread is still
189 running.
190 """
191 if not self.is_alive:
192 return True
193
194 with self._operational_lock:
195 self._queue.put_nowait(_WORKER_TERMINATOR)
196
197 if grace_period is not None:
198 print('Waiting up to %d seconds.' % (grace_period,))
199
200 self._thread.join(timeout=grace_period)
201
202 # Check this before disowning the thread, because after we disown
203 # the thread is_alive will be False regardless of if the thread
204 # exited or not.
205 success = not self.is_alive
206
207 self._thread = None
208
209 return success
210
211 def _main_thread_terminated(self):
212 """Callback that attempts to send pending logs before termination."""
213 if not self.is_alive:
214 return
215
216 if not self._queue.empty():
217 print(
218 'Program shutting down, attempting to send %d queued log '
219 'entries to Stackdriver Logging...' % (self._queue.qsize(),))
220
221 if self.stop(self._grace_period):
222 print('Sent all pending logs.')
223 else:
224 print('Failed to send %d pending logs.' % (self._queue.qsize(),))
225
226 def enqueue(self, record, message, resource=None, labels=None):
227 """Queues a log entry to be written by the background thread.
228
229 :type record: :class:`logging.LogRecord`
230 :param record: Python log record that the handler was called with.
231
232 :type message: str
233 :param message: The message from the ``LogRecord`` after being
234 formatted by the associated log formatters.
235
236 :type resource: :class:`~google.cloud.logging.resource.Resource`
237 :param resource: (Optional) Monitored resource of the entry
238
239 :type labels: dict
240 :param labels: (Optional) Mapping of labels for the entry.
241 """
242 self._queue.put_nowait({
243 'info': {
244 'message': message,
245 'python_logger': record.name,
246 },
247 'severity': record.levelname,
248 'resource': resource,
249 'labels': labels,
250 })
251
252 def flush(self):
253 """Submit any pending log records."""
254 self._queue.join()
255
256
257 class BackgroundThreadTransport(Transport):
258 """Asynchronous transport that uses a background thread.
259
260 :type client: :class:`~google.cloud.logging.client.Client`
261 :param client: The Logging client.
262
263 :type name: str
264 :param name: the name of the logger.
265
266 :type grace_period: float
267 :param grace_period: The amount of time to wait for pending logs to
268 be submitted when the process is shutting down.
269
270 :type batch_size: int
271 :param batch_size: The maximum number of items to send at a time in the
272 background thread.
273
274 :type max_latency: float
275 :param max_latency: The amount of time to wait for new logs before
276 sending a new batch. It is strongly recommended to keep this smaller
277 than the grace_period. This means this is effectively the longest
278 amount of time the background thread will hold onto log entries
279 before sending them to the server.
280 """
281
282 def __init__(self, client, name, grace_period=_DEFAULT_GRACE_PERIOD,
283 batch_size=_DEFAULT_MAX_BATCH_SIZE,
284 max_latency=_DEFAULT_MAX_LATENCY):
285 self.client = client
286 logger = self.client.logger(name)
287 self.worker = _Worker(logger,
288 grace_period=grace_period,
289 max_batch_size=batch_size,
290 max_latency=max_latency)
291 self.worker.start()
292
293 def send(self, record, message, resource=None, labels=None):
294 """Overrides Transport.send().
295
296 :type record: :class:`logging.LogRecord`
297 :param record: Python log record that the handler was called with.
298
299 :type message: str
300 :param message: The message from the ``LogRecord`` after being
301 formatted by the associated log formatters.
302
303 :type resource: :class:`~google.cloud.logging.resource.Resource`
304 :param resource: (Optional) Monitored resource of the entry.
305
306 :type labels: dict
307 :param labels: (Optional) Mapping of labels for the entry.
308 """
309 self.worker.enqueue(record, message, resource=resource, labels=labels)
310
311 def flush(self):
312 """Submit any pending log records."""
313 self.worker.flush()
314
[end of logging/google/cloud/logging/handlers/transports/background_thread.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/logging/google/cloud/logging/handlers/transports/background_thread.py b/logging/google/cloud/logging/handlers/transports/background_thread.py
--- a/logging/google/cloud/logging/handlers/transports/background_thread.py
+++ b/logging/google/cloud/logging/handlers/transports/background_thread.py
@@ -21,6 +21,7 @@
import atexit
import logging
+import sys
import threading
import time
@@ -195,7 +196,9 @@
self._queue.put_nowait(_WORKER_TERMINATOR)
if grace_period is not None:
- print('Waiting up to %d seconds.' % (grace_period,))
+ print(
+ 'Waiting up to %d seconds.' % (grace_period,),
+ file=sys.stderr)
self._thread.join(timeout=grace_period)
@@ -216,12 +219,15 @@
if not self._queue.empty():
print(
'Program shutting down, attempting to send %d queued log '
- 'entries to Stackdriver Logging...' % (self._queue.qsize(),))
+ 'entries to Stackdriver Logging...' % (self._queue.qsize(),),
+ file=sys.stderr)
if self.stop(self._grace_period):
- print('Sent all pending logs.')
+ print('Sent all pending logs.', file=sys.stderr)
else:
- print('Failed to send %d pending logs.' % (self._queue.qsize(),))
+ print(
+ 'Failed to send %d pending logs.' % (self._queue.qsize(),),
+ file=sys.stderr)
def enqueue(self, record, message, resource=None, labels=None):
"""Queues a log entry to be written by the background thread.
|
{"golden_diff": "diff --git a/logging/google/cloud/logging/handlers/transports/background_thread.py b/logging/google/cloud/logging/handlers/transports/background_thread.py\n--- a/logging/google/cloud/logging/handlers/transports/background_thread.py\n+++ b/logging/google/cloud/logging/handlers/transports/background_thread.py\n@@ -21,6 +21,7 @@\n \n import atexit\n import logging\n+import sys\n import threading\n import time\n \n@@ -195,7 +196,9 @@\n self._queue.put_nowait(_WORKER_TERMINATOR)\n \n if grace_period is not None:\n- print('Waiting up to %d seconds.' % (grace_period,))\n+ print(\n+ 'Waiting up to %d seconds.' % (grace_period,),\n+ file=sys.stderr)\n \n self._thread.join(timeout=grace_period)\n \n@@ -216,12 +219,15 @@\n if not self._queue.empty():\n print(\n 'Program shutting down, attempting to send %d queued log '\n- 'entries to Stackdriver Logging...' % (self._queue.qsize(),))\n+ 'entries to Stackdriver Logging...' % (self._queue.qsize(),),\n+ file=sys.stderr)\n \n if self.stop(self._grace_period):\n- print('Sent all pending logs.')\n+ print('Sent all pending logs.', file=sys.stderr)\n else:\n- print('Failed to send %d pending logs.' % (self._queue.qsize(),))\n+ print(\n+ 'Failed to send %d pending logs.' % (self._queue.qsize(),),\n+ file=sys.stderr)\n \n def enqueue(self, record, message, resource=None, labels=None):\n \"\"\"Queues a log entry to be written by the background thread.\n", "issue": "google.cloud.logging.handlers: send messages to stderr\nWould it be ok if we do the print at https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/logging/google/cloud/logging/handlers/transports/background_thread.py#L222\r\nto stderr instead? So it doesn't disturb the output of the software?\r\n\r\nThanks\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transport for Python logging handler\n\nUses a background worker to log to Stackdriver Logging asynchronously.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport atexit\nimport logging\nimport threading\nimport time\n\nfrom six.moves import range\nfrom six.moves import queue\n\nfrom google.cloud.logging.handlers.transports.base import Transport\n\n_DEFAULT_GRACE_PERIOD = 5.0 # Seconds\n_DEFAULT_MAX_BATCH_SIZE = 10\n_DEFAULT_MAX_LATENCY = 0 # Seconds\n_WORKER_THREAD_NAME = 'google.cloud.logging.Worker'\n_WORKER_TERMINATOR = object()\n_LOGGER = logging.getLogger(__name__)\n\n\ndef _get_many(queue_, max_items=None, max_latency=0):\n \"\"\"Get multiple items from a Queue.\n\n Gets at least one (blocking) and at most ``max_items`` items\n (non-blocking) from a given Queue. Does not mark the items as done.\n\n :type queue_: :class:`~queue.Queue`\n :param queue_: The Queue to get items from.\n\n :type max_items: int\n :param max_items: The maximum number of items to get. 
If ``None``, then all\n available items in the queue are returned.\n\n :type max_latency: float\n :param max_latency: The maximum number of seconds to wait for more than one\n item from a queue. This number includes the time required to retrieve\n the first item.\n\n :rtype: Sequence\n :returns: A sequence of items retrieved from the queue.\n \"\"\"\n start = time.time()\n # Always return at least one item.\n items = [queue_.get()]\n while max_items is None or len(items) < max_items:\n try:\n elapsed = time.time() - start\n timeout = max(0, max_latency - elapsed)\n items.append(queue_.get(timeout=timeout))\n except queue.Empty:\n break\n return items\n\n\nclass _Worker(object):\n \"\"\"A background thread that writes batches of log entries.\n\n :type cloud_logger: :class:`~google.cloud.logging.logger.Logger`\n :param cloud_logger: The logger to send entries to.\n\n :type grace_period: float\n :param grace_period: The amount of time to wait for pending logs to\n be submitted when the process is shutting down.\n\n :type max_batch_size: int\n :param max_batch_size: The maximum number of items to send at a time\n in the background thread.\n\n :type max_latency: float\n :param max_latency: The amount of time to wait for new logs before\n sending a new batch. It is strongly recommended to keep this smaller\n than the grace_period. This means this is effectively the longest\n amount of time the background thread will hold onto log entries\n before sending them to the server.\n \"\"\"\n\n def __init__(self, cloud_logger, grace_period=_DEFAULT_GRACE_PERIOD,\n max_batch_size=_DEFAULT_MAX_BATCH_SIZE,\n max_latency=_DEFAULT_MAX_LATENCY):\n self._cloud_logger = cloud_logger\n self._grace_period = grace_period\n self._max_batch_size = max_batch_size\n self._max_latency = max_latency\n self._queue = queue.Queue(0)\n self._operational_lock = threading.Lock()\n self._thread = None\n\n @property\n def is_alive(self):\n \"\"\"Returns True is the background thread is running.\"\"\"\n return self._thread is not None and self._thread.is_alive()\n\n def _safely_commit_batch(self, batch):\n total_logs = len(batch.entries)\n\n try:\n if total_logs > 0:\n batch.commit()\n _LOGGER.debug('Submitted %d logs', total_logs)\n except Exception:\n _LOGGER.error(\n 'Failed to submit %d logs.', total_logs, exc_info=True)\n\n def _thread_main(self):\n \"\"\"The entry point for the worker thread.\n\n Pulls pending log entries off the queue and writes them in batches to\n the Cloud Logger.\n \"\"\"\n _LOGGER.debug('Background thread started.')\n\n quit_ = False\n while True:\n batch = self._cloud_logger.batch()\n items = _get_many(\n self._queue, max_items=self._max_batch_size,\n max_latency=self._max_latency)\n\n for item in items:\n if item is _WORKER_TERMINATOR:\n quit_ = True\n # Continue processing items, don't break, try to process\n # all items we got back before quitting.\n else:\n batch.log_struct(**item)\n\n self._safely_commit_batch(batch)\n\n for _ in range(len(items)):\n self._queue.task_done()\n\n if quit_:\n break\n\n _LOGGER.debug('Background thread exited gracefully.')\n\n def start(self):\n \"\"\"Starts the background thread.\n\n Additionally, this registers a handler for process exit to attempt\n to send any pending log entries before shutdown.\n \"\"\"\n with self._operational_lock:\n if self.is_alive:\n return\n\n self._thread = threading.Thread(\n target=self._thread_main,\n name=_WORKER_THREAD_NAME)\n self._thread.daemon = True\n self._thread.start()\n atexit.register(self._main_thread_terminated)\n\n def 
stop(self, grace_period=None):\n \"\"\"Signals the background thread to stop.\n\n This does not terminate the background thread. It simply queues the\n stop signal. If the main process exits before the background thread\n processes the stop signal, it will be terminated without finishing\n work. The ``grace_period`` parameter will give the background\n thread some time to finish processing before this function returns.\n\n :type grace_period: float\n :param grace_period: If specified, this method will block up to this\n many seconds to allow the background thread to finish work before\n returning.\n\n :rtype: bool\n :returns: True if the thread terminated. False if the thread is still\n running.\n \"\"\"\n if not self.is_alive:\n return True\n\n with self._operational_lock:\n self._queue.put_nowait(_WORKER_TERMINATOR)\n\n if grace_period is not None:\n print('Waiting up to %d seconds.' % (grace_period,))\n\n self._thread.join(timeout=grace_period)\n\n # Check this before disowning the thread, because after we disown\n # the thread is_alive will be False regardless of if the thread\n # exited or not.\n success = not self.is_alive\n\n self._thread = None\n\n return success\n\n def _main_thread_terminated(self):\n \"\"\"Callback that attempts to send pending logs before termination.\"\"\"\n if not self.is_alive:\n return\n\n if not self._queue.empty():\n print(\n 'Program shutting down, attempting to send %d queued log '\n 'entries to Stackdriver Logging...' % (self._queue.qsize(),))\n\n if self.stop(self._grace_period):\n print('Sent all pending logs.')\n else:\n print('Failed to send %d pending logs.' % (self._queue.qsize(),))\n\n def enqueue(self, record, message, resource=None, labels=None):\n \"\"\"Queues a log entry to be written by the background thread.\n\n :type record: :class:`logging.LogRecord`\n :param record: Python log record that the handler was called with.\n\n :type message: str\n :param message: The message from the ``LogRecord`` after being\n formatted by the associated log formatters.\n\n :type resource: :class:`~google.cloud.logging.resource.Resource`\n :param resource: (Optional) Monitored resource of the entry\n\n :type labels: dict\n :param labels: (Optional) Mapping of labels for the entry.\n \"\"\"\n self._queue.put_nowait({\n 'info': {\n 'message': message,\n 'python_logger': record.name,\n },\n 'severity': record.levelname,\n 'resource': resource,\n 'labels': labels,\n })\n\n def flush(self):\n \"\"\"Submit any pending log records.\"\"\"\n self._queue.join()\n\n\nclass BackgroundThreadTransport(Transport):\n \"\"\"Asynchronous transport that uses a background thread.\n\n :type client: :class:`~google.cloud.logging.client.Client`\n :param client: The Logging client.\n\n :type name: str\n :param name: the name of the logger.\n\n :type grace_period: float\n :param grace_period: The amount of time to wait for pending logs to\n be submitted when the process is shutting down.\n\n :type batch_size: int\n :param batch_size: The maximum number of items to send at a time in the\n background thread.\n\n :type max_latency: float\n :param max_latency: The amount of time to wait for new logs before\n sending a new batch. It is strongly recommended to keep this smaller\n than the grace_period. 
This means this is effectively the longest\n amount of time the background thread will hold onto log entries\n before sending them to the server.\n \"\"\"\n\n def __init__(self, client, name, grace_period=_DEFAULT_GRACE_PERIOD,\n batch_size=_DEFAULT_MAX_BATCH_SIZE,\n max_latency=_DEFAULT_MAX_LATENCY):\n self.client = client\n logger = self.client.logger(name)\n self.worker = _Worker(logger,\n grace_period=grace_period,\n max_batch_size=batch_size,\n max_latency=max_latency)\n self.worker.start()\n\n def send(self, record, message, resource=None, labels=None):\n \"\"\"Overrides Transport.send().\n\n :type record: :class:`logging.LogRecord`\n :param record: Python log record that the handler was called with.\n\n :type message: str\n :param message: The message from the ``LogRecord`` after being\n formatted by the associated log formatters.\n\n :type resource: :class:`~google.cloud.logging.resource.Resource`\n :param resource: (Optional) Monitored resource of the entry.\n\n :type labels: dict\n :param labels: (Optional) Mapping of labels for the entry.\n \"\"\"\n self.worker.enqueue(record, message, resource=resource, labels=labels)\n\n def flush(self):\n \"\"\"Submit any pending log records.\"\"\"\n self.worker.flush()\n", "path": "logging/google/cloud/logging/handlers/transports/background_thread.py"}]}
| 3,809 | 383 |
gh_patches_debug_25655
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-2949
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
not able to search for a user un-logged in
No option to search for a user without being logged in, even though the non-logged-in search field says books or users
**Screenshots**


**Instance**
bookwyrm.social
---
**Desktop (please complete the following information):**
iOS 12, Firefox
</issue>
<code>
[start of bookwyrm/views/search.py]
1 """ search views"""
2 import re
3
4 from django.contrib.postgres.search import TrigramSimilarity
5 from django.core.paginator import Paginator
6 from django.db.models.functions import Greatest
7 from django.http import JsonResponse
8 from django.template.response import TemplateResponse
9 from django.views import View
10
11 from csp.decorators import csp_update
12
13 from bookwyrm import models
14 from bookwyrm.connectors import connector_manager
15 from bookwyrm.book_search import search, format_search_result
16 from bookwyrm.settings import PAGE_LENGTH
17 from bookwyrm.utils import regex
18 from .helpers import is_api_request
19 from .helpers import handle_remote_webfinger
20
21
22 # pylint: disable= no-self-use
23 class Search(View):
24 """search users or books"""
25
26 @csp_update(IMG_SRC="*")
27 def get(self, request):
28 """that search bar up top"""
29 if is_api_request(request):
30 return api_book_search(request)
31
32 query = request.GET.get("q")
33 if not query:
34 return TemplateResponse(request, "search/book.html")
35
36 search_type = request.GET.get("type")
37 if query and not search_type:
38 search_type = "user" if "@" in query else "book"
39
40 endpoints = {
41 "book": book_search,
42 "user": user_search,
43 "list": list_search,
44 }
45 if not search_type in endpoints:
46 search_type = "book"
47
48 return endpoints[search_type](request)
49
50
51 def api_book_search(request):
52 """Return books via API response"""
53 query = request.GET.get("q")
54 query = isbn_check(query)
55 min_confidence = request.GET.get("min_confidence", 0)
56 # only return local book results via json so we don't cascade
57 book_results = search(query, min_confidence=min_confidence)
58 return JsonResponse(
59 [format_search_result(r) for r in book_results[:10]], safe=False
60 )
61
62
63 def book_search(request):
64 """the real business is elsewhere"""
65 query = request.GET.get("q")
66 # check if query is isbn
67 query = isbn_check(query)
68 min_confidence = request.GET.get("min_confidence", 0)
69 search_remote = request.GET.get("remote", False) and request.user.is_authenticated
70
71 # try a local-only search
72 local_results = search(query, min_confidence=min_confidence)
73 paginated = Paginator(local_results, PAGE_LENGTH)
74 page = paginated.get_page(request.GET.get("page"))
75 data = {
76 "query": query,
77 "results": page,
78 "type": "book",
79 "remote": search_remote,
80 "page_range": paginated.get_elided_page_range(
81 page.number, on_each_side=2, on_ends=1
82 ),
83 }
84 # if a logged in user requested remote results or got no local results, try remote
85 if request.user.is_authenticated and (not local_results or search_remote):
86 data["remote_results"] = connector_manager.search(
87 query, min_confidence=min_confidence
88 )
89 data["remote"] = True
90 return TemplateResponse(request, "search/book.html", data)
91
92
93 def user_search(request):
94 """cool kids members only user search"""
95 viewer = request.user
96 query = request.GET.get("q")
97 query = query.strip()
98 data = {"type": "user", "query": query}
99 # logged out viewers can't search users
100 if not viewer.is_authenticated:
101 return TemplateResponse(request, "search/user.html", data)
102
103 # use webfinger for mastodon style [email protected] username to load the user if
104 # they don't exist locally (handle_remote_webfinger will check the db)
105 if re.match(regex.FULL_USERNAME, query):
106 handle_remote_webfinger(query)
107
108 results = (
109 models.User.viewer_aware_objects(viewer)
110 .annotate(
111 similarity=Greatest(
112 TrigramSimilarity("username", query),
113 TrigramSimilarity("localname", query),
114 )
115 )
116 .filter(
117 similarity__gt=0.5,
118 )
119 .order_by("-similarity")
120 )
121 paginated = Paginator(results, PAGE_LENGTH)
122 page = paginated.get_page(request.GET.get("page"))
123 data["results"] = page
124 data["page_range"] = paginated.get_elided_page_range(
125 page.number, on_each_side=2, on_ends=1
126 )
127 return TemplateResponse(request, "search/user.html", data)
128
129
130 def list_search(request):
131 """any relevent lists?"""
132 query = request.GET.get("q")
133 data = {"query": query, "type": "list"}
134 results = (
135 models.List.privacy_filter(
136 request.user,
137 privacy_levels=["public", "followers"],
138 )
139 .annotate(
140 similarity=Greatest(
141 TrigramSimilarity("name", query),
142 TrigramSimilarity("description", query),
143 )
144 )
145 .filter(
146 similarity__gt=0.1,
147 )
148 .order_by("-similarity")
149 )
150 paginated = Paginator(results, PAGE_LENGTH)
151 page = paginated.get_page(request.GET.get("page"))
152 data["results"] = page
153 data["page_range"] = paginated.get_elided_page_range(
154 page.number, on_each_side=2, on_ends=1
155 )
156 return TemplateResponse(request, "search/list.html", data)
157
158
159 def isbn_check(query):
160 """isbn10 or isbn13 check, if so remove separators"""
161 if query:
162 su_num = re.sub(r"(?<=\d)\D(?=\d|[xX])", "", query)
163 if len(su_num) == 13 and su_num.isdecimal():
164 # Multiply every other digit by 3
165 # Add these numbers and the other digits
166 product = sum(int(ch) for ch in su_num[::2]) + sum(
167 int(ch) * 3 for ch in su_num[1::2]
168 )
169 if product % 10 == 0:
170 return su_num
171 elif (
172 len(su_num) == 10
173 and su_num[:-1].isdecimal()
174 and (su_num[-1].isdecimal() or su_num[-1].lower() == "x")
175 ):
176 product = 0
177 # Iterate through code_string
178 for i in range(9):
179 # for each character, multiply by a different decreasing number: 10 - x
180 product = product + int(su_num[i]) * (10 - i)
181 # Handle last character
182 if su_num[9].lower() == "x":
183 product += 10
184 else:
185 product += int(su_num[9])
186 if product % 11 == 0:
187 return su_num
188 return query
189
[end of bookwyrm/views/search.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bookwyrm/views/search.py b/bookwyrm/views/search.py
--- a/bookwyrm/views/search.py
+++ b/bookwyrm/views/search.py
@@ -91,18 +91,15 @@
def user_search(request):
- """cool kids members only user search"""
+ """user search: search for a user"""
viewer = request.user
query = request.GET.get("q")
query = query.strip()
data = {"type": "user", "query": query}
- # logged out viewers can't search users
- if not viewer.is_authenticated:
- return TemplateResponse(request, "search/user.html", data)
# use webfinger for mastodon style [email protected] username to load the user if
# they don't exist locally (handle_remote_webfinger will check the db)
- if re.match(regex.FULL_USERNAME, query):
+ if re.match(regex.FULL_USERNAME, query) and viewer.is_authenticated:
handle_remote_webfinger(query)
results = (
@@ -118,6 +115,11 @@
)
.order_by("-similarity")
)
+
+ # don't expose remote users
+ if not viewer.is_authenticated:
+ results = results.filter(local=True)
+
paginated = Paginator(results, PAGE_LENGTH)
page = paginated.get_page(request.GET.get("page"))
data["results"] = page
|
{"golden_diff": "diff --git a/bookwyrm/views/search.py b/bookwyrm/views/search.py\n--- a/bookwyrm/views/search.py\n+++ b/bookwyrm/views/search.py\n@@ -91,18 +91,15 @@\n \n \n def user_search(request):\n- \"\"\"cool kids members only user search\"\"\"\n+ \"\"\"user search: search for a user\"\"\"\n viewer = request.user\n query = request.GET.get(\"q\")\n query = query.strip()\n data = {\"type\": \"user\", \"query\": query}\n- # logged out viewers can't search users\n- if not viewer.is_authenticated:\n- return TemplateResponse(request, \"search/user.html\", data)\n \n # use webfinger for mastodon style [email protected] username to load the user if\n # they don't exist locally (handle_remote_webfinger will check the db)\n- if re.match(regex.FULL_USERNAME, query):\n+ if re.match(regex.FULL_USERNAME, query) and viewer.is_authenticated:\n handle_remote_webfinger(query)\n \n results = (\n@@ -118,6 +115,11 @@\n )\n .order_by(\"-similarity\")\n )\n+\n+ # don't expose remote users\n+ if not viewer.is_authenticated:\n+ results = results.filter(local=True)\n+\n paginated = Paginator(results, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data[\"results\"] = page\n", "issue": "not able to search for a user un-logged in\nNo option to search for a user without being logged in even though it says books or users in the non logged in search field \r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n**Instance**\r\nbookwyrm.social\r\n\r\n---\r\n\r\n**Desktop (please complete the following information):**\r\niOS 12, Firefox\r\n\n", "before_files": [{"content": "\"\"\" search views\"\"\"\nimport re\n\nfrom django.contrib.postgres.search import TrigramSimilarity\nfrom django.core.paginator import Paginator\nfrom django.db.models.functions import Greatest\nfrom django.http import JsonResponse\nfrom django.template.response import TemplateResponse\nfrom django.views import View\n\nfrom csp.decorators import csp_update\n\nfrom bookwyrm import models\nfrom bookwyrm.connectors import connector_manager\nfrom bookwyrm.book_search import search, format_search_result\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom bookwyrm.utils import regex\nfrom .helpers import is_api_request\nfrom .helpers import handle_remote_webfinger\n\n\n# pylint: disable= no-self-use\nclass Search(View):\n \"\"\"search users or books\"\"\"\n\n @csp_update(IMG_SRC=\"*\")\n def get(self, request):\n \"\"\"that search bar up top\"\"\"\n if is_api_request(request):\n return api_book_search(request)\n\n query = request.GET.get(\"q\")\n if not query:\n return TemplateResponse(request, \"search/book.html\")\n\n search_type = request.GET.get(\"type\")\n if query and not search_type:\n search_type = \"user\" if \"@\" in query else \"book\"\n\n endpoints = {\n \"book\": book_search,\n \"user\": user_search,\n \"list\": list_search,\n }\n if not search_type in endpoints:\n search_type = \"book\"\n\n return endpoints[search_type](request)\n\n\ndef api_book_search(request):\n \"\"\"Return books via API response\"\"\"\n query = request.GET.get(\"q\")\n query = isbn_check(query)\n min_confidence = request.GET.get(\"min_confidence\", 0)\n # only return local book results via json so we don't cascade\n book_results = search(query, min_confidence=min_confidence)\n return JsonResponse(\n [format_search_result(r) for r in book_results[:10]], safe=False\n )\n\n\ndef book_search(request):\n \"\"\"the real business is elsewhere\"\"\"\n query = request.GET.get(\"q\")\n # check if query is isbn\n query = isbn_check(query)\n min_confidence = 
request.GET.get(\"min_confidence\", 0)\n search_remote = request.GET.get(\"remote\", False) and request.user.is_authenticated\n\n # try a local-only search\n local_results = search(query, min_confidence=min_confidence)\n paginated = Paginator(local_results, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"query\": query,\n \"results\": page,\n \"type\": \"book\",\n \"remote\": search_remote,\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n }\n # if a logged in user requested remote results or got no local results, try remote\n if request.user.is_authenticated and (not local_results or search_remote):\n data[\"remote_results\"] = connector_manager.search(\n query, min_confidence=min_confidence\n )\n data[\"remote\"] = True\n return TemplateResponse(request, \"search/book.html\", data)\n\n\ndef user_search(request):\n \"\"\"cool kids members only user search\"\"\"\n viewer = request.user\n query = request.GET.get(\"q\")\n query = query.strip()\n data = {\"type\": \"user\", \"query\": query}\n # logged out viewers can't search users\n if not viewer.is_authenticated:\n return TemplateResponse(request, \"search/user.html\", data)\n\n # use webfinger for mastodon style [email protected] username to load the user if\n # they don't exist locally (handle_remote_webfinger will check the db)\n if re.match(regex.FULL_USERNAME, query):\n handle_remote_webfinger(query)\n\n results = (\n models.User.viewer_aware_objects(viewer)\n .annotate(\n similarity=Greatest(\n TrigramSimilarity(\"username\", query),\n TrigramSimilarity(\"localname\", query),\n )\n )\n .filter(\n similarity__gt=0.5,\n )\n .order_by(\"-similarity\")\n )\n paginated = Paginator(results, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data[\"results\"] = page\n data[\"page_range\"] = paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n )\n return TemplateResponse(request, \"search/user.html\", data)\n\n\ndef list_search(request):\n \"\"\"any relevent lists?\"\"\"\n query = request.GET.get(\"q\")\n data = {\"query\": query, \"type\": \"list\"}\n results = (\n models.List.privacy_filter(\n request.user,\n privacy_levels=[\"public\", \"followers\"],\n )\n .annotate(\n similarity=Greatest(\n TrigramSimilarity(\"name\", query),\n TrigramSimilarity(\"description\", query),\n )\n )\n .filter(\n similarity__gt=0.1,\n )\n .order_by(\"-similarity\")\n )\n paginated = Paginator(results, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data[\"results\"] = page\n data[\"page_range\"] = paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n )\n return TemplateResponse(request, \"search/list.html\", data)\n\n\ndef isbn_check(query):\n \"\"\"isbn10 or isbn13 check, if so remove separators\"\"\"\n if query:\n su_num = re.sub(r\"(?<=\\d)\\D(?=\\d|[xX])\", \"\", query)\n if len(su_num) == 13 and su_num.isdecimal():\n # Multiply every other digit by 3\n # Add these numbers and the other digits\n product = sum(int(ch) for ch in su_num[::2]) + sum(\n int(ch) * 3 for ch in su_num[1::2]\n )\n if product % 10 == 0:\n return su_num\n elif (\n len(su_num) == 10\n and su_num[:-1].isdecimal()\n and (su_num[-1].isdecimal() or su_num[-1].lower() == \"x\")\n ):\n product = 0\n # Iterate through code_string\n for i in range(9):\n # for each character, multiply by a different decreasing number: 10 - x\n product = product + int(su_num[i]) * (10 - i)\n # Handle last character\n if su_num[9].lower() == 
\"x\":\n product += 10\n else:\n product += int(su_num[9])\n if product % 11 == 0:\n return su_num\n return query\n", "path": "bookwyrm/views/search.py"}]}
| 2,714 | 310 |
gh_patches_debug_29372
|
rasdani/github-patches
|
git_diff
|
conda__conda-6752
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
conda is broken if your home directory is read-only
Conda currently requires the user's home directory to be writable.
If the directory conda is installed into is writable (say a tmpfs) then you can get a long way by using
```shell
./Miniconda3-latest-Linux-x86_64.sh -p $CONDA_DIR -b -f
conda config --system --set always_yes yes
conda config --system --set changeps1 no
conda config --system --add envs_dirs $CONDA_DIR/envs
conda config --system --add pkgs_dirs $CONDA_DIR/pkgs
```
However, this is foiled by the following line -> https://github.com/conda/conda/blob/7616b87ad87b80da16b8263011c9c708be98147c/conda/core/envs_manager.py#L18
```python
USER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))
```
I'm not sure if this would even work on Windows?
</issue>
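The recorded fix further down makes this registry write tolerate a read-only home directory instead of failing the whole command. A minimal standalone sketch of the same idea (not conda's actual code; the registry path shown and the extra EROFS case are assumptions):

```python
# Sketch: append to the environments registry, but downgrade "permission
# denied" on a read-only home directory to a warning instead of an error.
import logging
from errno import EACCES, EROFS
from os.path import expanduser, join

log = logging.getLogger(__name__)

# Assumed default location, mirroring USER_ENVIRONMENTS_TXT_FILE above.
REGISTRY_FILE = join(expanduser("~"), ".conda", "environments.txt")


def register_env(location):
    try:
        with open(REGISTRY_FILE, "a") as fh:
            fh.write(location + "\n")
    except OSError as e:
        if e.errno in (EACCES, EROFS):
            log.warning("Home not writable; environment %s was not recorded "
                        "in %s", location, REGISTRY_FILE)
        else:
            raise
```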
<code>
[start of conda/core/envs_manager.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from logging import getLogger
5 from os import listdir
6 from os.path import dirname, isdir, isfile, join, normpath, split as path_split
7
8 from ..base.constants import ROOT_ENV_NAME
9 from ..base.context import context
10 from ..common.compat import ensure_text_type, on_win, open
11 from ..common.path import expand, paths_equal
12 from ..gateways.disk.read import yield_lines
13 from ..gateways.disk.test import is_conda_environment
14
15 log = getLogger(__name__)
16
17
18 USER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))
19
20
21 def register_env(location):
22 location = normpath(location)
23
24 if "placehold_pl" in location:
25 # Don't record envs created by conda-build.
26 return
27
28 if location in yield_lines(USER_ENVIRONMENTS_TXT_FILE):
29 # Nothing to do. Location is already recorded in a known environments.txt file.
30 return
31
32 with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:
33 fh.write(ensure_text_type(location))
34 fh.write('\n')
35
36
37 def unregister_env(location):
38 if isdir(location):
39 meta_dir = join(location, 'conda-meta')
40 if isdir(meta_dir):
41 meta_dir_contents = listdir(meta_dir)
42 if len(meta_dir_contents) > 1:
43 # if there are any files left other than 'conda-meta/history'
44 # then don't unregister
45 return
46
47 _clean_environments_txt(USER_ENVIRONMENTS_TXT_FILE, location)
48
49
50 def list_all_known_prefixes():
51 all_env_paths = set()
52 if on_win:
53 home_dir_dir = dirname(expand('~'))
54 for home_dir in listdir(home_dir_dir):
55 environments_txt_file = join(home_dir_dir, home_dir, '.conda', 'environments.txt')
56 if isfile(environments_txt_file):
57 all_env_paths.update(_clean_environments_txt(environments_txt_file))
58 else:
59 from os import geteuid
60 from pwd import getpwall
61 if geteuid() == 0:
62 search_dirs = tuple(pwentry.pw_dir for pwentry in getpwall()) or (expand('~'),)
63 else:
64 search_dirs = (expand('~'),)
65 for home_dir in search_dirs:
66 environments_txt_file = join(home_dir, '.conda', 'environments.txt')
67 if isfile(environments_txt_file):
68 all_env_paths.update(_clean_environments_txt(environments_txt_file))
69
70 # in case environments.txt files aren't complete, also add all known conda environments in
71 # all envs_dirs
72 envs_dirs = (envs_dir for envs_dir in context.envs_dirs if isdir(envs_dir))
73 all_env_paths.update(path for path in (
74 join(envs_dir, name) for envs_dir in envs_dirs for name in listdir(envs_dir)
75 ) if path not in all_env_paths and is_conda_environment(path))
76
77 all_env_paths.add(context.root_prefix)
78 return sorted(all_env_paths)
79
80
81 def env_name(prefix):
82 if not prefix:
83 return None
84 if paths_equal(prefix, context.root_prefix):
85 return ROOT_ENV_NAME
86 maybe_envs_dir, maybe_name = path_split(prefix)
87 for envs_dir in context.envs_dirs:
88 if paths_equal(envs_dir, maybe_envs_dir):
89 return maybe_name
90 return prefix
91
92
93 def _clean_environments_txt(environments_txt_file, remove_location=None):
94 if not isfile(environments_txt_file):
95 return ()
96
97 if remove_location:
98 remove_location = normpath(remove_location)
99 environments_txt_lines = tuple(yield_lines(environments_txt_file))
100 environments_txt_lines_cleaned = tuple(
101 prefix for prefix in environments_txt_lines
102 if prefix != remove_location and is_conda_environment(prefix)
103 )
104 if environments_txt_lines_cleaned != environments_txt_lines:
105 _rewrite_environments_txt(environments_txt_file, environments_txt_lines_cleaned)
106 return environments_txt_lines_cleaned
107
108
109 def _rewrite_environments_txt(environments_txt_file, prefixes):
110 try:
111 with open(environments_txt_file, 'w') as fh:
112 fh.write('\n'.join(prefixes))
113 fh.write('\n')
114 except (IOError, OSError) as e:
115 log.info("File not cleaned: %s", environments_txt_file)
116 log.debug('%r', e, exc_info=True)
117
[end of conda/core/envs_manager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conda/core/envs_manager.py b/conda/core/envs_manager.py
--- a/conda/core/envs_manager.py
+++ b/conda/core/envs_manager.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
+from errno import EACCES
from logging import getLogger
from os import listdir
from os.path import dirname, isdir, isfile, join, normpath, split as path_split
@@ -29,9 +30,17 @@
# Nothing to do. Location is already recorded in a known environments.txt file.
return
- with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:
- fh.write(ensure_text_type(location))
- fh.write('\n')
+ try:
+ with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:
+ fh.write(ensure_text_type(location))
+ fh.write('\n')
+ except EnvironmentError as e:
+ if e.errno == EACCES:
+ log.warn("Unable to register environment. Path not writable.\n"
+ " environment location: %s\n"
+ " registry file: %s", location, USER_ENVIRONMENTS_TXT_FILE)
+ else:
+ raise
def unregister_env(location):
@@ -111,6 +120,6 @@
with open(environments_txt_file, 'w') as fh:
fh.write('\n'.join(prefixes))
fh.write('\n')
- except (IOError, OSError) as e:
+ except EnvironmentError as e:
log.info("File not cleaned: %s", environments_txt_file)
log.debug('%r', e, exc_info=True)
|
{"golden_diff": "diff --git a/conda/core/envs_manager.py b/conda/core/envs_manager.py\n--- a/conda/core/envs_manager.py\n+++ b/conda/core/envs_manager.py\n@@ -1,6 +1,7 @@\n # -*- coding: utf-8 -*-\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n+from errno import EACCES\n from logging import getLogger\n from os import listdir\n from os.path import dirname, isdir, isfile, join, normpath, split as path_split\n@@ -29,9 +30,17 @@\n # Nothing to do. Location is already recorded in a known environments.txt file.\n return\n \n- with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:\n- fh.write(ensure_text_type(location))\n- fh.write('\\n')\n+ try:\n+ with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:\n+ fh.write(ensure_text_type(location))\n+ fh.write('\\n')\n+ except EnvironmentError as e:\n+ if e.errno == EACCES:\n+ log.warn(\"Unable to register environment. Path not writable.\\n\"\n+ \" environment location: %s\\n\"\n+ \" registry file: %s\", location, USER_ENVIRONMENTS_TXT_FILE)\n+ else:\n+ raise\n \n \n def unregister_env(location):\n@@ -111,6 +120,6 @@\n with open(environments_txt_file, 'w') as fh:\n fh.write('\\n'.join(prefixes))\n fh.write('\\n')\n- except (IOError, OSError) as e:\n+ except EnvironmentError as e:\n log.info(\"File not cleaned: %s\", environments_txt_file)\n log.debug('%r', e, exc_info=True)\n", "issue": "conda is broken if your home directory is read-only\nConda currently requires the user's home directory to be writable.\r\n\r\nIf the directory conda is installed into is writable (say a tmpfs) then you can get along way by using \r\n```shell\r\n\t\t./Miniconda3-latest-Linux-x86_64.sh -p $CONDA_DIR -b -f\r\n\t\tconda config --system --set always_yes yes\r\n\t\tconda config --system --set changeps1 no\r\n\t\tconda config --system --add envs_dirs $CONDA_DIR/envs\r\n\t\tconda config --system --add pkgs_dirs $CONDA_DIR/pkgs\r\n```\r\n\r\nHowever, this is foiled by the following line -> https://github.com/conda/conda/blob/7616b87ad87b80da16b8263011c9c708be98147c/conda/core/envs_manager.py#L18\r\n\r\n```python\r\nUSER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))\r\n```\r\n\r\nI'm not sure if this would even work on Windows?\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom logging import getLogger\nfrom os import listdir\nfrom os.path import dirname, isdir, isfile, join, normpath, split as path_split\n\nfrom ..base.constants import ROOT_ENV_NAME\nfrom ..base.context import context\nfrom ..common.compat import ensure_text_type, on_win, open\nfrom ..common.path import expand, paths_equal\nfrom ..gateways.disk.read import yield_lines\nfrom ..gateways.disk.test import is_conda_environment\n\nlog = getLogger(__name__)\n\n\nUSER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))\n\n\ndef register_env(location):\n location = normpath(location)\n\n if \"placehold_pl\" in location:\n # Don't record envs created by conda-build.\n return\n\n if location in yield_lines(USER_ENVIRONMENTS_TXT_FILE):\n # Nothing to do. 
Location is already recorded in a known environments.txt file.\n return\n\n with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:\n fh.write(ensure_text_type(location))\n fh.write('\\n')\n\n\ndef unregister_env(location):\n if isdir(location):\n meta_dir = join(location, 'conda-meta')\n if isdir(meta_dir):\n meta_dir_contents = listdir(meta_dir)\n if len(meta_dir_contents) > 1:\n # if there are any files left other than 'conda-meta/history'\n # then don't unregister\n return\n\n _clean_environments_txt(USER_ENVIRONMENTS_TXT_FILE, location)\n\n\ndef list_all_known_prefixes():\n all_env_paths = set()\n if on_win:\n home_dir_dir = dirname(expand('~'))\n for home_dir in listdir(home_dir_dir):\n environments_txt_file = join(home_dir_dir, home_dir, '.conda', 'environments.txt')\n if isfile(environments_txt_file):\n all_env_paths.update(_clean_environments_txt(environments_txt_file))\n else:\n from os import geteuid\n from pwd import getpwall\n if geteuid() == 0:\n search_dirs = tuple(pwentry.pw_dir for pwentry in getpwall()) or (expand('~'),)\n else:\n search_dirs = (expand('~'),)\n for home_dir in search_dirs:\n environments_txt_file = join(home_dir, '.conda', 'environments.txt')\n if isfile(environments_txt_file):\n all_env_paths.update(_clean_environments_txt(environments_txt_file))\n\n # in case environments.txt files aren't complete, also add all known conda environments in\n # all envs_dirs\n envs_dirs = (envs_dir for envs_dir in context.envs_dirs if isdir(envs_dir))\n all_env_paths.update(path for path in (\n join(envs_dir, name) for envs_dir in envs_dirs for name in listdir(envs_dir)\n ) if path not in all_env_paths and is_conda_environment(path))\n\n all_env_paths.add(context.root_prefix)\n return sorted(all_env_paths)\n\n\ndef env_name(prefix):\n if not prefix:\n return None\n if paths_equal(prefix, context.root_prefix):\n return ROOT_ENV_NAME\n maybe_envs_dir, maybe_name = path_split(prefix)\n for envs_dir in context.envs_dirs:\n if paths_equal(envs_dir, maybe_envs_dir):\n return maybe_name\n return prefix\n\n\ndef _clean_environments_txt(environments_txt_file, remove_location=None):\n if not isfile(environments_txt_file):\n return ()\n\n if remove_location:\n remove_location = normpath(remove_location)\n environments_txt_lines = tuple(yield_lines(environments_txt_file))\n environments_txt_lines_cleaned = tuple(\n prefix for prefix in environments_txt_lines\n if prefix != remove_location and is_conda_environment(prefix)\n )\n if environments_txt_lines_cleaned != environments_txt_lines:\n _rewrite_environments_txt(environments_txt_file, environments_txt_lines_cleaned)\n return environments_txt_lines_cleaned\n\n\ndef _rewrite_environments_txt(environments_txt_file, prefixes):\n try:\n with open(environments_txt_file, 'w') as fh:\n fh.write('\\n'.join(prefixes))\n fh.write('\\n')\n except (IOError, OSError) as e:\n log.info(\"File not cleaned: %s\", environments_txt_file)\n log.debug('%r', e, exc_info=True)\n", "path": "conda/core/envs_manager.py"}]}
| 1,997 | 390 |
gh_patches_debug_17939
|
rasdani/github-patches
|
git_diff
|
GeotrekCE__Geotrek-admin-1030
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pragma no cache for path graph json
As in https://github.com/makinacorpus/django-mapentity/pull/48
Related https://github.com/makinacorpus/Geotrek/issues/1026
</issue>
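The recorded diff below keeps the `last_modified` handling and stacks Django's `never_cache` on top (aliased `force_cache_validation`), so clients must revalidate the graph JSON instead of reusing a stale copy. A rough standalone sketch of that decorator combination (placeholder view and timestamp, not Geotrek's actual code):

```python
# Sketch: last_modified enables cheap 304 revalidation; never_cache adds
# Cache-Control headers that stop clients from reusing the JSON without
# asking the server again.
from datetime import datetime

from django.http import JsonResponse
from django.views.decorators.cache import never_cache
from django.views.decorators.http import last_modified


def graph_last_updated(request):
    # Placeholder for something like Path.latest_updated()
    return datetime(2024, 1, 1)


@last_modified(graph_last_updated)
@never_cache
def get_graph_json(request):
    return JsonResponse({"nodes": [], "edges": []})
```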
<code>
[start of geotrek/core/views.py]
1 # -*- coding: utf-8 -*-
2 import json
3 from django.http import HttpResponse
4 from django.utils.decorators import method_decorator
5 from django.contrib.auth.decorators import login_required
6 from django.views.decorators.http import last_modified as cache_last_modified
7 from django.views.generic.edit import BaseDetailView
8 from django.core.cache import get_cache
9 from django.shortcuts import redirect
10
11 from mapentity.views import (MapEntityLayer, MapEntityList, MapEntityJsonList,
12 MapEntityDetail, MapEntityDocument, MapEntityCreate, MapEntityUpdate,
13 MapEntityDelete, MapEntityFormat,
14 JSONResponseMixin, HttpJSONResponse, LastModifiedMixin)
15
16 from geotrek.authent.decorators import path_manager_required, same_structure_required
17
18 from .models import Path, Trail
19 from .forms import PathForm
20 from .filters import PathFilter
21 from . import graph as graph_lib
22
23
24 @login_required
25 def last_list(request):
26 last = request.session.get('last_list') # set in MapEntityList
27 if not last:
28 return redirect('core:path_list')
29 return redirect(last)
30
31 home = last_list
32
33
34 class HttpSVGResponse(HttpResponse):
35 content_type = 'image/svg+xml'
36 def __init__(self, content='', **kwargs):
37 kwargs['content_type'] = self.content_type
38 super(HttpSVGResponse, self).__init__(content, **kwargs)
39
40
41 class ElevationChart(LastModifiedMixin, BaseDetailView):
42
43 @method_decorator(login_required)
44 def dispatch(self, *args, **kwargs):
45 return super(ElevationChart, self).dispatch(*args, **kwargs)
46
47 def render_to_response(self, context, **response_kwargs):
48 return HttpSVGResponse(self.get_object().get_elevation_profile_svg(),
49 **response_kwargs)
50
51
52 class ElevationProfile(LastModifiedMixin, JSONResponseMixin, BaseDetailView):
53 """Extract elevation profile from a path and return it as JSON"""
54
55 @method_decorator(login_required)
56 def dispatch(self, *args, **kwargs):
57 return super(ElevationProfile, self).dispatch(*args, **kwargs)
58
59 def get_context_data(self, **kwargs):
60 """
61 Put elevation profile into response context.
62 """
63 obj = self.get_object()
64 data = {}
65 # Formatted as distance, elevation, [lng, lat]
66 for step in obj.get_elevation_profile():
67 formatted = step[0], step[3], step[1:3]
68 data.setdefault('profile', []).append(formatted)
69 return data
70
71
72 class ElevationArea(LastModifiedMixin, JSONResponseMixin, BaseDetailView):
73 """Extract elevation profile on an area and return it as JSON"""
74
75 @method_decorator(login_required)
76 def dispatch(self, *args, **kwargs):
77 return super(ElevationArea, self).dispatch(*args, **kwargs)
78
79 def get_context_data(self, **kwargs):
80 obj = self.get_object()
81 return obj.get_elevation_area()
82
83
84 class PathLayer(MapEntityLayer):
85 model = Path
86 properties = ['name']
87
88
89 class PathList(MapEntityList):
90 queryset = Path.objects.prefetch_related('networks').select_related('stake', 'trail')
91 filterform = PathFilter
92 columns = ['id', 'name', 'networks', 'stake', 'trail']
93
94
95 class PathJsonList(MapEntityJsonList, PathList):
96 pass
97
98
99 class PathFormatList(MapEntityFormat, PathList):
100 pass
101
102
103 class PathDetail(MapEntityDetail):
104 model = Path
105
106 def can_edit(self):
107 return self.request.user.is_superuser or \
108 (hasattr(self.request.user, 'profile') and \
109 self.request.user.profile.is_path_manager and \
110 self.get_object().same_structure(self.request.user))
111
112
113 class PathDocument(MapEntityDocument):
114 model = Path
115
116 def get_context_data(self, *args, **kwargs):
117 self.get_object().prepare_elevation_chart(self.request)
118 return super(PathDocument, self).get_context_data(*args, **kwargs)
119
120
121 class PathCreate(MapEntityCreate):
122 model = Path
123 form_class = PathForm
124
125 @method_decorator(path_manager_required('core:path_list'))
126 def dispatch(self, *args, **kwargs):
127 return super(PathCreate, self).dispatch(*args, **kwargs)
128
129
130 class PathUpdate(MapEntityUpdate):
131 model = Path
132 form_class = PathForm
133
134 @method_decorator(path_manager_required('core:path_detail'))
135 @same_structure_required('core:path_detail')
136 def dispatch(self, *args, **kwargs):
137 return super(PathUpdate, self).dispatch(*args, **kwargs)
138
139
140 class PathDelete(MapEntityDelete):
141 model = Path
142
143 @method_decorator(path_manager_required('core:path_detail'))
144 @same_structure_required('core:path_detail')
145 def dispatch(self, *args, **kwargs):
146 return super(PathDelete, self).dispatch(*args, **kwargs)
147
148
149 @login_required
150 @cache_last_modified(lambda x: Path.latest_updated())
151 def get_graph_json(request):
152 cache = get_cache('fat')
153 key = 'path_graph_json'
154
155 result = cache.get(key)
156 latest = Path.latest_updated()
157
158 if result and latest:
159 cache_latest, json_graph = result
160 # Not empty and still valid
161 if cache_latest and cache_latest >= latest:
162 return HttpJSONResponse(json_graph)
163
164 # cache does not exist or is not up to date
165 # rebuild the graph and cache the json
166 graph = graph_lib.graph_edges_nodes_of_qs(Path.objects.all())
167 json_graph = json.dumps(graph)
168
169 cache.set(key, (latest, json_graph))
170 return HttpJSONResponse(json_graph)
171
172
173 class TrailDetail(MapEntityDetail):
174 model = Trail
175
176 def can_edit(self):
177 return False
178
179
180 class TrailDocument(MapEntityDocument):
181 model = Trail
182
[end of geotrek/core/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/geotrek/core/views.py b/geotrek/core/views.py
--- a/geotrek/core/views.py
+++ b/geotrek/core/views.py
@@ -4,6 +4,7 @@
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import last_modified as cache_last_modified
+from django.views.decorators.cache import never_cache as force_cache_validation
from django.views.generic.edit import BaseDetailView
from django.core.cache import get_cache
from django.shortcuts import redirect
@@ -148,6 +149,7 @@
@login_required
@cache_last_modified(lambda x: Path.latest_updated())
+@force_cache_validation
def get_graph_json(request):
cache = get_cache('fat')
key = 'path_graph_json'
|
{"golden_diff": "diff --git a/geotrek/core/views.py b/geotrek/core/views.py\n--- a/geotrek/core/views.py\n+++ b/geotrek/core/views.py\n@@ -4,6 +4,7 @@\n from django.utils.decorators import method_decorator\n from django.contrib.auth.decorators import login_required\n from django.views.decorators.http import last_modified as cache_last_modified\n+from django.views.decorators.cache import never_cache as force_cache_validation\n from django.views.generic.edit import BaseDetailView\n from django.core.cache import get_cache\n from django.shortcuts import redirect\n@@ -148,6 +149,7 @@\n \n @login_required\n @cache_last_modified(lambda x: Path.latest_updated())\n+@force_cache_validation\n def get_graph_json(request):\n cache = get_cache('fat')\n key = 'path_graph_json'\n", "issue": "Pragma no cache for path graph json\nAs in https://github.com/makinacorpus/django-mapentity/pull/48\n\nRelated https://github.com/makinacorpus/Geotrek/issues/1026\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nfrom django.http import HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import last_modified as cache_last_modified\nfrom django.views.generic.edit import BaseDetailView\nfrom django.core.cache import get_cache\nfrom django.shortcuts import redirect\n\nfrom mapentity.views import (MapEntityLayer, MapEntityList, MapEntityJsonList,\n MapEntityDetail, MapEntityDocument, MapEntityCreate, MapEntityUpdate,\n MapEntityDelete, MapEntityFormat,\n JSONResponseMixin, HttpJSONResponse, LastModifiedMixin)\n\nfrom geotrek.authent.decorators import path_manager_required, same_structure_required\n\nfrom .models import Path, Trail\nfrom .forms import PathForm\nfrom .filters import PathFilter\nfrom . 
import graph as graph_lib\n\n\n@login_required\ndef last_list(request):\n last = request.session.get('last_list') # set in MapEntityList\n if not last:\n return redirect('core:path_list')\n return redirect(last)\n\nhome = last_list\n\n\nclass HttpSVGResponse(HttpResponse):\n content_type = 'image/svg+xml'\n def __init__(self, content='', **kwargs):\n kwargs['content_type'] = self.content_type\n super(HttpSVGResponse, self).__init__(content, **kwargs)\n\n\nclass ElevationChart(LastModifiedMixin, BaseDetailView):\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ElevationChart, self).dispatch(*args, **kwargs)\n\n def render_to_response(self, context, **response_kwargs):\n return HttpSVGResponse(self.get_object().get_elevation_profile_svg(),\n **response_kwargs)\n\n\nclass ElevationProfile(LastModifiedMixin, JSONResponseMixin, BaseDetailView):\n \"\"\"Extract elevation profile from a path and return it as JSON\"\"\"\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ElevationProfile, self).dispatch(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Put elevation profile into response context.\n \"\"\"\n obj = self.get_object()\n data = {}\n # Formatted as distance, elevation, [lng, lat]\n for step in obj.get_elevation_profile():\n formatted = step[0], step[3], step[1:3]\n data.setdefault('profile', []).append(formatted)\n return data\n\n\nclass ElevationArea(LastModifiedMixin, JSONResponseMixin, BaseDetailView):\n \"\"\"Extract elevation profile on an area and return it as JSON\"\"\"\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ElevationArea, self).dispatch(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n obj = self.get_object()\n return obj.get_elevation_area()\n\n\nclass PathLayer(MapEntityLayer):\n model = Path\n properties = ['name']\n\n\nclass PathList(MapEntityList):\n queryset = Path.objects.prefetch_related('networks').select_related('stake', 'trail')\n filterform = PathFilter\n columns = ['id', 'name', 'networks', 'stake', 'trail']\n\n\nclass PathJsonList(MapEntityJsonList, PathList):\n pass\n\n\nclass PathFormatList(MapEntityFormat, PathList):\n pass\n\n\nclass PathDetail(MapEntityDetail):\n model = Path\n\n def can_edit(self):\n return self.request.user.is_superuser or \\\n (hasattr(self.request.user, 'profile') and \\\n self.request.user.profile.is_path_manager and \\\n self.get_object().same_structure(self.request.user))\n\n\nclass PathDocument(MapEntityDocument):\n model = Path\n\n def get_context_data(self, *args, **kwargs):\n self.get_object().prepare_elevation_chart(self.request)\n return super(PathDocument, self).get_context_data(*args, **kwargs)\n\n\nclass PathCreate(MapEntityCreate):\n model = Path\n form_class = PathForm\n\n @method_decorator(path_manager_required('core:path_list'))\n def dispatch(self, *args, **kwargs):\n return super(PathCreate, self).dispatch(*args, **kwargs)\n\n\nclass PathUpdate(MapEntityUpdate):\n model = Path\n form_class = PathForm\n\n @method_decorator(path_manager_required('core:path_detail'))\n @same_structure_required('core:path_detail')\n def dispatch(self, *args, **kwargs):\n return super(PathUpdate, self).dispatch(*args, **kwargs)\n\n\nclass PathDelete(MapEntityDelete):\n model = Path\n\n @method_decorator(path_manager_required('core:path_detail'))\n @same_structure_required('core:path_detail')\n def dispatch(self, *args, **kwargs):\n return super(PathDelete, self).dispatch(*args, 
**kwargs)\n\n\n@login_required\n@cache_last_modified(lambda x: Path.latest_updated())\ndef get_graph_json(request):\n cache = get_cache('fat')\n key = 'path_graph_json'\n\n result = cache.get(key)\n latest = Path.latest_updated()\n\n if result and latest:\n cache_latest, json_graph = result\n # Not empty and still valid\n if cache_latest and cache_latest >= latest:\n return HttpJSONResponse(json_graph)\n\n # cache does not exist or is not up to date\n # rebuild the graph and cache the json\n graph = graph_lib.graph_edges_nodes_of_qs(Path.objects.all())\n json_graph = json.dumps(graph)\n\n cache.set(key, (latest, json_graph))\n return HttpJSONResponse(json_graph)\n\n\nclass TrailDetail(MapEntityDetail):\n model = Trail\n\n def can_edit(self):\n return False\n\n\nclass TrailDocument(MapEntityDocument):\n model = Trail\n", "path": "geotrek/core/views.py"}]}
| 2,269 | 171 |
gh_patches_debug_3724
|
rasdani/github-patches
|
git_diff
|
fal-ai__dbt-fal-344
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make fal scripts __name__ == '__main__'
**Context**
When users test scripts they will normally have a
```py
if __name__ == '__main__':
main()
```
and the `main()` function will be where the whole script lives.
When a script is associated with a model, it is executed "directly". So it could be considered the "__main__" script.
**Describe alternatives you've considered**
Going for the dbt interface of offering a function.
```py
# for models
def model():
pass
```
```py
# for hooks/after/before scripts
def hook():
    pass
```
</issue>
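The fix this record lands on simply seeds the script's exec globals with a `__name__` entry; the behaviour it relies on is plain Python and can be checked in isolation:

```python
# Sketch, independent of fal: the __name__ seen by exec()'d code is whatever
# the supplied globals dict contains (it falls back to the builtins module's
# own name when the key is missing).
source = """
if __name__ == "__main__":
    print("treated as the main script")
else:
    print("treated as module:", __name__)
"""
program = compile(source, "<user script>", "exec")

exec(program, {})                        # prints: treated as module: builtins
exec(program, {"__name__": "__main__"})  # prints: treated as the main script
```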
<code>
[start of src/fal/fal_script.py]
1 import os
2 import json
3 from typing import Dict, Any, List, Optional, Union
4 from pathlib import Path
5 from functools import partial
6 from dataclasses import dataclass
7 from deprecation import deprecated
8
9 from faldbt.parse import normalize_path
10 from faldbt.project import DbtModel, FalDbt
11 import faldbt.lib as lib
12
13 from dbt.contracts.results import RunStatus
14 from dbt.config.runtime import RuntimeConfig
15 from dbt.logger import GLOBAL_LOGGER as logger
16
17 if lib.DBT_VCURRENT.compare(lib.DBT_V1) >= 0:
18 from dbt.contracts.graph.parsed import ColumnInfo
19 else:
20 from faldbt.cp.contracts.graph.parsed import ColumnInfo
21
22
23 @dataclass
24 class CurrentModel:
25 name: str
26 alias: str
27 status: RunStatus
28 columns: Dict[str, ColumnInfo]
29 tests: List[Any]
30 meta: Dict[Any, Any]
31
32
33 @dataclass
34 class CurrentTest:
35 name: str
36 model_name: str
37 column: str
38 status: str
39
40 @property
41 @deprecated(details="Use 'model_name' instead")
42 def modelname(self):
43 return self.model_name
44
45
46 @dataclass
47 class ContextConfig:
48 target_path: Path
49
50 def __init__(self, config: RuntimeConfig):
51 self.target_path = Path(
52 os.path.realpath(os.path.join(config.project_root, config.target_path))
53 )
54
55
56 @dataclass
57 class Context:
58 current_model: Union[CurrentModel, None]
59 config: ContextConfig
60
61
62 @dataclass(frozen=True, init=False)
63 class FalScript:
64 model: Optional[DbtModel]
65 path: Path
66 _faldbt: FalDbt
67
68 def __init__(self, faldbt: FalDbt, model: Optional[DbtModel], path: str):
69 # Necessary because of frozen=True
70 object.__setattr__(self, "model", model)
71 object.__setattr__(self, "path", normalize_path(faldbt.scripts_dir, path))
72 object.__setattr__(self, "_faldbt", faldbt)
73
74 @classmethod
75 def model_script(cls, faldbt: FalDbt, model: DbtModel):
76 script = FalScript(faldbt, model, "")
77 # HACK: Set the script path specially for this case
78 object.__setattr__(script, "path", model.python_model)
79 return script
80
81 def exec(self, faldbt: FalDbt):
82 """
83 Executes the script
84 """
85 # Enable local imports
86 try:
87 source_code = python_from_file(self.path)
88 program = compile(source_code, self.path, "exec")
89
90 exec_globals = {
91 "context": self._build_script_context(),
92 "ref": faldbt.ref,
93 "source": faldbt.source,
94 "write_to_source": faldbt.write_to_source,
95 "write_to_firestore": faldbt.write_to_firestore,
96 "list_models": faldbt.list_models,
97 "list_models_ids": faldbt.list_models_ids,
98 "list_sources": faldbt.list_sources,
99 "list_features": faldbt.list_features,
100 "el": faldbt.el,
101 }
102
103 if self.model is not None:
104 # Hard-wire the model
105 exec_globals["write_to_model"] = partial(
106 faldbt.write_to_model, target_1=self.model.name, target_2=None
107 )
108
109 exec(program, exec_globals)
110 finally:
111 pass
112
113 @property
114 def id(self):
115 # TODO: maybe `self.path - project_dir`, to show only relevant path
116 return f"({self.model_name},{self.path})"
117
118 @property
119 def is_global(self):
120 return self.model is None
121
122 @property
123 def model_name(self):
124 return "<GLOBAL>" if self.is_global else self.model.name # type: ignore
125
126 def _build_script_context(self):
127 context_config = ContextConfig(self._faldbt._config)
128 if self.is_global:
129 return Context(current_model=None, config=context_config)
130
131 model: DbtModel = self.model # type: ignore
132
133 meta = model.meta
134 _del_key(meta, self._faldbt.keyword)
135
136 tests = _process_tests(model.tests)
137
138 current_model = CurrentModel(
139 name=model.name,
140 alias=model.alias,
141 status=model.status,
142 columns=model.columns,
143 tests=tests,
144 meta=meta,
145 )
146
147 return Context(current_model=current_model, config=context_config)
148
149
150 def _del_key(dict: Dict[str, Any], key: str):
151 try:
152 del dict[key]
153 except KeyError:
154 pass
155
156
157 def _process_tests(tests: List[Any]):
158 return list(
159 map(
160 lambda test: CurrentTest(
161 name=test.name,
162 column=test.column,
163 status=test.status,
164 model_name=test.model,
165 ),
166 tests,
167 )
168 )
169
170
171 def python_from_file(path: Path) -> str:
172 with open(path) as file:
173 raw_source_code = file.read()
174 if path.suffix == ".ipynb":
175 raw_source_code = _process_ipynb(raw_source_code)
176 return raw_source_code
177
178
179 def _process_ipynb(raw_source_code: str) -> str:
180 def strip_magic(source: List[str]) -> List[str]:
181 NOTEBOOK_LIB = "faldbt.magics"
182 return [item for item in source if item[0] != "%" and NOTEBOOK_LIB not in item]
183
184 ipynb_struct = json.loads(raw_source_code)
185
186 script_list = []
187 for cell in ipynb_struct["cells"]:
188 if cell["cell_type"] == "code":
189 source = strip_magic(cell["source"])
190 script_list.append("".join(source))
191
192 joined_script = "\n #cell \n".join(script_list)
193
194 logger.debug(f"Joined .ipynb cells to:\n{joined_script}")
195
196 return joined_script
197
[end of src/fal/fal_script.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/fal/fal_script.py b/src/fal/fal_script.py
--- a/src/fal/fal_script.py
+++ b/src/fal/fal_script.py
@@ -88,6 +88,7 @@
program = compile(source_code, self.path, "exec")
exec_globals = {
+ "__name__": "__main__",
"context": self._build_script_context(),
"ref": faldbt.ref,
"source": faldbt.source,
|
{"golden_diff": "diff --git a/src/fal/fal_script.py b/src/fal/fal_script.py\n--- a/src/fal/fal_script.py\n+++ b/src/fal/fal_script.py\n@@ -88,6 +88,7 @@\n program = compile(source_code, self.path, \"exec\")\n \n exec_globals = {\n+ \"__name__\": \"__main__\",\n \"context\": self._build_script_context(),\n \"ref\": faldbt.ref,\n \"source\": faldbt.source,\n", "issue": "Make fal scripts __name__ == '__main__'\n**Context**\r\nWhen users test scripts they will normally have a \r\n\r\n```py\r\nif __name__ == '__main__':\r\n main()\r\n```\r\n\r\nand the `main()` function will be where the whole script lives.\r\n\r\nWhen a script is associated with a model, it is executed \"directly\". So it could be considered the \"__main__\" script.\r\n\r\n**Describe alternatives you've considered**\r\nGoing for the dbt interface of offering a function.\r\n\r\n```py\r\n# for models\r\ndef model():\r\n pass\r\n```\r\n\r\n```py\r\n# for hooks/after/before scripts\r\ndef hook():\r\n pass\r\n\n", "before_files": [{"content": "import os\nimport json\nfrom typing import Dict, Any, List, Optional, Union\nfrom pathlib import Path\nfrom functools import partial\nfrom dataclasses import dataclass\nfrom deprecation import deprecated\n\nfrom faldbt.parse import normalize_path\nfrom faldbt.project import DbtModel, FalDbt\nimport faldbt.lib as lib\n\nfrom dbt.contracts.results import RunStatus\nfrom dbt.config.runtime import RuntimeConfig\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\nif lib.DBT_VCURRENT.compare(lib.DBT_V1) >= 0:\n from dbt.contracts.graph.parsed import ColumnInfo\nelse:\n from faldbt.cp.contracts.graph.parsed import ColumnInfo\n\n\n@dataclass\nclass CurrentModel:\n name: str\n alias: str\n status: RunStatus\n columns: Dict[str, ColumnInfo]\n tests: List[Any]\n meta: Dict[Any, Any]\n\n\n@dataclass\nclass CurrentTest:\n name: str\n model_name: str\n column: str\n status: str\n\n @property\n @deprecated(details=\"Use 'model_name' instead\")\n def modelname(self):\n return self.model_name\n\n\n@dataclass\nclass ContextConfig:\n target_path: Path\n\n def __init__(self, config: RuntimeConfig):\n self.target_path = Path(\n os.path.realpath(os.path.join(config.project_root, config.target_path))\n )\n\n\n@dataclass\nclass Context:\n current_model: Union[CurrentModel, None]\n config: ContextConfig\n\n\n@dataclass(frozen=True, init=False)\nclass FalScript:\n model: Optional[DbtModel]\n path: Path\n _faldbt: FalDbt\n\n def __init__(self, faldbt: FalDbt, model: Optional[DbtModel], path: str):\n # Necessary because of frozen=True\n object.__setattr__(self, \"model\", model)\n object.__setattr__(self, \"path\", normalize_path(faldbt.scripts_dir, path))\n object.__setattr__(self, \"_faldbt\", faldbt)\n\n @classmethod\n def model_script(cls, faldbt: FalDbt, model: DbtModel):\n script = FalScript(faldbt, model, \"\")\n # HACK: Set the script path specially for this case\n object.__setattr__(script, \"path\", model.python_model)\n return script\n\n def exec(self, faldbt: FalDbt):\n \"\"\"\n Executes the script\n \"\"\"\n # Enable local imports\n try:\n source_code = python_from_file(self.path)\n program = compile(source_code, self.path, \"exec\")\n\n exec_globals = {\n \"context\": self._build_script_context(),\n \"ref\": faldbt.ref,\n \"source\": faldbt.source,\n \"write_to_source\": faldbt.write_to_source,\n \"write_to_firestore\": faldbt.write_to_firestore,\n \"list_models\": faldbt.list_models,\n \"list_models_ids\": faldbt.list_models_ids,\n \"list_sources\": faldbt.list_sources,\n \"list_features\": 
faldbt.list_features,\n \"el\": faldbt.el,\n }\n\n if self.model is not None:\n # Hard-wire the model\n exec_globals[\"write_to_model\"] = partial(\n faldbt.write_to_model, target_1=self.model.name, target_2=None\n )\n\n exec(program, exec_globals)\n finally:\n pass\n\n @property\n def id(self):\n # TODO: maybe `self.path - project_dir`, to show only relevant path\n return f\"({self.model_name},{self.path})\"\n\n @property\n def is_global(self):\n return self.model is None\n\n @property\n def model_name(self):\n return \"<GLOBAL>\" if self.is_global else self.model.name # type: ignore\n\n def _build_script_context(self):\n context_config = ContextConfig(self._faldbt._config)\n if self.is_global:\n return Context(current_model=None, config=context_config)\n\n model: DbtModel = self.model # type: ignore\n\n meta = model.meta\n _del_key(meta, self._faldbt.keyword)\n\n tests = _process_tests(model.tests)\n\n current_model = CurrentModel(\n name=model.name,\n alias=model.alias,\n status=model.status,\n columns=model.columns,\n tests=tests,\n meta=meta,\n )\n\n return Context(current_model=current_model, config=context_config)\n\n\ndef _del_key(dict: Dict[str, Any], key: str):\n try:\n del dict[key]\n except KeyError:\n pass\n\n\ndef _process_tests(tests: List[Any]):\n return list(\n map(\n lambda test: CurrentTest(\n name=test.name,\n column=test.column,\n status=test.status,\n model_name=test.model,\n ),\n tests,\n )\n )\n\n\ndef python_from_file(path: Path) -> str:\n with open(path) as file:\n raw_source_code = file.read()\n if path.suffix == \".ipynb\":\n raw_source_code = _process_ipynb(raw_source_code)\n return raw_source_code\n\n\ndef _process_ipynb(raw_source_code: str) -> str:\n def strip_magic(source: List[str]) -> List[str]:\n NOTEBOOK_LIB = \"faldbt.magics\"\n return [item for item in source if item[0] != \"%\" and NOTEBOOK_LIB not in item]\n\n ipynb_struct = json.loads(raw_source_code)\n\n script_list = []\n for cell in ipynb_struct[\"cells\"]:\n if cell[\"cell_type\"] == \"code\":\n source = strip_magic(cell[\"source\"])\n script_list.append(\"\".join(source))\n\n joined_script = \"\\n #cell \\n\".join(script_list)\n\n logger.debug(f\"Joined .ipynb cells to:\\n{joined_script}\")\n\n return joined_script\n", "path": "src/fal/fal_script.py"}]}
| 2,467 | 109 |
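The one-line fix in the row above hinges on a detail of `exec()` that is easy to miss: a fresh globals dict contains no `__name__` at all, so the usual entry-point guard in a user script can never fire. The sketch below is a self-contained illustration of that behaviour; the throwaway `source_code` string and `<user_script>` filename are made up for the example and are not fal's actual script loading.

```python
# Minimal sketch: why exec'd code needs __name__ injected explicitly.
source_code = """
def main():
    print("running main()")

if __name__ == "__main__":
    main()
"""

program = compile(source_code, "<user_script>", "exec")

# A bare globals dict only gets __builtins__ added automatically, so the
# guard's lookup of __name__ raises NameError and main() never runs.
try:
    exec(program, {})
except NameError as err:
    print("without __name__:", err)

# Injecting __name__ = "__main__" mimics running the file directly,
# which is what the patch above does inside exec_globals.
exec(program, {"__name__": "__main__"})
```

Seen this way, adding `"__name__": "__main__"` to `exec_globals` is the smallest change that lets existing `main()`-guarded scripts behave the same under fal as when run directly.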
gh_patches_debug_35879
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-1437
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Image is not supported as a dependency
Version: `0.22.0+f9b30a`
I ran this command:
```
dvc run -d 2018-12-12-13:32:56.png -o derp 'echo derp > derp'
```
Where the dependency is:
```
2018-12-12-13:32:56.png: PNG image data, 1228 x 494, 8-bit/color RGBA, non-interlaced
```
And I got this:
```
Error: Failed to run command: Dependency '2018-12-12-13:32:56.png' is not supported
```
Is this intended? Why aren't we supporting images as dependencies?
</issue>
<code>
[start of dvc/output/__init__.py]
1 import schema
2
3 from dvc.exceptions import DvcException
4 from dvc.config import Config
5
6 from dvc.dependency import SCHEMA, urlparse
7 from dvc.dependency.base import DependencyBase
8 from dvc.output.s3 import OutputS3
9 from dvc.output.gs import OutputGS
10 from dvc.output.local import OutputLOCAL
11 from dvc.output.hdfs import OutputHDFS
12 from dvc.output.ssh import OutputSSH
13
14 from dvc.remote import Remote
15
16
17 OUTS = [OutputHDFS, OutputS3, OutputGS, OutputSSH, OutputLOCAL]
18
19 OUTS_MAP = {'hdfs': OutputHDFS,
20 's3': OutputS3,
21 'gs': OutputGS,
22 'ssh': OutputSSH,
23 '': OutputLOCAL}
24
25 SCHEMA[schema.Optional(OutputLOCAL.PARAM_CACHE)] = bool
26 SCHEMA[schema.Optional(OutputLOCAL.PARAM_METRIC)] = OutputLOCAL.METRIC_SCHEMA
27
28
29 def _get(stage, p, info, cache, metric):
30 parsed = urlparse(p)
31 if parsed.scheme == 'remote':
32 name = Config.SECTION_REMOTE_FMT.format(parsed.netloc)
33 sect = stage.project.config._config[name]
34 remote = Remote(stage.project, sect)
35 return OUTS_MAP[remote.scheme](stage,
36 p,
37 info,
38 cache=cache,
39 remote=remote,
40 metric=metric)
41
42 for o in OUTS:
43 if o.supported(p):
44 return o(stage, p, info, cache=cache, remote=None, metric=metric)
45 raise DvcException('Output \'{}\' is not supported'.format(p))
46
47
48 def loadd_from(stage, d_list):
49 ret = []
50 for d in d_list:
51 p = d.pop(DependencyBase.PARAM_PATH)
52 cache = d.pop(OutputLOCAL.PARAM_CACHE, True)
53 metric = d.pop(OutputLOCAL.PARAM_METRIC, False)
54 ret.append(_get(stage, p, info=d, cache=cache, metric=metric))
55 return ret
56
57
58 def loads_from(stage, s_list, use_cache=True, metric=False):
59 ret = []
60 for s in s_list:
61 ret.append(_get(stage, s, info={}, cache=use_cache, metric=metric))
62 return ret
63
[end of dvc/output/__init__.py]
[start of dvc/dependency/__init__.py]
1 import schema
2
3 try:
4 from urlparse import urlparse
5 except ImportError:
6 from urllib.parse import urlparse
7
8 from dvc.exceptions import DvcException
9 from dvc.config import Config
10
11 from dvc.dependency.base import DependencyBase
12 from dvc.dependency.s3 import DependencyS3
13 from dvc.dependency.gs import DependencyGS
14 from dvc.dependency.local import DependencyLOCAL
15 from dvc.dependency.hdfs import DependencyHDFS
16 from dvc.dependency.ssh import DependencySSH
17 from dvc.dependency.http import DependencyHTTP
18
19 from dvc.remote import Remote
20 from dvc.remote.local import RemoteLOCAL
21 from dvc.remote.s3 import RemoteS3
22 from dvc.remote.hdfs import RemoteHDFS
23
24 DEPS = [
25 DependencyGS,
26 DependencyHDFS,
27 DependencyHTTP,
28 DependencyLOCAL,
29 DependencyS3,
30 DependencySSH,
31 ]
32
33 DEP_MAP = {
34 '': DependencyLOCAL,
35 'ssh': DependencySSH,
36 's3': DependencyS3,
37 'gs': DependencyGS,
38 'hdfs': DependencyHDFS,
39 'http': DependencyHTTP,
40 'https': DependencyHTTP,
41 }
42
43 # We are skipping RemoteHTTP.PARAM_ETAG because is the same as RemoteS3
44 SCHEMA = {
45 DependencyBase.PARAM_PATH: str,
46 schema.Optional(RemoteLOCAL.PARAM_MD5): schema.Or(str, None),
47 schema.Optional(RemoteS3.PARAM_ETAG): schema.Or(str, None),
48 schema.Optional(RemoteHDFS.PARAM_CHECKSUM): schema.Or(str, None),
49 }
50
51
52 def _get(stage, p, info):
53 parsed = urlparse(p)
54 if parsed.scheme == 'remote':
55 name = Config.SECTION_REMOTE_FMT.format(parsed.netloc)
56 sect = stage.project.config._config[name]
57 remote = Remote(stage.project, sect)
58 return DEP_MAP[remote.scheme](stage, p, info, remote=remote)
59
60 for d in DEPS:
61 if d.supported(p):
62 return d(stage, p, info)
63 raise DvcException('Dependency \'{}\' is not supported'.format(p))
64
65
66 def loadd_from(stage, d_list):
67 ret = []
68 for d in d_list:
69 p = d.pop(DependencyBase.PARAM_PATH)
70 ret.append(_get(stage, p, d))
71 return ret
72
73
74 def loads_from(stage, s_list):
75 ret = []
76 for s in s_list:
77 ret.append(_get(stage, s, {}))
78 return ret
79
[end of dvc/dependency/__init__.py]
[start of dvc/dependency/local.py]
1 import os
2
3 try:
4 from urlparse import urlparse
5 except ImportError:
6 from urllib.parse import urlparse
7
8 from dvc.dependency.base import DependencyBase
9 from dvc.dependency.base import DependencyDoesNotExistError
10 from dvc.dependency.base import DependencyIsNotFileOrDirError
11 from dvc.remote.local import RemoteLOCAL
12
13
14 class DependencyLOCAL(DependencyBase):
15 REGEX = r'^(?P<path>(/+|.:\\+)?[^:]*)$'
16
17 DoesNotExistError = DependencyDoesNotExistError
18 IsNotFileOrDirError = DependencyIsNotFileOrDirError
19
20 def __init__(self, stage, path, info=None, remote=None):
21 self.stage = stage
22 self.project = stage.project
23 self.info = info
24 if remote is not None:
25 self.remote = remote
26 else:
27 self.remote = RemoteLOCAL(stage.project, {})
28
29 if remote:
30 path = os.path.join(remote.prefix, urlparse(path).path.lstrip('/'))
31
32 if not os.path.isabs(path):
33 path = self.remote.ospath(path)
34 path = os.path.join(stage.cwd, path)
35 self.path = os.path.abspath(os.path.normpath(path))
36
37 self.path_info = {'scheme': 'local',
38 'path': self.path}
39
40 def __str__(self):
41 return self.rel_path
42
43 @property
44 def is_local(self):
45 assert os.path.isabs(self.path)
46 assert os.path.isabs(self.project.root_dir)
47 return self.path.startswith(self.project.root_dir)
48
49 @property
50 def sep(self):
51 return os.sep
52
53 @property
54 def rel_path(self):
55 return os.path.relpath(self.path)
56
57 def changed(self):
58 if not self.exists:
59 return True
60
61 info = self.remote.save_info(self.path_info)
62
63 return self.info != info
64
65 def save(self):
66 if not self.exists:
67 raise self.DoesNotExistError(self.rel_path)
68
69 if not os.path.isfile(self.path) \
70 and not os.path.isdir(self.path): # pragma: no cover
71 raise self.IsNotFileOrDirError(self.rel_path)
72
73 if (os.path.isfile(self.path) and os.path.getsize(self.path) == 0) or \
74 (os.path.isdir(self.path) and len(os.listdir(self.path)) == 0):
75 msg = "File/directory '{}' is empty.".format(self.rel_path)
76 self.project.logger.warn(msg)
77
78 self.info = self.remote.save_info(self.path_info)
79
80 def dumpd(self):
81 if self.is_local:
82 path = self.remote.unixpath(os.path.relpath(self.path,
83 self.stage.cwd))
84 else:
85 path = self.path
86
87 info = self.info.copy()
88 info[self.PARAM_PATH] = path
89 return info
90
[end of dvc/dependency/local.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dvc/dependency/__init__.py b/dvc/dependency/__init__.py
--- a/dvc/dependency/__init__.py
+++ b/dvc/dependency/__init__.py
@@ -5,7 +5,6 @@
except ImportError:
from urllib.parse import urlparse
-from dvc.exceptions import DvcException
from dvc.config import Config
from dvc.dependency.base import DependencyBase
@@ -25,9 +24,9 @@
DependencyGS,
DependencyHDFS,
DependencyHTTP,
- DependencyLOCAL,
DependencyS3,
DependencySSH,
+ # NOTE: DependencyLOCAL is the default choice
]
DEP_MAP = {
@@ -60,7 +59,7 @@
for d in DEPS:
if d.supported(p):
return d(stage, p, info)
- raise DvcException('Dependency \'{}\' is not supported'.format(p))
+ return DependencyLOCAL(stage, p, info)
def loadd_from(stage, d_list):
diff --git a/dvc/dependency/local.py b/dvc/dependency/local.py
--- a/dvc/dependency/local.py
+++ b/dvc/dependency/local.py
@@ -12,7 +12,7 @@
class DependencyLOCAL(DependencyBase):
- REGEX = r'^(?P<path>(/+|.:\\+)?[^:]*)$'
+ REGEX = r'^(?P<path>.*)$'
DoesNotExistError = DependencyDoesNotExistError
IsNotFileOrDirError = DependencyIsNotFileOrDirError
diff --git a/dvc/output/__init__.py b/dvc/output/__init__.py
--- a/dvc/output/__init__.py
+++ b/dvc/output/__init__.py
@@ -1,6 +1,5 @@
import schema
-from dvc.exceptions import DvcException
from dvc.config import Config
from dvc.dependency import SCHEMA, urlparse
@@ -14,7 +13,13 @@
from dvc.remote import Remote
-OUTS = [OutputHDFS, OutputS3, OutputGS, OutputSSH, OutputLOCAL]
+OUTS = [
+ OutputHDFS,
+ OutputS3,
+ OutputGS,
+ OutputSSH,
+ # NOTE: OutputLOCAL is the default choice
+]
OUTS_MAP = {'hdfs': OutputHDFS,
's3': OutputS3,
@@ -42,7 +47,7 @@
for o in OUTS:
if o.supported(p):
return o(stage, p, info, cache=cache, remote=None, metric=metric)
- raise DvcException('Output \'{}\' is not supported'.format(p))
+ return OutputLOCAL(stage, p, info, cache=cache, remote=None, metric=metric)
def loadd_from(stage, d_list):
|
{"golden_diff": "diff --git a/dvc/dependency/__init__.py b/dvc/dependency/__init__.py\n--- a/dvc/dependency/__init__.py\n+++ b/dvc/dependency/__init__.py\n@@ -5,7 +5,6 @@\n except ImportError:\n from urllib.parse import urlparse\n \n-from dvc.exceptions import DvcException\n from dvc.config import Config\n \n from dvc.dependency.base import DependencyBase\n@@ -25,9 +24,9 @@\n DependencyGS,\n DependencyHDFS,\n DependencyHTTP,\n- DependencyLOCAL,\n DependencyS3,\n DependencySSH,\n+ # NOTE: DependencyLOCAL is the default choice\n ]\n \n DEP_MAP = {\n@@ -60,7 +59,7 @@\n for d in DEPS:\n if d.supported(p):\n return d(stage, p, info)\n- raise DvcException('Dependency \\'{}\\' is not supported'.format(p))\n+ return DependencyLOCAL(stage, p, info)\n \n \n def loadd_from(stage, d_list):\ndiff --git a/dvc/dependency/local.py b/dvc/dependency/local.py\n--- a/dvc/dependency/local.py\n+++ b/dvc/dependency/local.py\n@@ -12,7 +12,7 @@\n \n \n class DependencyLOCAL(DependencyBase):\n- REGEX = r'^(?P<path>(/+|.:\\\\+)?[^:]*)$'\n+ REGEX = r'^(?P<path>.*)$'\n \n DoesNotExistError = DependencyDoesNotExistError\n IsNotFileOrDirError = DependencyIsNotFileOrDirError\ndiff --git a/dvc/output/__init__.py b/dvc/output/__init__.py\n--- a/dvc/output/__init__.py\n+++ b/dvc/output/__init__.py\n@@ -1,6 +1,5 @@\n import schema\n \n-from dvc.exceptions import DvcException\n from dvc.config import Config\n \n from dvc.dependency import SCHEMA, urlparse\n@@ -14,7 +13,13 @@\n from dvc.remote import Remote\n \n \n-OUTS = [OutputHDFS, OutputS3, OutputGS, OutputSSH, OutputLOCAL]\n+OUTS = [\n+ OutputHDFS,\n+ OutputS3,\n+ OutputGS,\n+ OutputSSH,\n+ # NOTE: OutputLOCAL is the default choice\n+]\n \n OUTS_MAP = {'hdfs': OutputHDFS,\n 's3': OutputS3,\n@@ -42,7 +47,7 @@\n for o in OUTS:\n if o.supported(p):\n return o(stage, p, info, cache=cache, remote=None, metric=metric)\n- raise DvcException('Output \\'{}\\' is not supported'.format(p))\n+ return OutputLOCAL(stage, p, info, cache=cache, remote=None, metric=metric)\n \n \n def loadd_from(stage, d_list):\n", "issue": "Image is not supported as a dependency\nVersion: `0.22.0+f9b30a`\r\n\r\nI ran this command:\r\n```\r\ndvc run -d 2018-12-12-13:32:56.png -o derp 'echo derp > derp'\r\n```\r\n\r\nWhere the dependency is:\r\n```\r\n2018-12-12-13:32:56.png: PNG image data, 1228 x 494, 8-bit/color RGBA, non-interlaced\r\n```\r\n\r\nAnd I got this:\r\n```\r\nError: Failed to run command: Dependency '2018-12-12-13:32:56.png' is not supported\r\n```\r\n\r\nIs this intended? 
Why we aren't supporting images as dependencies?\n", "before_files": [{"content": "import schema\n\nfrom dvc.exceptions import DvcException\nfrom dvc.config import Config\n\nfrom dvc.dependency import SCHEMA, urlparse\nfrom dvc.dependency.base import DependencyBase\nfrom dvc.output.s3 import OutputS3\nfrom dvc.output.gs import OutputGS\nfrom dvc.output.local import OutputLOCAL\nfrom dvc.output.hdfs import OutputHDFS\nfrom dvc.output.ssh import OutputSSH\n\nfrom dvc.remote import Remote\n\n\nOUTS = [OutputHDFS, OutputS3, OutputGS, OutputSSH, OutputLOCAL]\n\nOUTS_MAP = {'hdfs': OutputHDFS,\n 's3': OutputS3,\n 'gs': OutputGS,\n 'ssh': OutputSSH,\n '': OutputLOCAL}\n\nSCHEMA[schema.Optional(OutputLOCAL.PARAM_CACHE)] = bool\nSCHEMA[schema.Optional(OutputLOCAL.PARAM_METRIC)] = OutputLOCAL.METRIC_SCHEMA\n\n\ndef _get(stage, p, info, cache, metric):\n parsed = urlparse(p)\n if parsed.scheme == 'remote':\n name = Config.SECTION_REMOTE_FMT.format(parsed.netloc)\n sect = stage.project.config._config[name]\n remote = Remote(stage.project, sect)\n return OUTS_MAP[remote.scheme](stage,\n p,\n info,\n cache=cache,\n remote=remote,\n metric=metric)\n\n for o in OUTS:\n if o.supported(p):\n return o(stage, p, info, cache=cache, remote=None, metric=metric)\n raise DvcException('Output \\'{}\\' is not supported'.format(p))\n\n\ndef loadd_from(stage, d_list):\n ret = []\n for d in d_list:\n p = d.pop(DependencyBase.PARAM_PATH)\n cache = d.pop(OutputLOCAL.PARAM_CACHE, True)\n metric = d.pop(OutputLOCAL.PARAM_METRIC, False)\n ret.append(_get(stage, p, info=d, cache=cache, metric=metric))\n return ret\n\n\ndef loads_from(stage, s_list, use_cache=True, metric=False):\n ret = []\n for s in s_list:\n ret.append(_get(stage, s, info={}, cache=use_cache, metric=metric))\n return ret\n", "path": "dvc/output/__init__.py"}, {"content": "import schema\n\ntry:\n from urlparse import urlparse\nexcept ImportError:\n from urllib.parse import urlparse\n\nfrom dvc.exceptions import DvcException\nfrom dvc.config import Config\n\nfrom dvc.dependency.base import DependencyBase\nfrom dvc.dependency.s3 import DependencyS3\nfrom dvc.dependency.gs import DependencyGS\nfrom dvc.dependency.local import DependencyLOCAL\nfrom dvc.dependency.hdfs import DependencyHDFS\nfrom dvc.dependency.ssh import DependencySSH\nfrom dvc.dependency.http import DependencyHTTP\n\nfrom dvc.remote import Remote\nfrom dvc.remote.local import RemoteLOCAL\nfrom dvc.remote.s3 import RemoteS3\nfrom dvc.remote.hdfs import RemoteHDFS\n\nDEPS = [\n DependencyGS,\n DependencyHDFS,\n DependencyHTTP,\n DependencyLOCAL,\n DependencyS3,\n DependencySSH,\n]\n\nDEP_MAP = {\n '': DependencyLOCAL,\n 'ssh': DependencySSH,\n 's3': DependencyS3,\n 'gs': DependencyGS,\n 'hdfs': DependencyHDFS,\n 'http': DependencyHTTP,\n 'https': DependencyHTTP,\n}\n\n# We are skipping RemoteHTTP.PARAM_ETAG because is the same as RemoteS3\nSCHEMA = {\n DependencyBase.PARAM_PATH: str,\n schema.Optional(RemoteLOCAL.PARAM_MD5): schema.Or(str, None),\n schema.Optional(RemoteS3.PARAM_ETAG): schema.Or(str, None),\n schema.Optional(RemoteHDFS.PARAM_CHECKSUM): schema.Or(str, None),\n}\n\n\ndef _get(stage, p, info):\n parsed = urlparse(p)\n if parsed.scheme == 'remote':\n name = Config.SECTION_REMOTE_FMT.format(parsed.netloc)\n sect = stage.project.config._config[name]\n remote = Remote(stage.project, sect)\n return DEP_MAP[remote.scheme](stage, p, info, remote=remote)\n\n for d in DEPS:\n if d.supported(p):\n return d(stage, p, info)\n raise DvcException('Dependency \\'{}\\' is not 
supported'.format(p))\n\n\ndef loadd_from(stage, d_list):\n ret = []\n for d in d_list:\n p = d.pop(DependencyBase.PARAM_PATH)\n ret.append(_get(stage, p, d))\n return ret\n\n\ndef loads_from(stage, s_list):\n ret = []\n for s in s_list:\n ret.append(_get(stage, s, {}))\n return ret\n", "path": "dvc/dependency/__init__.py"}, {"content": "import os\n\ntry:\n from urlparse import urlparse\nexcept ImportError:\n from urllib.parse import urlparse\n\nfrom dvc.dependency.base import DependencyBase\nfrom dvc.dependency.base import DependencyDoesNotExistError\nfrom dvc.dependency.base import DependencyIsNotFileOrDirError\nfrom dvc.remote.local import RemoteLOCAL\n\n\nclass DependencyLOCAL(DependencyBase):\n REGEX = r'^(?P<path>(/+|.:\\\\+)?[^:]*)$'\n\n DoesNotExistError = DependencyDoesNotExistError\n IsNotFileOrDirError = DependencyIsNotFileOrDirError\n\n def __init__(self, stage, path, info=None, remote=None):\n self.stage = stage\n self.project = stage.project\n self.info = info\n if remote is not None:\n self.remote = remote\n else:\n self.remote = RemoteLOCAL(stage.project, {})\n\n if remote:\n path = os.path.join(remote.prefix, urlparse(path).path.lstrip('/'))\n\n if not os.path.isabs(path):\n path = self.remote.ospath(path)\n path = os.path.join(stage.cwd, path)\n self.path = os.path.abspath(os.path.normpath(path))\n\n self.path_info = {'scheme': 'local',\n 'path': self.path}\n\n def __str__(self):\n return self.rel_path\n\n @property\n def is_local(self):\n assert os.path.isabs(self.path)\n assert os.path.isabs(self.project.root_dir)\n return self.path.startswith(self.project.root_dir)\n\n @property\n def sep(self):\n return os.sep\n\n @property\n def rel_path(self):\n return os.path.relpath(self.path)\n\n def changed(self):\n if not self.exists:\n return True\n\n info = self.remote.save_info(self.path_info)\n\n return self.info != info\n\n def save(self):\n if not self.exists:\n raise self.DoesNotExistError(self.rel_path)\n\n if not os.path.isfile(self.path) \\\n and not os.path.isdir(self.path): # pragma: no cover\n raise self.IsNotFileOrDirError(self.rel_path)\n\n if (os.path.isfile(self.path) and os.path.getsize(self.path) == 0) or \\\n (os.path.isdir(self.path) and len(os.listdir(self.path)) == 0):\n msg = \"File/directory '{}' is empty.\".format(self.rel_path)\n self.project.logger.warn(msg)\n\n self.info = self.remote.save_info(self.path_info)\n\n def dumpd(self):\n if self.is_local:\n path = self.remote.unixpath(os.path.relpath(self.path,\n self.stage.cwd))\n else:\n path = self.path\n\n info = self.info.copy()\n info[self.PARAM_PATH] = path\n return info\n", "path": "dvc/dependency/local.py"}]}
| 2,809 | 624 |
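The DVC row above boils down to a dispatch decision: when no remote scheme matches, fall back to the local dependency class instead of raising. The sketch below reproduces only that decision, using `urllib.parse.urlparse` and a made-up `HANDLERS` table of class names; DVC's real code dispatches through per-class `supported()` regexes, so treat this as an illustration of the idea rather than of its API.

```python
from urllib.parse import urlparse

# Hypothetical scheme table standing in for DVC's dependency classes.
HANDLERS = {
    "s3": "DependencyS3",
    "gs": "DependencyGS",
    "hdfs": "DependencyHDFS",
    "http": "DependencyHTTP",
    "https": "DependencyHTTP",
}

def pick_dependency_cls(path: str) -> str:
    scheme = urlparse(path).scheme
    # A path without a recognized remote scheme (including a filename that
    # merely contains ':', such as '2018-12-12-13:32:56.png') falls back to
    # the local handler instead of being rejected as unsupported.
    return HANDLERS.get(scheme, "DependencyLOCAL")

print(pick_dependency_cls("s3://bucket/data.csv"))     # DependencyS3
print(pick_dependency_cls("2018-12-12-13:32:56.png"))  # DependencyLOCAL
```

The patch applies the same fallback twice, once for dependencies and once for outputs, which is why both modules lose their catch-all `DvcException`.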
gh_patches_debug_23222
|
rasdani/github-patches
|
git_diff
|
pyinstaller__pyinstaller-7217
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NullWriter has no attribute 'closed'
This is similar to issue #1883
It is triggered when using the "click" library in a PyInstaller --noconsole application.
A workaround is as follows:
```python
# fixup somw problems from pyinstaller
if "NullWriter" in str(type(sys.stdout)):
sys.stdout.closed = sys.stderr.closed = False
```
I suggest adding a class attribute, closed=False, to fix this.
You may want to add the "errors" and "newlines" attributes as well; see the Python docs.
</issue>
<code>
[start of PyInstaller/loader/pyiboot01_bootstrap.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2022, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 #-- Start bootstrap process
13 # Only python built-in modules can be used.
14
15 import sys
16
17 import pyimod02_importers
18
19 # Extend Python import machinery by adding PEP302 importers to sys.meta_path.
20 pyimod02_importers.install()
21
22 #-- Bootstrap process is complete.
23 # We can use other python modules (e.g. os)
24
25 import os # noqa: E402
26
27 # Let other python modules know that the code is running in frozen mode.
28 if not hasattr(sys, 'frozen'):
29 sys.frozen = True
30
31 # sys._MEIPASS is now set in the bootloader. Hooray.
32
33 # Python 3 C-API function Py_SetPath() resets sys.prefix to empty string. Python 2 was using PYTHONHOME for sys.prefix.
34 # Let's do the same for Python 3.
35 sys.prefix = sys._MEIPASS
36 sys.exec_prefix = sys.prefix
37
38 # Python 3.3+ defines also sys.base_prefix. Let's set them too.
39 sys.base_prefix = sys.prefix
40 sys.base_exec_prefix = sys.exec_prefix
41
42 # Some packages behave differently when running inside virtual environment. E.g., IPython tries to append path
43 # VIRTUAL_ENV to sys.path. For the frozen app we want to prevent this behavior.
44 VIRTENV = 'VIRTUAL_ENV'
45 if VIRTENV in os.environ:
46 # On some platforms (e.g., AIX) 'os.unsetenv()' is unavailable and deleting the var from os.environ does not
47 # delete it from the environment.
48 os.environ[VIRTENV] = ''
49 del os.environ[VIRTENV]
50
51 # Ensure sys.path contains absolute paths. Otherwise, import of other python modules will fail when current working
52 # directory is changed by the frozen application.
53 python_path = []
54 for pth in sys.path:
55 python_path.append(os.path.abspath(pth))
56 sys.path = python_path
57
58
59 # Implement workaround for prints in non-console mode. In non-console mode (with "pythonw"), print randomly fails with
60 # "[errno 9] Bad file descriptor" when the printed text is flushed (e.g., buffer full); this is because the sys.stdout
61 # object is bound to an invalid file descriptor.
62 # Python 3000 has a fix for it (http://bugs.python.org/issue1415), but we feel that a workaround in PyInstaller is a
63 # good thing, because most people first encounter this problem with PyInstaller as they do not usually run their code
64 # with "pythonw" (and it is difficult to debug, anyway).
65 class NullWriter:
66 softspace = 0
67 encoding = 'UTF-8'
68
69 def write(*args):
70 pass
71
72 def flush(*args):
73 pass
74
75 # Some packages are checking if stdout/stderr is available (e.g., youtube-dl). For details, see #1883.
76 def isatty(self):
77 return False
78
79
80 # sys.stdout/err is None in GUI mode on Windows.
81 if sys.stdout is None:
82 sys.stdout = NullWriter()
83 if sys.stderr is None:
84 sys.stderr = NullWriter()
85
86 # At least on Windows, Python seems to hook up the codecs on this import, so it is not enough to just package up all
87 # the encodings.
88 #
89 # It was also reported that without 'encodings' module, the frozen executable fails to load in some configurations:
90 # http://www.pyinstaller.org/ticket/651
91 #
92 # Importing 'encodings' module in a run-time hook is not enough, since some run-time hooks require this module, and the
93 # order of running the code from the run-time hooks is not defined.
94 try:
95 import encodings # noqa: F401
96 except ImportError:
97 pass
98
99 # In the Python interpreter 'warnings' module is imported when 'sys.warnoptions' is not empty. Mimic this behavior.
100 if sys.warnoptions:
101 import warnings # noqa: F401
102
103 # Install the hooks for ctypes
104 import pyimod03_ctypes # noqa: E402
105
106 pyimod03_ctypes.install()
107
108 # Install the hooks for pywin32 (Windows only)
109 if sys.platform.startswith('win'):
110 import pyimod04_pywin32
111 pyimod04_pywin32.install()
112
113 # Make .eggs and zipfiles available at runtime
114 d = "eggs"
115 d = os.path.join(sys._MEIPASS, d)
116 # Test if the 'eggs' directory exists. This allows us to opportunistically include this script into the packaged exe,
117 # even if no eggs were found when packaging the program. (Which may be a use-case, see issue #653).
118 if os.path.isdir(d):
119 for fn in os.listdir(d):
120 sys.path.append(os.path.join(d, fn))
121
[end of PyInstaller/loader/pyiboot01_bootstrap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/PyInstaller/loader/pyiboot01_bootstrap.py b/PyInstaller/loader/pyiboot01_bootstrap.py
--- a/PyInstaller/loader/pyiboot01_bootstrap.py
+++ b/PyInstaller/loader/pyiboot01_bootstrap.py
@@ -55,34 +55,6 @@
python_path.append(os.path.abspath(pth))
sys.path = python_path
-
-# Implement workaround for prints in non-console mode. In non-console mode (with "pythonw"), print randomly fails with
-# "[errno 9] Bad file descriptor" when the printed text is flushed (e.g., buffer full); this is because the sys.stdout
-# object is bound to an invalid file descriptor.
-# Python 3000 has a fix for it (http://bugs.python.org/issue1415), but we feel that a workaround in PyInstaller is a
-# good thing, because most people first encounter this problem with PyInstaller as they do not usually run their code
-# with "pythonw" (and it is difficult to debug, anyway).
-class NullWriter:
- softspace = 0
- encoding = 'UTF-8'
-
- def write(*args):
- pass
-
- def flush(*args):
- pass
-
- # Some packages are checking if stdout/stderr is available (e.g., youtube-dl). For details, see #1883.
- def isatty(self):
- return False
-
-
-# sys.stdout/err is None in GUI mode on Windows.
-if sys.stdout is None:
- sys.stdout = NullWriter()
-if sys.stderr is None:
- sys.stderr = NullWriter()
-
# At least on Windows, Python seems to hook up the codecs on this import, so it is not enough to just package up all
# the encodings.
#
|
{"golden_diff": "diff --git a/PyInstaller/loader/pyiboot01_bootstrap.py b/PyInstaller/loader/pyiboot01_bootstrap.py\n--- a/PyInstaller/loader/pyiboot01_bootstrap.py\n+++ b/PyInstaller/loader/pyiboot01_bootstrap.py\n@@ -55,34 +55,6 @@\n python_path.append(os.path.abspath(pth))\n sys.path = python_path\n \n-\n-# Implement workaround for prints in non-console mode. In non-console mode (with \"pythonw\"), print randomly fails with\n-# \"[errno 9] Bad file descriptor\" when the printed text is flushed (e.g., buffer full); this is because the sys.stdout\n-# object is bound to an invalid file descriptor.\n-# Python 3000 has a fix for it (http://bugs.python.org/issue1415), but we feel that a workaround in PyInstaller is a\n-# good thing, because most people first encounter this problem with PyInstaller as they do not usually run their code\n-# with \"pythonw\" (and it is difficult to debug, anyway).\n-class NullWriter:\n- softspace = 0\n- encoding = 'UTF-8'\n-\n- def write(*args):\n- pass\n-\n- def flush(*args):\n- pass\n-\n- # Some packages are checking if stdout/stderr is available (e.g., youtube-dl). For details, see #1883.\n- def isatty(self):\n- return False\n-\n-\n-# sys.stdout/err is None in GUI mode on Windows.\n-if sys.stdout is None:\n- sys.stdout = NullWriter()\n-if sys.stderr is None:\n- sys.stderr = NullWriter()\n-\n # At least on Windows, Python seems to hook up the codecs on this import, so it is not enough to just package up all\n # the encodings.\n #\n", "issue": "NullWriter has no attribute 'closed'\nThis is similar to issue #1883\r\nIt is triggered when using the \"click\" library in a PyInstaller --noconsole application.\r\n\r\nA workaround is as follows:\r\n```python\r\n# fixup somw problems from pyinstaller\r\nif \"NullWriter\" in str(type(sys.stdout)):\r\n sys.stdout.closed = sys.stderr.closed = False\r\n```\r\nI suggest adding a class attribute, closed=False to fix this.\r\nYou may want to add the \"errors\" and \"newlines\" attributes as well, see the python docs.\r\n\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2022, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n#-- Start bootstrap process\n# Only python built-in modules can be used.\n\nimport sys\n\nimport pyimod02_importers\n\n# Extend Python import machinery by adding PEP302 importers to sys.meta_path.\npyimod02_importers.install()\n\n#-- Bootstrap process is complete.\n# We can use other python modules (e.g. os)\n\nimport os # noqa: E402\n\n# Let other python modules know that the code is running in frozen mode.\nif not hasattr(sys, 'frozen'):\n sys.frozen = True\n\n# sys._MEIPASS is now set in the bootloader. Hooray.\n\n# Python 3 C-API function Py_SetPath() resets sys.prefix to empty string. Python 2 was using PYTHONHOME for sys.prefix.\n# Let's do the same for Python 3.\nsys.prefix = sys._MEIPASS\nsys.exec_prefix = sys.prefix\n\n# Python 3.3+ defines also sys.base_prefix. Let's set them too.\nsys.base_prefix = sys.prefix\nsys.base_exec_prefix = sys.exec_prefix\n\n# Some packages behave differently when running inside virtual environment. 
E.g., IPython tries to append path\n# VIRTUAL_ENV to sys.path. For the frozen app we want to prevent this behavior.\nVIRTENV = 'VIRTUAL_ENV'\nif VIRTENV in os.environ:\n # On some platforms (e.g., AIX) 'os.unsetenv()' is unavailable and deleting the var from os.environ does not\n # delete it from the environment.\n os.environ[VIRTENV] = ''\n del os.environ[VIRTENV]\n\n# Ensure sys.path contains absolute paths. Otherwise, import of other python modules will fail when current working\n# directory is changed by the frozen application.\npython_path = []\nfor pth in sys.path:\n python_path.append(os.path.abspath(pth))\n sys.path = python_path\n\n\n# Implement workaround for prints in non-console mode. In non-console mode (with \"pythonw\"), print randomly fails with\n# \"[errno 9] Bad file descriptor\" when the printed text is flushed (e.g., buffer full); this is because the sys.stdout\n# object is bound to an invalid file descriptor.\n# Python 3000 has a fix for it (http://bugs.python.org/issue1415), but we feel that a workaround in PyInstaller is a\n# good thing, because most people first encounter this problem with PyInstaller as they do not usually run their code\n# with \"pythonw\" (and it is difficult to debug, anyway).\nclass NullWriter:\n softspace = 0\n encoding = 'UTF-8'\n\n def write(*args):\n pass\n\n def flush(*args):\n pass\n\n # Some packages are checking if stdout/stderr is available (e.g., youtube-dl). For details, see #1883.\n def isatty(self):\n return False\n\n\n# sys.stdout/err is None in GUI mode on Windows.\nif sys.stdout is None:\n sys.stdout = NullWriter()\nif sys.stderr is None:\n sys.stderr = NullWriter()\n\n# At least on Windows, Python seems to hook up the codecs on this import, so it is not enough to just package up all\n# the encodings.\n#\n# It was also reported that without 'encodings' module, the frozen executable fails to load in some configurations:\n# http://www.pyinstaller.org/ticket/651\n#\n# Importing 'encodings' module in a run-time hook is not enough, since some run-time hooks require this module, and the\n# order of running the code from the run-time hooks is not defined.\ntry:\n import encodings # noqa: F401\nexcept ImportError:\n pass\n\n# In the Python interpreter 'warnings' module is imported when 'sys.warnoptions' is not empty. Mimic this behavior.\nif sys.warnoptions:\n import warnings # noqa: F401\n\n# Install the hooks for ctypes\nimport pyimod03_ctypes # noqa: E402\n\npyimod03_ctypes.install()\n\n# Install the hooks for pywin32 (Windows only)\nif sys.platform.startswith('win'):\n import pyimod04_pywin32\n pyimod04_pywin32.install()\n\n# Make .eggs and zipfiles available at runtime\nd = \"eggs\"\nd = os.path.join(sys._MEIPASS, d)\n# Test if the 'eggs' directory exists. This allows us to opportunistically include this script into the packaged exe,\n# even if no eggs were found when packaging the program. (Which may be a use-case, see issue #653).\nif os.path.isdir(d):\n for fn in os.listdir(d):\n sys.path.append(os.path.join(d, fn))\n", "path": "PyInstaller/loader/pyiboot01_bootstrap.py"}]}
| 2,055 | 402 |
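Two things are worth separating in the PyInstaller row above: the issue asks for the `NullWriter` stub to grow a `closed` attribute, while the merged diff removes the stub from the bootstrap entirely. For contrast, here is a small sketch of the direction the issue itself suggests, namely handing a windowed (no-console) process a real text stream so that attribute probes such as `closed`, `errors`, `newlines` and `isatty()` all work. The `os.devnull` choice is an assumption made for illustration, not what PyInstaller ships.

```python
import os
import sys

# Under pythonw / --noconsole builds, sys.stdout and sys.stderr can be None.
# Binding them to a real text file on os.devnull provides the whole file
# protocol for free, instead of hand-maintaining a stub class.
if sys.stdout is None:
    sys.stdout = open(os.devnull, "w", encoding="utf-8")
if sys.stderr is None:
    sys.stderr = open(os.devnull, "w", encoding="utf-8")

# These are the probes that tripped the bare NullWriter stub.
print(sys.stdout.closed)    # an open stream reports False
print(sys.stdout.errors)    # a text stream always exposes an error handler
sys.stdout.write("discarded if the fallback is active\n")
```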
gh_patches_debug_12124
|
rasdani/github-patches
|
git_diff
|
facebookresearch__fairscale-830
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
shard_model does not respect shard_count.
## 🐛 Bug
`autoshard.shard_model()` does not respect `shard_count`. See [comments](https://github.com/facebookresearch/fairscale/pull/817/files#r733237403) in https://github.com/facebookresearch/fairscale/pull/817 for more context and how to reproduce.
</issue>
<code>
[start of fairscale/experimental/nn/auto_shard.py]
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 import logging
7 from typing import Dict, List, Set
8
9 import torch
10 import torch.fx
11 from torch.fx.node import Node
12
13
14 def _get_count(param_count: Dict, node_name: str) -> int:
15 """Identify different mutations of a given node name."""
16 # TODO(anj): This is not very stable since it is possible that the name
17 # may not be in the same format. Is there another way to identify nodes
18 # in a graph?
19 if node_name in param_count:
20 return param_count[node_name]
21 elif node_name.split("_")[0] in param_count:
22 return param_count[node_name.split("_")[0]]
23 else:
24 raise RuntimeError(f"Unable to find match between param {param_count} and node {node_name}")
25
26
27 def _create_shard_to_param_count(param_count: Dict, node_name_to_shard_id: Dict) -> Dict:
28 """Utility to create a map from shard id to param count using existing state."""
29
30 shard_to_param_count: Dict[int, int] = {}
31 for node_name in node_name_to_shard_id.keys():
32 try:
33 count = _get_count(param_count, node_name)
34 except RuntimeError:
35 continue
36 if node_name_to_shard_id[node_name] in shard_to_param_count:
37 shard_to_param_count[node_name_to_shard_id[node_name]] += count
38 else:
39 shard_to_param_count[node_name_to_shard_id[node_name]] = count
40 return shard_to_param_count
41
42
43 def _split_nodes(traced_graph_module: torch.fx.GraphModule, shard_count: int = 3) -> Dict:
44 """Utility used to trace a graph and identify shard cutpoints."""
45
46 node_name_to_shard_id: Dict[str, int] = {}
47 shard_id = 0
48 nodes_so_far = []
49 param_count: Dict[str, int] = {}
50 shard_to_param_count = {}
51
52 # Find the total number of params in the model and
53 # the number of params per shard we are aiming for.
54 for name, module in traced_graph_module.named_modules():
55 if "." in name:
56 continue
57 param_count[name] = sum([x.numel() for x in module.parameters()])
58 logging.info(f"Total number of params are {param_count['']}")
59 per_shard_param = param_count[""] // shard_count
60 logging.info(f"Per shard param count {per_shard_param}")
61
62 for node in traced_graph_module.graph.nodes:
63 if node.op == "placeholder":
64 node_name_to_shard_id[node.name] = shard_id
65 nodes_so_far.append(node.name)
66 elif node.op in ["get_attr", "call_function", "call_method", "call_module"]:
67
68 min_shard_id = shard_id
69 min_node_name = ""
70 # For each of the args of a given node, find the arg that is not the
71 # last node we traversed. This is to help us find skip connections
72 # across shards.
73 for arg in node.args:
74 # If the node has args that are inputs to the forward function, they
75 # may not have explicit names.
76 if not hasattr(arg, "name"):
77 continue
78
79 if arg.name in node_name_to_shard_id and arg.name != nodes_so_far[-1]:
80 if node_name_to_shard_id[arg.name] < min_shard_id:
81 min_shard_id = node_name_to_shard_id[arg.name]
82 min_node_name = arg.name
83
84 # If there is an input that is not from the previous shard,
85 # we collapse all the shards in between to be part of 1 shard.
86 # and update the param count per shard accordingly.
87 if min_shard_id < shard_id:
88 for node_name in reversed(nodes_so_far):
89 node_name_to_shard_id[node_name] = min_shard_id
90 if node_name == min_node_name:
91 break
92 shard_id = min_shard_id
93 # TODO(anj-s): Find a way to raise an error early if this can cause OOM errors.
94 shard_to_param_count = _create_shard_to_param_count(param_count, node_name_to_shard_id)
95
96 # Update state that is tracking node -> shard id and shard id -> param count.
97 node_name_to_shard_id[node.name] = shard_id
98 nodes_so_far.append(node.name)
99 # TODO(anj): This could just be an update, we don't need to recreate the map.
100 shard_to_param_count = _create_shard_to_param_count(param_count, node_name_to_shard_id)
101 # If we have gone over the number of params per shard count that we want to
102 # achieve, we should add a new shard.
103 # The shard_id may not have been updated in the map if we are at a node that does not
104 # have params.
105 if shard_id in shard_to_param_count and shard_to_param_count[shard_id] > per_shard_param:
106 shard_id += 1
107 elif node.op == "output":
108 break
109 return node_name_to_shard_id
110
111
112 class _ExtendedLeafTracer(torch.fx.Tracer):
113 """Tracer with an extended set of leaf nn.Modules."""
114
115 def __init__(self, leaf_modules: Set[torch.nn.Module]):
116 """Initializes a new _ExtendedLeafTracer object.
117
118 Args:
119 leaf_modules: The set of extra nn.Modules instances which will not be traced
120 through but instead considered to be leaves.
121 """
122 super().__init__()
123 self.leaf_modules = leaf_modules
124
125 def is_leaf_module(self, m: torch.nn.Module, model_qualified_name: str) -> bool:
126 return super().is_leaf_module(m, model_qualified_name) or m in self.leaf_modules
127
128
129 # TODO(ehotaj): Extend this method to wrap at the least granular level. One way to do
130 # would be to wrap the Module tree bottom up, first wrapping untracable children and
131 # only wrapping parents if they are also untracable.
132 def _trace(model: torch.nn.Module) -> torch.fx.GraphModule:
133 """Traces the given model and automatically wraps untracable modules into leaves."""
134 leaf_modules = set()
135 tracer = _ExtendedLeafTracer(leaf_modules)
136 for name, module in model.named_modules():
137 # TODO(ehotaj): The default is_leaf_module includes everything in torch.nn.
138 # This means that some coarse modules like nn.TransformerEncoder are treated
139 # as leaves, not traced, and are unable to be sharded. We may want to extend our
140 # sharding code to trace through these modules as well.
141 if tracer.is_leaf_module(module, ""):
142 continue
143 try:
144 tracer.trace(module)
145 except (TypeError, torch.fx.proxy.TraceError):
146 leaf_modules.add(module)
147 tracer = _ExtendedLeafTracer(leaf_modules)
148 graph = tracer.trace(model)
149 return torch.fx.GraphModule(model, graph)
150
151
152 def shard_model(model: torch.nn.Module, shard_count: int = 3) -> List[torch.fx.GraphModule]:
153 """Utility used to shard a model using torch.fx.
154
155 This function traces the model twice in an attempt to identify the
156 right cutpoints and then shard the model. In the first pass we calculate
157 the number of parameters as we are tracing the graph and mark nodes at
158 which we might want to create a new module. In the second pass we
159 modify the graph by inserting placeholders and output nodes to essentially
160 shard the graph.
161
162 We don't support skip connections between shards. This means that all
163 input and output is self contained within a given shard. A node from
164 shard 1 cannot be an input to a node from shard 3. We expect all inputs
165 to a given shard to be coming from the last node in the previous shard.
166 This means that we may not be able to shard models by the specified
167 `shard_count` mentioned by the user.
168
169 Args:
170 model (nn.Module): Model to be sharded as specified by the device count.
171
172 shard_count (int): Number of shards that we want to split the model into.
173
174 """
175 module_list: List[torch.fx.GraphModule] = []
176 num_graphs = 0
177 new_graph = torch.fx.Graph() # type: ignore
178 env: Dict[str, Node] = {}
179 new_input_node = None
180
181 traced_graph_module = _trace(model)
182
183 # This is the first pass where we attempt to get a map of where
184 # we need to insert placeholder and output nodes.
185 node_name_to_shard_id = _split_nodes(traced_graph_module, shard_count=shard_count)
186
187 # dummy value which indicates that this is the first node.
188 prev_shard_id = 1000
189 prev_node = None
190 for node in traced_graph_module.graph.nodes:
191 # If the current node is in the next shard, we insert an output node.
192 # A new graph is created and a placeholder is added for the next shard.
193 if node.name in node_name_to_shard_id and prev_shard_id < node_name_to_shard_id[node.name]:
194 assert prev_node, "prev_node cannot be None"
195
196 with new_graph.inserting_after(prev_node):
197 new_graph.output(env[prev_node.name])
198 num_graphs += 1
199 module_list.append(torch.fx.GraphModule(model, new_graph))
200 new_graph = torch.fx.Graph()
201 node_name = "placeholder" + str(num_graphs)
202 pl_node = new_graph.create_node("placeholder", node_name)
203 env[node_name] = pl_node
204 new_input_node = pl_node
205
206 if new_input_node is not None:
207 # Account for a placeholder in the new graph.
208 node.args = (new_input_node,)
209 new_input_node = None
210 if node.op in ["placeholder", "get_attr", "call_function", "call_method", "call_module"]:
211 # Copy the nodes from the existing graph to the new graph.
212 new_node = new_graph.node_copy(node, lambda x: env[x.name])
213 env[node.name] = new_node
214 elif node.op == "output":
215 # If this is the last node, we should add an output
216 # node and add the last graph to the list.
217 assert prev_node, "prev_node cannot be None"
218
219 with new_graph.inserting_after(prev_node):
220 new_graph.output(env[prev_node.name])
221 module_list.append(torch.fx.GraphModule(model, new_graph))
222 break
223 prev_node = new_node
224 prev_shard_id = node_name_to_shard_id[node.name]
225
226 return module_list
227
[end of fairscale/experimental/nn/auto_shard.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/fairscale/experimental/nn/auto_shard.py b/fairscale/experimental/nn/auto_shard.py
--- a/fairscale/experimental/nn/auto_shard.py
+++ b/fairscale/experimental/nn/auto_shard.py
@@ -52,8 +52,7 @@
# Find the total number of params in the model and
# the number of params per shard we are aiming for.
for name, module in traced_graph_module.named_modules():
- if "." in name:
- continue
+ name = name.replace(".", "_")
param_count[name] = sum([x.numel() for x in module.parameters()])
logging.info(f"Total number of params are {param_count['']}")
per_shard_param = param_count[""] // shard_count
|
{"golden_diff": "diff --git a/fairscale/experimental/nn/auto_shard.py b/fairscale/experimental/nn/auto_shard.py\n--- a/fairscale/experimental/nn/auto_shard.py\n+++ b/fairscale/experimental/nn/auto_shard.py\n@@ -52,8 +52,7 @@\n # Find the total number of params in the model and\n # the number of params per shard we are aiming for.\n for name, module in traced_graph_module.named_modules():\n- if \".\" in name:\n- continue\n+ name = name.replace(\".\", \"_\")\n param_count[name] = sum([x.numel() for x in module.parameters()])\n logging.info(f\"Total number of params are {param_count['']}\")\n per_shard_param = param_count[\"\"] // shard_count\n", "issue": "shard_model does not respect shard_count.\n## \ud83d\udc1b Bug\r\n\r\n`autoshard.shard_model()` does not respect `shard_count`. See [comments](https://github.com/facebookresearch/fairscale/pull/817/files#r733237403) in https://github.com/facebookresearch/fairscale/pull/817 for more context and how to reproduce. \n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nfrom typing import Dict, List, Set\n\nimport torch\nimport torch.fx\nfrom torch.fx.node import Node\n\n\ndef _get_count(param_count: Dict, node_name: str) -> int:\n \"\"\"Identify different mutations of a given node name.\"\"\"\n # TODO(anj): This is not very stable since it is possible that the name\n # may not be in the same format. Is there another way to identify nodes\n # in a graph?\n if node_name in param_count:\n return param_count[node_name]\n elif node_name.split(\"_\")[0] in param_count:\n return param_count[node_name.split(\"_\")[0]]\n else:\n raise RuntimeError(f\"Unable to find match between param {param_count} and node {node_name}\")\n\n\ndef _create_shard_to_param_count(param_count: Dict, node_name_to_shard_id: Dict) -> Dict:\n \"\"\"Utility to create a map from shard id to param count using existing state.\"\"\"\n\n shard_to_param_count: Dict[int, int] = {}\n for node_name in node_name_to_shard_id.keys():\n try:\n count = _get_count(param_count, node_name)\n except RuntimeError:\n continue\n if node_name_to_shard_id[node_name] in shard_to_param_count:\n shard_to_param_count[node_name_to_shard_id[node_name]] += count\n else:\n shard_to_param_count[node_name_to_shard_id[node_name]] = count\n return shard_to_param_count\n\n\ndef _split_nodes(traced_graph_module: torch.fx.GraphModule, shard_count: int = 3) -> Dict:\n \"\"\"Utility used to trace a graph and identify shard cutpoints.\"\"\"\n\n node_name_to_shard_id: Dict[str, int] = {}\n shard_id = 0\n nodes_so_far = []\n param_count: Dict[str, int] = {}\n shard_to_param_count = {}\n\n # Find the total number of params in the model and\n # the number of params per shard we are aiming for.\n for name, module in traced_graph_module.named_modules():\n if \".\" in name:\n continue\n param_count[name] = sum([x.numel() for x in module.parameters()])\n logging.info(f\"Total number of params are {param_count['']}\")\n per_shard_param = param_count[\"\"] // shard_count\n logging.info(f\"Per shard param count {per_shard_param}\")\n\n for node in traced_graph_module.graph.nodes:\n if node.op == \"placeholder\":\n node_name_to_shard_id[node.name] = shard_id\n nodes_so_far.append(node.name)\n elif node.op in [\"get_attr\", \"call_function\", \"call_method\", \"call_module\"]:\n\n min_shard_id = shard_id\n min_node_name = \"\"\n # For each of the args of a 
given node, find the arg that is not the\n # last node we traversed. This is to help us find skip connections\n # across shards.\n for arg in node.args:\n # If the node has args that are inputs to the forward function, they\n # may not have explicit names.\n if not hasattr(arg, \"name\"):\n continue\n\n if arg.name in node_name_to_shard_id and arg.name != nodes_so_far[-1]:\n if node_name_to_shard_id[arg.name] < min_shard_id:\n min_shard_id = node_name_to_shard_id[arg.name]\n min_node_name = arg.name\n\n # If there is an input that is not from the previous shard,\n # we collapse all the shards in between to be part of 1 shard.\n # and update the param count per shard accordingly.\n if min_shard_id < shard_id:\n for node_name in reversed(nodes_so_far):\n node_name_to_shard_id[node_name] = min_shard_id\n if node_name == min_node_name:\n break\n shard_id = min_shard_id\n # TODO(anj-s): Find a way to raise an error early if this can cause OOM errors.\n shard_to_param_count = _create_shard_to_param_count(param_count, node_name_to_shard_id)\n\n # Update state that is tracking node -> shard id and shard id -> param count.\n node_name_to_shard_id[node.name] = shard_id\n nodes_so_far.append(node.name)\n # TODO(anj): This could just be an update, we don't need to recreate the map.\n shard_to_param_count = _create_shard_to_param_count(param_count, node_name_to_shard_id)\n # If we have gone over the number of params per shard count that we want to\n # achieve, we should add a new shard.\n # The shard_id may not have been updated in the map if we are at a node that does not\n # have params.\n if shard_id in shard_to_param_count and shard_to_param_count[shard_id] > per_shard_param:\n shard_id += 1\n elif node.op == \"output\":\n break\n return node_name_to_shard_id\n\n\nclass _ExtendedLeafTracer(torch.fx.Tracer):\n \"\"\"Tracer with an extended set of leaf nn.Modules.\"\"\"\n\n def __init__(self, leaf_modules: Set[torch.nn.Module]):\n \"\"\"Initializes a new _ExtendedLeafTracer object.\n\n Args:\n leaf_modules: The set of extra nn.Modules instances which will not be traced\n through but instead considered to be leaves.\n \"\"\"\n super().__init__()\n self.leaf_modules = leaf_modules\n\n def is_leaf_module(self, m: torch.nn.Module, model_qualified_name: str) -> bool:\n return super().is_leaf_module(m, model_qualified_name) or m in self.leaf_modules\n\n\n# TODO(ehotaj): Extend this method to wrap at the least granular level. One way to do\n# would be to wrap the Module tree bottom up, first wrapping untracable children and\n# only wrapping parents if they are also untracable.\ndef _trace(model: torch.nn.Module) -> torch.fx.GraphModule:\n \"\"\"Traces the given model and automatically wraps untracable modules into leaves.\"\"\"\n leaf_modules = set()\n tracer = _ExtendedLeafTracer(leaf_modules)\n for name, module in model.named_modules():\n # TODO(ehotaj): The default is_leaf_module includes everything in torch.nn.\n # This means that some coarse modules like nn.TransformerEncoder are treated\n # as leaves, not traced, and are unable to be sharded. 
We may want to extend our\n # sharding code to trace through these modules as well.\n if tracer.is_leaf_module(module, \"\"):\n continue\n try:\n tracer.trace(module)\n except (TypeError, torch.fx.proxy.TraceError):\n leaf_modules.add(module)\n tracer = _ExtendedLeafTracer(leaf_modules)\n graph = tracer.trace(model)\n return torch.fx.GraphModule(model, graph)\n\n\ndef shard_model(model: torch.nn.Module, shard_count: int = 3) -> List[torch.fx.GraphModule]:\n \"\"\"Utility used to shard a model using torch.fx.\n\n This function traces the model twice in an attempt to identify the\n right cutpoints and then shard the model. In the first pass we calculate\n the number of parameters as we are tracing the graph and mark nodes at\n which we might want to create a new module. In the second pass we\n modify the graph by inserting placeholders and output nodes to essentially\n shard the graph.\n\n We don't support skip connections between shards. This means that all\n input and output is self contained within a given shard. A node from\n shard 1 cannot be an input to a node from shard 3. We expect all inputs\n to a given shard to be coming from the last node in the previous shard.\n This means that we may not be able to shard models by the specified\n `shard_count` mentioned by the user.\n\n Args:\n model (nn.Module): Model to be sharded as specified by the device count.\n\n shard_count (int): Number of shards that we want to split the model into.\n\n \"\"\"\n module_list: List[torch.fx.GraphModule] = []\n num_graphs = 0\n new_graph = torch.fx.Graph() # type: ignore\n env: Dict[str, Node] = {}\n new_input_node = None\n\n traced_graph_module = _trace(model)\n\n # This is the first pass where we attempt to get a map of where\n # we need to insert placeholder and output nodes.\n node_name_to_shard_id = _split_nodes(traced_graph_module, shard_count=shard_count)\n\n # dummy value which indicates that this is the first node.\n prev_shard_id = 1000\n prev_node = None\n for node in traced_graph_module.graph.nodes:\n # If the current node is in the next shard, we insert an output node.\n # A new graph is created and a placeholder is added for the next shard.\n if node.name in node_name_to_shard_id and prev_shard_id < node_name_to_shard_id[node.name]:\n assert prev_node, \"prev_node cannot be None\"\n\n with new_graph.inserting_after(prev_node):\n new_graph.output(env[prev_node.name])\n num_graphs += 1\n module_list.append(torch.fx.GraphModule(model, new_graph))\n new_graph = torch.fx.Graph()\n node_name = \"placeholder\" + str(num_graphs)\n pl_node = new_graph.create_node(\"placeholder\", node_name)\n env[node_name] = pl_node\n new_input_node = pl_node\n\n if new_input_node is not None:\n # Account for a placeholder in the new graph.\n node.args = (new_input_node,)\n new_input_node = None\n if node.op in [\"placeholder\", \"get_attr\", \"call_function\", \"call_method\", \"call_module\"]:\n # Copy the nodes from the existing graph to the new graph.\n new_node = new_graph.node_copy(node, lambda x: env[x.name])\n env[node.name] = new_node\n elif node.op == \"output\":\n # If this is the last node, we should add an output\n # node and add the last graph to the list.\n assert prev_node, \"prev_node cannot be None\"\n\n with new_graph.inserting_after(prev_node):\n new_graph.output(env[prev_node.name])\n module_list.append(torch.fx.GraphModule(model, new_graph))\n break\n prev_node = new_node\n prev_shard_id = node_name_to_shard_id[node.name]\n\n return module_list\n", "path": 
"fairscale/experimental/nn/auto_shard.py"}]}
| 3,538 | 174 |
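The fairscale row above turns on a naming detail of `torch.fx`: graph node names replace the dots of qualified module names (`block.0`) with underscores (`block_0`), so a parameter-count table keyed by the dotted names has to be normalized the same way. The old `if "." in name: continue` simply skipped nested modules, which is why the per-shard budgeting could drift from the requested `shard_count`. The toy `Toy` module and the direct call to `torch.fx.symbolic_trace` below are illustration-only choices, not fairscale's tracer setup.

```python
import torch
import torch.fx


class Toy(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.block = torch.nn.Sequential(
            torch.nn.Linear(4, 4),
            torch.nn.Linear(4, 2),
        )

    def forward(self, x):
        return self.block(x)


traced = torch.fx.symbolic_trace(Toy())

# Key the per-module parameter counts the same way fx names its nodes.
param_count = {
    name.replace(".", "_"): sum(p.numel() for p in mod.parameters())
    for name, mod in traced.named_modules()
}

for node in traced.graph.nodes:
    if node.op == "call_module":
        # e.g. block_0 -> 20, block_1 -> 10; with the old dot-skipping
        # logic neither key would exist in param_count.
        print(node.name, "->", param_count.get(node.name, "missing"))
```

Keyed this way, every `call_module` node can be attributed to its owning module, so the running per-shard totals that decide when to open a new shard stay meaningful.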
gh_patches_debug_39414
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-4467
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SecretInVault secret provider integration tests no longer work
The test `buildbot.test.integration.test_integration_secrets_with_vault.SecretsConfig.test_secret` no longer works.
See https://travis-ci.org/buildbot/buildbot/jobs/464401540.
Looks like the default kv engine shipping with the `vault` engine is now v2, which we don't support yet.
</issue>
<code>
[start of master/buildbot/secrets/providers/vault.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15 """
16 vault based providers
17 """
18
19 from __future__ import absolute_import
20 from __future__ import print_function
21
22 from twisted.internet import defer
23
24 from buildbot import config
25 from buildbot.secrets.providers.base import SecretProviderBase
26 from buildbot.util import httpclientservice
27
28
29 class HashiCorpVaultSecretProvider(SecretProviderBase):
30 """
31 basic provider where each secret is stored in Vault
32 """
33
34 name = 'SecretInVault'
35
36 def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None):
37 if not isinstance(vaultServer, str):
38 config.error("vaultServer must be a string while it is %s" % (type(vaultServer,)))
39 if not isinstance(vaultToken, str):
40 config.error("vaultToken must be a string while it is %s" % (type(vaultToken,)))
41
42 @defer.inlineCallbacks
43 def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None):
44 if secretsmount is None:
45 self.secretsmount = "secret"
46 else:
47 self.secretsmount = secretsmount
48 self.vaultServer = vaultServer
49 self.vaultToken = vaultToken
50 if vaultServer.endswith('/'):
51 vaultServer = vaultServer[:-1]
52 self._http = yield httpclientservice.HTTPClientService.getService(
53 self.master, self.vaultServer, headers={'X-Vault-Token': self.vaultToken})
54
55 @defer.inlineCallbacks
56 def get(self, entry):
57 """
58 get the value from vault secret backend
59 """
60 path = self.secretsmount + '/' + entry
61 proj = yield self._http.get('/v1/{0}'.format(path))
62 code = yield proj.code
63 if code != 200:
64 raise KeyError("The key %s does not exist in Vault provider: request"
65 " return code:%d." % (entry, code))
66 json = yield proj.json()
67 defer.returnValue(json.get(u'data', {}).get('value'))
68
[end of master/buildbot/secrets/providers/vault.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/master/buildbot/secrets/providers/vault.py b/master/buildbot/secrets/providers/vault.py
--- a/master/buildbot/secrets/providers/vault.py
+++ b/master/buildbot/secrets/providers/vault.py
@@ -28,25 +28,30 @@
class HashiCorpVaultSecretProvider(SecretProviderBase):
"""
- basic provider where each secret is stored in Vault
+ basic provider where each secret is stored in Vault KV secret engine
"""
name = 'SecretInVault'
- def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None):
+ def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None,
+ apiVersion=1):
if not isinstance(vaultServer, str):
config.error("vaultServer must be a string while it is %s" % (type(vaultServer,)))
if not isinstance(vaultToken, str):
config.error("vaultToken must be a string while it is %s" % (type(vaultToken,)))
+ if apiVersion not in [1, 2]:
+ config.error("apiVersion %s is not supported" % apiVersion)
@defer.inlineCallbacks
- def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None):
+ def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None,
+ apiVersion=1):
if secretsmount is None:
self.secretsmount = "secret"
else:
self.secretsmount = secretsmount
self.vaultServer = vaultServer
self.vaultToken = vaultToken
+ self.apiVersion = apiVersion
if vaultServer.endswith('/'):
vaultServer = vaultServer[:-1]
self._http = yield httpclientservice.HTTPClientService.getService(
@@ -57,11 +62,23 @@
"""
get the value from vault secret backend
"""
- path = self.secretsmount + '/' + entry
+ if self.apiVersion == 1:
+ path = self.secretsmount + '/' + entry
+ else:
+ path = self.secretsmount + '/data/' + entry
+
+ # note that the HTTP path contains v1 for both versions of the key-value
+ # secret engine. Different versions of the key-value engine are
+ # effectively separate secret engines in vault, with the same base HTTP
+ # API, but with different paths within it.
proj = yield self._http.get('/v1/{0}'.format(path))
code = yield proj.code
if code != 200:
raise KeyError("The key %s does not exist in Vault provider: request"
" return code:%d." % (entry, code))
json = yield proj.json()
- defer.returnValue(json.get(u'data', {}).get('value'))
+ if self.apiVersion == 1:
+ ret = json.get(u'data', {}).get('value')
+ else:
+ ret = json.get(u'data', {}).get(u'data', {}).get('value')
+ defer.returnValue(ret)
|
{"golden_diff": "diff --git a/master/buildbot/secrets/providers/vault.py b/master/buildbot/secrets/providers/vault.py\n--- a/master/buildbot/secrets/providers/vault.py\n+++ b/master/buildbot/secrets/providers/vault.py\n@@ -28,25 +28,30 @@\n \n class HashiCorpVaultSecretProvider(SecretProviderBase):\n \"\"\"\n- basic provider where each secret is stored in Vault\n+ basic provider where each secret is stored in Vault KV secret engine\n \"\"\"\n \n name = 'SecretInVault'\n \n- def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None):\n+ def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None,\n+ apiVersion=1):\n if not isinstance(vaultServer, str):\n config.error(\"vaultServer must be a string while it is %s\" % (type(vaultServer,)))\n if not isinstance(vaultToken, str):\n config.error(\"vaultToken must be a string while it is %s\" % (type(vaultToken,)))\n+ if apiVersion not in [1, 2]:\n+ config.error(\"apiVersion %s is not supported\" % apiVersion)\n \n @defer.inlineCallbacks\n- def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None):\n+ def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None,\n+ apiVersion=1):\n if secretsmount is None:\n self.secretsmount = \"secret\"\n else:\n self.secretsmount = secretsmount\n self.vaultServer = vaultServer\n self.vaultToken = vaultToken\n+ self.apiVersion = apiVersion\n if vaultServer.endswith('/'):\n vaultServer = vaultServer[:-1]\n self._http = yield httpclientservice.HTTPClientService.getService(\n@@ -57,11 +62,23 @@\n \"\"\"\n get the value from vault secret backend\n \"\"\"\n- path = self.secretsmount + '/' + entry\n+ if self.apiVersion == 1:\n+ path = self.secretsmount + '/' + entry\n+ else:\n+ path = self.secretsmount + '/data/' + entry\n+\n+ # note that the HTTP path contains v1 for both versions of the key-value\n+ # secret engine. Different versions of the key-value engine are\n+ # effectively separate secret engines in vault, with the same base HTTP\n+ # API, but with different paths within it.\n proj = yield self._http.get('/v1/{0}'.format(path))\n code = yield proj.code\n if code != 200:\n raise KeyError(\"The key %s does not exist in Vault provider: request\"\n \" return code:%d.\" % (entry, code))\n json = yield proj.json()\n- defer.returnValue(json.get(u'data', {}).get('value'))\n+ if self.apiVersion == 1:\n+ ret = json.get(u'data', {}).get('value')\n+ else:\n+ ret = json.get(u'data', {}).get(u'data', {}).get('value')\n+ defer.returnValue(ret)\n", "issue": "SecretInVault secret provider integration tests no longer work\nThe test `buildbot.test.integration.test_integration_secrets_with_vault.SecretsConfig.test_secret` no longer works.\r\n\r\nSee https://travis-ci.org/buildbot/buildbot/jobs/464401540.\r\n\r\nLooks like the default kv engine shipping with the `vault` engine is now v2 which we don't support yet.\r\n\r\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\"\"\"\nvault based providers\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom twisted.internet import defer\n\nfrom buildbot import config\nfrom buildbot.secrets.providers.base import SecretProviderBase\nfrom buildbot.util import httpclientservice\n\n\nclass HashiCorpVaultSecretProvider(SecretProviderBase):\n \"\"\"\n basic provider where each secret is stored in Vault\n \"\"\"\n\n name = 'SecretInVault'\n\n def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None):\n if not isinstance(vaultServer, str):\n config.error(\"vaultServer must be a string while it is %s\" % (type(vaultServer,)))\n if not isinstance(vaultToken, str):\n config.error(\"vaultToken must be a string while it is %s\" % (type(vaultToken,)))\n\n @defer.inlineCallbacks\n def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None):\n if secretsmount is None:\n self.secretsmount = \"secret\"\n else:\n self.secretsmount = secretsmount\n self.vaultServer = vaultServer\n self.vaultToken = vaultToken\n if vaultServer.endswith('/'):\n vaultServer = vaultServer[:-1]\n self._http = yield httpclientservice.HTTPClientService.getService(\n self.master, self.vaultServer, headers={'X-Vault-Token': self.vaultToken})\n\n @defer.inlineCallbacks\n def get(self, entry):\n \"\"\"\n get the value from vault secret backend\n \"\"\"\n path = self.secretsmount + '/' + entry\n proj = yield self._http.get('/v1/{0}'.format(path))\n code = yield proj.code\n if code != 200:\n raise KeyError(\"The key %s does not exist in Vault provider: request\"\n \" return code:%d.\" % (entry, code))\n json = yield proj.json()\n defer.returnValue(json.get(u'data', {}).get('value'))\n", "path": "master/buildbot/secrets/providers/vault.py"}]}
| 1,352 | 684 |
gh_patches_debug_30219
|
rasdani/github-patches
|
git_diff
|
ansible__awx-13455
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CyberArk Conjur lookup plugin does not work with open source Conjur version
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.
### Bug Summary
The CyberArk Conjur Secrets Manager Lookup plugin no longer supports the lookup against the Conjur OSS. Most likely because the opensource conjure version API endpoint does not have the `/api/` portion recently added by this change: https://github.com/ansible/awx/pull/13121
### AWX version
2.4
### Select the relevant components
- [ ] UI
- [X] API
- [ ] Docs
- [ ] Collection
- [ ] CLI
- [ ] Other
### Installation method
docker development environment
### Modifications
no
### Ansible version
_No response_
### Operating system
_No response_
### Web browser
_No response_
### Steps to reproduce
1. Deploy Conjur OSS. We use the latest `cyberark/conjur` image for that. The Conjur version is
```
Version 1.19.1-3398
API Version 5.3.0
```
2. Under the AWX, create the CyberArk Conjur Secrets Manager Lookup credential. Provide Conjure URL, user, and token.
3. Click Test and provide a path.
### Expected results
Connection to Conjur should be established. HTTP 200 response.
### Actual results
401 error returned.
```
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/tasks/jobs.py", line 504, in run
args = self.build_args(self.instance, private_data_dir, passwords)
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/tasks/jobs.py", line 937, in build_args
ssh_username = creds.get_input('username', default='')
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py", line 275, in get_input
return self._get_dynamic_input(field_name)
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py", line 309, in _get_dynamic_input
return input_source.get_input_value()
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py", line 1250, in get_input_value
return backend(**backend_kwargs)
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/credential_plugins/conjur.py", line 72, in conjur_backend
raise_for_status(resp)
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/credential_plugins/plugin.py", line 12, in raise_for_status
resp.raise_for_status()
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/requests/models.py", line 1021, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 401 Client Error: Unauthorized for url: https://conjureurl.com/api/authn/test/admin/authenticate
```
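
The 401 above comes from the `/api/`-prefixed authenticate path. Purely as an illustration (a hypothetical standalone helper, not the AWX plugin code), one way to probe both URL layouts — self-hosted Conjur OSS without the `/api/` prefix and Conjur Cloud with it — looks roughly like:

```python
import requests
from urllib.parse import urljoin, quote


def conjur_authenticate(base_url, account, username, api_key, verify=True):
    """Return a short-lived Conjur token, trying both known URL layouts."""
    account = quote(account, safe="")
    username = quote(username, safe="")
    candidates = [
        "/".join(["authn", account, username, "authenticate"]),         # Conjur OSS
        "/".join(["api", "authn", account, username, "authenticate"]),  # Conjur Cloud
    ]
    last = None
    for path in candidates:
        last = requests.post(urljoin(base_url, path), data=api_key,
                             headers={"Accept-Encoding": "base64"}, verify=verify)
        if last.status_code == 200:
            return last.content.decode("utf-8")
    last.raise_for_status()
```

An actual fix might instead detect the deployment type explicitly rather than probing both endpoints.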
### Additional information
_No response_
</issue>
<code>
[start of awx/main/credential_plugins/conjur.py]
1 from .plugin import CredentialPlugin, CertFiles, raise_for_status
2
3 from urllib.parse import urljoin, quote
4
5 from django.utils.translation import gettext_lazy as _
6 import requests
7
8
9 conjur_inputs = {
10 'fields': [
11 {
12 'id': 'url',
13 'label': _('Conjur URL'),
14 'type': 'string',
15 'format': 'url',
16 },
17 {
18 'id': 'api_key',
19 'label': _('API Key'),
20 'type': 'string',
21 'secret': True,
22 },
23 {
24 'id': 'account',
25 'label': _('Account'),
26 'type': 'string',
27 },
28 {
29 'id': 'username',
30 'label': _('Username'),
31 'type': 'string',
32 },
33 {'id': 'cacert', 'label': _('Public Key Certificate'), 'type': 'string', 'multiline': True},
34 ],
35 'metadata': [
36 {
37 'id': 'secret_path',
38 'label': _('Secret Identifier'),
39 'type': 'string',
40 'help_text': _('The identifier for the secret e.g., /some/identifier'),
41 },
42 {
43 'id': 'secret_version',
44 'label': _('Secret Version'),
45 'type': 'string',
46 'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),
47 },
48 ],
49 'required': ['url', 'api_key', 'account', 'username'],
50 }
51
52
53 def conjur_backend(**kwargs):
54 url = kwargs['url']
55 api_key = kwargs['api_key']
56 account = quote(kwargs['account'], safe='')
57 username = quote(kwargs['username'], safe='')
58 secret_path = quote(kwargs['secret_path'], safe='')
59 version = kwargs.get('secret_version')
60 cacert = kwargs.get('cacert', None)
61
62 auth_kwargs = {
63 'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'},
64 'data': api_key,
65 'allow_redirects': False,
66 }
67
68 with CertFiles(cacert) as cert:
69 # https://www.conjur.org/api.html#authentication-authenticate-post
70 auth_kwargs['verify'] = cert
71 resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)
72 raise_for_status(resp)
73 token = resp.content.decode('utf-8')
74
75 lookup_kwargs = {
76 'headers': {'Authorization': 'Token token="{}"'.format(token)},
77 'allow_redirects': False,
78 }
79
80 # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
81 path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))
82 if version:
83 ver = "version={}".format(version)
84 path = '?'.join([path, ver])
85
86 with CertFiles(cacert) as cert:
87 lookup_kwargs['verify'] = cert
88 resp = requests.get(path, timeout=30, **lookup_kwargs)
89 raise_for_status(resp)
90 return resp.text
91
92
93 conjur_plugin = CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend)
94
[end of awx/main/credential_plugins/conjur.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/awx/main/credential_plugins/conjur.py b/awx/main/credential_plugins/conjur.py
--- a/awx/main/credential_plugins/conjur.py
+++ b/awx/main/credential_plugins/conjur.py
@@ -68,7 +68,10 @@
with CertFiles(cacert) as cert:
# https://www.conjur.org/api.html#authentication-authenticate-post
auth_kwargs['verify'] = cert
- resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)
+ try:
+ resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)
+ except requests.exceptions.ConnectionError:
+ resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)
raise_for_status(resp)
token = resp.content.decode('utf-8')
@@ -78,14 +81,19 @@
}
# https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
- path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))
+ path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))
+ path_conjurcloud = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))
if version:
ver = "version={}".format(version)
path = '?'.join([path, ver])
+ path_conjurcloud = '?'.join([path_conjurcloud, ver])
with CertFiles(cacert) as cert:
lookup_kwargs['verify'] = cert
- resp = requests.get(path, timeout=30, **lookup_kwargs)
+ try:
+ resp = requests.get(path, timeout=30, **lookup_kwargs)
+ except requests.exceptions.ConnectionError:
+ resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)
raise_for_status(resp)
return resp.text
|
{"golden_diff": "diff --git a/awx/main/credential_plugins/conjur.py b/awx/main/credential_plugins/conjur.py\n--- a/awx/main/credential_plugins/conjur.py\n+++ b/awx/main/credential_plugins/conjur.py\n@@ -68,7 +68,10 @@\n with CertFiles(cacert) as cert:\n # https://www.conjur.org/api.html#authentication-authenticate-post\n auth_kwargs['verify'] = cert\n- resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)\n+ try:\n+ resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)\n+ except requests.exceptions.ConnectionError:\n+ resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)\n raise_for_status(resp)\n token = resp.content.decode('utf-8')\n \n@@ -78,14 +81,19 @@\n }\n \n # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get\n- path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))\n+ path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))\n+ path_conjurcloud = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))\n if version:\n ver = \"version={}\".format(version)\n path = '?'.join([path, ver])\n+ path_conjurcloud = '?'.join([path_conjurcloud, ver])\n \n with CertFiles(cacert) as cert:\n lookup_kwargs['verify'] = cert\n- resp = requests.get(path, timeout=30, **lookup_kwargs)\n+ try:\n+ resp = requests.get(path, timeout=30, **lookup_kwargs)\n+ except requests.exceptions.ConnectionError:\n+ resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)\n raise_for_status(resp)\n return resp.text\n", "issue": "CyberArk Conjur lookup plugin does not work with open source Conjur version\n### Please confirm the following\r\n\r\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\r\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\r\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\r\n\r\n### Bug Summary\r\n\r\nThe CyberArk Conjur Secrets Manager Lookup plugin no longer supports the lookup against the Conjur OSS. Most likely because the opensource conjure version API endpoint does not have the `/api/` portion recently added by this change: https://github.com/ansible/awx/pull/13121\r\n\r\n### AWX version\r\n\r\n2.4\r\n\r\n### Select the relevant components\r\n\r\n- [ ] UI\r\n- [X] API\r\n- [ ] Docs\r\n- [ ] Collection\r\n- [ ] CLI\r\n- [ ] Other\r\n\r\n### Installation method\r\n\r\ndocker development environment\r\n\r\n### Modifications\r\n\r\nno\r\n\r\n### Ansible version\r\n\r\n_No response_\r\n\r\n### Operating system\r\n\r\n_No response_\r\n\r\n### Web browser\r\n\r\n_No response_\r\n\r\n### Steps to reproduce\r\n\r\n1. Deploy Conjur OSS. We use the latest `cyberark/conjur` image for that. The Conjur version is\r\n```\r\n Version 1.19.1-3398\r\n API Version 5.3.0 \r\n```\r\n2. Under the AWX, create the CyberArk Conjur Secrets Manager Lookup credential. Provide Conjure URL, user, and token. \r\n3. Click Test and provide a path. \r\n\r\n\r\n### Expected results\r\n\r\nConnection to Conjur should be established. HTTP 200 response. \r\n\r\n### Actual results\r\n\r\n401 error returned. 
\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/tasks/jobs.py\", line 504, in run\r\n args = self.build_args(self.instance, private_data_dir, passwords)\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/tasks/jobs.py\", line 937, in build_args\r\n ssh_username = creds.get_input('username', default='')\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py\", line 275, in get_input\r\n return self._get_dynamic_input(field_name)\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py\", line 309, in _get_dynamic_input\r\n return input_source.get_input_value()\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py\", line 1250, in get_input_value\r\n return backend(**backend_kwargs)\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/credential_plugins/conjur.py\", line 72, in conjur_backend\r\n raise_for_status(resp)\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/credential_plugins/plugin.py\", line 12, in raise_for_status\r\n resp.raise_for_status()\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/requests/models.py\", line 1021, in raise_for_status\r\n raise HTTPError(http_error_msg, response=self)\r\nrequests.exceptions.HTTPError: 401 Client Error: Unauthorized for url: https://conjureurl.com/api/authn/test/admin/authenticate\r\n```\r\n\r\n### Additional information\r\n\r\n_No response_\n", "before_files": [{"content": "from .plugin import CredentialPlugin, CertFiles, raise_for_status\n\nfrom urllib.parse import urljoin, quote\n\nfrom django.utils.translation import gettext_lazy as _\nimport requests\n\n\nconjur_inputs = {\n 'fields': [\n {\n 'id': 'url',\n 'label': _('Conjur URL'),\n 'type': 'string',\n 'format': 'url',\n },\n {\n 'id': 'api_key',\n 'label': _('API Key'),\n 'type': 'string',\n 'secret': True,\n },\n {\n 'id': 'account',\n 'label': _('Account'),\n 'type': 'string',\n },\n {\n 'id': 'username',\n 'label': _('Username'),\n 'type': 'string',\n },\n {'id': 'cacert', 'label': _('Public Key Certificate'), 'type': 'string', 'multiline': True},\n ],\n 'metadata': [\n {\n 'id': 'secret_path',\n 'label': _('Secret Identifier'),\n 'type': 'string',\n 'help_text': _('The identifier for the secret e.g., /some/identifier'),\n },\n {\n 'id': 'secret_version',\n 'label': _('Secret Version'),\n 'type': 'string',\n 'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),\n },\n ],\n 'required': ['url', 'api_key', 'account', 'username'],\n}\n\n\ndef conjur_backend(**kwargs):\n url = kwargs['url']\n api_key = kwargs['api_key']\n account = quote(kwargs['account'], safe='')\n username = quote(kwargs['username'], safe='')\n secret_path = quote(kwargs['secret_path'], safe='')\n version = kwargs.get('secret_version')\n cacert = kwargs.get('cacert', None)\n\n auth_kwargs = {\n 'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'},\n 'data': api_key,\n 'allow_redirects': False,\n }\n\n with CertFiles(cacert) as cert:\n # https://www.conjur.org/api.html#authentication-authenticate-post\n auth_kwargs['verify'] = cert\n resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)\n raise_for_status(resp)\n token = resp.content.decode('utf-8')\n\n lookup_kwargs = {\n 'headers': 
{'Authorization': 'Token token=\"{}\"'.format(token)},\n 'allow_redirects': False,\n }\n\n # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get\n path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))\n if version:\n ver = \"version={}\".format(version)\n path = '?'.join([path, ver])\n\n with CertFiles(cacert) as cert:\n lookup_kwargs['verify'] = cert\n resp = requests.get(path, timeout=30, **lookup_kwargs)\n raise_for_status(resp)\n return resp.text\n\n\nconjur_plugin = CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend)\n", "path": "awx/main/credential_plugins/conjur.py"}]}
| 2,292 | 479 |
gh_patches_debug_19765
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-10170
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] `TileRenderer` ignores the `visible` property
#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)
Bokeh 2.1.0rc1
#### Description of expected behavior and the observed behavior
`TileRenderer` should take into account the `visible` property. Both if passed to its constructor and if changed in runtime if using Bokeh server.
#### Complete, minimal, self-contained example code that reproduces the issue
```python
from bokeh.io import show
from bokeh.plotting import figure
from bokeh.tile_providers import CARTODBPOSITRON, get_provider
p = figure(x_range=(-2000000, 6000000), y_range=(-1000000, 7000000),
x_axis_type="mercator", y_axis_type="mercator")
p.add_tile(get_provider(CARTODBPOSITRON), visible=False)
show(p)
```
The root cause is that `TileRenderer` just doesn't check `visible` at all. It seems like every renderer checks this property. Maybe it should be checked at a higher level?
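
For comparison, the behaviour one would expect — toggling the tile renderer's `visible` flag from a widget — can be sketched as follows (illustrative only; with the bug present the toggle has no effect):

```python
from bokeh.io import show
from bokeh.layouts import column
from bokeh.models import Toggle
from bokeh.plotting import figure
from bokeh.tile_providers import CARTODBPOSITRON, get_provider

p = figure(x_range=(-2000000, 6000000), y_range=(-1000000, 7000000),
           x_axis_type="mercator", y_axis_type="mercator")
tiles = p.add_tile(get_provider(CARTODBPOSITRON))  # returns the TileRenderer

toggle = Toggle(label="Show basemap", active=True)
toggle.js_link("active", tiles, "visible")  # should hide/show the tiles

show(column(toggle, p))
```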
</issue>
<code>
[start of examples/models/file/latex_extension.py]
1 """ The LaTex example was derived from: http://matplotlib.org/users/usetex.html
2 """
3 import numpy as np
4 from scipy.special import jv
5
6 from bokeh.models import Label
7 from bokeh.palettes import Spectral4
8 from bokeh.plotting import figure, output_file, show
9 from bokeh.util.compiler import TypeScript
10
11 output_file('latex_extension.html')
12
13 class LatexLabel(Label):
14 """A subclass of `Label` with all of the same class attributes except
15 canvas mode isn't supported and DOM manipulation happens in the TypeScript
16 superclass implementation that requires setting `render_mode='css'`).
17
18 Only the render method of LabelView is overwritten to perform the
19 text -> latex (via katex) conversion
20 """
21 __javascript__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.js"]
22 __css__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.css"]
23 __implementation__ = TypeScript("""
24 import {Label, LabelView} from "models/annotations/label"
25
26 declare namespace katex {
27 function render(expression: string, element: HTMLElement, options: {displayMode?: boolean}): void
28 }
29
30 export class LatexLabelView extends LabelView {
31 model: LatexLabel
32
33 render(): void {
34 // Here because AngleSpec does units tranform and label doesn't support specs
35 let angle: number
36 switch (this.model.angle_units) {
37 case "rad": {
38 angle = -1 * this.model.angle
39 break
40 }
41 case "deg": {
42 angle = -1 * this.model.angle * Math.PI/180.0
43 break
44 }
45 default:
46 throw new Error("unreachable")
47 }
48
49 const panel = this.panel || this.plot_view.frame
50
51 const xscale = this.plot_view.frame.xscales[this.model.x_range_name]
52 const yscale = this.plot_view.frame.yscales[this.model.y_range_name]
53
54 const {x, y} = this.model
55 let sx = this.model.x_units == "data" ? xscale.compute(x) : panel.xview.compute(x)
56 let sy = this.model.y_units == "data" ? yscale.compute(y) : panel.yview.compute(y)
57
58 sx += this.model.x_offset
59 sy -= this.model.y_offset
60
61 this._css_text(this.layer.ctx, "", sx, sy, angle)
62 katex.render(this.model.text, this.el, {displayMode: true})
63 }
64 }
65
66 export class LatexLabel extends Label {
67 static init_LatexLabel(): void {
68 this.prototype.default_view = LatexLabelView
69 }
70 }
71 """)
72
73 p = figure(title="LaTex Extension Demonstration", plot_width=800, plot_height=350,
74 background_fill_color="#fafafa")
75 p.x_range.range_padding = 0
76
77 x = np.arange(0.0, 20.0, 0.02)
78
79 for i, n in enumerate([0, 1, 4, 7]):
80 p.line(x, jv(n, x), line_width=3, color=Spectral4[i], alpha=0.8, legend_label="𝜈=%d" % n)
81
82
83 text = (r"\text{Bessel Functions of the First Kind: }" +
84 r"J_\nu = \sum_{m=0}^{\infty}\frac{(-1)^m}{m!\ \Gamma(m+\nu+1)}" +
85 r"\left(\frac{x}{2}\right)^{2m+\nu}")
86 latex = LatexLabel(text=text,x=4.5, y=250, x_units='data', y_units='screen',
87 render_mode='css', text_font_size='11px',
88 background_fill_color="white", border_line_color="lightgrey")
89
90 p.add_layout(latex)
91
92 show(p)
93
[end of examples/models/file/latex_extension.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/models/file/latex_extension.py b/examples/models/file/latex_extension.py
--- a/examples/models/file/latex_extension.py
+++ b/examples/models/file/latex_extension.py
@@ -30,7 +30,7 @@
export class LatexLabelView extends LabelView {
model: LatexLabel
- render(): void {
+ protected _render(): void {
// Here because AngleSpec does units tranform and label doesn't support specs
let angle: number
switch (this.model.angle_units) {
@@ -59,13 +59,17 @@
sy -= this.model.y_offset
this._css_text(this.layer.ctx, "", sx, sy, angle)
- katex.render(this.model.text, this.el, {displayMode: true})
+ katex.render(this.model.text, this.el!, {displayMode: true})
}
}
export class LatexLabel extends Label {
static init_LatexLabel(): void {
this.prototype.default_view = LatexLabelView
+
+ this.override({
+ render_mode: "css",
+ })
}
}
""")
|
{"golden_diff": "diff --git a/examples/models/file/latex_extension.py b/examples/models/file/latex_extension.py\n--- a/examples/models/file/latex_extension.py\n+++ b/examples/models/file/latex_extension.py\n@@ -30,7 +30,7 @@\n export class LatexLabelView extends LabelView {\n model: LatexLabel\n \n- render(): void {\n+ protected _render(): void {\n // Here because AngleSpec does units tranform and label doesn't support specs\n let angle: number\n switch (this.model.angle_units) {\n@@ -59,13 +59,17 @@\n sy -= this.model.y_offset\n \n this._css_text(this.layer.ctx, \"\", sx, sy, angle)\n- katex.render(this.model.text, this.el, {displayMode: true})\n+ katex.render(this.model.text, this.el!, {displayMode: true})\n }\n }\n \n export class LatexLabel extends Label {\n static init_LatexLabel(): void {\n this.prototype.default_view = LatexLabelView\n+\n+ this.override({\n+ render_mode: \"css\",\n+ })\n }\n }\n \"\"\")\n", "issue": "[BUG] `TileRenderer` ignores the `visible` property\n#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)\r\nBokeh 2.1.0rc1\r\n\r\n#### Description of expected behavior and the observed behavior\r\n`TileRenderer` should take into account the `visible` property. Both if passed to its constructor and if changed in runtime if using Bokeh server.\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n```python\r\nfrom bokeh.io import show\r\nfrom bokeh.plotting import figure\r\nfrom bokeh.tile_providers import CARTODBPOSITRON, get_provider\r\n\r\np = figure(x_range=(-2000000, 6000000), y_range=(-1000000, 7000000),\r\n x_axis_type=\"mercator\", y_axis_type=\"mercator\")\r\n\r\np.add_tile(get_provider(CARTODBPOSITRON), visible=False)\r\n\r\nshow(p)\r\n```\r\nThe root cause is that `TileRenderer` just doesn't check `visible` at all. It seems like every renderer checks this property. 
Maybe it should be checked at a higher level?\n", "before_files": [{"content": "\"\"\" The LaTex example was derived from: http://matplotlib.org/users/usetex.html\n\"\"\"\nimport numpy as np\nfrom scipy.special import jv\n\nfrom bokeh.models import Label\nfrom bokeh.palettes import Spectral4\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.util.compiler import TypeScript\n\noutput_file('latex_extension.html')\n\nclass LatexLabel(Label):\n \"\"\"A subclass of `Label` with all of the same class attributes except\n canvas mode isn't supported and DOM manipulation happens in the TypeScript\n superclass implementation that requires setting `render_mode='css'`).\n\n Only the render method of LabelView is overwritten to perform the\n text -> latex (via katex) conversion\n \"\"\"\n __javascript__ = [\"https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.js\"]\n __css__ = [\"https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.css\"]\n __implementation__ = TypeScript(\"\"\"\nimport {Label, LabelView} from \"models/annotations/label\"\n\ndeclare namespace katex {\n function render(expression: string, element: HTMLElement, options: {displayMode?: boolean}): void\n}\n\nexport class LatexLabelView extends LabelView {\n model: LatexLabel\n\n render(): void {\n // Here because AngleSpec does units tranform and label doesn't support specs\n let angle: number\n switch (this.model.angle_units) {\n case \"rad\": {\n angle = -1 * this.model.angle\n break\n }\n case \"deg\": {\n angle = -1 * this.model.angle * Math.PI/180.0\n break\n }\n default:\n throw new Error(\"unreachable\")\n }\n\n const panel = this.panel || this.plot_view.frame\n\n const xscale = this.plot_view.frame.xscales[this.model.x_range_name]\n const yscale = this.plot_view.frame.yscales[this.model.y_range_name]\n\n const {x, y} = this.model\n let sx = this.model.x_units == \"data\" ? xscale.compute(x) : panel.xview.compute(x)\n let sy = this.model.y_units == \"data\" ? yscale.compute(y) : panel.yview.compute(y)\n\n sx += this.model.x_offset\n sy -= this.model.y_offset\n\n this._css_text(this.layer.ctx, \"\", sx, sy, angle)\n katex.render(this.model.text, this.el, {displayMode: true})\n }\n}\n\nexport class LatexLabel extends Label {\n static init_LatexLabel(): void {\n this.prototype.default_view = LatexLabelView\n }\n}\n\"\"\")\n\np = figure(title=\"LaTex Extension Demonstration\", plot_width=800, plot_height=350,\n background_fill_color=\"#fafafa\")\np.x_range.range_padding = 0\n\nx = np.arange(0.0, 20.0, 0.02)\n\nfor i, n in enumerate([0, 1, 4, 7]):\n p.line(x, jv(n, x), line_width=3, color=Spectral4[i], alpha=0.8, legend_label=\"\ud835\udf08=%d\" % n)\n\n\ntext = (r\"\\text{Bessel Functions of the First Kind: }\" +\n r\"J_\\nu = \\sum_{m=0}^{\\infty}\\frac{(-1)^m}{m!\\ \\Gamma(m+\\nu+1)}\" +\n r\"\\left(\\frac{x}{2}\\right)^{2m+\\nu}\")\nlatex = LatexLabel(text=text,x=4.5, y=250, x_units='data', y_units='screen',\n render_mode='css', text_font_size='11px',\n background_fill_color=\"white\", border_line_color=\"lightgrey\")\n\np.add_layout(latex)\n\nshow(p)\n", "path": "examples/models/file/latex_extension.py"}]}
| 1,822 | 244 |
gh_patches_debug_30984
|
rasdani/github-patches
|
git_diff
|
RedHatInsights__insights-core-1901
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Directories of exploded archives not recognized as cluster
Directories of exploded archives aren't recognized as a cluster but are erroneously identified as whatever context matches the first marker file we encounter after recursively enumerating every file in all subdirectories.
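
Illustrative sketch of the intended distinction (assumed file extensions, not the project's real constant): a directory is a cluster archive when its *top level* — not the recursive file listing — contains member archives:

```python
import os

ARCHIVE_SUFFIXES = (".tar", ".tar.gz", ".tgz", ".tar.bz2", ".tar.xz", ".zip")  # assumed


def looks_like_cluster(path):
    """True when the directory's immediate children include member archives."""
    return any(name.endswith(ARCHIVE_SUFFIXES) for name in os.listdir(path))
```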
</issue>
<code>
[start of insights/core/hydration.py]
1 import logging
2 import os
3 from itertools import product
4
5 from insights.core import archives
6 from insights.core.context import (ClusterArchiveContext,
7 JDRContext,
8 HostArchiveContext,
9 SosArchiveContext,
10 SerializedArchiveContext)
11
12 log = logging.getLogger(__name__)
13
14
15 def get_all_files(path):
16 all_files = []
17 for f in archives.get_all_files(path):
18 if os.path.isfile(f) and not os.path.islink(f):
19 all_files.append(f)
20 return all_files
21
22
23 def identify(files):
24 markers = {"insights_archive.txt": SerializedArchiveContext,
25 "insights_commands": HostArchiveContext,
26 "sos_commands": SosArchiveContext,
27 "JBOSS_HOME": JDRContext}
28
29 for f, m in product(files, markers):
30 if m in f:
31 i = f.find(m)
32 common_path = os.path.dirname(f[:i])
33 ctx = markers[m]
34 return common_path, ctx
35
36 common_path = os.path.dirname(os.path.commonprefix(files))
37 if not common_path:
38 raise archives.InvalidArchive("Unable to determine common path")
39
40 if any(f.endswith(archives.COMPRESSION_TYPES) for f in os.listdir(common_path)):
41 return common_path, ClusterArchiveContext
42
43 return common_path, HostArchiveContext
44
45
46 def create_context(path, context=None):
47 all_files = get_all_files(path)
48 if not all_files:
49 raise archives.InvalidArchive("No files in archive")
50
51 common_path, ctx = identify(all_files)
52 context = context or ctx
53 return context(common_path, all_files=all_files)
54
[end of insights/core/hydration.py]
[start of insights/core/cluster.py]
1 #!/usr/bin/env python
2 import itertools
3 import pandas as pd
4 from collections import defaultdict
5
6 from ansible.parsing.dataloader import DataLoader
7 from ansible.inventory.manager import InventoryManager
8
9 from insights.core import dr, plugins
10 from insights.core.archives import extract
11 from insights.core.hydration import create_context
12 from insights.specs import Specs
13
14
15 ID_GENERATOR = itertools.count()
16
17
18 class ClusterMeta(dict):
19 def __init__(self, num_members, kwargs):
20 self.num_members = num_members
21 self.update(**kwargs)
22
23
24 @plugins.combiner(optional=[Specs.machine_id, Specs.hostname])
25 def machine_id(mid, hn):
26 ds = mid or hn
27 if ds:
28 return ds.content[0].strip()
29 return str(next(ID_GENERATOR))
30
31
32 def parse_inventory(path):
33 inventory = InventoryManager(loader=DataLoader(), sources=path)
34 return inventory.get_groups_dict()
35
36
37 def attach_machine_id(result, mid):
38 key = "machine_id"
39 if isinstance(result, list):
40 for r in result:
41 r[key] = mid
42 else:
43 result[key] = mid
44 return result
45
46
47 def process_archives(archives):
48 for archive in archives:
49 with extract(archive) as ex:
50 ctx = create_context(ex.tmp_dir)
51 broker = dr.Broker()
52 broker[ctx.__class__] = ctx
53 yield dr.run(broker=broker)
54
55
56 def extract_facts(brokers):
57 results = defaultdict(list)
58 for b in brokers:
59 mid = b[machine_id]
60 for k, v in b.get_by_type(plugins.fact).items():
61 r = attach_machine_id(v, mid)
62 if isinstance(r, list):
63 results[k].extend(r)
64 else:
65 results[k].append(r)
66 return results
67
68
69 def process_facts(facts, meta, broker):
70 broker[ClusterMeta] = meta
71 for k, v in facts.items():
72 broker[k] = pd.DataFrame(v)
73 return dr.run(dr.COMPONENTS[dr.GROUPS.cluster], broker=broker)
74
75
76 def process_cluster(archives, broker, inventory=None):
77 inventory = parse_inventory(inventory) if inventory else {}
78
79 brokers = process_archives(archives)
80 facts = extract_facts(brokers)
81 meta = ClusterMeta(len(archives), inventory)
82
83 return process_facts(facts, meta, broker)
84
[end of insights/core/cluster.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/insights/core/cluster.py b/insights/core/cluster.py
--- a/insights/core/cluster.py
+++ b/insights/core/cluster.py
@@ -1,8 +1,10 @@
#!/usr/bin/env python
import itertools
-import pandas as pd
+import os
from collections import defaultdict
+import pandas as pd
+
from ansible.parsing.dataloader import DataLoader
from ansible.inventory.manager import InventoryManager
@@ -46,8 +48,14 @@
def process_archives(archives):
for archive in archives:
- with extract(archive) as ex:
- ctx = create_context(ex.tmp_dir)
+ if os.path.isfile(archive):
+ with extract(archive) as ex:
+ ctx = create_context(ex.tmp_dir)
+ broker = dr.Broker()
+ broker[ctx.__class__] = ctx
+ yield dr.run(broker=broker)
+ else:
+ ctx = create_context(archive)
broker = dr.Broker()
broker[ctx.__class__] = ctx
yield dr.run(broker=broker)
diff --git a/insights/core/hydration.py b/insights/core/hydration.py
--- a/insights/core/hydration.py
+++ b/insights/core/hydration.py
@@ -37,13 +37,15 @@
if not common_path:
raise archives.InvalidArchive("Unable to determine common path")
- if any(f.endswith(archives.COMPRESSION_TYPES) for f in os.listdir(common_path)):
- return common_path, ClusterArchiveContext
-
return common_path, HostArchiveContext
def create_context(path, context=None):
+ top = os.listdir(path)
+ arc = [os.path.join(path, f) for f in top if f.endswith(archives.COMPRESSION_TYPES)]
+ if arc:
+ return ClusterArchiveContext(path, all_files=arc)
+
all_files = get_all_files(path)
if not all_files:
raise archives.InvalidArchive("No files in archive")
|
{"golden_diff": "diff --git a/insights/core/cluster.py b/insights/core/cluster.py\n--- a/insights/core/cluster.py\n+++ b/insights/core/cluster.py\n@@ -1,8 +1,10 @@\n #!/usr/bin/env python\n import itertools\n-import pandas as pd\n+import os\n from collections import defaultdict\n \n+import pandas as pd\n+\n from ansible.parsing.dataloader import DataLoader\n from ansible.inventory.manager import InventoryManager\n \n@@ -46,8 +48,14 @@\n \n def process_archives(archives):\n for archive in archives:\n- with extract(archive) as ex:\n- ctx = create_context(ex.tmp_dir)\n+ if os.path.isfile(archive):\n+ with extract(archive) as ex:\n+ ctx = create_context(ex.tmp_dir)\n+ broker = dr.Broker()\n+ broker[ctx.__class__] = ctx\n+ yield dr.run(broker=broker)\n+ else:\n+ ctx = create_context(archive)\n broker = dr.Broker()\n broker[ctx.__class__] = ctx\n yield dr.run(broker=broker)\ndiff --git a/insights/core/hydration.py b/insights/core/hydration.py\n--- a/insights/core/hydration.py\n+++ b/insights/core/hydration.py\n@@ -37,13 +37,15 @@\n if not common_path:\n raise archives.InvalidArchive(\"Unable to determine common path\")\n \n- if any(f.endswith(archives.COMPRESSION_TYPES) for f in os.listdir(common_path)):\n- return common_path, ClusterArchiveContext\n-\n return common_path, HostArchiveContext\n \n \n def create_context(path, context=None):\n+ top = os.listdir(path)\n+ arc = [os.path.join(path, f) for f in top if f.endswith(archives.COMPRESSION_TYPES)]\n+ if arc:\n+ return ClusterArchiveContext(path, all_files=arc)\n+\n all_files = get_all_files(path)\n if not all_files:\n raise archives.InvalidArchive(\"No files in archive\")\n", "issue": "Directories of exploded archives not recognized as cluster\nDirectories of exploded archives aren't recognized as a cluster but are erroneously identified as whatever context matches the first marker file we encounter after recursively enumerating every file in all subdirectories.\n", "before_files": [{"content": "import logging\nimport os\nfrom itertools import product\n\nfrom insights.core import archives\nfrom insights.core.context import (ClusterArchiveContext,\n JDRContext,\n HostArchiveContext,\n SosArchiveContext,\n SerializedArchiveContext)\n\nlog = logging.getLogger(__name__)\n\n\ndef get_all_files(path):\n all_files = []\n for f in archives.get_all_files(path):\n if os.path.isfile(f) and not os.path.islink(f):\n all_files.append(f)\n return all_files\n\n\ndef identify(files):\n markers = {\"insights_archive.txt\": SerializedArchiveContext,\n \"insights_commands\": HostArchiveContext,\n \"sos_commands\": SosArchiveContext,\n \"JBOSS_HOME\": JDRContext}\n\n for f, m in product(files, markers):\n if m in f:\n i = f.find(m)\n common_path = os.path.dirname(f[:i])\n ctx = markers[m]\n return common_path, ctx\n\n common_path = os.path.dirname(os.path.commonprefix(files))\n if not common_path:\n raise archives.InvalidArchive(\"Unable to determine common path\")\n\n if any(f.endswith(archives.COMPRESSION_TYPES) for f in os.listdir(common_path)):\n return common_path, ClusterArchiveContext\n\n return common_path, HostArchiveContext\n\n\ndef create_context(path, context=None):\n all_files = get_all_files(path)\n if not all_files:\n raise archives.InvalidArchive(\"No files in archive\")\n\n common_path, ctx = identify(all_files)\n context = context or ctx\n return context(common_path, all_files=all_files)\n", "path": "insights/core/hydration.py"}, {"content": "#!/usr/bin/env python\nimport itertools\nimport pandas as pd\nfrom collections import 
defaultdict\n\nfrom ansible.parsing.dataloader import DataLoader\nfrom ansible.inventory.manager import InventoryManager\n\nfrom insights.core import dr, plugins\nfrom insights.core.archives import extract\nfrom insights.core.hydration import create_context\nfrom insights.specs import Specs\n\n\nID_GENERATOR = itertools.count()\n\n\nclass ClusterMeta(dict):\n def __init__(self, num_members, kwargs):\n self.num_members = num_members\n self.update(**kwargs)\n\n\[email protected](optional=[Specs.machine_id, Specs.hostname])\ndef machine_id(mid, hn):\n ds = mid or hn\n if ds:\n return ds.content[0].strip()\n return str(next(ID_GENERATOR))\n\n\ndef parse_inventory(path):\n inventory = InventoryManager(loader=DataLoader(), sources=path)\n return inventory.get_groups_dict()\n\n\ndef attach_machine_id(result, mid):\n key = \"machine_id\"\n if isinstance(result, list):\n for r in result:\n r[key] = mid\n else:\n result[key] = mid\n return result\n\n\ndef process_archives(archives):\n for archive in archives:\n with extract(archive) as ex:\n ctx = create_context(ex.tmp_dir)\n broker = dr.Broker()\n broker[ctx.__class__] = ctx\n yield dr.run(broker=broker)\n\n\ndef extract_facts(brokers):\n results = defaultdict(list)\n for b in brokers:\n mid = b[machine_id]\n for k, v in b.get_by_type(plugins.fact).items():\n r = attach_machine_id(v, mid)\n if isinstance(r, list):\n results[k].extend(r)\n else:\n results[k].append(r)\n return results\n\n\ndef process_facts(facts, meta, broker):\n broker[ClusterMeta] = meta\n for k, v in facts.items():\n broker[k] = pd.DataFrame(v)\n return dr.run(dr.COMPONENTS[dr.GROUPS.cluster], broker=broker)\n\n\ndef process_cluster(archives, broker, inventory=None):\n inventory = parse_inventory(inventory) if inventory else {}\n\n brokers = process_archives(archives)\n facts = extract_facts(brokers)\n meta = ClusterMeta(len(archives), inventory)\n\n return process_facts(facts, meta, broker)\n", "path": "insights/core/cluster.py"}]}
| 1,716 | 449 |
gh_patches_debug_20507
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-5199
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
update Ansible due to CVE-2019-14864
## Description
[CVE-2019-14864](https://nvd.nist.gov/vuln/detail/CVE-2019-14864) is a vulnerability in Ansible's `no_log` flag for the splunk and sumologic plugins (sensitive data is incorrectly logged) but neither of which we're using. Regardless, we should update Ansible to a version that does not have this vulnerability in the next release.
@emkll also pointed out to me that this is a [good time](https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html) to get onto the 2.8 series since the 2.7 series will become unmaintained when 2.10 is released (it's in development).
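
A rough sketch of how the pin could be expressed as a version range instead of an exact-prefix match (the bounds below are placeholders, not the decided values):

```python
import ansible

LOWER_BOUND = (2, 8, 0)    # placeholder lower bound
UPPER_BOUND = (2, 10, 0)   # placeholder (exclusive) upper bound

installed = tuple(int(part) for part in ansible.__version__.split(".")[:3])
if not (LOWER_BOUND <= installed < UPPER_BOUND):
    raise SystemExit("Unsupported Ansible version: {}".format(ansible.__version__))
```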
</issue>
<code>
[start of install_files/ansible-base/callback_plugins/ansible_version_check.py]
1 # -*- encoding:utf-8 -*-
2 from __future__ import absolute_import, division, print_function, \
3 unicode_literals
4
5 import sys
6
7 import ansible
8
9 try:
10 # Version 2.0+
11 from ansible.plugins.callback import CallbackBase
12 except ImportError:
13 CallbackBase = object
14
15
16 def print_red_bold(text):
17 print('\x1b[31;1m' + text + '\x1b[0m')
18
19
20 class CallbackModule(CallbackBase):
21 def __init__(self):
22 # Can't use `on_X` because this isn't forwards compatible
23 # with Ansible 2.0+
24 required_version = '2.7.13' # Keep synchronized with requirements files
25 if not ansible.__version__.startswith(required_version):
26 print_red_bold(
27 "SecureDrop restriction: only Ansible {version}.*"
28 "is supported."
29 .format(version=required_version)
30 )
31 sys.exit(1)
32
[end of install_files/ansible-base/callback_plugins/ansible_version_check.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py
--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py
+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py
@@ -19,13 +19,18 @@
class CallbackModule(CallbackBase):
def __init__(self):
- # Can't use `on_X` because this isn't forwards compatible
- # with Ansible 2.0+
- required_version = '2.7.13' # Keep synchronized with requirements files
- if not ansible.__version__.startswith(required_version):
+ # The acceptable version range needs to be synchronized with
+ # requirements files.
+ viable_start = [2, 9, 7]
+ viable_end = [2, 10, 0]
+ ansible_version = [int(v) for v in ansible.__version__.split('.')]
+ if not (viable_start <= ansible_version < viable_end):
print_red_bold(
- "SecureDrop restriction: only Ansible {version}.*"
- "is supported."
- .format(version=required_version)
+ "SecureDrop restriction: Ansible version must be at least {viable_start} "
+ "and less than {viable_end}."
+ .format(
+ viable_start='.'.join(str(v) for v in viable_start),
+ viable_end='.'.join(str(v) for v in viable_end),
+ )
)
sys.exit(1)
|
{"golden_diff": "diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py\n+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n@@ -19,13 +19,18 @@\n \n class CallbackModule(CallbackBase):\n def __init__(self):\n- # Can't use `on_X` because this isn't forwards compatible\n- # with Ansible 2.0+\n- required_version = '2.7.13' # Keep synchronized with requirements files\n- if not ansible.__version__.startswith(required_version):\n+ # The acceptable version range needs to be synchronized with\n+ # requirements files.\n+ viable_start = [2, 9, 7]\n+ viable_end = [2, 10, 0]\n+ ansible_version = [int(v) for v in ansible.__version__.split('.')]\n+ if not (viable_start <= ansible_version < viable_end):\n print_red_bold(\n- \"SecureDrop restriction: only Ansible {version}.*\"\n- \"is supported.\"\n- .format(version=required_version)\n+ \"SecureDrop restriction: Ansible version must be at least {viable_start} \"\n+ \"and less than {viable_end}.\"\n+ .format(\n+ viable_start='.'.join(str(v) for v in viable_start),\n+ viable_end='.'.join(str(v) for v in viable_end),\n+ )\n )\n sys.exit(1)\n", "issue": "update Ansible due to CVE-2019-14864 \n## Description\r\n\r\n[CVE-2019-14864](https://nvd.nist.gov/vuln/detail/CVE-2019-14864) is a vulnerability in Ansible's `no_log` flag for the splunk and sumologic plugins (sensitive data is incorrectly logged) but neither of which we're using. Regardless, we should update Ansible to a version that does not have this vulnerability in the next release.\r\n\r\n@emkll also pointed out to me that this is a [good time](https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html) to get onto the 2.8 series since the 2.7 series will become unmaintained when 2.10 is released (it's in development). \n", "before_files": [{"content": "# -*- encoding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\n\nimport sys\n\nimport ansible\n\ntry:\n # Version 2.0+\n from ansible.plugins.callback import CallbackBase\nexcept ImportError:\n CallbackBase = object\n\n\ndef print_red_bold(text):\n print('\\x1b[31;1m' + text + '\\x1b[0m')\n\n\nclass CallbackModule(CallbackBase):\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible\n # with Ansible 2.0+\n required_version = '2.7.13' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.*\"\n \"is supported.\"\n .format(version=required_version)\n )\n sys.exit(1)\n", "path": "install_files/ansible-base/callback_plugins/ansible_version_check.py"}]}
| 999 | 347 |
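
The range check in the patch above relies on Python's lexicographic comparison of integer lists. Below is a minimal standalone sketch of that comparison semantics; the `version_in_range` helper name is ours for illustration and is not part of the SecureDrop code.

```python
def version_in_range(version_str, start, end):
    """Return True if a dotted version string falls in the half-open range [start, end)."""
    # Lists of ints compare element by element, left to right,
    # which matches how dotted version numbers are ordered.
    parsed = [int(part) for part in version_str.split('.')]
    return start <= parsed < end


# The orderings the patch depends on:
assert version_in_range('2.9.7', [2, 9, 7], [2, 10, 0])       # lower bound is inclusive
assert version_in_range('2.9.13', [2, 9, 7], [2, 10, 0])      # 9 < 10 compared as ints, not strings
assert not version_in_range('2.10.0', [2, 9, 7], [2, 10, 0])  # upper bound is exclusive
assert not version_in_range('2.7.13', [2, 9, 7], [2, 10, 0])  # previously pinned version is now too old
```
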
gh_patches_debug_24422
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-3569
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Uninitialized ASN.1 value when using LdapUserInfo
After updating some Python packages, I got the following exception on login:
```
Traceback (most recent call last):
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1442, in gotResult
_inlineCallbacks(r, g, deferred)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1432, in _inlineCallbacks
deferred.errback()
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 500, in errback
self._startRunCallbacks(fail)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 567, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 653, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/resource.py", line 91, in failHttpError
f.trap(Error)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/failure.py", line 346, in trap
self.raiseException()
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 653, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/resource.py", line 84, in failHttpRedirect
f.trap(Redirect)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/failure.py", line 346, in trap
self.raiseException()
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1384, in _inlineCallbacks
result = result.throwExceptionIntoGenerator(g)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/failure.py", line 393, in throwExceptionIntoGenerator
return g.throw(self.type, self.value, self.tb)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/oauth2.py", line 67, in renderLogin
infos = yield self.auth.userInfoProvider.getUserInfo(details['username'])
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/threadpool.py", line 250, in inContext
result = inContext.theWork()
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/threadpool.py", line 266, in <lambda>
inContext.theWork = lambda: context.call(ctx, func, *args, **kw)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/ldapuserinfo.py", line 92, in thd
c = self.connectLdap()
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/ldapuserinfo.py", line 82, in connectLdap
authentication=auth)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/core/connection.py", line 278, in __init__
self.bind(read_server_info=True)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/core/connection.py", line 427, in bind
response = self.post_send_single_response(self.send('bindRequest', request, controls))
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/strategy/sync.py", line 122, in post_send_single_response
responses, result = self.get_response(message_id)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/strategy/base.py", line 298, in get_response
responses = self._get_response(message_id)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/strategy/sync.py", line 168, in _get_response
dict_response = self.decode_response(ldap_resp)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/strategy/base.py", line 403, in decode_response
result = bind_response_to_dict(component)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/operation/bind.py", line 119, in bind_response_to_dict
'saslCreds': bytes(response['serverSaslCreds']) if response['serverSaslCreds'] is not None else None}
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/pyasn1/type/univ.py", line 939, in __str__
return str(self._value)
File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/pyasn1/type/base.py", line 186, in plug
raise error.PyAsn1Error('Uninitialized ASN.1 value ("%s" attribute looked up)' % name)
pyasn1.error.PyAsn1Error: Uninitialized ASN.1 value ("__str__" attribute looked up)
```
The problem disappeared after downgrading pyasn1 from 0.3.2 to the version 0.1.9, which I used before.
</issue>
<code>
[start of master/buildbot/www/ldapuserinfo.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 from __future__ import absolute_import
17 from __future__ import print_function
18 from future.moves.urllib.parse import urlparse
19
20 import ldap3
21
22 from twisted.internet import threads
23
24 from buildbot.util import flatten
25 from buildbot.www import auth
26 from buildbot.www import avatar
27
28
29 class LdapUserInfo(avatar.AvatarBase, auth.UserInfoProviderBase):
30 name = 'ldap'
31
32 def __init__(self, uri, bindUser, bindPw,
33 accountBase,
34 accountPattern,
35 accountFullName,
36 accountEmail,
37 groupBase=None,
38 groupMemberPattern=None,
39 groupName=None,
40 avatarPattern=None,
41 avatarData=None,
42 accountExtraFields=None):
43 avatar.AvatarBase.__init__(self)
44 auth.UserInfoProviderBase.__init__(self)
45 self.uri = uri
46 self.bindUser = bindUser
47 self.bindPw = bindPw
48 self.accountBase = accountBase
49 self.accountEmail = accountEmail
50 self.accountPattern = accountPattern
51 self.accountFullName = accountFullName
52 group_params = [p for p in (groupName, groupMemberPattern, groupBase)
53 if p is not None]
54 if len(group_params) not in (0, 3):
55 raise ValueError(
56 "Incomplete LDAP groups configuration. "
57 "To use Ldap groups, you need to specify the three "
58 "parameters (groupName, groupMemberPattern and groupBase). ")
59
60 self.groupName = groupName
61 self.groupMemberPattern = groupMemberPattern
62 self.groupBase = groupBase
63 self.avatarPattern = avatarPattern
64 self.avatarData = avatarData
65 if accountExtraFields is None:
66 accountExtraFields = []
67 self.accountExtraFields = accountExtraFields
68
69 def connectLdap(self):
70 server = urlparse(self.uri)
71 netloc = server.netloc.split(":")
72 # define the server and the connection
73 s = ldap3.Server(netloc[0], port=int(netloc[1]), use_ssl=server.scheme == 'ldaps',
74 get_info=ldap3.GET_ALL_INFO)
75
76 auth = ldap3.AUTH_SIMPLE
77 if self.bindUser is None and self.bindPw is None:
78 auth = ldap3.AUTH_ANONYMOUS
79
80 c = ldap3.Connection(s, auto_bind=True, client_strategy=ldap3.STRATEGY_SYNC,
81 user=self.bindUser, password=self.bindPw,
82 authentication=auth)
83 return c
84
85 def search(self, c, base, filterstr='f', attributes=None):
86 c.search(
87 base, filterstr, ldap3.SEARCH_SCOPE_WHOLE_SUBTREE, attributes=attributes)
88 return c.response
89
90 def getUserInfo(self, username):
91 def thd():
92 c = self.connectLdap()
93 infos = {'username': username}
94 pattern = self.accountPattern % dict(username=username)
95 res = self.search(c, self.accountBase, pattern,
96 attributes=[
97 self.accountEmail, self.accountFullName] +
98 self.accountExtraFields)
99 if len(res) != 1:
100 raise KeyError(
101 "ldap search \"%s\" returned %d results" % (pattern, len(res)))
102 dn, ldap_infos = res[0]['dn'], res[0]['raw_attributes']
103 if isinstance(dn, bytes):
104 dn = dn.decode('utf-8')
105
106 def getLdapInfo(x):
107 if isinstance(x, list):
108 return x[0]
109 return x
110 infos['full_name'] = getLdapInfo(ldap_infos[self.accountFullName])
111 infos['email'] = getLdapInfo(ldap_infos[self.accountEmail])
112 for f in self.accountExtraFields:
113 if f in ldap_infos:
114 infos[f] = getLdapInfo(ldap_infos[f])
115
116 if self.groupMemberPattern is None:
117 infos['groups'] = []
118 return infos
119
120 # needs double quoting of backslashing
121 pattern = self.groupMemberPattern % dict(dn=dn)
122 res = self.search(c, self.groupBase, pattern,
123 attributes=[self.groupName])
124 infos['groups'] = flatten(
125 [group_infos['raw_attributes'][self.groupName] for group_infos in res])
126 return infos
127 return threads.deferToThread(thd)
128
129 def findAvatarMime(self, data):
130 # http://en.wikipedia.org/wiki/List_of_file_signatures
131 if data.startswith("\xff\xd8\xff"):
132 return ("image/jpeg", data)
133 if data.startswith("\x89PNG"):
134 return ("image/png", data)
135 if data.startswith("GIF8"):
136 return ("image/gif", data)
137 # ignore unknown image format
138 return None
139
140 def getUserAvatar(self, user_email, size, defaultAvatarUrl):
141 def thd():
142 c = self.connectLdap()
143 pattern = self.avatarPattern % dict(email=user_email)
144 res = self.search(c, self.accountBase, pattern,
145 attributes=[self.avatarData])
146 if not res:
147 return None
148 ldap_infos = res[0]['raw_attributes']
149 if self.avatarData in ldap_infos and ldap_infos[self.avatarData]:
150 data = ldap_infos[self.avatarData][0]
151 return self.findAvatarMime(data)
152 return None
153 return threads.deferToThread(thd)
154
[end of master/buildbot/www/ldapuserinfo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/master/buildbot/www/ldapuserinfo.py b/master/buildbot/www/ldapuserinfo.py
--- a/master/buildbot/www/ldapuserinfo.py
+++ b/master/buildbot/www/ldapuserinfo.py
@@ -71,20 +71,19 @@
netloc = server.netloc.split(":")
# define the server and the connection
s = ldap3.Server(netloc[0], port=int(netloc[1]), use_ssl=server.scheme == 'ldaps',
- get_info=ldap3.GET_ALL_INFO)
+ get_info=ldap3.ALL)
- auth = ldap3.AUTH_SIMPLE
+ auth = ldap3.SIMPLE
if self.bindUser is None and self.bindPw is None:
- auth = ldap3.AUTH_ANONYMOUS
+ auth = ldap3.ANONYMOUS
- c = ldap3.Connection(s, auto_bind=True, client_strategy=ldap3.STRATEGY_SYNC,
+ c = ldap3.Connection(s, auto_bind=True, client_strategy=ldap3.SYNC,
user=self.bindUser, password=self.bindPw,
authentication=auth)
return c
def search(self, c, base, filterstr='f', attributes=None):
- c.search(
- base, filterstr, ldap3.SEARCH_SCOPE_WHOLE_SUBTREE, attributes=attributes)
+ c.search(base, filterstr, ldap3.SUBTREE, attributes=attributes)
return c.response
def getUserInfo(self, username):
|
{"golden_diff": "diff --git a/master/buildbot/www/ldapuserinfo.py b/master/buildbot/www/ldapuserinfo.py\n--- a/master/buildbot/www/ldapuserinfo.py\n+++ b/master/buildbot/www/ldapuserinfo.py\n@@ -71,20 +71,19 @@\n netloc = server.netloc.split(\":\")\n # define the server and the connection\n s = ldap3.Server(netloc[0], port=int(netloc[1]), use_ssl=server.scheme == 'ldaps',\n- get_info=ldap3.GET_ALL_INFO)\n+ get_info=ldap3.ALL)\n \n- auth = ldap3.AUTH_SIMPLE\n+ auth = ldap3.SIMPLE\n if self.bindUser is None and self.bindPw is None:\n- auth = ldap3.AUTH_ANONYMOUS\n+ auth = ldap3.ANONYMOUS\n \n- c = ldap3.Connection(s, auto_bind=True, client_strategy=ldap3.STRATEGY_SYNC,\n+ c = ldap3.Connection(s, auto_bind=True, client_strategy=ldap3.SYNC,\n user=self.bindUser, password=self.bindPw,\n authentication=auth)\n return c\n \n def search(self, c, base, filterstr='f', attributes=None):\n- c.search(\n- base, filterstr, ldap3.SEARCH_SCOPE_WHOLE_SUBTREE, attributes=attributes)\n+ c.search(base, filterstr, ldap3.SUBTREE, attributes=attributes)\n return c.response\n \n def getUserInfo(self, username):\n", "issue": "Uninitialized ASN.1 value when using LdapUserInfo\nAfter updating some Python packages, I got the following exception on login:\r\n\r\n```\r\n Traceback (most recent call last):\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 1442, in gotResult\r\n _inlineCallbacks(r, g, deferred)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 1432, in _inlineCallbacks\r\n deferred.errback()\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 500, in errback\r\n self._startRunCallbacks(fail)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 567, in _startRunCallbacks\r\n self._runCallbacks()\r\n --- <exception caught here> ---\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 653, in _runCallbacks\r\n current.result = callback(current.result, *args, **kw)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/resource.py\", line 91, in failHttpError\r\n f.trap(Error)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/failure.py\", line 346, in trap\r\n self.raiseException()\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 653, in _runCallbacks\r\n current.result = callback(current.result, *args, **kw)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/resource.py\", line 84, in failHttpRedirect\r\n f.trap(Redirect)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/failure.py\", line 346, in trap\r\n self.raiseException()\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 1384, in _inlineCallbacks\r\n result = result.throwExceptionIntoGenerator(g)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/failure.py\", line 393, in throwExceptionIntoGenerator\r\n return g.throw(self.type, self.value, self.tb)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/oauth2.py\", line 67, in renderLogin\r\n infos = yield 
self.auth.userInfoProvider.getUserInfo(details['username'])\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/threadpool.py\", line 250, in inContext\r\n result = inContext.theWork()\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/threadpool.py\", line 266, in <lambda>\r\n inContext.theWork = lambda: context.call(ctx, func, *args, **kw)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/context.py\", line 122, in callWithContext\r\n return self.currentContext().callWithContext(ctx, func, *args, **kw)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/context.py\", line 85, in callWithContext\r\n return func(*args,**kw)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/ldapuserinfo.py\", line 92, in thd\r\n c = self.connectLdap()\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/ldapuserinfo.py\", line 82, in connectLdap\r\n authentication=auth)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/core/connection.py\", line 278, in __init__\r\n self.bind(read_server_info=True)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/core/connection.py\", line 427, in bind\r\n response = self.post_send_single_response(self.send('bindRequest', request, controls))\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/strategy/sync.py\", line 122, in post_send_single_response\r\n responses, result = self.get_response(message_id)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/strategy/base.py\", line 298, in get_response\r\n responses = self._get_response(message_id)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/strategy/sync.py\", line 168, in _get_response\r\n dict_response = self.decode_response(ldap_resp)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/strategy/base.py\", line 403, in decode_response\r\n result = bind_response_to_dict(component)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/operation/bind.py\", line 119, in bind_response_to_dict\r\n 'saslCreds': bytes(response['serverSaslCreds']) if response['serverSaslCreds'] is not None else None}\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/pyasn1/type/univ.py\", line 939, in __str__\r\n return str(self._value)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/pyasn1/type/base.py\", line 186, in plug\r\n raise error.PyAsn1Error('Uninitialized ASN.1 value (\"%s\" attribute looked up)' % name)\r\n pyasn1.error.PyAsn1Error: Uninitialized ASN.1 value (\"__str__\" attribute looked up)\r\n```\r\n\r\nThe problem disappeared after downgrading pyasn1 from 0.3.2 to the version 0.1.9, which I used before.\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom future.moves.urllib.parse import urlparse\n\nimport ldap3\n\nfrom twisted.internet import threads\n\nfrom buildbot.util import flatten\nfrom buildbot.www import auth\nfrom buildbot.www import avatar\n\n\nclass LdapUserInfo(avatar.AvatarBase, auth.UserInfoProviderBase):\n name = 'ldap'\n\n def __init__(self, uri, bindUser, bindPw,\n accountBase,\n accountPattern,\n accountFullName,\n accountEmail,\n groupBase=None,\n groupMemberPattern=None,\n groupName=None,\n avatarPattern=None,\n avatarData=None,\n accountExtraFields=None):\n avatar.AvatarBase.__init__(self)\n auth.UserInfoProviderBase.__init__(self)\n self.uri = uri\n self.bindUser = bindUser\n self.bindPw = bindPw\n self.accountBase = accountBase\n self.accountEmail = accountEmail\n self.accountPattern = accountPattern\n self.accountFullName = accountFullName\n group_params = [p for p in (groupName, groupMemberPattern, groupBase)\n if p is not None]\n if len(group_params) not in (0, 3):\n raise ValueError(\n \"Incomplete LDAP groups configuration. \"\n \"To use Ldap groups, you need to specify the three \"\n \"parameters (groupName, groupMemberPattern and groupBase). \")\n\n self.groupName = groupName\n self.groupMemberPattern = groupMemberPattern\n self.groupBase = groupBase\n self.avatarPattern = avatarPattern\n self.avatarData = avatarData\n if accountExtraFields is None:\n accountExtraFields = []\n self.accountExtraFields = accountExtraFields\n\n def connectLdap(self):\n server = urlparse(self.uri)\n netloc = server.netloc.split(\":\")\n # define the server and the connection\n s = ldap3.Server(netloc[0], port=int(netloc[1]), use_ssl=server.scheme == 'ldaps',\n get_info=ldap3.GET_ALL_INFO)\n\n auth = ldap3.AUTH_SIMPLE\n if self.bindUser is None and self.bindPw is None:\n auth = ldap3.AUTH_ANONYMOUS\n\n c = ldap3.Connection(s, auto_bind=True, client_strategy=ldap3.STRATEGY_SYNC,\n user=self.bindUser, password=self.bindPw,\n authentication=auth)\n return c\n\n def search(self, c, base, filterstr='f', attributes=None):\n c.search(\n base, filterstr, ldap3.SEARCH_SCOPE_WHOLE_SUBTREE, attributes=attributes)\n return c.response\n\n def getUserInfo(self, username):\n def thd():\n c = self.connectLdap()\n infos = {'username': username}\n pattern = self.accountPattern % dict(username=username)\n res = self.search(c, self.accountBase, pattern,\n attributes=[\n self.accountEmail, self.accountFullName] +\n self.accountExtraFields)\n if len(res) != 1:\n raise KeyError(\n \"ldap search \\\"%s\\\" returned %d results\" % (pattern, len(res)))\n dn, ldap_infos = res[0]['dn'], res[0]['raw_attributes']\n if isinstance(dn, bytes):\n dn = dn.decode('utf-8')\n\n def getLdapInfo(x):\n if isinstance(x, list):\n return x[0]\n return x\n infos['full_name'] = getLdapInfo(ldap_infos[self.accountFullName])\n infos['email'] = getLdapInfo(ldap_infos[self.accountEmail])\n for f in self.accountExtraFields:\n if f in ldap_infos:\n infos[f] = getLdapInfo(ldap_infos[f])\n\n if self.groupMemberPattern is None:\n infos['groups'] = []\n return infos\n\n # needs double quoting of backslashing\n pattern = self.groupMemberPattern % dict(dn=dn)\n res = self.search(c, self.groupBase, 
pattern,\n attributes=[self.groupName])\n infos['groups'] = flatten(\n [group_infos['raw_attributes'][self.groupName] for group_infos in res])\n return infos\n return threads.deferToThread(thd)\n\n def findAvatarMime(self, data):\n # http://en.wikipedia.org/wiki/List_of_file_signatures\n if data.startswith(\"\\xff\\xd8\\xff\"):\n return (\"image/jpeg\", data)\n if data.startswith(\"\\x89PNG\"):\n return (\"image/png\", data)\n if data.startswith(\"GIF8\"):\n return (\"image/gif\", data)\n # ignore unknown image format\n return None\n\n def getUserAvatar(self, user_email, size, defaultAvatarUrl):\n def thd():\n c = self.connectLdap()\n pattern = self.avatarPattern % dict(email=user_email)\n res = self.search(c, self.accountBase, pattern,\n attributes=[self.avatarData])\n if not res:\n return None\n ldap_infos = res[0]['raw_attributes']\n if self.avatarData in ldap_infos and ldap_infos[self.avatarData]:\n data = ldap_infos[self.avatarData][0]\n return self.findAvatarMime(data)\n return None\n return threads.deferToThread(thd)\n", "path": "master/buildbot/www/ldapuserinfo.py"}]}
| 3,822 | 323 |
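
The fix above is essentially a rename of ldap3 constants to the names used by ldap3 2.x (which in turn pulls in a newer pyasn1): `GET_ALL_INFO` becomes `ALL`, `AUTH_SIMPLE` becomes `SIMPLE`, `AUTH_ANONYMOUS` becomes `ANONYMOUS`, `STRATEGY_SYNC` becomes `SYNC`, and `SEARCH_SCOPE_WHOLE_SUBTREE` becomes `SUBTREE`. A minimal connect-and-search sketch using the updated names follows; the host, port, base DN, and credentials are placeholders, not Buildbot configuration.

```python
import ldap3


def connect_and_search(host, port, bind_user, bind_pw, base, filterstr, attributes):
    # ldap3 2.x names: ALL (was GET_ALL_INFO), SIMPLE (was AUTH_SIMPLE),
    # ANONYMOUS (was AUTH_ANONYMOUS), SYNC (was STRATEGY_SYNC),
    # SUBTREE (was SEARCH_SCOPE_WHOLE_SUBTREE).
    server = ldap3.Server(host, port=port, use_ssl=False, get_info=ldap3.ALL)
    if bind_user is None and bind_pw is None:
        authentication = ldap3.ANONYMOUS
    else:
        authentication = ldap3.SIMPLE
    conn = ldap3.Connection(server, auto_bind=True, client_strategy=ldap3.SYNC,
                            user=bind_user, password=bind_pw,
                            authentication=authentication)
    conn.search(base, filterstr, ldap3.SUBTREE, attributes=attributes)
    return conn.response
```
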
gh_patches_debug_18849
|
rasdani/github-patches
|
git_diff
|
google__jax-1335
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`slogdet` sign is incorrect
In the following, numpy disagrees with JAX
```python
mat = np.array([[[-0.01]],
[[-0.01]]])
print(np.linalg.slogdet(mat))
print(jnp.linalg.slogdet(jnp.array(mat)))
```
=>
```
(array([-1., -1.]), array([-4.60517019, -4.60517019]))
(DeviceArray([1., 1.]), DeviceArray([-4.60517019, -4.60517019]))
```
</issue>
<code>
[start of jax/numpy/linalg.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16 from __future__ import division
17 from __future__ import print_function
18
19 from functools import partial
20
21 import numpy as onp
22 import warnings
23
24 from jax import jit
25 from .. import lax
26 from .. import lax_linalg
27 from .lax_numpy import _not_implemented
28 from .lax_numpy import _wraps
29 from . import lax_numpy as np
30 from ..util import get_module_functions
31 from ..lib import xla_bridge
32
33
34 _T = lambda x: np.swapaxes(x, -1, -2)
35
36
37 def _promote_arg_dtypes(*args):
38 """Promotes `args` to a common inexact type."""
39 def _to_inexact_type(type):
40 return type if np.issubdtype(type, np.inexact) else np.float64
41 inexact_types = [_to_inexact_type(np._dtype(arg)) for arg in args]
42 dtype = xla_bridge.canonicalize_dtype(np.result_type(*inexact_types))
43 args = [lax.convert_element_type(arg, dtype) for arg in args]
44 if len(args) == 1:
45 return args[0]
46 else:
47 return args
48
49
50 @_wraps(onp.linalg.cholesky)
51 def cholesky(a):
52 a = _promote_arg_dtypes(np.asarray(a))
53 return lax_linalg.cholesky(a)
54
55
56 @_wraps(onp.linalg.svd)
57 def svd(a, full_matrices=True, compute_uv=True):
58 a = _promote_arg_dtypes(np.asarray(a))
59 return lax_linalg.svd(a, full_matrices, compute_uv)
60
61
62 @_wraps(onp.linalg.slogdet)
63 def slogdet(a):
64 a = _promote_arg_dtypes(np.asarray(a))
65 dtype = lax.dtype(a)
66 a_shape = np.shape(a)
67 if len(a_shape) < 2 or a_shape[-1] != a_shape[-2]:
68 msg = "Argument to slogdet() must have shape [..., n, n], got {}"
69 raise ValueError(msg.format(a_shape))
70 lu, pivot = lax_linalg.lu(a)
71 diag = np.diagonal(lu, axis1=-2, axis2=-1)
72 is_zero = np.any(diag == np.array(0, dtype=dtype), axis=-1)
73 parity = np.count_nonzero(pivot != np.arange(a_shape[-1]), axis=-1)
74 if np.iscomplexobj(a):
75 sign = np.prod(diag / np.abs(diag))
76 else:
77 sign = np.array(1, dtype=dtype)
78 parity = parity + np.count_nonzero(diag < 0)
79 sign = np.where(is_zero,
80 np.array(0, dtype=dtype),
81 sign * np.array(-2 * (parity % 2) + 1, dtype=dtype))
82 logdet = np.where(
83 is_zero, np.array(-np.inf, dtype=dtype),
84 np.sum(np.log(np.abs(diag)), axis=-1))
85 return sign, np.real(logdet)
86
87
88 @_wraps(onp.linalg.det)
89 def det(a):
90 sign, logdet = slogdet(a)
91 return sign * np.exp(logdet)
92
93
94 @_wraps(onp.linalg.eig)
95 def eig(a):
96 a = _promote_arg_dtypes(np.asarray(a))
97 w, vl, vr = lax_linalg.eig(a)
98 return w, vr
99
100
101 @_wraps(onp.linalg.eigh)
102 def eigh(a, UPLO=None, symmetrize_input=True):
103 if UPLO is None or UPLO == "L":
104 lower = True
105 elif UPLO == "U":
106 lower = False
107 else:
108 msg = "UPLO must be one of None, 'L', or 'U', got {}".format(UPLO)
109 raise ValueError(msg)
110
111 a = _promote_arg_dtypes(np.asarray(a))
112 v, w = lax_linalg.eigh(a, lower=lower, symmetrize_input=symmetrize_input)
113 return w, v
114
115
116 @_wraps(onp.linalg.inv)
117 def inv(a):
118 if np.ndim(a) < 2 or a.shape[-1] != a.shape[-2]:
119 raise ValueError("Argument to inv must have shape [..., n, n], got {}."
120 .format(np.shape(a)))
121 return solve(
122 a, lax.broadcast(np.eye(a.shape[-1], dtype=lax.dtype(a)), a.shape[:-2]))
123
124
125 @partial(jit, static_argnums=(1, 2, 3))
126 def _norm(x, ord, axis, keepdims):
127 x = _promote_arg_dtypes(np.asarray(x))
128 x_shape = np.shape(x)
129 ndim = len(x_shape)
130
131 if axis is None:
132 # NumPy has an undocumented behavior that admits arbitrary rank inputs if
133 # `ord` is None: https://github.com/numpy/numpy/issues/14215
134 if ord is None:
135 return np.sqrt(np.sum(np.real(x * np.conj(x)), keepdims=keepdims))
136 axis = tuple(range(ndim))
137 elif isinstance(axis, tuple):
138 axis = tuple(np._canonicalize_axis(x, ndim) for x in axis)
139 else:
140 axis = (np._canonicalize_axis(axis, ndim),)
141
142 num_axes = len(axis)
143 if num_axes == 1:
144 if ord is None or ord == 2:
145 return np.sqrt(np.sum(np.real(x * np.conj(x)), axis=axis,
146 keepdims=keepdims))
147 elif ord == np.inf:
148 return np.amax(np.abs(x), axis=axis, keepdims=keepdims)
149 elif ord == -np.inf:
150 return np.amin(np.abs(x), axis=axis, keepdims=keepdims)
151 elif ord == 0:
152 return np.sum(x != 0, dtype=np.finfo(lax.dtype(x)).dtype,
153 axis=axis, keepdims=keepdims)
154 elif ord == 1:
155 # Numpy has a special case for ord == 1 as an optimization. We don't
156 # really need the optimization (XLA could do it for us), but the Numpy
157 # code has slightly different type promotion semantics, so we need a
158 # special case too.
159 return np.sum(np.abs(x), axis=axis, keepdims=keepdims)
160 else:
161 return np.power(np.sum(np.abs(x) ** ord, axis=axis, keepdims=keepdims),
162 1. / ord)
163
164 elif num_axes == 2:
165 row_axis, col_axis = axis
166 if ord is None or ord in ('f', 'fro'):
167 return np.sqrt(np.sum(np.real(x * np.conj(x)), axis=axis,
168 keepdims=keepdims))
169 elif ord == 1:
170 if not keepdims and col_axis > row_axis:
171 col_axis -= 1
172 return np.amax(np.sum(np.abs(x), axis=row_axis, keepdims=keepdims),
173 axis=col_axis, keepdims=keepdims)
174 elif ord == -1:
175 if not keepdims and col_axis > row_axis:
176 col_axis -= 1
177 return np.amin(np.sum(np.abs(x), axis=row_axis, keepdims=keepdims),
178 axis=col_axis, keepdims=keepdims)
179 elif ord == np.inf:
180 if not keepdims and row_axis > col_axis:
181 row_axis -= 1
182 return np.amax(np.sum(np.abs(x), axis=col_axis, keepdims=keepdims),
183 axis=row_axis, keepdims=keepdims)
184 elif ord == -np.inf:
185 if not keepdims and row_axis > col_axis:
186 row_axis -= 1
187 return np.amin(np.sum(np.abs(x), axis=col_axis, keepdims=keepdims),
188 axis=row_axis, keepdims=keepdims)
189 elif ord in ('nuc', 2, -2):
190 x = np.moveaxis(x, axis, (-2, -1))
191 if ord == 2:
192 reducer = np.amax
193 elif ord == -2:
194 reducer = np.amin
195 else:
196 reducer = np.sum
197 y = reducer(svd(x, compute_uv=False), axis=-1)
198 if keepdims:
199 result_shape = list(x_shape)
200 result_shape[axis[0]] = 1
201 result_shape[axis[1]] = 1
202 y = np.reshape(y, result_shape)
203 return y
204 else:
205 raise ValueError("Invalid order '{}' for matrix norm.".format(ord))
206 else:
207 raise ValueError(
208 "Invalid axis values ({}) for np.linalg.norm.".format(axis))
209
210 @_wraps(onp.linalg.norm)
211 def norm(x, ord=None, axis=None, keepdims=False):
212 return _norm(x, ord, axis, keepdims)
213
214
215 @_wraps(onp.linalg.qr)
216 def qr(a, mode="reduced"):
217 if mode in ("reduced", "r", "full"):
218 full_matrices = False
219 elif mode == "complete":
220 full_matrices = True
221 else:
222 raise ValueError("Unsupported QR decomposition mode '{}'".format(mode))
223 a = _promote_arg_dtypes(np.asarray(a))
224 q, r = lax_linalg.qr(a, full_matrices)
225 if mode == "r":
226 return r
227 return q, r
228
229
230 @_wraps(onp.linalg.solve)
231 @jit
232 def solve(a, b):
233 a, b = _promote_arg_dtypes(np.asarray(a), np.asarray(b))
234 a_shape = np.shape(a)
235 b_shape = np.shape(b)
236 a_ndims = len(a_shape)
237 b_ndims = len(b_shape)
238 if not (a_ndims >= 2 and a_shape[-1] == a_shape[-2] and b_ndims >= 1):
239 msg = ("The arguments to solve must have shapes a=[..., m, m] and "
240 "b=[..., m, k] or b=[..., m]; got a={} and b={}")
241 raise ValueError(msg.format(a_shape, b_shape))
242 lu, pivots = lax_linalg.lu(a)
243 dtype = lax.dtype(a)
244
245 m = a_shape[-1]
246
247 # Numpy treats the RHS as a (batched) vector if the number of dimensions
248 # differ by 1. Otherwise, broadcasting rules apply.
249 x = b[..., None] if a_ndims == b_ndims + 1 else b
250
251 batch_dims = lax.broadcast_shapes(lu.shape[:-2], x.shape[:-2])
252 x = np.broadcast_to(x, batch_dims + x.shape[-2:])
253 lu = np.broadcast_to(lu, batch_dims + lu.shape[-2:])
254
255 permutation = lax_linalg.lu_pivots_to_permutation(pivots, m)
256 permutation = np.broadcast_to(permutation, batch_dims + (m,))
257 iotas = np.ix_(*(lax.iota(np.int32, b) for b in batch_dims + (1,)))
258 x = x[iotas[:-1] + (permutation, slice(None))]
259
260 x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=True,
261 unit_diagonal=True)
262 x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=False)
263
264 return x[..., 0] if a_ndims == b_ndims + 1 else x
265
266
267 for func in get_module_functions(onp.linalg):
268 if func.__name__ not in globals():
269 globals()[func.__name__] = _not_implemented(func)
270
[end of jax/numpy/linalg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/jax/numpy/linalg.py b/jax/numpy/linalg.py
--- a/jax/numpy/linalg.py
+++ b/jax/numpy/linalg.py
@@ -60,6 +60,7 @@
@_wraps(onp.linalg.slogdet)
+@jit
def slogdet(a):
a = _promote_arg_dtypes(np.asarray(a))
dtype = lax.dtype(a)
@@ -72,10 +73,10 @@
is_zero = np.any(diag == np.array(0, dtype=dtype), axis=-1)
parity = np.count_nonzero(pivot != np.arange(a_shape[-1]), axis=-1)
if np.iscomplexobj(a):
- sign = np.prod(diag / np.abs(diag))
+ sign = np.prod(diag / np.abs(diag), axis=-1)
else:
sign = np.array(1, dtype=dtype)
- parity = parity + np.count_nonzero(diag < 0)
+ parity = parity + np.count_nonzero(diag < 0, axis=-1)
sign = np.where(is_zero,
np.array(0, dtype=dtype),
sign * np.array(-2 * (parity % 2) + 1, dtype=dtype))
|
{"golden_diff": "diff --git a/jax/numpy/linalg.py b/jax/numpy/linalg.py\n--- a/jax/numpy/linalg.py\n+++ b/jax/numpy/linalg.py\n@@ -60,6 +60,7 @@\n \n \n @_wraps(onp.linalg.slogdet)\n+@jit\n def slogdet(a):\n a = _promote_arg_dtypes(np.asarray(a))\n dtype = lax.dtype(a)\n@@ -72,10 +73,10 @@\n is_zero = np.any(diag == np.array(0, dtype=dtype), axis=-1)\n parity = np.count_nonzero(pivot != np.arange(a_shape[-1]), axis=-1)\n if np.iscomplexobj(a):\n- sign = np.prod(diag / np.abs(diag))\n+ sign = np.prod(diag / np.abs(diag), axis=-1)\n else:\n sign = np.array(1, dtype=dtype)\n- parity = parity + np.count_nonzero(diag < 0)\n+ parity = parity + np.count_nonzero(diag < 0, axis=-1)\n sign = np.where(is_zero,\n np.array(0, dtype=dtype),\n sign * np.array(-2 * (parity % 2) + 1, dtype=dtype))\n", "issue": "`slogdet` sign is incorrect\nIn the following, numpy disagrees with JAX\r\n```python\r\nmat = np.array([[[-0.01]],\r\n [[-0.01]]])\r\nprint(np.linalg.slogdet(mat))\r\nprint(jnp.linalg.slogdet(jnp.array(mat)))\r\n```\r\n=>\r\n```\r\n(array([-1., -1.]), array([-4.60517019, -4.60517019]))\r\n(DeviceArray([1., 1.]), DeviceArray([-4.60517019, -4.60517019]))\r\n```\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom functools import partial\n\nimport numpy as onp\nimport warnings\n\nfrom jax import jit\nfrom .. import lax\nfrom .. import lax_linalg\nfrom .lax_numpy import _not_implemented\nfrom .lax_numpy import _wraps\nfrom . 
import lax_numpy as np\nfrom ..util import get_module_functions\nfrom ..lib import xla_bridge\n\n\n_T = lambda x: np.swapaxes(x, -1, -2)\n\n\ndef _promote_arg_dtypes(*args):\n \"\"\"Promotes `args` to a common inexact type.\"\"\"\n def _to_inexact_type(type):\n return type if np.issubdtype(type, np.inexact) else np.float64\n inexact_types = [_to_inexact_type(np._dtype(arg)) for arg in args]\n dtype = xla_bridge.canonicalize_dtype(np.result_type(*inexact_types))\n args = [lax.convert_element_type(arg, dtype) for arg in args]\n if len(args) == 1:\n return args[0]\n else:\n return args\n\n\n@_wraps(onp.linalg.cholesky)\ndef cholesky(a):\n a = _promote_arg_dtypes(np.asarray(a))\n return lax_linalg.cholesky(a)\n\n\n@_wraps(onp.linalg.svd)\ndef svd(a, full_matrices=True, compute_uv=True):\n a = _promote_arg_dtypes(np.asarray(a))\n return lax_linalg.svd(a, full_matrices, compute_uv)\n\n\n@_wraps(onp.linalg.slogdet)\ndef slogdet(a):\n a = _promote_arg_dtypes(np.asarray(a))\n dtype = lax.dtype(a)\n a_shape = np.shape(a)\n if len(a_shape) < 2 or a_shape[-1] != a_shape[-2]:\n msg = \"Argument to slogdet() must have shape [..., n, n], got {}\"\n raise ValueError(msg.format(a_shape))\n lu, pivot = lax_linalg.lu(a)\n diag = np.diagonal(lu, axis1=-2, axis2=-1)\n is_zero = np.any(diag == np.array(0, dtype=dtype), axis=-1)\n parity = np.count_nonzero(pivot != np.arange(a_shape[-1]), axis=-1)\n if np.iscomplexobj(a):\n sign = np.prod(diag / np.abs(diag))\n else:\n sign = np.array(1, dtype=dtype)\n parity = parity + np.count_nonzero(diag < 0)\n sign = np.where(is_zero,\n np.array(0, dtype=dtype),\n sign * np.array(-2 * (parity % 2) + 1, dtype=dtype))\n logdet = np.where(\n is_zero, np.array(-np.inf, dtype=dtype),\n np.sum(np.log(np.abs(diag)), axis=-1))\n return sign, np.real(logdet)\n\n\n@_wraps(onp.linalg.det)\ndef det(a):\n sign, logdet = slogdet(a)\n return sign * np.exp(logdet)\n\n\n@_wraps(onp.linalg.eig)\ndef eig(a):\n a = _promote_arg_dtypes(np.asarray(a))\n w, vl, vr = lax_linalg.eig(a)\n return w, vr\n\n\n@_wraps(onp.linalg.eigh)\ndef eigh(a, UPLO=None, symmetrize_input=True):\n if UPLO is None or UPLO == \"L\":\n lower = True\n elif UPLO == \"U\":\n lower = False\n else:\n msg = \"UPLO must be one of None, 'L', or 'U', got {}\".format(UPLO)\n raise ValueError(msg)\n\n a = _promote_arg_dtypes(np.asarray(a))\n v, w = lax_linalg.eigh(a, lower=lower, symmetrize_input=symmetrize_input)\n return w, v\n\n\n@_wraps(onp.linalg.inv)\ndef inv(a):\n if np.ndim(a) < 2 or a.shape[-1] != a.shape[-2]:\n raise ValueError(\"Argument to inv must have shape [..., n, n], got {}.\"\n .format(np.shape(a)))\n return solve(\n a, lax.broadcast(np.eye(a.shape[-1], dtype=lax.dtype(a)), a.shape[:-2]))\n\n\n@partial(jit, static_argnums=(1, 2, 3))\ndef _norm(x, ord, axis, keepdims):\n x = _promote_arg_dtypes(np.asarray(x))\n x_shape = np.shape(x)\n ndim = len(x_shape)\n\n if axis is None:\n # NumPy has an undocumented behavior that admits arbitrary rank inputs if\n # `ord` is None: https://github.com/numpy/numpy/issues/14215\n if ord is None:\n return np.sqrt(np.sum(np.real(x * np.conj(x)), keepdims=keepdims))\n axis = tuple(range(ndim))\n elif isinstance(axis, tuple):\n axis = tuple(np._canonicalize_axis(x, ndim) for x in axis)\n else:\n axis = (np._canonicalize_axis(axis, ndim),)\n\n num_axes = len(axis)\n if num_axes == 1:\n if ord is None or ord == 2:\n return np.sqrt(np.sum(np.real(x * np.conj(x)), axis=axis,\n keepdims=keepdims))\n elif ord == np.inf:\n return np.amax(np.abs(x), axis=axis, keepdims=keepdims)\n elif ord == 
-np.inf:\n return np.amin(np.abs(x), axis=axis, keepdims=keepdims)\n elif ord == 0:\n return np.sum(x != 0, dtype=np.finfo(lax.dtype(x)).dtype,\n axis=axis, keepdims=keepdims)\n elif ord == 1:\n # Numpy has a special case for ord == 1 as an optimization. We don't\n # really need the optimization (XLA could do it for us), but the Numpy\n # code has slightly different type promotion semantics, so we need a\n # special case too.\n return np.sum(np.abs(x), axis=axis, keepdims=keepdims)\n else:\n return np.power(np.sum(np.abs(x) ** ord, axis=axis, keepdims=keepdims),\n 1. / ord)\n\n elif num_axes == 2:\n row_axis, col_axis = axis\n if ord is None or ord in ('f', 'fro'):\n return np.sqrt(np.sum(np.real(x * np.conj(x)), axis=axis,\n keepdims=keepdims))\n elif ord == 1:\n if not keepdims and col_axis > row_axis:\n col_axis -= 1\n return np.amax(np.sum(np.abs(x), axis=row_axis, keepdims=keepdims),\n axis=col_axis, keepdims=keepdims)\n elif ord == -1:\n if not keepdims and col_axis > row_axis:\n col_axis -= 1\n return np.amin(np.sum(np.abs(x), axis=row_axis, keepdims=keepdims),\n axis=col_axis, keepdims=keepdims)\n elif ord == np.inf:\n if not keepdims and row_axis > col_axis:\n row_axis -= 1\n return np.amax(np.sum(np.abs(x), axis=col_axis, keepdims=keepdims),\n axis=row_axis, keepdims=keepdims)\n elif ord == -np.inf:\n if not keepdims and row_axis > col_axis:\n row_axis -= 1\n return np.amin(np.sum(np.abs(x), axis=col_axis, keepdims=keepdims),\n axis=row_axis, keepdims=keepdims)\n elif ord in ('nuc', 2, -2):\n x = np.moveaxis(x, axis, (-2, -1))\n if ord == 2:\n reducer = np.amax\n elif ord == -2:\n reducer = np.amin\n else:\n reducer = np.sum\n y = reducer(svd(x, compute_uv=False), axis=-1)\n if keepdims:\n result_shape = list(x_shape)\n result_shape[axis[0]] = 1\n result_shape[axis[1]] = 1\n y = np.reshape(y, result_shape)\n return y\n else:\n raise ValueError(\"Invalid order '{}' for matrix norm.\".format(ord))\n else:\n raise ValueError(\n \"Invalid axis values ({}) for np.linalg.norm.\".format(axis))\n\n@_wraps(onp.linalg.norm)\ndef norm(x, ord=None, axis=None, keepdims=False):\n return _norm(x, ord, axis, keepdims)\n\n\n@_wraps(onp.linalg.qr)\ndef qr(a, mode=\"reduced\"):\n if mode in (\"reduced\", \"r\", \"full\"):\n full_matrices = False\n elif mode == \"complete\":\n full_matrices = True\n else:\n raise ValueError(\"Unsupported QR decomposition mode '{}'\".format(mode))\n a = _promote_arg_dtypes(np.asarray(a))\n q, r = lax_linalg.qr(a, full_matrices)\n if mode == \"r\":\n return r\n return q, r\n\n\n@_wraps(onp.linalg.solve)\n@jit\ndef solve(a, b):\n a, b = _promote_arg_dtypes(np.asarray(a), np.asarray(b))\n a_shape = np.shape(a)\n b_shape = np.shape(b)\n a_ndims = len(a_shape)\n b_ndims = len(b_shape)\n if not (a_ndims >= 2 and a_shape[-1] == a_shape[-2] and b_ndims >= 1):\n msg = (\"The arguments to solve must have shapes a=[..., m, m] and \"\n \"b=[..., m, k] or b=[..., m]; got a={} and b={}\")\n raise ValueError(msg.format(a_shape, b_shape))\n lu, pivots = lax_linalg.lu(a)\n dtype = lax.dtype(a)\n\n m = a_shape[-1]\n\n # Numpy treats the RHS as a (batched) vector if the number of dimensions\n # differ by 1. 
Otherwise, broadcasting rules apply.\n x = b[..., None] if a_ndims == b_ndims + 1 else b\n\n batch_dims = lax.broadcast_shapes(lu.shape[:-2], x.shape[:-2])\n x = np.broadcast_to(x, batch_dims + x.shape[-2:])\n lu = np.broadcast_to(lu, batch_dims + lu.shape[-2:])\n\n permutation = lax_linalg.lu_pivots_to_permutation(pivots, m)\n permutation = np.broadcast_to(permutation, batch_dims + (m,))\n iotas = np.ix_(*(lax.iota(np.int32, b) for b in batch_dims + (1,)))\n x = x[iotas[:-1] + (permutation, slice(None))]\n\n x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=True,\n unit_diagonal=True)\n x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=False)\n\n return x[..., 0] if a_ndims == b_ndims + 1 else x\n\n\nfor func in get_module_functions(onp.linalg):\n if func.__name__ not in globals():\n globals()[func.__name__] = _not_implemented(func)\n", "path": "jax/numpy/linalg.py"}]}
| 4,020 | 280 |
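
The bug above comes down to reducing over the whole batch instead of the last axis when computing the determinant sign. The following NumPy-only illustration uses the same batch of two 1x1 matrices from the issue; it is simplified in that it ignores the LU pivot-parity term that the real implementation also folds into the sign.

```python
import numpy as onp

# LU diagonal per matrix, shape (batch, n): two 1x1 matrices with one negative entry each.
diag = onp.array([[-0.01],
                  [-0.01]])

# Buggy reduction: counts negatives across the whole batch -> 2 -> even parity -> sign +1.
negatives_global = onp.count_nonzero(diag < 0)        # 2
sign_buggy = -2 * (negatives_global % 2) + 1          # +1 reported for every matrix

# Fixed reduction: count per matrix along the last axis -> 1 each -> sign -1 each.
negatives_per_matrix = onp.count_nonzero(diag < 0, axis=-1)   # array([1, 1])
sign_fixed = -2 * (negatives_per_matrix % 2) + 1              # array([-1, -1])

print(sign_buggy, sign_fixed)   # 1 [-1 -1]  -- the fixed form matches onp.linalg.slogdet
```
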
gh_patches_debug_33426
|
rasdani/github-patches
|
git_diff
|
ethereum__web3.py-2025
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Redundant async/await in AsyncJSONBaseProvider serde methods
* Version: 5.20
* Python: 3.9.5
* OS: Linux
```
aiohttp==3.7.4.post0
web3==5.20
```
### What was wrong?
Serde methods internally do not await on any coroutines, so there is no reason to have them `async`
### How can it be fixed?
Remove `async/await` in those methods.
</issue>
<code>
[start of web3/providers/async_base.py]
1 import itertools
2 from typing import (
3 TYPE_CHECKING,
4 Any,
5 Callable,
6 Sequence,
7 Tuple,
8 cast,
9 )
10 import warnings
11
12 from eth_utils import (
13 to_bytes,
14 to_text,
15 )
16
17 from web3._utils.encoding import (
18 FriendlyJsonSerde,
19 )
20 from web3.middleware import (
21 async_combine_middlewares,
22 )
23 from web3.types import (
24 Middleware,
25 MiddlewareOnion,
26 RPCEndpoint,
27 RPCResponse,
28 )
29
30 if TYPE_CHECKING:
31 from web3 import Web3 # noqa: F401
32
33
34 class AsyncBaseProvider:
35 _middlewares: Tuple[Middleware, ...] = ()
36 # a tuple of (all_middlewares, request_func)
37 _request_func_cache: Tuple[Tuple[Middleware, ...], Callable[..., RPCResponse]] = (None, None)
38
39 def __init__(self) -> None:
40 warnings.warn(
41 "Async providers are still being developed and refined. "
42 "Expect breaking changes in minor releases.")
43
44 @property
45 def middlewares(self) -> Tuple[Middleware, ...]:
46 return self._middlewares
47
48 @middlewares.setter
49 def middlewares(
50 self, values: MiddlewareOnion
51 ) -> None:
52 # tuple(values) converts to MiddlewareOnion -> Tuple[Middleware, ...]
53 self._middlewares = tuple(values) # type: ignore
54
55 async def request_func(
56 self, web3: "Web3", outer_middlewares: MiddlewareOnion
57 ) -> Callable[[RPCEndpoint], Any]:
58 all_middlewares: Tuple[Middleware] = tuple(outer_middlewares) + tuple(self.middlewares) # type: ignore # noqa: E501
59
60 cache_key = self._request_func_cache[0]
61 if cache_key is None or cache_key != all_middlewares:
62 self._request_func_cache = (
63 all_middlewares,
64 await self._generate_request_func(web3, all_middlewares)
65 )
66 return self._request_func_cache[-1]
67
68 async def _generate_request_func(
69 self, web3: "Web3", middlewares: Sequence[Middleware]
70 ) -> Callable[..., RPCResponse]:
71 return await async_combine_middlewares(
72 middlewares=middlewares,
73 web3=web3,
74 provider_request_fn=self.make_request,
75 )
76
77 async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
78 raise NotImplementedError("Providers must implement this method")
79
80 async def isConnected(self) -> bool:
81 raise NotImplementedError("Providers must implement this method")
82
83
84 class AsyncJSONBaseProvider(AsyncBaseProvider):
85 def __init__(self) -> None:
86 self.request_counter = itertools.count()
87
88 async def encode_rpc_request(self, method: RPCEndpoint, params: Any) -> bytes:
89 rpc_dict = {
90 "jsonrpc": "2.0",
91 "method": method,
92 "params": params or [],
93 "id": next(self.request_counter),
94 }
95 encoded = FriendlyJsonSerde().json_encode(rpc_dict)
96 return to_bytes(text=encoded)
97
98 async def decode_rpc_response(self, raw_response: bytes) -> RPCResponse:
99 text_response = to_text(raw_response)
100 return cast(RPCResponse, FriendlyJsonSerde().json_decode(text_response))
101
102 async def isConnected(self) -> bool:
103 try:
104 response = await self.make_request(RPCEndpoint('web3_clientVersion'), [])
105 except IOError:
106 return False
107
108 assert response['jsonrpc'] == '2.0'
109 assert 'error' not in response
110
111 return True
112
[end of web3/providers/async_base.py]
[start of web3/providers/async_rpc.py]
1 import logging
2 from typing import (
3 Any,
4 Dict,
5 Iterable,
6 Optional,
7 Tuple,
8 Union,
9 )
10
11 from eth_typing import (
12 URI,
13 )
14 from eth_utils import (
15 to_dict,
16 )
17
18 from web3._utils.http import (
19 construct_user_agent,
20 )
21 from web3._utils.request import (
22 async_make_post_request,
23 get_default_http_endpoint,
24 )
25 from web3.types import (
26 RPCEndpoint,
27 RPCResponse,
28 )
29
30 from .async_base import (
31 AsyncJSONBaseProvider,
32 )
33
34
35 class AsyncHTTPProvider(AsyncJSONBaseProvider):
36 logger = logging.getLogger("web3.providers.HTTPProvider")
37 endpoint_uri = None
38 _request_kwargs = None
39
40 def __init__(
41 self, endpoint_uri: Optional[Union[URI, str]] = None,
42 request_kwargs: Optional[Any] = None
43 ) -> None:
44 if endpoint_uri is None:
45 self.endpoint_uri = get_default_http_endpoint()
46 else:
47 self.endpoint_uri = URI(endpoint_uri)
48
49 self._request_kwargs = request_kwargs or {}
50
51 super().__init__()
52
53 def __str__(self) -> str:
54 return "RPC connection {0}".format(self.endpoint_uri)
55
56 @to_dict
57 def get_request_kwargs(self) -> Iterable[Tuple[str, Any]]:
58 if 'headers' not in self._request_kwargs:
59 yield 'headers', self.get_request_headers()
60 for key, value in self._request_kwargs.items():
61 yield key, value
62
63 def get_request_headers(self) -> Dict[str, str]:
64 return {
65 'Content-Type': 'application/json',
66 'User-Agent': construct_user_agent(str(type(self))),
67 }
68
69 async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
70 self.logger.debug("Making request HTTP. URI: %s, Method: %s",
71 self.endpoint_uri, method)
72 request_data = await self.encode_rpc_request(method, params)
73 raw_response = await async_make_post_request(
74 self.endpoint_uri,
75 request_data,
76 **self.get_request_kwargs()
77 )
78 response = await self.decode_rpc_response(raw_response)
79 self.logger.debug("Getting response HTTP. URI: %s, "
80 "Method: %s, Response: %s",
81 self.endpoint_uri, method, response)
82 return response
83
[end of web3/providers/async_rpc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/web3/providers/async_base.py b/web3/providers/async_base.py
--- a/web3/providers/async_base.py
+++ b/web3/providers/async_base.py
@@ -85,7 +85,7 @@
def __init__(self) -> None:
self.request_counter = itertools.count()
- async def encode_rpc_request(self, method: RPCEndpoint, params: Any) -> bytes:
+ def encode_rpc_request(self, method: RPCEndpoint, params: Any) -> bytes:
rpc_dict = {
"jsonrpc": "2.0",
"method": method,
@@ -95,7 +95,7 @@
encoded = FriendlyJsonSerde().json_encode(rpc_dict)
return to_bytes(text=encoded)
- async def decode_rpc_response(self, raw_response: bytes) -> RPCResponse:
+ def decode_rpc_response(self, raw_response: bytes) -> RPCResponse:
text_response = to_text(raw_response)
return cast(RPCResponse, FriendlyJsonSerde().json_decode(text_response))
diff --git a/web3/providers/async_rpc.py b/web3/providers/async_rpc.py
--- a/web3/providers/async_rpc.py
+++ b/web3/providers/async_rpc.py
@@ -69,13 +69,13 @@
async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
self.logger.debug("Making request HTTP. URI: %s, Method: %s",
self.endpoint_uri, method)
- request_data = await self.encode_rpc_request(method, params)
+ request_data = self.encode_rpc_request(method, params)
raw_response = await async_make_post_request(
self.endpoint_uri,
request_data,
**self.get_request_kwargs()
)
- response = await self.decode_rpc_response(raw_response)
+ response = self.decode_rpc_response(raw_response)
self.logger.debug("Getting response HTTP. URI: %s, "
"Method: %s, Response: %s",
self.endpoint_uri, method, response)
|
{"golden_diff": "diff --git a/web3/providers/async_base.py b/web3/providers/async_base.py\n--- a/web3/providers/async_base.py\n+++ b/web3/providers/async_base.py\n@@ -85,7 +85,7 @@\n def __init__(self) -> None:\n self.request_counter = itertools.count()\n \n- async def encode_rpc_request(self, method: RPCEndpoint, params: Any) -> bytes:\n+ def encode_rpc_request(self, method: RPCEndpoint, params: Any) -> bytes:\n rpc_dict = {\n \"jsonrpc\": \"2.0\",\n \"method\": method,\n@@ -95,7 +95,7 @@\n encoded = FriendlyJsonSerde().json_encode(rpc_dict)\n return to_bytes(text=encoded)\n \n- async def decode_rpc_response(self, raw_response: bytes) -> RPCResponse:\n+ def decode_rpc_response(self, raw_response: bytes) -> RPCResponse:\n text_response = to_text(raw_response)\n return cast(RPCResponse, FriendlyJsonSerde().json_decode(text_response))\n \ndiff --git a/web3/providers/async_rpc.py b/web3/providers/async_rpc.py\n--- a/web3/providers/async_rpc.py\n+++ b/web3/providers/async_rpc.py\n@@ -69,13 +69,13 @@\n async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:\n self.logger.debug(\"Making request HTTP. URI: %s, Method: %s\",\n self.endpoint_uri, method)\n- request_data = await self.encode_rpc_request(method, params)\n+ request_data = self.encode_rpc_request(method, params)\n raw_response = await async_make_post_request(\n self.endpoint_uri,\n request_data,\n **self.get_request_kwargs()\n )\n- response = await self.decode_rpc_response(raw_response)\n+ response = self.decode_rpc_response(raw_response)\n self.logger.debug(\"Getting response HTTP. URI: %s, \"\n \"Method: %s, Response: %s\",\n self.endpoint_uri, method, response)\n", "issue": "Redundant async/await in AsyncJSONBaseProvider serde methods\n* Version: 5.20\r\n* Python: 3.9.5\r\n* OS: Linux\r\n\r\n```\r\naiohttp==3.7.4.post0\r\nweb3==5.20\r\n```\r\n\r\n\r\n### What was wrong?\r\n\r\nSerde methods internally do not await on any coroutines, so there is no reason to have them `async`\r\n\r\n### How can it be fixed?\r\n\r\nRemove `async/await` in those methods.\r\n\n", "before_files": [{"content": "import itertools\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Sequence,\n Tuple,\n cast,\n)\nimport warnings\n\nfrom eth_utils import (\n to_bytes,\n to_text,\n)\n\nfrom web3._utils.encoding import (\n FriendlyJsonSerde,\n)\nfrom web3.middleware import (\n async_combine_middlewares,\n)\nfrom web3.types import (\n Middleware,\n MiddlewareOnion,\n RPCEndpoint,\n RPCResponse,\n)\n\nif TYPE_CHECKING:\n from web3 import Web3 # noqa: F401\n\n\nclass AsyncBaseProvider:\n _middlewares: Tuple[Middleware, ...] = ()\n # a tuple of (all_middlewares, request_func)\n _request_func_cache: Tuple[Tuple[Middleware, ...], Callable[..., RPCResponse]] = (None, None)\n\n def __init__(self) -> None:\n warnings.warn(\n \"Async providers are still being developed and refined. 
\"\n \"Expect breaking changes in minor releases.\")\n\n @property\n def middlewares(self) -> Tuple[Middleware, ...]:\n return self._middlewares\n\n @middlewares.setter\n def middlewares(\n self, values: MiddlewareOnion\n ) -> None:\n # tuple(values) converts to MiddlewareOnion -> Tuple[Middleware, ...]\n self._middlewares = tuple(values) # type: ignore\n\n async def request_func(\n self, web3: \"Web3\", outer_middlewares: MiddlewareOnion\n ) -> Callable[[RPCEndpoint], Any]:\n all_middlewares: Tuple[Middleware] = tuple(outer_middlewares) + tuple(self.middlewares) # type: ignore # noqa: E501\n\n cache_key = self._request_func_cache[0]\n if cache_key is None or cache_key != all_middlewares:\n self._request_func_cache = (\n all_middlewares,\n await self._generate_request_func(web3, all_middlewares)\n )\n return self._request_func_cache[-1]\n\n async def _generate_request_func(\n self, web3: \"Web3\", middlewares: Sequence[Middleware]\n ) -> Callable[..., RPCResponse]:\n return await async_combine_middlewares(\n middlewares=middlewares,\n web3=web3,\n provider_request_fn=self.make_request,\n )\n\n async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:\n raise NotImplementedError(\"Providers must implement this method\")\n\n async def isConnected(self) -> bool:\n raise NotImplementedError(\"Providers must implement this method\")\n\n\nclass AsyncJSONBaseProvider(AsyncBaseProvider):\n def __init__(self) -> None:\n self.request_counter = itertools.count()\n\n async def encode_rpc_request(self, method: RPCEndpoint, params: Any) -> bytes:\n rpc_dict = {\n \"jsonrpc\": \"2.0\",\n \"method\": method,\n \"params\": params or [],\n \"id\": next(self.request_counter),\n }\n encoded = FriendlyJsonSerde().json_encode(rpc_dict)\n return to_bytes(text=encoded)\n\n async def decode_rpc_response(self, raw_response: bytes) -> RPCResponse:\n text_response = to_text(raw_response)\n return cast(RPCResponse, FriendlyJsonSerde().json_decode(text_response))\n\n async def isConnected(self) -> bool:\n try:\n response = await self.make_request(RPCEndpoint('web3_clientVersion'), [])\n except IOError:\n return False\n\n assert response['jsonrpc'] == '2.0'\n assert 'error' not in response\n\n return True\n", "path": "web3/providers/async_base.py"}, {"content": "import logging\nfrom typing import (\n Any,\n Dict,\n Iterable,\n Optional,\n Tuple,\n Union,\n)\n\nfrom eth_typing import (\n URI,\n)\nfrom eth_utils import (\n to_dict,\n)\n\nfrom web3._utils.http import (\n construct_user_agent,\n)\nfrom web3._utils.request import (\n async_make_post_request,\n get_default_http_endpoint,\n)\nfrom web3.types import (\n RPCEndpoint,\n RPCResponse,\n)\n\nfrom .async_base import (\n AsyncJSONBaseProvider,\n)\n\n\nclass AsyncHTTPProvider(AsyncJSONBaseProvider):\n logger = logging.getLogger(\"web3.providers.HTTPProvider\")\n endpoint_uri = None\n _request_kwargs = None\n\n def __init__(\n self, endpoint_uri: Optional[Union[URI, str]] = None,\n request_kwargs: Optional[Any] = None\n ) -> None:\n if endpoint_uri is None:\n self.endpoint_uri = get_default_http_endpoint()\n else:\n self.endpoint_uri = URI(endpoint_uri)\n\n self._request_kwargs = request_kwargs or {}\n\n super().__init__()\n\n def __str__(self) -> str:\n return \"RPC connection {0}\".format(self.endpoint_uri)\n\n @to_dict\n def get_request_kwargs(self) -> Iterable[Tuple[str, Any]]:\n if 'headers' not in self._request_kwargs:\n yield 'headers', self.get_request_headers()\n for key, value in self._request_kwargs.items():\n yield key, value\n\n def 
get_request_headers(self) -> Dict[str, str]:\n return {\n 'Content-Type': 'application/json',\n 'User-Agent': construct_user_agent(str(type(self))),\n }\n\n async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:\n self.logger.debug(\"Making request HTTP. URI: %s, Method: %s\",\n self.endpoint_uri, method)\n request_data = await self.encode_rpc_request(method, params)\n raw_response = await async_make_post_request(\n self.endpoint_uri,\n request_data,\n **self.get_request_kwargs()\n )\n response = await self.decode_rpc_response(raw_response)\n self.logger.debug(\"Getting response HTTP. URI: %s, \"\n \"Method: %s, Response: %s\",\n self.endpoint_uri, method, response)\n return response\n", "path": "web3/providers/async_rpc.py"}]}
| 2,335 | 450 |
gh_patches_debug_22997 | rasdani/github-patches | git_diff | liqd__a4-opin-605 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong user in project
When I get an invitation to a project via email and I am logged in with a different user and click on the link in the email, the second user is added to the project.
</issue>
<code>
[start of euth/memberships/views.py]
1 from django.http import Http404
2 from django.shortcuts import redirect
3 from django.views import generic
4 from rules.compat import access_mixins as mixin
5
6 from adhocracy4.projects import models as prj_models
7 from adhocracy4.projects import views as prj_views
8
9 from . import forms, models
10
11
12 class RequestsProjectDetailView(prj_views.ProjectDetailView):
13
14 def handle_no_permission(self):
15 """
16 Check if user clould join
17 """
18 user = self.request.user
19 is_member = user.is_authenticated() and self.project.has_member(user)
20
21 if is_member:
22 return super().handle_no_permission()
23 else:
24 return self.handle_no_membership()
25
26 def handle_no_membership(self):
27 membership_impossible = (
28 not self.request.user.is_authenticated()
29 or self.project.is_draft
30 or self.project.has_member(self.request.user)
31 )
32
33 if membership_impossible:
34 return super().handle_no_permission()
35 else:
36 return redirect('memberships-request',
37 project_slug=self.project.slug)
38
39
40 class InviteView(mixin.LoginRequiredMixin, generic.UpdateView):
41 model = models.Invite
42 form_class = forms.InviteForm
43 slug_field = 'token'
44 slug_url_kwarg = 'invite_token'
45
46 def form_valid(self, form):
47 if form.is_accepted():
48 form.instance.accept(self.request.user)
49 return redirect(form.instance.project.get_absolute_url())
50 else:
51 form.instance.reject()
52 return redirect('/')
53
54
55 class RequestView(mixin.LoginRequiredMixin, generic.DetailView):
56 """
57 Displays membership request if it exists or allows to create one.
58 """
59 model = models.Request
60 slug_field = 'project__slug'
61 slug_url_kwarg = 'project_slug'
62 context_object_name = 'join_request'
63
64 def get_queryset(self):
65 return self.model.objects.filter(creator=self.request.user)
66
67 def get(self, request, *args, **kwargs):
68 if self.project.has_member(request.user):
69 return redirect(self.project.get_absolute_url())
70 else:
71 return super().get(request, *args, **kwargs)
72
73 def post(self, request, *args, **kwargs):
74 user = request.user
75 project = self.project
76 models.Request.objects.request_membership(project, user)
77 return redirect(self.request.path)
78
79 def get_object(self, queryset=None):
80 try:
81 return super().get_object(queryset)
82 except Http404:
83 return None
84
85 @property
86 def project(self):
87 project_slug = self.kwargs[self.slug_url_kwarg]
88 return prj_models.Project.objects.get(slug=project_slug)
89
[end of euth/memberships/views.py]
[start of euth/memberships/forms.py]
1 from django import forms
2 from django.core.exceptions import ValidationError
3
4 from . import models
5
6
7 class InviteForm(forms.ModelForm):
8 accept = forms.CharField(required=False)
9 reject = forms.CharField(required=False)
10
11 class Meta:
12 model = models.Invite
13 fields = ['accept', 'reject']
14
15 def clean(self):
16 data = self.data
17 if 'accept' not in data and 'reject' not in data:
18 raise ValidationError('Reject or accept')
19 return data
20
21 def is_accepted(self):
22 data = self.data
23 return 'accept' in data and 'reject' not in data
24
[end of euth/memberships/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/euth/memberships/forms.py b/euth/memberships/forms.py
--- a/euth/memberships/forms.py
+++ b/euth/memberships/forms.py
@@ -12,10 +12,17 @@
model = models.Invite
fields = ['accept', 'reject']
+ def __init__(self, user=None, **kwargs):
+ super().__init__(**kwargs)
+ self.user = user
+
def clean(self):
data = self.data
if 'accept' not in data and 'reject' not in data:
raise ValidationError('Reject or accept')
+ if 'accept' in data and not self.user.email == self.instance.email:
+ raise ValidationError('This user has another email address than '
+ 'the one that received the invitation.')
return data
def is_accepted(self):
diff --git a/euth/memberships/views.py b/euth/memberships/views.py
--- a/euth/memberships/views.py
+++ b/euth/memberships/views.py
@@ -43,6 +43,11 @@
slug_field = 'token'
slug_url_kwarg = 'invite_token'
+ def get_form_kwargs(self):
+ kwargs = super().get_form_kwargs()
+ kwargs.update({'user': self.request.user})
+ return kwargs
+
def form_valid(self, form):
if form.is_accepted():
form.instance.accept(self.request.user)
|
{"golden_diff": "diff --git a/euth/memberships/forms.py b/euth/memberships/forms.py\n--- a/euth/memberships/forms.py\n+++ b/euth/memberships/forms.py\n@@ -12,10 +12,17 @@\n model = models.Invite\n fields = ['accept', 'reject']\n \n+ def __init__(self, user=None, **kwargs):\n+ super().__init__(**kwargs)\n+ self.user = user\n+\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n+ if 'accept' in data and not self.user.email == self.instance.email:\n+ raise ValidationError('This user has another email address than '\n+ 'the one that received the invitation.')\n return data\n \n def is_accepted(self):\ndiff --git a/euth/memberships/views.py b/euth/memberships/views.py\n--- a/euth/memberships/views.py\n+++ b/euth/memberships/views.py\n@@ -43,6 +43,11 @@\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n \n+ def get_form_kwargs(self):\n+ kwargs = super().get_form_kwargs()\n+ kwargs.update({'user': self.request.user})\n+ return kwargs\n+\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n", "issue": "Wrong user in project\nWhen I get an invitation to a project via email and I am logged in with a different user and click one the link in the email, the second user is added to the project\n", "before_files": [{"content": "from django.http import Http404\nfrom django.shortcuts import redirect\nfrom django.views import generic\nfrom rules.compat import access_mixins as mixin\n\nfrom adhocracy4.projects import models as prj_models\nfrom adhocracy4.projects import views as prj_views\n\nfrom . import forms, models\n\n\nclass RequestsProjectDetailView(prj_views.ProjectDetailView):\n\n def handle_no_permission(self):\n \"\"\"\n Check if user clould join\n \"\"\"\n user = self.request.user\n is_member = user.is_authenticated() and self.project.has_member(user)\n\n if is_member:\n return super().handle_no_permission()\n else:\n return self.handle_no_membership()\n\n def handle_no_membership(self):\n membership_impossible = (\n not self.request.user.is_authenticated()\n or self.project.is_draft\n or self.project.has_member(self.request.user)\n )\n\n if membership_impossible:\n return super().handle_no_permission()\n else:\n return redirect('memberships-request',\n project_slug=self.project.slug)\n\n\nclass InviteView(mixin.LoginRequiredMixin, generic.UpdateView):\n model = models.Invite\n form_class = forms.InviteForm\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n return redirect(form.instance.project.get_absolute_url())\n else:\n form.instance.reject()\n return redirect('/')\n\n\nclass RequestView(mixin.LoginRequiredMixin, generic.DetailView):\n \"\"\"\n Displays membership request if it exists or allows to create one.\n \"\"\"\n model = models.Request\n slug_field = 'project__slug'\n slug_url_kwarg = 'project_slug'\n context_object_name = 'join_request'\n\n def get_queryset(self):\n return self.model.objects.filter(creator=self.request.user)\n\n def get(self, request, *args, **kwargs):\n if self.project.has_member(request.user):\n return redirect(self.project.get_absolute_url())\n else:\n return super().get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n user = request.user\n project = self.project\n models.Request.objects.request_membership(project, user)\n return redirect(self.request.path)\n\n def get_object(self, queryset=None):\n 
try:\n return super().get_object(queryset)\n except Http404:\n return None\n\n @property\n def project(self):\n project_slug = self.kwargs[self.slug_url_kwarg]\n return prj_models.Project.objects.get(slug=project_slug)\n", "path": "euth/memberships/views.py"}, {"content": "from django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom . import models\n\n\nclass InviteForm(forms.ModelForm):\n accept = forms.CharField(required=False)\n reject = forms.CharField(required=False)\n\n class Meta:\n model = models.Invite\n fields = ['accept', 'reject']\n\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n return data\n\n def is_accepted(self):\n data = self.data\n return 'accept' in data and 'reject' not in data\n", "path": "euth/memberships/forms.py"}]}
| 1,490 | 316 |
gh_patches_debug_12885 | rasdani/github-patches | git_diff | cupy__cupy-3291 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lexsort output is incorrect
The output of cupy.lexsort is incorrect. I can't see any pattern as to what is wrong with the output.
### Code to reproduce
```
import cupy
import numpy
# numpy output is correct:
a = (numpy.random.random((10,2)) * 10).astype(int) # random ints between 0 and 9
print(a[numpy.lexsort(a.T[::-1])]) # sorted by first column, not last
# cupy output is unsorted:
b = cupy.array(a)
print(b[cupy.lexsort(b.T[::-1])])
```
The same thing happens for floats, so that's not the reason.
### Conditions
(output of `python -c 'import cupy; cupy.show_config()'`):
CuPy Version : 7.1.1
CUDA Root : /opt/cuda
CUDA Build Version : 10020
CUDA Driver Version : 10020
CUDA Runtime Version : 10020
cuBLAS Version : 10202
cuFFT Version : 10102
cuRAND Version : 10102
cuSOLVER Version : (10, 3, 0)
cuSPARSE Version : 10301
NVRTC Version : (10, 2)
cuDNN Build Version : 7605
cuDNN Version : 7605
NCCL Build Version : 2507
NCCL Runtime Version : 2507
</issue>
<code>
[start of cupy/_sorting/sort.py]
1 import cupy
2 import numpy
3
4 if cupy.cuda.thrust_enabled:
5 from cupy.cuda import thrust
6
7
8 def sort(a, axis=-1):
9 """Returns a sorted copy of an array with a stable sorting algorithm.
10
11 Args:
12 a (cupy.ndarray): Array to be sorted.
13 axis (int or None): Axis along which to sort. Default is -1, which
14 means sort along the last axis. If None is supplied, the array is
15 flattened before sorting.
16
17 Returns:
18 cupy.ndarray: Array of the same type and shape as ``a``.
19
20 .. note::
21 For its implementation reason, ``cupy.sort`` currently does not support
22 ``kind`` and ``order`` parameters that ``numpy.sort`` does
23 support.
24
25 .. seealso:: :func:`numpy.sort`
26
27 """
28 if axis is None:
29 ret = a.flatten()
30 axis = -1
31 else:
32 ret = a.copy()
33 ret.sort(axis=axis)
34 return ret
35
36
37 def lexsort(keys):
38 """Perform an indirect sort using an array of keys.
39
40 Args:
41 keys (cupy.ndarray): ``(k, N)`` array containing ``k`` ``(N,)``-shaped
42 arrays. The ``k`` different "rows" to be sorted. The last row is
43 the primary sort key.
44
45 Returns:
46 cupy.ndarray: Array of indices that sort the keys.
47
48 .. note::
49 For its implementation reason, ``cupy.lexsort`` currently supports only
50 keys with their rank of one or two and does not support ``axis``
51 parameter that ``numpy.lexsort`` supports.
52
53 .. seealso:: :func:`numpy.lexsort`
54
55 """
56
57 # TODO(takagi): Support axis argument.
58
59 if not cupy.cuda.thrust_enabled:
60 raise RuntimeError('Thrust is needed to use cupy.lexsort. Please '
61 'install CUDA Toolkit with Thrust then reinstall '
62 'CuPy after uninstalling it.')
63
64 if keys.ndim == ():
65 # as numpy.lexsort() raises
66 raise TypeError('need sequence of keys with len > 0 in lexsort')
67
68 if keys.ndim == 1:
69 return 0
70
71 # TODO(takagi): Support ranks of three or more.
72 if keys.ndim > 2:
73 raise NotImplementedError('Keys with the rank of three or more is not '
74 'supported in lexsort')
75
76 idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp)
77 k = keys._shape[0]
78 n = keys._shape[1]
79 thrust.lexsort(keys.dtype, idx_array.data.ptr, keys.data.ptr, k, n)
80
81 return idx_array
82
83
84 def argsort(a, axis=-1):
85 """Returns the indices that would sort an array with a stable sorting.
86
87 Args:
88 a (cupy.ndarray): Array to sort.
89 axis (int or None): Axis along which to sort. Default is -1, which
90 means sort along the last axis. If None is supplied, the array is
91 flattened before sorting.
92
93 Returns:
94 cupy.ndarray: Array of indices that sort ``a``.
95
96 .. note::
97 For its implementation reason, ``cupy.argsort`` does not support
98 ``kind`` and ``order`` parameters.
99
100 .. seealso:: :func:`numpy.argsort`
101
102 """
103 return a.argsort(axis=axis)
104
105
106 def msort(a):
107 """Returns a copy of an array sorted along the first axis.
108
109 Args:
110 a (cupy.ndarray): Array to be sorted.
111
112 Returns:
113 cupy.ndarray: Array of the same type and shape as ``a``.
114
115 .. note:
116 ``cupy.msort(a)``, the CuPy counterpart of ``numpy.msort(a)``, is
117 equivalent to ``cupy.sort(a, axis=0)``.
118
119 .. seealso:: :func:`numpy.msort`
120
121 """
122
123 # TODO(takagi): Support float16 and bool.
124 return sort(a, axis=0)
125
126
127 # TODO(okuta): Implement sort_complex
128
129
130 def partition(a, kth, axis=-1):
131 """Returns a partitioned copy of an array.
132
133 Creates a copy of the array whose elements are rearranged such that the
134 value of the element in k-th position would occur in that position in a
135 sorted array. All of the elements before the new k-th element are less
136 than or equal to the elements after the new k-th element.
137
138 Args:
139 a (cupy.ndarray): Array to be sorted.
140 kth (int or sequence of ints): Element index to partition by. If
141 supplied with a sequence of k-th it will partition all elements
142 indexed by k-th of them into their sorted position at once.
143 axis (int or None): Axis along which to sort. Default is -1, which
144 means sort along the last axis. If None is supplied, the array is
145 flattened before sorting.
146
147 Returns:
148 cupy.ndarray: Array of the same type and shape as ``a``.
149
150 .. seealso:: :func:`numpy.partition`
151
152 """
153 if axis is None:
154 ret = a.flatten()
155 axis = -1
156 else:
157 ret = a.copy()
158 ret.partition(kth, axis=axis)
159 return ret
160
161
162 def argpartition(a, kth, axis=-1):
163 """Returns the indices that would partially sort an array.
164
165 Args:
166 a (cupy.ndarray): Array to be sorted.
167 kth (int or sequence of ints): Element index to partition by. If
168 supplied with a sequence of k-th it will partition all elements
169 indexed by k-th of them into their sorted position at once.
170 axis (int or None): Axis along which to sort. Default is -1, which
171 means sort along the last axis. If None is supplied, the array is
172 flattened before sorting.
173
174 Returns:
175 cupy.ndarray: Array of the same type and shape as ``a``.
176
177 .. note::
178 For its implementation reason, `cupy.argpartition` fully sorts the
179 given array as `cupy.argsort` does. It also does not support ``kind``
180 and ``order`` parameters that ``numpy.argpartition`` supports.
181
182 .. seealso:: :func:`numpy.argpartition`
183
184 """
185 return a.argpartition(kth, axis=axis)
186
[end of cupy/_sorting/sort.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cupy/_sorting/sort.py b/cupy/_sorting/sort.py
--- a/cupy/_sorting/sort.py
+++ b/cupy/_sorting/sort.py
@@ -73,6 +73,10 @@
raise NotImplementedError('Keys with the rank of three or more is not '
'supported in lexsort')
+ # thrust.lexsort() assumes a C-contiguous array
+ if not keys.flags.c_contiguous:
+ keys = keys.copy('C')
+
idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp)
k = keys._shape[0]
n = keys._shape[1]
@@ -120,7 +124,6 @@
"""
- # TODO(takagi): Support float16 and bool.
return sort(a, axis=0)
|
{"golden_diff": "diff --git a/cupy/_sorting/sort.py b/cupy/_sorting/sort.py\n--- a/cupy/_sorting/sort.py\n+++ b/cupy/_sorting/sort.py\n@@ -73,6 +73,10 @@\n raise NotImplementedError('Keys with the rank of three or more is not '\n 'supported in lexsort')\n \n+ # thrust.lexsort() assumes a C-contiguous array\n+ if not keys.flags.c_contiguous:\n+ keys = keys.copy('C')\n+\n idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp)\n k = keys._shape[0]\n n = keys._shape[1]\n@@ -120,7 +124,6 @@\n \n \"\"\"\n \n- # TODO(takagi): Support float16 and bool.\n return sort(a, axis=0)\n", "issue": "Lexsort output is incorrect\nThe output of cupy.lexsort is incorrect. I can't see any pattern as to what is wrong with the output.\r\n\r\n### Code to reproduce\r\n```\r\nimport cupy\r\nimport numpy\r\n\r\n# numpy output is correct:\r\na = (numpy.random.random((10,2)) * 10).astype(int) # random ints between 0 and 9\r\nprint(a[numpy.lexsort(a.T[::-1])]) # sorted by first column, not last\r\n\r\n# cupy output is unsorted:\r\nb = cupy.array(a)\r\nprint(b[cupy.lexsort(b.T[::-1])])\r\n```\r\nThe same thing happens for floats, so that's not the reason.\r\n\r\n### Conditions\r\n(output of `python -c 'import cupy; cupy.show_config()'`):\r\n\r\nCuPy Version : 7.1.1\r\nCUDA Root : /opt/cuda\r\nCUDA Build Version : 10020\r\nCUDA Driver Version : 10020\r\nCUDA Runtime Version : 10020\r\ncuBLAS Version : 10202\r\ncuFFT Version : 10102\r\ncuRAND Version : 10102\r\ncuSOLVER Version : (10, 3, 0)\r\ncuSPARSE Version : 10301\r\nNVRTC Version : (10, 2)\r\ncuDNN Build Version : 7605\r\ncuDNN Version : 7605\r\nNCCL Build Version : 2507\r\nNCCL Runtime Version : 2507\n", "before_files": [{"content": "import cupy\nimport numpy\n\nif cupy.cuda.thrust_enabled:\n from cupy.cuda import thrust\n\n\ndef sort(a, axis=-1):\n \"\"\"Returns a sorted copy of an array with a stable sorting algorithm.\n\n Args:\n a (cupy.ndarray): Array to be sorted.\n axis (int or None): Axis along which to sort. Default is -1, which\n means sort along the last axis. If None is supplied, the array is\n flattened before sorting.\n\n Returns:\n cupy.ndarray: Array of the same type and shape as ``a``.\n\n .. note::\n For its implementation reason, ``cupy.sort`` currently does not support\n ``kind`` and ``order`` parameters that ``numpy.sort`` does\n support.\n\n .. seealso:: :func:`numpy.sort`\n\n \"\"\"\n if axis is None:\n ret = a.flatten()\n axis = -1\n else:\n ret = a.copy()\n ret.sort(axis=axis)\n return ret\n\n\ndef lexsort(keys):\n \"\"\"Perform an indirect sort using an array of keys.\n\n Args:\n keys (cupy.ndarray): ``(k, N)`` array containing ``k`` ``(N,)``-shaped\n arrays. The ``k`` different \"rows\" to be sorted. The last row is\n the primary sort key.\n\n Returns:\n cupy.ndarray: Array of indices that sort the keys.\n\n .. note::\n For its implementation reason, ``cupy.lexsort`` currently supports only\n keys with their rank of one or two and does not support ``axis``\n parameter that ``numpy.lexsort`` supports.\n\n .. seealso:: :func:`numpy.lexsort`\n\n \"\"\"\n\n # TODO(takagi): Support axis argument.\n\n if not cupy.cuda.thrust_enabled:\n raise RuntimeError('Thrust is needed to use cupy.lexsort. 
Please '\n 'install CUDA Toolkit with Thrust then reinstall '\n 'CuPy after uninstalling it.')\n\n if keys.ndim == ():\n # as numpy.lexsort() raises\n raise TypeError('need sequence of keys with len > 0 in lexsort')\n\n if keys.ndim == 1:\n return 0\n\n # TODO(takagi): Support ranks of three or more.\n if keys.ndim > 2:\n raise NotImplementedError('Keys with the rank of three or more is not '\n 'supported in lexsort')\n\n idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp)\n k = keys._shape[0]\n n = keys._shape[1]\n thrust.lexsort(keys.dtype, idx_array.data.ptr, keys.data.ptr, k, n)\n\n return idx_array\n\n\ndef argsort(a, axis=-1):\n \"\"\"Returns the indices that would sort an array with a stable sorting.\n\n Args:\n a (cupy.ndarray): Array to sort.\n axis (int or None): Axis along which to sort. Default is -1, which\n means sort along the last axis. If None is supplied, the array is\n flattened before sorting.\n\n Returns:\n cupy.ndarray: Array of indices that sort ``a``.\n\n .. note::\n For its implementation reason, ``cupy.argsort`` does not support\n ``kind`` and ``order`` parameters.\n\n .. seealso:: :func:`numpy.argsort`\n\n \"\"\"\n return a.argsort(axis=axis)\n\n\ndef msort(a):\n \"\"\"Returns a copy of an array sorted along the first axis.\n\n Args:\n a (cupy.ndarray): Array to be sorted.\n\n Returns:\n cupy.ndarray: Array of the same type and shape as ``a``.\n\n .. note:\n ``cupy.msort(a)``, the CuPy counterpart of ``numpy.msort(a)``, is\n equivalent to ``cupy.sort(a, axis=0)``.\n\n .. seealso:: :func:`numpy.msort`\n\n \"\"\"\n\n # TODO(takagi): Support float16 and bool.\n return sort(a, axis=0)\n\n\n# TODO(okuta): Implement sort_complex\n\n\ndef partition(a, kth, axis=-1):\n \"\"\"Returns a partitioned copy of an array.\n\n Creates a copy of the array whose elements are rearranged such that the\n value of the element in k-th position would occur in that position in a\n sorted array. All of the elements before the new k-th element are less\n than or equal to the elements after the new k-th element.\n\n Args:\n a (cupy.ndarray): Array to be sorted.\n kth (int or sequence of ints): Element index to partition by. If\n supplied with a sequence of k-th it will partition all elements\n indexed by k-th of them into their sorted position at once.\n axis (int or None): Axis along which to sort. Default is -1, which\n means sort along the last axis. If None is supplied, the array is\n flattened before sorting.\n\n Returns:\n cupy.ndarray: Array of the same type and shape as ``a``.\n\n .. seealso:: :func:`numpy.partition`\n\n \"\"\"\n if axis is None:\n ret = a.flatten()\n axis = -1\n else:\n ret = a.copy()\n ret.partition(kth, axis=axis)\n return ret\n\n\ndef argpartition(a, kth, axis=-1):\n \"\"\"Returns the indices that would partially sort an array.\n\n Args:\n a (cupy.ndarray): Array to be sorted.\n kth (int or sequence of ints): Element index to partition by. If\n supplied with a sequence of k-th it will partition all elements\n indexed by k-th of them into their sorted position at once.\n axis (int or None): Axis along which to sort. Default is -1, which\n means sort along the last axis. If None is supplied, the array is\n flattened before sorting.\n\n Returns:\n cupy.ndarray: Array of the same type and shape as ``a``.\n\n .. note::\n For its implementation reason, `cupy.argpartition` fully sorts the\n given array as `cupy.argsort` does. It also does not support ``kind``\n and ``order`` parameters that ``numpy.argpartition`` supports.\n\n .. 
seealso:: :func:`numpy.argpartition`\n\n \"\"\"\n return a.argpartition(kth, axis=axis)\n", "path": "cupy/_sorting/sort.py"}]}
| 2,745 | 189 |
gh_patches_debug_19714 | rasdani/github-patches | git_diff | plotly__plotly.py-2713 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
histogram() got an unexpected keyword argument 'legend' with pandas backend
pandas version 1.1.0
plotly version 4.9.0
The following will raise `histogram() got an unexpected keyword argument 'legend' `:
```python
import pandas as pd
pd.options.plotting.backend = "plotly"
df = pd.DataFrame()
df.hist()
```
I suggest adding `legend` to the skip list:
https://github.com/plotly/plotly.py/blob/dc9c5fdfe70367d5dc4fcdca6a0ad07125d64647/packages/python/plotly/plotly/__init__.py#L161
</issue>
<code>
[start of packages/python/plotly/plotly/__init__.py]
1 """
2 https://plot.ly/python/
3
4 Plotly's Python API allows users to programmatically access Plotly's
5 server resources.
6
7 This package is organized as follows:
8
9 Subpackages:
10
11 - plotly: all functionality that requires access to Plotly's servers
12
13 - graph_objs: objects for designing figures and visualizing data
14
15 - matplotlylib: tools to convert matplotlib figures
16
17 Modules:
18
19 - tools: some helpful tools that do not require access to Plotly's servers
20
21 - utils: functions that you probably won't need, but that subpackages use
22
23 - version: holds the current API version
24
25 - exceptions: defines our custom exception classes
26
27 """
28 from __future__ import absolute_import
29 import sys
30 from _plotly_utils.importers import relative_import
31
32
33 if sys.version_info < (3, 7):
34 from plotly import (
35 graph_objs,
36 tools,
37 utils,
38 offline,
39 colors,
40 io,
41 data,
42 )
43 from plotly.version import __version__
44
45 __all__ = [
46 "graph_objs",
47 "tools",
48 "utils",
49 "offline",
50 "colors",
51 "io",
52 "data",
53 "__version__",
54 ]
55
56 # Set default template (for >= 3.7 this is done in ploty/io/__init__.py)
57 from plotly.io import templates
58
59 templates._default = "plotly"
60 else:
61 __all__, __getattr__, __dir__ = relative_import(
62 __name__,
63 [
64 ".graph_objs",
65 ".graph_objects",
66 ".tools",
67 ".utils",
68 ".offline",
69 ".colors",
70 ".io",
71 ".data",
72 ],
73 [".version.__version__"],
74 )
75
76
77 def plot(data_frame, kind, **kwargs):
78 """
79 Pandas plotting backend function, not meant to be called directly.
80 To activate, set pandas.options.plotting.backend="plotly"
81 See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py
82 """
83 from .express import (
84 scatter,
85 line,
86 area,
87 bar,
88 box,
89 histogram,
90 violin,
91 strip,
92 funnel,
93 density_contour,
94 density_heatmap,
95 imshow,
96 )
97
98 if kind == "scatter":
99 new_kwargs = {k: kwargs[k] for k in kwargs if k not in ["s", "c"]}
100 return scatter(data_frame, **new_kwargs)
101 if kind == "line":
102 return line(data_frame, **kwargs)
103 if kind == "area":
104 return area(data_frame, **kwargs)
105 if kind == "bar":
106 return bar(data_frame, **kwargs)
107 if kind == "barh":
108 return bar(data_frame, orientation="h", **kwargs)
109 if kind == "box":
110 new_kwargs = {k: kwargs[k] for k in kwargs if k not in ["by"]}
111 return box(data_frame, **new_kwargs)
112 if kind in ["hist", "histogram"]:
113 new_kwargs = {k: kwargs[k] for k in kwargs if k not in ["by", "bins"]}
114 return histogram(data_frame, **new_kwargs)
115 if kind == "violin":
116 return violin(data_frame, **kwargs)
117 if kind == "strip":
118 return strip(data_frame, **kwargs)
119 if kind == "funnel":
120 return funnel(data_frame, **kwargs)
121 if kind == "density_contour":
122 return density_contour(data_frame, **kwargs)
123 if kind == "density_heatmap":
124 return density_heatmap(data_frame, **kwargs)
125 if kind == "imshow":
126 return imshow(data_frame, **kwargs)
127 if kind == "heatmap":
128 raise ValueError(
129 "kind='heatmap' not supported plotting.backend='plotly'. "
130 "Please use kind='imshow' or kind='density_heatmap'."
131 )
132
133 raise NotImplementedError(
134 "kind='%s' not yet supported for plotting.backend='plotly'" % kind
135 )
136
137
138 def boxplot_frame(data_frame, **kwargs):
139 """
140 Pandas plotting backend function, not meant to be called directly.
141 To activate, set pandas.options.plotting.backend="plotly"
142 See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py
143 """
144 from .express import box
145
146 skip = ["by", "column", "ax", "fontsize", "rot", "grid", "figsize", "layout"]
147 skip += ["return_type"]
148 new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}
149 return box(data_frame, **new_kwargs)
150
151
152 def hist_frame(data_frame, **kwargs):
153 """
154 Pandas plotting backend function, not meant to be called directly.
155 To activate, set pandas.options.plotting.backend="plotly"
156 See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py
157 """
158 from .express import histogram
159
160 skip = ["column", "by", "grid", "xlabelsize", "xrot", "ylabelsize", "yrot"]
161 skip += ["ax", "sharex", "sharey", "figsize", "layout", "bins"]
162 new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}
163 return histogram(data_frame, **new_kwargs)
164
165
166 def hist_series(data_frame, **kwargs):
167 """
168 Pandas plotting backend function, not meant to be called directly.
169 To activate, set pandas.options.plotting.backend="plotly"
170 See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py
171 """
172 from .express import histogram
173
174 skip = ["by", "grid", "xlabelsize", "xrot", "ylabelsize", "yrot", "ax"]
175 skip += ["figsize", "bins"]
176 new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}
177 return histogram(data_frame, **new_kwargs)
178
[end of packages/python/plotly/plotly/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/packages/python/plotly/plotly/__init__.py b/packages/python/plotly/plotly/__init__.py
--- a/packages/python/plotly/plotly/__init__.py
+++ b/packages/python/plotly/plotly/__init__.py
@@ -158,7 +158,7 @@
from .express import histogram
skip = ["column", "by", "grid", "xlabelsize", "xrot", "ylabelsize", "yrot"]
- skip += ["ax", "sharex", "sharey", "figsize", "layout", "bins"]
+ skip += ["ax", "sharex", "sharey", "figsize", "layout", "bins", "legend"]
new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}
return histogram(data_frame, **new_kwargs)
@@ -172,6 +172,6 @@
from .express import histogram
skip = ["by", "grid", "xlabelsize", "xrot", "ylabelsize", "yrot", "ax"]
- skip += ["figsize", "bins"]
+ skip += ["figsize", "bins", "legend"]
new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}
return histogram(data_frame, **new_kwargs)
|
{"golden_diff": "diff --git a/packages/python/plotly/plotly/__init__.py b/packages/python/plotly/plotly/__init__.py\n--- a/packages/python/plotly/plotly/__init__.py\n+++ b/packages/python/plotly/plotly/__init__.py\n@@ -158,7 +158,7 @@\n from .express import histogram\n \n skip = [\"column\", \"by\", \"grid\", \"xlabelsize\", \"xrot\", \"ylabelsize\", \"yrot\"]\n- skip += [\"ax\", \"sharex\", \"sharey\", \"figsize\", \"layout\", \"bins\"]\n+ skip += [\"ax\", \"sharex\", \"sharey\", \"figsize\", \"layout\", \"bins\", \"legend\"]\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}\n return histogram(data_frame, **new_kwargs)\n \n@@ -172,6 +172,6 @@\n from .express import histogram\n \n skip = [\"by\", \"grid\", \"xlabelsize\", \"xrot\", \"ylabelsize\", \"yrot\", \"ax\"]\n- skip += [\"figsize\", \"bins\"]\n+ skip += [\"figsize\", \"bins\", \"legend\"]\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}\n return histogram(data_frame, **new_kwargs)\n", "issue": "histogram() got an unexpected keyword argument 'legend' with pandas backend\npandas version 1.1.0\r\nplotly version 4.9.0\r\n\r\nThe following will raise `histogram() got an unexpected keyword argument 'legend' `:\r\n```python\r\nimport pandas as pd\r\npd.options.plotting.backend = \"plotly\"\r\ndf = pd.DataFrame()\r\ndf.hist()\r\n```\r\nI suggest to add `legend` in the skip list:\r\nhttps://github.com/plotly/plotly.py/blob/dc9c5fdfe70367d5dc4fcdca6a0ad07125d64647/packages/python/plotly/plotly/__init__.py#L161\n", "before_files": [{"content": "\"\"\"\nhttps://plot.ly/python/\n\nPlotly's Python API allows users to programmatically access Plotly's\nserver resources.\n\nThis package is organized as follows:\n\nSubpackages:\n\n- plotly: all functionality that requires access to Plotly's servers\n\n- graph_objs: objects for designing figures and visualizing data\n\n- matplotlylib: tools to convert matplotlib figures\n\nModules:\n\n- tools: some helpful tools that do not require access to Plotly's servers\n\n- utils: functions that you probably won't need, but that subpackages use\n\n- version: holds the current API version\n\n- exceptions: defines our custom exception classes\n\n\"\"\"\nfrom __future__ import absolute_import\nimport sys\nfrom _plotly_utils.importers import relative_import\n\n\nif sys.version_info < (3, 7):\n from plotly import (\n graph_objs,\n tools,\n utils,\n offline,\n colors,\n io,\n data,\n )\n from plotly.version import __version__\n\n __all__ = [\n \"graph_objs\",\n \"tools\",\n \"utils\",\n \"offline\",\n \"colors\",\n \"io\",\n \"data\",\n \"__version__\",\n ]\n\n # Set default template (for >= 3.7 this is done in ploty/io/__init__.py)\n from plotly.io import templates\n\n templates._default = \"plotly\"\nelse:\n __all__, __getattr__, __dir__ = relative_import(\n __name__,\n [\n \".graph_objs\",\n \".graph_objects\",\n \".tools\",\n \".utils\",\n \".offline\",\n \".colors\",\n \".io\",\n \".data\",\n ],\n [\".version.__version__\"],\n )\n\n\ndef plot(data_frame, kind, **kwargs):\n \"\"\"\n Pandas plotting backend function, not meant to be called directly.\n To activate, set pandas.options.plotting.backend=\"plotly\"\n See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py\n \"\"\"\n from .express import (\n scatter,\n line,\n area,\n bar,\n box,\n histogram,\n violin,\n strip,\n funnel,\n density_contour,\n density_heatmap,\n imshow,\n )\n\n if kind == \"scatter\":\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in [\"s\", \"c\"]}\n return scatter(data_frame, 
**new_kwargs)\n if kind == \"line\":\n return line(data_frame, **kwargs)\n if kind == \"area\":\n return area(data_frame, **kwargs)\n if kind == \"bar\":\n return bar(data_frame, **kwargs)\n if kind == \"barh\":\n return bar(data_frame, orientation=\"h\", **kwargs)\n if kind == \"box\":\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in [\"by\"]}\n return box(data_frame, **new_kwargs)\n if kind in [\"hist\", \"histogram\"]:\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in [\"by\", \"bins\"]}\n return histogram(data_frame, **new_kwargs)\n if kind == \"violin\":\n return violin(data_frame, **kwargs)\n if kind == \"strip\":\n return strip(data_frame, **kwargs)\n if kind == \"funnel\":\n return funnel(data_frame, **kwargs)\n if kind == \"density_contour\":\n return density_contour(data_frame, **kwargs)\n if kind == \"density_heatmap\":\n return density_heatmap(data_frame, **kwargs)\n if kind == \"imshow\":\n return imshow(data_frame, **kwargs)\n if kind == \"heatmap\":\n raise ValueError(\n \"kind='heatmap' not supported plotting.backend='plotly'. \"\n \"Please use kind='imshow' or kind='density_heatmap'.\"\n )\n\n raise NotImplementedError(\n \"kind='%s' not yet supported for plotting.backend='plotly'\" % kind\n )\n\n\ndef boxplot_frame(data_frame, **kwargs):\n \"\"\"\n Pandas plotting backend function, not meant to be called directly.\n To activate, set pandas.options.plotting.backend=\"plotly\"\n See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py\n \"\"\"\n from .express import box\n\n skip = [\"by\", \"column\", \"ax\", \"fontsize\", \"rot\", \"grid\", \"figsize\", \"layout\"]\n skip += [\"return_type\"]\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}\n return box(data_frame, **new_kwargs)\n\n\ndef hist_frame(data_frame, **kwargs):\n \"\"\"\n Pandas plotting backend function, not meant to be called directly.\n To activate, set pandas.options.plotting.backend=\"plotly\"\n See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py\n \"\"\"\n from .express import histogram\n\n skip = [\"column\", \"by\", \"grid\", \"xlabelsize\", \"xrot\", \"ylabelsize\", \"yrot\"]\n skip += [\"ax\", \"sharex\", \"sharey\", \"figsize\", \"layout\", \"bins\"]\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}\n return histogram(data_frame, **new_kwargs)\n\n\ndef hist_series(data_frame, **kwargs):\n \"\"\"\n Pandas plotting backend function, not meant to be called directly.\n To activate, set pandas.options.plotting.backend=\"plotly\"\n See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py\n \"\"\"\n from .express import histogram\n\n skip = [\"by\", \"grid\", \"xlabelsize\", \"xrot\", \"ylabelsize\", \"yrot\", \"ax\"]\n skip += [\"figsize\", \"bins\"]\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}\n return histogram(data_frame, **new_kwargs)\n", "path": "packages/python/plotly/plotly/__init__.py"}]}
| 2,424 | 297 |
gh_patches_debug_3941 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1431 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[PORT] Create test for waterfall cancellation telemetry
> Port this change from botbuilder-dotnet/master branch:
https://github.com/microsoft/botbuilder-dotnet/pull/3314
For https://github.com/microsoft/botbuilder-js/issues/1619
# Changed projects
* Microsoft.Bot.Builder.Dialogs.Tests
</issue>
<code>
[start of libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4
5 import uuid
6 from typing import Coroutine
7 from botbuilder.core import TurnContext
8 from botbuilder.schema import ActivityTypes
9 from .dialog_reason import DialogReason
10 from .dialog import Dialog
11 from .dialog_turn_result import DialogTurnResult
12 from .dialog_context import DialogContext
13 from .dialog_instance import DialogInstance
14 from .waterfall_step_context import WaterfallStepContext
15
16
17 class WaterfallDialog(Dialog):
18 PersistedOptions = "options"
19 StepIndex = "stepIndex"
20 PersistedValues = "values"
21 PersistedInstanceId = "instanceId"
22
23 def __init__(self, dialog_id: str, steps: [Coroutine] = None):
24 super(WaterfallDialog, self).__init__(dialog_id)
25 if not steps:
26 self._steps = []
27 else:
28 if not isinstance(steps, list):
29 raise TypeError("WaterfallDialog(): steps must be list of steps")
30 self._steps = steps
31
32 def add_step(self, step):
33 """
34 Adds a new step to the waterfall.
35 :param step: Step to add
36 :return: Waterfall dialog for fluent calls to `add_step()`.
37 """
38 if not step:
39 raise TypeError("WaterfallDialog.add_step(): step cannot be None.")
40
41 self._steps.append(step)
42 return self
43
44 async def begin_dialog(
45 self, dialog_context: DialogContext, options: object = None
46 ) -> DialogTurnResult:
47
48 if not dialog_context:
49 raise TypeError("WaterfallDialog.begin_dialog(): dc cannot be None.")
50
51 # Initialize waterfall state
52 state = dialog_context.active_dialog.state
53
54 instance_id = uuid.uuid1().__str__()
55 state[self.PersistedOptions] = options
56 state[self.PersistedValues] = {}
57 state[self.PersistedInstanceId] = instance_id
58
59 properties = {}
60 properties["DialogId"] = self.id
61 properties["InstanceId"] = instance_id
62 self.telemetry_client.track_event("WaterfallStart", properties)
63
64 # Run first stepkinds
65 return await self.run_step(dialog_context, 0, DialogReason.BeginCalled, None)
66
67 async def continue_dialog( # pylint: disable=unused-argument,arguments-differ
68 self,
69 dialog_context: DialogContext = None,
70 reason: DialogReason = None,
71 result: object = NotImplementedError(),
72 ) -> DialogTurnResult:
73 if not dialog_context:
74 raise TypeError("WaterfallDialog.continue_dialog(): dc cannot be None.")
75
76 if dialog_context.context.activity.type != ActivityTypes.message:
77 return Dialog.end_of_turn
78
79 return await self.resume_dialog(
80 dialog_context,
81 DialogReason.ContinueCalled,
82 dialog_context.context.activity.text,
83 )
84
85 async def resume_dialog(
86 self, dialog_context: DialogContext, reason: DialogReason, result: object
87 ):
88 if dialog_context is None:
89 raise TypeError("WaterfallDialog.resume_dialog(): dc cannot be None.")
90
91 # Increment step index and run step
92 state = dialog_context.active_dialog.state
93
94 # Future Me:
95 # If issues with CosmosDB, see https://github.com/Microsoft/botbuilder-dotnet/issues/871
96 # for hints.
97 return await self.run_step(
98 dialog_context, state[self.StepIndex] + 1, reason, result
99 )
100
101 async def end_dialog( # pylint: disable=unused-argument
102 self, context: TurnContext, instance: DialogInstance, reason: DialogReason
103 ) -> None:
104 if reason is DialogReason.CancelCalled:
105 index = instance.state[self.StepIndex]
106 step_name = self.get_step_name(index)
107 instance_id = str(instance.state[self.PersistedInstanceId])
108 properties = {
109 "DialogId": self.id,
110 "StepName": step_name,
111 "InstanceId": instance_id,
112 }
113 self.telemetry_client.track_event("WaterfallCancel", properties)
114 else:
115 if reason is DialogReason.EndCalled:
116
117 instance_id = str(instance.state[self.PersistedInstanceId])
118 properties = {"DialogId": self.id, "InstanceId": instance_id}
119 self.telemetry_client.track_event("WaterfallComplete", properties)
120
121 return
122
123 async def on_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
124 step_name = self.get_step_name(step_context.index)
125 instance_id = str(step_context.active_dialog.state[self.PersistedInstanceId])
126 properties = {
127 "DialogId": self.id,
128 "StepName": step_name,
129 "InstanceId": instance_id,
130 }
131 self.telemetry_client.track_event("WaterfallStep", properties)
132 return await self._steps[step_context.index](step_context)
133
134 async def run_step(
135 self,
136 dialog_context: DialogContext,
137 index: int,
138 reason: DialogReason,
139 result: object,
140 ) -> DialogTurnResult:
141 if not dialog_context:
142 raise TypeError(
143 "WaterfallDialog.run_steps(): dialog_context cannot be None."
144 )
145 if index < len(self._steps):
146 # Update persisted step index
147 state = dialog_context.active_dialog.state
148 state[self.StepIndex] = index
149
150 # Create step context
151 options = state[self.PersistedOptions]
152 values = state[self.PersistedValues]
153 step_context = WaterfallStepContext(
154 self, dialog_context, options, values, index, reason, result
155 )
156 return await self.on_step(step_context)
157
158 # End of waterfall so just return any result to parent
159 return await dialog_context.end_dialog(result)
160
161 def get_step_name(self, index: int) -> str:
162 """
163 Give the waterfall step a unique name
164 """
165 step_name = self._steps[index].__qualname__
166
167 if not step_name or ">" in step_name:
168 step_name = f"Step{index + 1}of{len(self._steps)}"
169
170 return step_name
171
[end of libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py
--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py
+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py
@@ -164,7 +164,7 @@
"""
step_name = self._steps[index].__qualname__
- if not step_name or ">" in step_name:
+ if not step_name or step_name.endswith("<lambda>"):
step_name = f"Step{index + 1}of{len(self._steps)}"
return step_name
|
{"golden_diff": "diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py\n--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py\n+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py\n@@ -164,7 +164,7 @@\n \"\"\"\r\n step_name = self._steps[index].__qualname__\r\n \r\n- if not step_name or \">\" in step_name:\r\n+ if not step_name or step_name.endswith(\"<lambda>\"):\r\n step_name = f\"Step{index + 1}of{len(self._steps)}\"\r\n \r\n return step_name\n", "issue": "[PORT] Create test for waterfall cancellation telemetry\n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/3314\n\nFor https://github.com/microsoft/botbuilder-js/issues/1619\n\n\r\n# Changed projects\r\n* Microsoft.Bot.Builder.Dialogs.Tests\r\n\r\n\r\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\n\r\nimport uuid\r\nfrom typing import Coroutine\r\nfrom botbuilder.core import TurnContext\r\nfrom botbuilder.schema import ActivityTypes\r\nfrom .dialog_reason import DialogReason\r\nfrom .dialog import Dialog\r\nfrom .dialog_turn_result import DialogTurnResult\r\nfrom .dialog_context import DialogContext\r\nfrom .dialog_instance import DialogInstance\r\nfrom .waterfall_step_context import WaterfallStepContext\r\n\r\n\r\nclass WaterfallDialog(Dialog):\r\n PersistedOptions = \"options\"\r\n StepIndex = \"stepIndex\"\r\n PersistedValues = \"values\"\r\n PersistedInstanceId = \"instanceId\"\r\n\r\n def __init__(self, dialog_id: str, steps: [Coroutine] = None):\r\n super(WaterfallDialog, self).__init__(dialog_id)\r\n if not steps:\r\n self._steps = []\r\n else:\r\n if not isinstance(steps, list):\r\n raise TypeError(\"WaterfallDialog(): steps must be list of steps\")\r\n self._steps = steps\r\n\r\n def add_step(self, step):\r\n \"\"\"\r\n Adds a new step to the waterfall.\r\n :param step: Step to add\r\n :return: Waterfall dialog for fluent calls to `add_step()`.\r\n \"\"\"\r\n if not step:\r\n raise TypeError(\"WaterfallDialog.add_step(): step cannot be None.\")\r\n\r\n self._steps.append(step)\r\n return self\r\n\r\n async def begin_dialog(\r\n self, dialog_context: DialogContext, options: object = None\r\n ) -> DialogTurnResult:\r\n\r\n if not dialog_context:\r\n raise TypeError(\"WaterfallDialog.begin_dialog(): dc cannot be None.\")\r\n\r\n # Initialize waterfall state\r\n state = dialog_context.active_dialog.state\r\n\r\n instance_id = uuid.uuid1().__str__()\r\n state[self.PersistedOptions] = options\r\n state[self.PersistedValues] = {}\r\n state[self.PersistedInstanceId] = instance_id\r\n\r\n properties = {}\r\n properties[\"DialogId\"] = self.id\r\n properties[\"InstanceId\"] = instance_id\r\n self.telemetry_client.track_event(\"WaterfallStart\", properties)\r\n\r\n # Run first stepkinds\r\n return await self.run_step(dialog_context, 0, DialogReason.BeginCalled, None)\r\n\r\n async def continue_dialog( # pylint: disable=unused-argument,arguments-differ\r\n self,\r\n dialog_context: DialogContext = None,\r\n reason: DialogReason = None,\r\n result: object = NotImplementedError(),\r\n ) -> DialogTurnResult:\r\n if not dialog_context:\r\n raise TypeError(\"WaterfallDialog.continue_dialog(): dc cannot be None.\")\r\n\r\n if dialog_context.context.activity.type != ActivityTypes.message:\r\n return Dialog.end_of_turn\r\n\r\n return await 
self.resume_dialog(\r\n dialog_context,\r\n DialogReason.ContinueCalled,\r\n dialog_context.context.activity.text,\r\n )\r\n\r\n async def resume_dialog(\r\n self, dialog_context: DialogContext, reason: DialogReason, result: object\r\n ):\r\n if dialog_context is None:\r\n raise TypeError(\"WaterfallDialog.resume_dialog(): dc cannot be None.\")\r\n\r\n # Increment step index and run step\r\n state = dialog_context.active_dialog.state\r\n\r\n # Future Me:\r\n # If issues with CosmosDB, see https://github.com/Microsoft/botbuilder-dotnet/issues/871\r\n # for hints.\r\n return await self.run_step(\r\n dialog_context, state[self.StepIndex] + 1, reason, result\r\n )\r\n\r\n async def end_dialog( # pylint: disable=unused-argument\r\n self, context: TurnContext, instance: DialogInstance, reason: DialogReason\r\n ) -> None:\r\n if reason is DialogReason.CancelCalled:\r\n index = instance.state[self.StepIndex]\r\n step_name = self.get_step_name(index)\r\n instance_id = str(instance.state[self.PersistedInstanceId])\r\n properties = {\r\n \"DialogId\": self.id,\r\n \"StepName\": step_name,\r\n \"InstanceId\": instance_id,\r\n }\r\n self.telemetry_client.track_event(\"WaterfallCancel\", properties)\r\n else:\r\n if reason is DialogReason.EndCalled:\r\n\r\n instance_id = str(instance.state[self.PersistedInstanceId])\r\n properties = {\"DialogId\": self.id, \"InstanceId\": instance_id}\r\n self.telemetry_client.track_event(\"WaterfallComplete\", properties)\r\n\r\n return\r\n\r\n async def on_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:\r\n step_name = self.get_step_name(step_context.index)\r\n instance_id = str(step_context.active_dialog.state[self.PersistedInstanceId])\r\n properties = {\r\n \"DialogId\": self.id,\r\n \"StepName\": step_name,\r\n \"InstanceId\": instance_id,\r\n }\r\n self.telemetry_client.track_event(\"WaterfallStep\", properties)\r\n return await self._steps[step_context.index](step_context)\r\n\r\n async def run_step(\r\n self,\r\n dialog_context: DialogContext,\r\n index: int,\r\n reason: DialogReason,\r\n result: object,\r\n ) -> DialogTurnResult:\r\n if not dialog_context:\r\n raise TypeError(\r\n \"WaterfallDialog.run_steps(): dialog_context cannot be None.\"\r\n )\r\n if index < len(self._steps):\r\n # Update persisted step index\r\n state = dialog_context.active_dialog.state\r\n state[self.StepIndex] = index\r\n\r\n # Create step context\r\n options = state[self.PersistedOptions]\r\n values = state[self.PersistedValues]\r\n step_context = WaterfallStepContext(\r\n self, dialog_context, options, values, index, reason, result\r\n )\r\n return await self.on_step(step_context)\r\n\r\n # End of waterfall so just return any result to parent\r\n return await dialog_context.end_dialog(result)\r\n\r\n def get_step_name(self, index: int) -> str:\r\n \"\"\"\r\n Give the waterfall step a unique name\r\n \"\"\"\r\n step_name = self._steps[index].__qualname__\r\n\r\n if not step_name or \">\" in step_name:\r\n step_name = f\"Step{index + 1}of{len(self._steps)}\"\r\n\r\n return step_name\r\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py"}]}
| 2,323 | 163 |
gh_patches_debug_16065
|
rasdani/github-patches
|
git_diff
|
ultrabug__py3status-380
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
broken click events
since our last merges, the click events are not working anymore :(
</issue>
<code>
[start of py3status/events.py]
1 import select
2 import sys
3
4 from threading import Thread
5 from time import time
6 from subprocess import Popen, call, PIPE
7 from json import loads
8
9 from py3status.profiling import profile
10
11
12 class IOPoller:
13 """
14 This class implements a predictive and timing-out I/O reader
15 using select and the poll() mechanism for greater compatibility.
16 """
17
18 def __init__(self, io, eventmask=select.POLLIN):
19 """
20 Our default is to read (POLLIN) the specified 'io' file descriptor.
21 """
22 self.io = io
23 self.poller = select.poll()
24 self.poller.register(io, eventmask)
25
26 def readline(self, timeout=500):
27 """
28 Try to read our I/O for 'timeout' milliseconds, return None otherwise.
29 This makes calling and reading I/O non blocking !
30 """
31 poll_result = self.poller.poll(timeout)
32 if poll_result:
33 line = self.io.readline().strip()
34 if self.io == sys.stdin and line == '[':
35 # skip first event line wrt issue #19
36 line = self.io.readline().strip()
37 try:
38 # python3 compatibility code
39 line = line.decode()
40 except (AttributeError, UnicodeDecodeError):
41 pass
42 return line
43 else:
44 return None
45
46
47 class Events(Thread):
48 """
49 This class is responsible for dispatching event JSONs sent by the i3bar.
50 """
51
52 def __init__(self, py3_wrapper):
53 """
54 We need to poll stdin to receive i3bar messages.
55 """
56 Thread.__init__(self)
57 self.config = py3_wrapper.config
58 self.error = None
59 self.i3s_config = py3_wrapper.i3status_thread.config
60 self.last_refresh_ts = time()
61 self.lock = py3_wrapper.lock
62 self.modules = py3_wrapper.modules
63 self.on_click = self.i3s_config['on_click']
64 self.output_modules = py3_wrapper.output_modules
65 self.poller_inp = IOPoller(sys.stdin)
66 self.py3_wrapper = py3_wrapper
67
68 def refresh(self, module_name):
69 """
70 Force a cache expiration for all the methods of the given module.
71
72 We rate limit the i3status refresh to 100ms.
73 """
74 module = self.modules.get(module_name)
75 if module is not None:
76 if self.config['debug']:
77 self.py3_wrapper.log('refresh module {}'.format(module_name))
78 module.force_update()
79 else:
80 if time() > (self.last_refresh_ts + 0.1):
81 if self.config['debug']:
82 self.py3_wrapper.log(
83 'refresh i3status for module {}'.format(module_name))
84 call(['killall', '-s', 'USR1', 'i3status'])
85 self.last_refresh_ts = time()
86
87 def refresh_all(self, module_name):
88 """
89 Force a full refresh of py3status and i3status modules by sending
90 a SIGUSR1 signal to py3status.
91
92 We rate limit this command to 100ms for obvious abusive behavior.
93 """
94 if time() > (self.last_refresh_ts + 0.1):
95 call(['killall', '-s', 'USR1', 'py3status'])
96 self.last_refresh_ts = time()
97
98 def on_click_dispatcher(self, module_name, command):
99 """
100 Dispatch on_click config parameters to either:
101 - Our own methods for special py3status commands (listed below)
102 - The i3-msg program which is part of i3wm
103 """
104 py3_commands = ['refresh', 'refresh_all']
105 if command is None:
106 return
107 elif command in py3_commands:
108 # this is a py3status command handled by this class
109 method = getattr(self, command)
110 method(module_name)
111 else:
112 # this is a i3 message
113 self.i3_msg(module_name, command)
114
115 # to make the bar more responsive to users we ask for a refresh
116 # of the module or of i3status if the module is an i3status one
117 self.refresh(module_name)
118
119 def i3_msg(self, module_name, command):
120 """
121 Execute the given i3 message and log its output.
122 """
123 i3_msg_pipe = Popen(['i3-msg', command], stdout=PIPE)
124 self.py3_wrapper.log('i3-msg module="{}" command="{}" stdout={}'.format(
125 module_name, command, i3_msg_pipe.stdout.read()))
126
127 def process_event(self, module_name, event, top_level=True):
128 """
129 Process the event for the named module.
130 Events may have been declared in i3status.conf, modules may have
131 on_click() functions. There is a default middle click event etc.
132 """
133 button = event.get('button', 0)
134 default_event = False
135 # execute any configured i3-msg command
136 # we do not do this for containers
137 if top_level:
138 if self.on_click.get(module_name, {}).get(button):
139 self.on_click_dispatcher(module_name,
140 self.on_click[module_name].get(button))
141 # otherwise setup default action on button 2 press
142 elif button == 2:
143 default_event = True
144
145 # get the module that the event is for
146 module_info = self.output_modules.get(module_name)
147 module = module_info['module']
148 # if module is a py3status one and it has an on_click function then
149 # call it.
150 if module_info['type'] == 'py3status' and module.click_events:
151 module.click_event(event)
152 if self.config['debug']:
153 self.py3_wrapper.log('dispatching event {}'.format(event))
154
155 # to make the bar more responsive to users we refresh the module
156 # unless the on_click event called py3.prevent_refresh()
157 if not module.prevent_refresh:
158 self.refresh(module_name)
159 default_event = False
160
161 if default_event:
162 # default button 2 action is to clear this method's cache
163 if self.config['debug']:
164 self.py3_wrapper.log(
165 'dispatching default event {}'.format(event))
166 self.refresh(module_name)
167
168 # find container that holds the module and call its onclick
169 module_groups = self.i3s_config['.module_groups']
170 containers = module_groups.get(module_name, [])
171 for container in containers:
172 self.process_event(container, event, top_level=False)
173
174 @profile
175 def run(self):
176 """
177 Wait for an i3bar JSON event, then find the right module to dispatch
178 the message to based on the 'name' and 'instance' of the event.
179
180 In case the module does NOT support click_events, the default
181 implementation is to clear the module's cache
182 when the MIDDLE button (2) is pressed on it.
183
184 Example event:
185 {'y': 13, 'x': 1737, 'button': 1, 'name': 'empty', 'instance': 'first'}
186 """
187 while self.lock.is_set():
188 event_str = self.poller_inp.readline()
189 if not event_str:
190 continue
191 try:
192 # remove leading comma if present
193 if event_str[0] == ',':
194 event_str = event_str[1:]
195 event = loads(event_str)
196
197 if self.config['debug']:
198 self.py3_wrapper.log('received event {}'.format(event))
199
200 # usage variables
201 instance = event.get('instance', '')
202 name = event.get('name', '')
203
204 # composites have an index which is passed to i3bar with
205 # the instance. We need to separate this out here and
206 # clean up the event. If index
207 # is an integer type then cast it as such.
208 if ' ' in instance:
209 instance, index = instance.split(' ', 1)
210 try:
211 index = int(index)
212 except ValueError:
213 pass
214 event['index'] = index
215 event['instance'] = instance
216
217 if self.config['debug']:
218 self.py3_wrapper.log(
219 'trying to dispatch event to module "{}"'.format(
220 '{} {}'.format(name, instance).strip()))
221
222 # guess the module config name
223 module_name = '{} {}'.format(name, instance).strip()
224 # do the work
225 self.process_event(module_name, event)
226
227 except Exception:
228 err = sys.exc_info()[1]
229 self.error = err
230 self.py3_wrapper.log('event failed ({})'.format(err), 'warning')
231
[end of py3status/events.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/py3status/events.py b/py3status/events.py
--- a/py3status/events.py
+++ b/py3status/events.py
@@ -135,9 +135,13 @@
# execute any configured i3-msg command
# we do not do this for containers
if top_level:
- if self.on_click.get(module_name, {}).get(button):
- self.on_click_dispatcher(module_name,
- self.on_click[module_name].get(button))
+ click_module = event['name']
+ if event['instance']:
+ click_module += ' ' + event['instance']
+ btn = str(button)
+ if self.on_click.get(click_module, {}).get(btn):
+ self.on_click_dispatcher(click_module,
+ self.on_click[module_name].get(btn))
# otherwise setup default action on button 2 press
elif button == 2:
default_event = True
|
{"golden_diff": "diff --git a/py3status/events.py b/py3status/events.py\n--- a/py3status/events.py\n+++ b/py3status/events.py\n@@ -135,9 +135,13 @@\n # execute any configured i3-msg command\n # we do not do this for containers\n if top_level:\n- if self.on_click.get(module_name, {}).get(button):\n- self.on_click_dispatcher(module_name,\n- self.on_click[module_name].get(button))\n+ click_module = event['name']\n+ if event['instance']:\n+ click_module += ' ' + event['instance']\n+ btn = str(button)\n+ if self.on_click.get(click_module, {}).get(btn):\n+ self.on_click_dispatcher(click_module,\n+ self.on_click[module_name].get(btn))\n # otherwise setup default action on button 2 press\n elif button == 2:\n default_event = True\n", "issue": "broken click events\nsince our last merges, the click events are not working anymore :(\n\n", "before_files": [{"content": "import select\nimport sys\n\nfrom threading import Thread\nfrom time import time\nfrom subprocess import Popen, call, PIPE\nfrom json import loads\n\nfrom py3status.profiling import profile\n\n\nclass IOPoller:\n \"\"\"\n This class implements a predictive and timing-out I/O reader\n using select and the poll() mechanism for greater compatibility.\n \"\"\"\n\n def __init__(self, io, eventmask=select.POLLIN):\n \"\"\"\n Our default is to read (POLLIN) the specified 'io' file descriptor.\n \"\"\"\n self.io = io\n self.poller = select.poll()\n self.poller.register(io, eventmask)\n\n def readline(self, timeout=500):\n \"\"\"\n Try to read our I/O for 'timeout' milliseconds, return None otherwise.\n This makes calling and reading I/O non blocking !\n \"\"\"\n poll_result = self.poller.poll(timeout)\n if poll_result:\n line = self.io.readline().strip()\n if self.io == sys.stdin and line == '[':\n # skip first event line wrt issue #19\n line = self.io.readline().strip()\n try:\n # python3 compatibility code\n line = line.decode()\n except (AttributeError, UnicodeDecodeError):\n pass\n return line\n else:\n return None\n\n\nclass Events(Thread):\n \"\"\"\n This class is responsible for dispatching event JSONs sent by the i3bar.\n \"\"\"\n\n def __init__(self, py3_wrapper):\n \"\"\"\n We need to poll stdin to receive i3bar messages.\n \"\"\"\n Thread.__init__(self)\n self.config = py3_wrapper.config\n self.error = None\n self.i3s_config = py3_wrapper.i3status_thread.config\n self.last_refresh_ts = time()\n self.lock = py3_wrapper.lock\n self.modules = py3_wrapper.modules\n self.on_click = self.i3s_config['on_click']\n self.output_modules = py3_wrapper.output_modules\n self.poller_inp = IOPoller(sys.stdin)\n self.py3_wrapper = py3_wrapper\n\n def refresh(self, module_name):\n \"\"\"\n Force a cache expiration for all the methods of the given module.\n\n We rate limit the i3status refresh to 100ms.\n \"\"\"\n module = self.modules.get(module_name)\n if module is not None:\n if self.config['debug']:\n self.py3_wrapper.log('refresh module {}'.format(module_name))\n module.force_update()\n else:\n if time() > (self.last_refresh_ts + 0.1):\n if self.config['debug']:\n self.py3_wrapper.log(\n 'refresh i3status for module {}'.format(module_name))\n call(['killall', '-s', 'USR1', 'i3status'])\n self.last_refresh_ts = time()\n\n def refresh_all(self, module_name):\n \"\"\"\n Force a full refresh of py3status and i3status modules by sending\n a SIGUSR1 signal to py3status.\n\n We rate limit this command to 100ms for obvious abusive behavior.\n \"\"\"\n if time() > (self.last_refresh_ts + 0.1):\n call(['killall', '-s', 'USR1', 'py3status'])\n 
self.last_refresh_ts = time()\n\n def on_click_dispatcher(self, module_name, command):\n \"\"\"\n Dispatch on_click config parameters to either:\n - Our own methods for special py3status commands (listed below)\n - The i3-msg program which is part of i3wm\n \"\"\"\n py3_commands = ['refresh', 'refresh_all']\n if command is None:\n return\n elif command in py3_commands:\n # this is a py3status command handled by this class\n method = getattr(self, command)\n method(module_name)\n else:\n # this is a i3 message\n self.i3_msg(module_name, command)\n\n # to make the bar more responsive to users we ask for a refresh\n # of the module or of i3status if the module is an i3status one\n self.refresh(module_name)\n\n def i3_msg(self, module_name, command):\n \"\"\"\n Execute the given i3 message and log its output.\n \"\"\"\n i3_msg_pipe = Popen(['i3-msg', command], stdout=PIPE)\n self.py3_wrapper.log('i3-msg module=\"{}\" command=\"{}\" stdout={}'.format(\n module_name, command, i3_msg_pipe.stdout.read()))\n\n def process_event(self, module_name, event, top_level=True):\n \"\"\"\n Process the event for the named module.\n Events may have been declared in i3status.conf, modules may have\n on_click() functions. There is a default middle click event etc.\n \"\"\"\n button = event.get('button', 0)\n default_event = False\n # execute any configured i3-msg command\n # we do not do this for containers\n if top_level:\n if self.on_click.get(module_name, {}).get(button):\n self.on_click_dispatcher(module_name,\n self.on_click[module_name].get(button))\n # otherwise setup default action on button 2 press\n elif button == 2:\n default_event = True\n\n # get the module that the event is for\n module_info = self.output_modules.get(module_name)\n module = module_info['module']\n # if module is a py3status one and it has an on_click function then\n # call it.\n if module_info['type'] == 'py3status' and module.click_events:\n module.click_event(event)\n if self.config['debug']:\n self.py3_wrapper.log('dispatching event {}'.format(event))\n\n # to make the bar more responsive to users we refresh the module\n # unless the on_click event called py3.prevent_refresh()\n if not module.prevent_refresh:\n self.refresh(module_name)\n default_event = False\n\n if default_event:\n # default button 2 action is to clear this method's cache\n if self.config['debug']:\n self.py3_wrapper.log(\n 'dispatching default event {}'.format(event))\n self.refresh(module_name)\n\n # find container that holds the module and call its onclick\n module_groups = self.i3s_config['.module_groups']\n containers = module_groups.get(module_name, [])\n for container in containers:\n self.process_event(container, event, top_level=False)\n\n @profile\n def run(self):\n \"\"\"\n Wait for an i3bar JSON event, then find the right module to dispatch\n the message to based on the 'name' and 'instance' of the event.\n\n In case the module does NOT support click_events, the default\n implementation is to clear the module's cache\n when the MIDDLE button (2) is pressed on it.\n\n Example event:\n {'y': 13, 'x': 1737, 'button': 1, 'name': 'empty', 'instance': 'first'}\n \"\"\"\n while self.lock.is_set():\n event_str = self.poller_inp.readline()\n if not event_str:\n continue\n try:\n # remove leading comma if present\n if event_str[0] == ',':\n event_str = event_str[1:]\n event = loads(event_str)\n\n if self.config['debug']:\n self.py3_wrapper.log('received event {}'.format(event))\n\n # usage variables\n instance = event.get('instance', '')\n name = 
event.get('name', '')\n\n # composites have an index which is passed to i3bar with\n # the instance. We need to separate this out here and\n # clean up the event. If index\n # is an integer type then cast it as such.\n if ' ' in instance:\n instance, index = instance.split(' ', 1)\n try:\n index = int(index)\n except ValueError:\n pass\n event['index'] = index\n event['instance'] = instance\n\n if self.config['debug']:\n self.py3_wrapper.log(\n 'trying to dispatch event to module \"{}\"'.format(\n '{} {}'.format(name, instance).strip()))\n\n # guess the module config name\n module_name = '{} {}'.format(name, instance).strip()\n # do the work\n self.process_event(module_name, event)\n\n except Exception:\n err = sys.exc_info()[1]\n self.error = err\n self.py3_wrapper.log('event failed ({})'.format(err), 'warning')\n", "path": "py3status/events.py"}]}
| 2,982 | 202 |
gh_patches_debug_7523
|
rasdani/github-patches
|
git_diff
|
tensorflow__addons-758
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow passing in a tensor to tfa.optimizers.MovingAverage num_updates
Currently, `tfa.optimizers.MovingAverage` has an assert that requires `num_updates` to be of type int, [shown here](https://github.com/tensorflow/addons/blob/604a70de563f8797984c9c3f002aff70bef6c90b/tensorflow_addons/optimizers/moving_average.py#L81). This prevents me from passing in an integer tensor that changes with the global step, which is officially supported by `tf.train.ExponentialMovingAverage`.
Can this assert be updated to handle this use case?
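For reference, the intended usage would look roughly like this (a sketch only; the decay value and the step variable are made up for illustration):

```python
import tensorflow as tf
import tensorflow_addons as tfa

# Illustrative sketch: drive num_updates with a step variable rather than a plain int.
global_step = tf.Variable(0, trainable=False)

opt = tfa.optimizers.MovingAverage(
    tf.keras.optimizers.SGD(0.01),
    average_decay=0.999,
    num_updates=global_step,  # currently rejected by the isinstance(num_updates, int) check
)
```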
</issue>
<code>
[start of tensorflow_addons/optimizers/moving_average.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15
16 from __future__ import absolute_import
17 from __future__ import division
18 from __future__ import print_function
19
20 import tensorflow as tf
21
22
23 @tf.keras.utils.register_keras_serializable(package='Addons')
24 class MovingAverage(tf.keras.optimizers.Optimizer):
25 """Optimizer that computes a moving average of the variables.
26
27 Empirically it has been found that using the moving average of the trained
28 parameters of a deep network is better than using its trained parameters
29 directly. This optimizer allows you to compute this moving average and swap
30 the variables at save time so that any code outside of the training loop
31 will use by default the average values instead of the original ones.
32
33 Example of usage:
34
35 ```python
36 opt = tf.keras.optimizers.SGD(learning_rate)
37 opt = tfa.optimizers.MovingAverage(opt)
38
39 ```
40 """
41
42 def __init__(self,
43 optimizer,
44 average_decay=0.1,
45 num_updates=None,
46 sequential_update=True,
47 name="MovingAverage",
48 **kwargs):
49 """Construct a new MovingAverage optimizer.
50
51 Args:
52 optimizer: str or `tf.keras.optimizers.Optimizer` that will be
53 used to compute and apply gradients.
54 average_decay: float. Decay to use to maintain the moving averages
55 of trained variables. See `tf.train.ExponentialMovingAverage`
56 for details.
57 num_updates: Optional count of the number of updates applied to
58 variables. See `tf.train.ExponentialMovingAverage` for details.
59 sequential_update: Bool. If False, will compute the moving average
60 at the same time as the model is updated, potentially doing
61 benign data races. If True, will update the moving average
62 after gradient updates.
63 name: Optional name for the operations created when applying
64 gradients. Defaults to "MovingAverage".
65 **kwargs: keyword arguments. Allowed to be {`clipnorm`,
66 `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by
67 norm; `clipvalue` is clip gradients by value, `decay` is
68 included for backward compatibility to allow time inverse
69 decay of learning rate. `lr` is included for backward
70 compatibility, recommended to use `learning_rate` instead.
71 """
72 super(MovingAverage, self).__init__(name, **kwargs)
73
74 if isinstance(optimizer, str):
75 optimizer = tf.keras.optimizers.get(optimizer)
76
77 if not isinstance(optimizer, tf.keras.optimizers.Optimizer):
78 raise TypeError(
79 "optimizer is not an object of tf.keras.optimizers.Optimizer")
80
81 if num_updates is not None and not isinstance(num_updates, int):
82 raise TypeError("num_updates must be None or of integer type")
83
84 if not isinstance(sequential_update, bool):
85 raise TypeError("sequential_update must be of bool type")
86
87 with tf.name_scope(name):
88 self._ema = tf.train.ExponentialMovingAverage(
89 average_decay, num_updates=num_updates)
90
91 self._optimizer = optimizer
92 self._set_hyper("average_decay", average_decay)
93 self._num_updates = num_updates
94 self._sequential_update = sequential_update
95 self._initialized = False
96
97 def apply_gradients(self, grads_and_vars, name=None):
98 var_list = [v for (_, v) in grads_and_vars]
99
100 if tf.executing_eagerly() and not self._initialized:
101 # this to ensure that var_list is registered initially
102 self._ema.apply(var_list)
103 self._initialized = True
104
105 train_op = self._optimizer.apply_gradients(grads_and_vars, name=name)
106
107 if self._sequential_update:
108 with tf.control_dependencies([train_op]):
109 ma_op = self._ema.apply(var_list)
110 else:
111 ma_op = self._ema.apply(var_list)
112
113 return tf.group(train_op, ma_op, name="train_with_avg")
114
115 def get_config(self):
116 config = {
117 'optimizer': tf.keras.optimizers.serialize(self._optimizer),
118 'average_decay': self._serialize_hyperparameter('average_decay'),
119 'num_updates': self._num_updates,
120 'sequential_update': self._sequential_update
121 }
122 base_config = super(MovingAverage, self).get_config()
123 return dict(list(base_config.items()) + list(config.items()))
124
125 @classmethod
126 def from_config(cls, config, custom_objects=None):
127 optimizer = tf.keras.optimizers.deserialize(
128 config.pop('optimizer'), custom_objects=custom_objects)
129 return cls(optimizer, **config)
130
131 def assign_average_vars(self, var_list):
132 """Assign variables in var_list with their respective moving averages.
133
134 Example:
135 ```python
136 model = tf.Sequential([...])
137 opt = tfa.optimizers.MovingAverage(
138 tf.keras.optimizers.SGD(lr=2.0), 0.5)
139
140 model.compile(opt, ...)
141 model.fit(x, y, ...)
142
143 # Update the weights to their mean before saving
144 opt.assign_average_vars(model.variables)
145
146 model.save('model.h5')
147 ```
148 """
149 assign = tf.group([v.assign(self._ema.average(v)) for v in var_list])
150 return assign
151
152 @property
153 def weights(self):
154 return self._optimizer.weights
155
156 def _resource_apply_dense(self, grad, var):
157 return self._optimizer._resource_apply_dense(grad, var) # pylint: disable=protected-access
158
159 def _resource_apply_sparse_duplicate_indices(self, grad, var, indices):
160 return self._optimizer._resource_apply_sparse_duplicate_indices( # pylint: disable=protected-access
161 grad, var, indices)
162
163 def _resource_apply_sparse(self, grad, var, indices):
164 return self._optimizer._resource_apply_sparse(grad, var, indices) # pylint: disable=protected-access
165
166 @property
167 def learning_rate(self):
168 return self._optimizer._get_hyper('learning_rate')
169
170 @learning_rate.setter
171 def learning_rate(self, learning_rate):
172 self._optimizer._set_hyper('learning_rate', learning_rate)
173
174 @property
175 def lr(self):
176 return self.learning_rate
177
178 @lr.setter
179 def lr(self, lr):
180 self.learning_rate = lr
181
[end of tensorflow_addons/optimizers/moving_average.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tensorflow_addons/optimizers/moving_average.py b/tensorflow_addons/optimizers/moving_average.py
--- a/tensorflow_addons/optimizers/moving_average.py
+++ b/tensorflow_addons/optimizers/moving_average.py
@@ -78,9 +78,6 @@
raise TypeError(
"optimizer is not an object of tf.keras.optimizers.Optimizer")
- if num_updates is not None and not isinstance(num_updates, int):
- raise TypeError("num_updates must be None or of integer type")
-
if not isinstance(sequential_update, bool):
raise TypeError("sequential_update must be of bool type")
|
{"golden_diff": "diff --git a/tensorflow_addons/optimizers/moving_average.py b/tensorflow_addons/optimizers/moving_average.py\n--- a/tensorflow_addons/optimizers/moving_average.py\n+++ b/tensorflow_addons/optimizers/moving_average.py\n@@ -78,9 +78,6 @@\n raise TypeError(\n \"optimizer is not an object of tf.keras.optimizers.Optimizer\")\n \n- if num_updates is not None and not isinstance(num_updates, int):\n- raise TypeError(\"num_updates must be None or of integer type\")\n-\n if not isinstance(sequential_update, bool):\n raise TypeError(\"sequential_update must be of bool type\")\n", "issue": "Allow passing in a tensor to tfa.optimizers.MovingAverage num_updates\nCurrently, `tfa.optimizers.MovingAverage` has an assert that requires `num_updates` to be of type int, [shown here](https://github.com/tensorflow/addons/blob/604a70de563f8797984c9c3f002aff70bef6c90b/tensorflow_addons/optimizers/moving_average.py#L81). This prevents me from passing in an integer tensor that changes with the global step, which is officially supported by `tf.train.ExponentialMovingAverage`.\r\n\r\nCan this assert be updated to handle this use case?\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\[email protected]_keras_serializable(package='Addons')\nclass MovingAverage(tf.keras.optimizers.Optimizer):\n \"\"\"Optimizer that computes a moving average of the variables.\n\n Empirically it has been found that using the moving average of the trained\n parameters of a deep network is better than using its trained parameters\n directly. This optimizer allows you to compute this moving average and swap\n the variables at save time so that any code outside of the training loop\n will use by default the average values instead of the original ones.\n\n Example of usage:\n\n ```python\n opt = tf.keras.optimizers.SGD(learning_rate)\n opt = tfa.optimizers.MovingAverage(opt)\n\n ```\n \"\"\"\n\n def __init__(self,\n optimizer,\n average_decay=0.1,\n num_updates=None,\n sequential_update=True,\n name=\"MovingAverage\",\n **kwargs):\n \"\"\"Construct a new MovingAverage optimizer.\n\n Args:\n optimizer: str or `tf.keras.optimizers.Optimizer` that will be\n used to compute and apply gradients.\n average_decay: float. Decay to use to maintain the moving averages\n of trained variables. See `tf.train.ExponentialMovingAverage`\n for details.\n num_updates: Optional count of the number of updates applied to\n variables. See `tf.train.ExponentialMovingAverage` for details.\n sequential_update: Bool. If False, will compute the moving average\n at the same time as the model is updated, potentially doing\n benign data races. 
If True, will update the moving average\n after gradient updates.\n name: Optional name for the operations created when applying\n gradients. Defaults to \"MovingAverage\".\n **kwargs: keyword arguments. Allowed to be {`clipnorm`,\n `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by\n norm; `clipvalue` is clip gradients by value, `decay` is\n included for backward compatibility to allow time inverse\n decay of learning rate. `lr` is included for backward\n compatibility, recommended to use `learning_rate` instead.\n \"\"\"\n super(MovingAverage, self).__init__(name, **kwargs)\n\n if isinstance(optimizer, str):\n optimizer = tf.keras.optimizers.get(optimizer)\n\n if not isinstance(optimizer, tf.keras.optimizers.Optimizer):\n raise TypeError(\n \"optimizer is not an object of tf.keras.optimizers.Optimizer\")\n\n if num_updates is not None and not isinstance(num_updates, int):\n raise TypeError(\"num_updates must be None or of integer type\")\n\n if not isinstance(sequential_update, bool):\n raise TypeError(\"sequential_update must be of bool type\")\n\n with tf.name_scope(name):\n self._ema = tf.train.ExponentialMovingAverage(\n average_decay, num_updates=num_updates)\n\n self._optimizer = optimizer\n self._set_hyper(\"average_decay\", average_decay)\n self._num_updates = num_updates\n self._sequential_update = sequential_update\n self._initialized = False\n\n def apply_gradients(self, grads_and_vars, name=None):\n var_list = [v for (_, v) in grads_and_vars]\n\n if tf.executing_eagerly() and not self._initialized:\n # this to ensure that var_list is registered initially\n self._ema.apply(var_list)\n self._initialized = True\n\n train_op = self._optimizer.apply_gradients(grads_and_vars, name=name)\n\n if self._sequential_update:\n with tf.control_dependencies([train_op]):\n ma_op = self._ema.apply(var_list)\n else:\n ma_op = self._ema.apply(var_list)\n\n return tf.group(train_op, ma_op, name=\"train_with_avg\")\n\n def get_config(self):\n config = {\n 'optimizer': tf.keras.optimizers.serialize(self._optimizer),\n 'average_decay': self._serialize_hyperparameter('average_decay'),\n 'num_updates': self._num_updates,\n 'sequential_update': self._sequential_update\n }\n base_config = super(MovingAverage, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n optimizer = tf.keras.optimizers.deserialize(\n config.pop('optimizer'), custom_objects=custom_objects)\n return cls(optimizer, **config)\n\n def assign_average_vars(self, var_list):\n \"\"\"Assign variables in var_list with their respective moving averages.\n\n Example:\n ```python\n model = tf.Sequential([...])\n opt = tfa.optimizers.MovingAverage(\n tf.keras.optimizers.SGD(lr=2.0), 0.5)\n\n model.compile(opt, ...)\n model.fit(x, y, ...)\n\n # Update the weights to their mean before saving\n opt.assign_average_vars(model.variables)\n\n model.save('model.h5')\n ```\n \"\"\"\n assign = tf.group([v.assign(self._ema.average(v)) for v in var_list])\n return assign\n\n @property\n def weights(self):\n return self._optimizer.weights\n\n def _resource_apply_dense(self, grad, var):\n return self._optimizer._resource_apply_dense(grad, var) # pylint: disable=protected-access\n\n def _resource_apply_sparse_duplicate_indices(self, grad, var, indices):\n return self._optimizer._resource_apply_sparse_duplicate_indices( # pylint: disable=protected-access\n grad, var, indices)\n\n def _resource_apply_sparse(self, grad, var, indices):\n return 
self._optimizer._resource_apply_sparse(grad, var, indices) # pylint: disable=protected-access\n\n @property\n def learning_rate(self):\n return self._optimizer._get_hyper('learning_rate')\n\n @learning_rate.setter\n def learning_rate(self, learning_rate):\n self._optimizer._set_hyper('learning_rate', learning_rate)\n\n @property\n def lr(self):\n return self.learning_rate\n\n @lr.setter\n def lr(self, lr):\n self.learning_rate = lr\n", "path": "tensorflow_addons/optimizers/moving_average.py"}]}
| 2,598 | 147 |
gh_patches_debug_1689
|
rasdani/github-patches
|
git_diff
|
ibis-project__ibis-3630
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug(duckdb): duckdb backend should add in CAST for some bind parameters
DuckDB casts bind parameters `?` to strings, which leads to binder errors with some queries.
If we have a small tpch dataset:
```python
import duckdb
con = duckdb.connect("tpch.ddb")
con.execute("CALL dbgen(sf=0.1)")
import ibis
con = ibis.duckdb.connect("tpch.ddb")
t = con.table('orders')
expr = t.aggregate(high_line_count=t.o_orderpriority.case().when('1-URGENT', 1).else_(0).end().sum())
expr.execute()
```
raises
```
RuntimeError: Binder Error: No function matches the given name and argument types 'sum(VARCHAR)'. You might need to add explicit type casts.
Candidate functions:
sum(DECIMAL) -> DECIMAL
sum(SMALLINT) -> HUGEINT
sum(INTEGER) -> HUGEINT
sum(BIGINT) -> HUGEINT
sum(HUGEINT) -> HUGEINT
sum(DOUBLE) -> DOUBLE
LINE 1: SELECT sum(CASE WHEN (t0.o_orderpriority = ?) ...
```
because our generated SQL doesn't have explicit casts:
```
print(expr.compile())
SELECT sum(CASE WHEN (t0.o_orderpriority = ?) THEN ? ELSE ? END) AS high_line_count
FROM orders AS t0
```
we want to generate
```
SELECT sum(CASE WHEN (t0.o_orderpriority = ?) THEN cast(? as INTEGER) ELSE cast(? as INTEGER) END) AS high_line_count FROM orders as t0
```
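One way to get there, sketched here only to illustrate the idea (not a tested fix), is to wrap the bound literal in an explicit SQLAlchemy cast so the type information survives into the generated SQL:

```python
import sqlalchemy as sa

# Illustrative only: an explicit CAST around the bound literal carries the type
# into the SQL string, so a qmark-style driver renders CAST(? AS INTEGER).
lit = sa.cast(sa.literal(1), sa.INTEGER)
print(lit)  # CAST(:param_1 AS INTEGER)
```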
</issue>
<code>
[start of ibis/backends/duckdb/registry.py]
1 import collections
2 import operator
3
4 import numpy as np
5 import sqlalchemy as sa
6
7 import ibis.expr.datatypes as dt
8 import ibis.expr.operations as ops
9 from ibis.backends.base.sql.alchemy import to_sqla_type, unary
10
11 from ..base.sql.alchemy.registry import _geospatial_functions, _table_column
12 from ..postgres.registry import fixed_arity, operation_registry
13
14 operation_registry = {
15 op: operation_registry[op]
16 # duckdb does not support geospatial operations, but shares most of the
17 # remaining postgres rules
18 for op in operation_registry.keys() - _geospatial_functions.keys()
19 }
20
21
22 def _round(t, expr):
23 arg, digits = expr.op().args
24 sa_arg = t.translate(arg)
25
26 if digits is None:
27 return sa.func.round(sa_arg)
28
29 return sa.func.round(sa_arg, t.translate(digits))
30
31
32 _LOG_BASE_FUNCS = {
33 2: sa.func.log2,
34 10: sa.func.log,
35 }
36
37
38 def _generic_log(arg, base):
39 return sa.func.ln(arg) / sa.func.ln(base)
40
41
42 def _log(t, expr):
43 arg, base = expr.op().args
44 sa_arg = t.translate(arg)
45 if base is not None:
46 sa_base = t.translate(base)
47 try:
48 base_value = sa_base.value
49 except AttributeError:
50 return _generic_log(sa_arg, sa_base)
51 else:
52 func = _LOG_BASE_FUNCS.get(base_value, _generic_log)
53 return func(sa_arg)
54 return sa.func.ln(sa_arg)
55
56
57 def _timestamp_from_unix(t, expr):
58 op = expr.op()
59 arg, unit = op.args
60 arg = t.translate(arg)
61
62 if unit in {"us", "ns"}:
63 raise ValueError(f"`{unit}` unit is not supported!")
64
65 if unit == "ms":
66 return sa.func.epoch_ms(arg)
67 elif unit == "s":
68 return sa.func.to_timestamp(arg)
69
70
71 def _literal(_, expr):
72 dtype = expr.type()
73 sqla_type = to_sqla_type(dtype)
74 op = expr.op()
75 value = op.value
76
77 if isinstance(dtype, dt.Interval):
78 return sa.text(f"INTERVAL '{value} {dtype.resolution}'")
79 elif isinstance(dtype, dt.Set) or (
80 isinstance(value, collections.abc.Sequence)
81 and not isinstance(value, str)
82 ):
83 return sa.cast(sa.func.list_value(*value), sqla_type)
84 elif isinstance(value, np.ndarray):
85 return sa.cast(sa.func.list_value(*value.tolist()), sqla_type)
86 elif isinstance(value, collections.abc.Mapping):
87 if isinstance(dtype, dt.Struct):
88 placeholders = ", ".join(
89 f"{key!r}: :v{i}" for i, key in enumerate(value.keys())
90 )
91 return sa.text(f"{{{placeholders}}}").bindparams(
92 *(
93 sa.bindparam(f"v{i:d}", val)
94 for i, val in enumerate(value.values())
95 )
96 )
97 raise NotImplementedError(
98 f"Ibis dtype `{dtype}` with mapping type "
99 f"`{type(value).__name__}` isn't yet supported with the duckdb "
100 "backend"
101 )
102 return sa.literal(value)
103
104
105 def _array_column(t, expr):
106 (arg,) = expr.op().args
107 sqla_type = to_sqla_type(expr.type())
108 return sa.cast(sa.func.list_value(*map(t.translate, arg)), sqla_type)
109
110
111 def _struct_field(t, expr):
112 op = expr.op()
113 return sa.func.struct_extract(
114 t.translate(op.arg),
115 sa.text(repr(op.field)),
116 type_=to_sqla_type(expr.type()),
117 )
118
119
120 def _regex_extract(t, expr):
121 string, pattern, index = map(t.translate, expr.op().args)
122 result = sa.case(
123 [
124 (
125 sa.func.regexp_matches(string, pattern),
126 sa.func.regexp_extract(
127 string,
128 pattern,
129 # DuckDB requires the index to be a constant so we compile
130 # the value and inline it using sa.text
131 sa.text(
132 str(
133 (index + 1).compile(
134 compile_kwargs=dict(literal_binds=True)
135 )
136 )
137 ),
138 ),
139 )
140 ],
141 else_="",
142 )
143 return result
144
145
146 operation_registry.update(
147 {
148 ops.ArrayColumn: _array_column,
149 ops.ArrayConcat: fixed_arity('array_concat', 2),
150 ops.ArrayIndex: fixed_arity('list_element', 2),
151 ops.DayOfWeekName: unary(sa.func.dayname),
152 ops.Literal: _literal,
153 ops.Log2: unary(sa.func.log2),
154 ops.Ln: unary(sa.func.ln),
155 ops.Log: _log,
156 # TODO: map operations, but DuckDB's maps are multimaps
157 ops.Modulus: fixed_arity(operator.mod, 2),
158 ops.Round: _round,
159 ops.StructField: _struct_field,
160 ops.TableColumn: _table_column,
161 ops.TimestampDiff: fixed_arity('age', 2),
162 ops.TimestampFromUNIX: _timestamp_from_unix,
163 ops.Translate: fixed_arity('replace', 3),
164 ops.TimestampNow: fixed_arity('now', 0),
165 ops.RegexExtract: _regex_extract,
166 ops.RegexReplace: fixed_arity("regexp_replace", 3),
167 }
168 )
169
[end of ibis/backends/duckdb/registry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ibis/backends/duckdb/registry.py b/ibis/backends/duckdb/registry.py
--- a/ibis/backends/duckdb/registry.py
+++ b/ibis/backends/duckdb/registry.py
@@ -99,7 +99,7 @@
f"`{type(value).__name__}` isn't yet supported with the duckdb "
"backend"
)
- return sa.literal(value)
+ return sa.cast(sa.literal(value), sqla_type)
def _array_column(t, expr):
|
{"golden_diff": "diff --git a/ibis/backends/duckdb/registry.py b/ibis/backends/duckdb/registry.py\n--- a/ibis/backends/duckdb/registry.py\n+++ b/ibis/backends/duckdb/registry.py\n@@ -99,7 +99,7 @@\n f\"`{type(value).__name__}` isn't yet supported with the duckdb \"\n \"backend\"\n )\n- return sa.literal(value)\n+ return sa.cast(sa.literal(value), sqla_type)\n \n \n def _array_column(t, expr):\n", "issue": "bug(duckdb): duckdb backend should add in CAST for some bind parameters\nDuckDB casts bind parameters `?` to strings which leads to binder errors with some queries\r\n\r\nIf we have a small tpch dataset:\r\n\r\n```python\r\nimport duckdb\r\ncon = duckdb.connect(\"tpch.ddb\")\r\ncon.execute(\"CALL dbgen(sf=0.1)\")\r\n\r\nimport ibis\r\ncon = ibis.duckdb.connect(\"tpch.ddb\")\r\n\r\nt = con.table('orders')\r\nexpr = t.aggregate(high_line_count=(t.o_orderpriority.case().when('1-URGENT', 1).else_(0).end().sum()\r\n\r\nexpr.execute()\r\n```\r\n\r\nraises\r\n\r\n```\r\nRuntimeError: Binder Error: No function matches the given name and argument types 'sum(VARCHAR)'. You might need to add explicit type casts.\r\n\tCandidate functions:\r\n\tsum(DECIMAL) -> DECIMAL\r\n\tsum(SMALLINT) -> HUGEINT\r\n\tsum(INTEGER) -> HUGEINT\r\n\tsum(BIGINT) -> HUGEINT\r\n\tsum(HUGEINT) -> HUGEINT\r\n\tsum(DOUBLE) -> DOUBLE\r\n\r\nLINE 1: SELECT sum(CASE WHEN (t0.o_orderpriority = ?) ...\r\n```\r\n\r\nbecause our generated SQL doesn't have explicit casts:\r\n\r\n```\r\nprint(expr.compile())\r\nSELECT sum(CASE WHEN (t0.o_orderpriority = ?) THEN ? ELSE ? END) AS high_line_count \r\nFROM orders AS t0\r\n```\r\n\r\nwe want to generate\r\n\r\n```\r\nSELECT sum(CASE WHEN (t0.o_orderpriority = ?) THEN cast(? as INTEGER) ELSE cast(? as INTEGER) END) AS high_line_count FROM orders as t0\r\n```\n", "before_files": [{"content": "import collections\nimport operator\n\nimport numpy as np\nimport sqlalchemy as sa\n\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nfrom ibis.backends.base.sql.alchemy import to_sqla_type, unary\n\nfrom ..base.sql.alchemy.registry import _geospatial_functions, _table_column\nfrom ..postgres.registry import fixed_arity, operation_registry\n\noperation_registry = {\n op: operation_registry[op]\n # duckdb does not support geospatial operations, but shares most of the\n # remaining postgres rules\n for op in operation_registry.keys() - _geospatial_functions.keys()\n}\n\n\ndef _round(t, expr):\n arg, digits = expr.op().args\n sa_arg = t.translate(arg)\n\n if digits is None:\n return sa.func.round(sa_arg)\n\n return sa.func.round(sa_arg, t.translate(digits))\n\n\n_LOG_BASE_FUNCS = {\n 2: sa.func.log2,\n 10: sa.func.log,\n}\n\n\ndef _generic_log(arg, base):\n return sa.func.ln(arg) / sa.func.ln(base)\n\n\ndef _log(t, expr):\n arg, base = expr.op().args\n sa_arg = t.translate(arg)\n if base is not None:\n sa_base = t.translate(base)\n try:\n base_value = sa_base.value\n except AttributeError:\n return _generic_log(sa_arg, sa_base)\n else:\n func = _LOG_BASE_FUNCS.get(base_value, _generic_log)\n return func(sa_arg)\n return sa.func.ln(sa_arg)\n\n\ndef _timestamp_from_unix(t, expr):\n op = expr.op()\n arg, unit = op.args\n arg = t.translate(arg)\n\n if unit in {\"us\", \"ns\"}:\n raise ValueError(f\"`{unit}` unit is not supported!\")\n\n if unit == \"ms\":\n return sa.func.epoch_ms(arg)\n elif unit == \"s\":\n return sa.func.to_timestamp(arg)\n\n\ndef _literal(_, expr):\n dtype = expr.type()\n sqla_type = to_sqla_type(dtype)\n op = expr.op()\n value = op.value\n\n if isinstance(dtype, 
dt.Interval):\n return sa.text(f\"INTERVAL '{value} {dtype.resolution}'\")\n elif isinstance(dtype, dt.Set) or (\n isinstance(value, collections.abc.Sequence)\n and not isinstance(value, str)\n ):\n return sa.cast(sa.func.list_value(*value), sqla_type)\n elif isinstance(value, np.ndarray):\n return sa.cast(sa.func.list_value(*value.tolist()), sqla_type)\n elif isinstance(value, collections.abc.Mapping):\n if isinstance(dtype, dt.Struct):\n placeholders = \", \".join(\n f\"{key!r}: :v{i}\" for i, key in enumerate(value.keys())\n )\n return sa.text(f\"{{{placeholders}}}\").bindparams(\n *(\n sa.bindparam(f\"v{i:d}\", val)\n for i, val in enumerate(value.values())\n )\n )\n raise NotImplementedError(\n f\"Ibis dtype `{dtype}` with mapping type \"\n f\"`{type(value).__name__}` isn't yet supported with the duckdb \"\n \"backend\"\n )\n return sa.literal(value)\n\n\ndef _array_column(t, expr):\n (arg,) = expr.op().args\n sqla_type = to_sqla_type(expr.type())\n return sa.cast(sa.func.list_value(*map(t.translate, arg)), sqla_type)\n\n\ndef _struct_field(t, expr):\n op = expr.op()\n return sa.func.struct_extract(\n t.translate(op.arg),\n sa.text(repr(op.field)),\n type_=to_sqla_type(expr.type()),\n )\n\n\ndef _regex_extract(t, expr):\n string, pattern, index = map(t.translate, expr.op().args)\n result = sa.case(\n [\n (\n sa.func.regexp_matches(string, pattern),\n sa.func.regexp_extract(\n string,\n pattern,\n # DuckDB requires the index to be a constant so we compile\n # the value and inline it using sa.text\n sa.text(\n str(\n (index + 1).compile(\n compile_kwargs=dict(literal_binds=True)\n )\n )\n ),\n ),\n )\n ],\n else_=\"\",\n )\n return result\n\n\noperation_registry.update(\n {\n ops.ArrayColumn: _array_column,\n ops.ArrayConcat: fixed_arity('array_concat', 2),\n ops.ArrayIndex: fixed_arity('list_element', 2),\n ops.DayOfWeekName: unary(sa.func.dayname),\n ops.Literal: _literal,\n ops.Log2: unary(sa.func.log2),\n ops.Ln: unary(sa.func.ln),\n ops.Log: _log,\n # TODO: map operations, but DuckDB's maps are multimaps\n ops.Modulus: fixed_arity(operator.mod, 2),\n ops.Round: _round,\n ops.StructField: _struct_field,\n ops.TableColumn: _table_column,\n ops.TimestampDiff: fixed_arity('age', 2),\n ops.TimestampFromUNIX: _timestamp_from_unix,\n ops.Translate: fixed_arity('replace', 3),\n ops.TimestampNow: fixed_arity('now', 0),\n ops.RegexExtract: _regex_extract,\n ops.RegexReplace: fixed_arity(\"regexp_replace\", 3),\n }\n)\n", "path": "ibis/backends/duckdb/registry.py"}]}
| 2,457 | 122 |
gh_patches_debug_37493
|
rasdani/github-patches
|
git_diff
|
localstack__localstack-2593
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Kinesis stream records do not offer AWS-defined optional values
<!-- Love localstack? Please consider supporting our collective:
👉 https://opencollective.com/localstack/donate -->
# Type of request: This is a ...
[ ] bug report
[x] feature request
# Detailed description
Kinesis does not populate (or offer to populate) all potential values in a record. I have a lambda being triggered by kinesis events. The records being passed in contain only the values required by AWS; none of the optional values are present.
## Expected behavior
An event's kinesis object should be configurable to look as follows
```
{
"ApproximateArrivalTimestamp": number,
"Data": blob,
"EncryptionType": "string",
"PartitionKey": "string",
"SequenceNumber": "string"
}
```
## Actual behavior
In its current state, only required values are present.
```
{
"Data": blob,
"PartitionKey": "string",
"SequenceNumber": "string"
}
```
# Steps to reproduce
## Command used to start LocalStack
`docker-compose up` with given `docker-compose.yml`
## Client code (AWS SDK code snippet, or sequence of "awslocal" commands)
The source of this issue/feature request lies in the kinesis listener. AWS documentation of a kinesis stream record lists all values here: https://docs.aws.amazon.com/kinesis/latest/APIReference/API_Record.html
We are only returning the required values.
https://github.com/localstack/localstack/blob/78031dd65da9394f8b1b020be01ef02c63c433ee/localstack/services/kinesis/kinesis_listener.py#L85-L94
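For example, the listener could build each event record along these lines (a rough sketch; `timestamp_millis()` is a placeholder for whatever timestamp helper the codebase already provides, and the field names mirror the Lambda Kinesis event format):

```python
def to_event_record(record, sequence_number):
    # Rough sketch only: adds the optional fields alongside the required ones.
    # timestamp_millis() is a hypothetical stand-in for an existing helper.
    return {
        'approximateArrivalTimestamp': timestamp_millis(),
        'data': record['Data'],
        'encryptionType': 'NONE',
        'partitionKey': record['PartitionKey'],
        'sequenceNumber': sequence_number,
    }
```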
If there is no implicit reason for not offering optional values, I'm more than happy to submit a PR.
</issue>
<code>
[start of localstack/services/kinesis/kinesis_listener.py]
1 import json
2 import random
3 from datetime import datetime
4 from requests.models import Response
5 from localstack import config
6 from localstack.utils.common import to_str, json_safe, clone
7 from localstack.utils.analytics import event_publisher
8 from localstack.services.awslambda import lambda_api
9 from localstack.services.generic_proxy import ProxyListener
10
11 # action headers
12 ACTION_PREFIX = 'Kinesis_20131202'
13 ACTION_PUT_RECORD = '%s.PutRecord' % ACTION_PREFIX
14 ACTION_PUT_RECORDS = '%s.PutRecords' % ACTION_PREFIX
15 ACTION_LIST_STREAMS = '%s.ListStreams' % ACTION_PREFIX
16 ACTION_CREATE_STREAM = '%s.CreateStream' % ACTION_PREFIX
17 ACTION_DELETE_STREAM = '%s.DeleteStream' % ACTION_PREFIX
18 ACTION_UPDATE_SHARD_COUNT = '%s.UpdateShardCount' % ACTION_PREFIX
19
20 # list of stream consumer details
21 STREAM_CONSUMERS = []
22
23
24 class ProxyListenerKinesis(ProxyListener):
25
26 def forward_request(self, method, path, data, headers):
27 global STREAM_CONSUMERS
28 data = json.loads(to_str(data or '{}'))
29 action = headers.get('X-Amz-Target')
30
31 if action == '%s.RegisterStreamConsumer' % ACTION_PREFIX:
32 consumer = clone(data)
33 consumer['ConsumerStatus'] = 'ACTIVE'
34 consumer['ConsumerARN'] = '%s/consumer/%s' % (data['StreamARN'], data['ConsumerName'])
35 consumer['ConsumerCreationTimestamp'] = datetime.now()
36 consumer = json_safe(consumer)
37 STREAM_CONSUMERS.append(consumer)
38 return {'Consumer': consumer}
39 elif action == '%s.DeregisterStreamConsumer' % ACTION_PREFIX:
40 def consumer_matches(c):
41 stream_arn = data.get('StreamARN')
42 cons_name = data.get('ConsumerName')
43 cons_arn = data.get('ConsumerARN')
44 return (c.get('ConsumerARN') == cons_arn or
45 (c.get('StreamARN') == stream_arn and c.get('ConsumerName') == cons_name))
46 STREAM_CONSUMERS = [c for c in STREAM_CONSUMERS if not consumer_matches(c)]
47 return {}
48 elif action == '%s.ListStreamConsumers' % ACTION_PREFIX:
49 result = {
50 'Consumers': [c for c in STREAM_CONSUMERS if c.get('StreamARN') == data.get('StreamARN')]
51 }
52 return result
53 elif action == '%s.DescribeStreamConsumer' % ACTION_PREFIX:
54 consumer_arn = data.get('ConsumerARN') or data['ConsumerName']
55 consumer_name = data.get('ConsumerName') or data['ConsumerARN']
56 result = {
57 'ConsumerDescription': {
58 'ConsumerARN': consumer_arn,
59 # 'ConsumerCreationTimestamp': number,
60 'ConsumerName': consumer_name,
61 'ConsumerStatus': 'ACTIVE',
62 'StreamARN': data.get('StreamARN')
63 }
64 }
65 return result
66
67 if random.random() < config.KINESIS_ERROR_PROBABILITY:
68 action = headers.get('X-Amz-Target')
69 if action in [ACTION_PUT_RECORD, ACTION_PUT_RECORDS]:
70 return kinesis_error_response(data, action)
71 return True
72
73 def return_response(self, method, path, data, headers, response):
74 action = headers.get('X-Amz-Target')
75 data = json.loads(to_str(data or '{}'))
76
77 records = []
78 if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM):
79 event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM if action == ACTION_CREATE_STREAM
80 else event_publisher.EVENT_KINESIS_DELETE_STREAM)
81 payload = {'n': event_publisher.get_hash(data.get('StreamName'))}
82 if action == ACTION_CREATE_STREAM:
83 payload['s'] = data.get('ShardCount')
84 event_publisher.fire_event(event_type, payload=payload)
85 elif action == ACTION_PUT_RECORD:
86 response_body = json.loads(to_str(response.content))
87 event_record = {
88 'data': data['Data'],
89 'partitionKey': data['PartitionKey'],
90 'sequenceNumber': response_body.get('SequenceNumber')
91 }
92 event_records = [event_record]
93 stream_name = data['StreamName']
94 lambda_api.process_kinesis_records(event_records, stream_name)
95 elif action == ACTION_PUT_RECORDS:
96 event_records = []
97 response_body = json.loads(to_str(response.content))
98 if 'Records' in response_body:
99 response_records = response_body['Records']
100 records = data['Records']
101 for index in range(0, len(records)):
102 record = records[index]
103 event_record = {
104 'data': record['Data'],
105 'partitionKey': record['PartitionKey'],
106 'sequenceNumber': response_records[index].get('SequenceNumber')
107 }
108 event_records.append(event_record)
109 stream_name = data['StreamName']
110 lambda_api.process_kinesis_records(event_records, stream_name)
111 elif action == ACTION_UPDATE_SHARD_COUNT:
112 # Currently kinesalite, which backs the Kinesis implementation for localstack, does
113 # not support UpdateShardCount:
114 # https://github.com/mhart/kinesalite/issues/61
115 #
116 # [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it
117 # applies Kinesis resources. A Terraform run fails when this is not present.
118 #
119 # The code that follows just returns a successful response, bypassing the 400
120 # response that kinesalite returns.
121 #
122 response = Response()
123 response.status_code = 200
124 content = {
125 'CurrentShardCount': 1,
126 'StreamName': data['StreamName'],
127 'TargetShardCount': data['TargetShardCount']
128 }
129 response.encoding = 'UTF-8'
130 response._content = json.dumps(content)
131 return response
132
133
134 # instantiate listener
135 UPDATE_KINESIS = ProxyListenerKinesis()
136
137
138 def kinesis_error_response(data, action):
139 error_response = Response()
140
141 if action == ACTION_PUT_RECORD:
142 error_response.status_code = 400
143 content = {
144 'ErrorCode': 'ProvisionedThroughputExceededException',
145 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'
146 }
147 else:
148 error_response.status_code = 200
149 content = {'FailedRecordCount': 1, 'Records': []}
150 for record in data.get('Records', []):
151 content['Records'].append({
152 'ErrorCode': 'ProvisionedThroughputExceededException',
153 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'
154 })
155
156 error_response._content = json.dumps(content)
157 return error_response
158
[end of localstack/services/kinesis/kinesis_listener.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/localstack/services/kinesis/kinesis_listener.py b/localstack/services/kinesis/kinesis_listener.py
--- a/localstack/services/kinesis/kinesis_listener.py
+++ b/localstack/services/kinesis/kinesis_listener.py
@@ -1,9 +1,8 @@
import json
import random
-from datetime import datetime
from requests.models import Response
from localstack import config
-from localstack.utils.common import to_str, json_safe, clone
+from localstack.utils.common import to_str, json_safe, clone, timestamp_millis
from localstack.utils.analytics import event_publisher
from localstack.services.awslambda import lambda_api
from localstack.services.generic_proxy import ProxyListener
@@ -32,7 +31,7 @@
consumer = clone(data)
consumer['ConsumerStatus'] = 'ACTIVE'
consumer['ConsumerARN'] = '%s/consumer/%s' % (data['StreamARN'], data['ConsumerName'])
- consumer['ConsumerCreationTimestamp'] = datetime.now()
+ consumer['ConsumerCreationTimestamp'] = timestamp_millis()
consumer = json_safe(consumer)
STREAM_CONSUMERS.append(consumer)
return {'Consumer': consumer}
@@ -85,7 +84,9 @@
elif action == ACTION_PUT_RECORD:
response_body = json.loads(to_str(response.content))
event_record = {
+ 'approximateArrivalTimestamp': timestamp_millis(),
'data': data['Data'],
+ 'encryptionType': 'NONE',
'partitionKey': data['PartitionKey'],
'sequenceNumber': response_body.get('SequenceNumber')
}
@@ -101,7 +102,9 @@
for index in range(0, len(records)):
record = records[index]
event_record = {
+ 'approximateArrivalTimestamp': timestamp_millis(),
'data': record['Data'],
+ 'encryptionType': 'NONE',
'partitionKey': record['PartitionKey'],
'sequenceNumber': response_records[index].get('SequenceNumber')
}
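
For readers skimming the diff, the sketch below shows the shape of a single event record that the patched listener hands to `lambda_api.process_kinesis_records`. Only the keys come from the diff above; the concrete values are made up for illustration.

```python
# Illustrative record shape after the patch (keys from the diff; values are fake).
event_record = {
    'approximateArrivalTimestamp': 1612137600000,  # timestamp_millis() at ingest time
    'data': 'eyJmb28iOiAiYmFyIn0=',                # base64 payload from the PutRecord call
    'encryptionType': 'NONE',
    'partitionKey': 'partitionkey-1',
    'sequenceNumber': '49590338271490256608559692538361571095921575989136588898',
}
```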
|
{"golden_diff": "diff --git a/localstack/services/kinesis/kinesis_listener.py b/localstack/services/kinesis/kinesis_listener.py\n--- a/localstack/services/kinesis/kinesis_listener.py\n+++ b/localstack/services/kinesis/kinesis_listener.py\n@@ -1,9 +1,8 @@\n import json\n import random\n-from datetime import datetime\n from requests.models import Response\n from localstack import config\n-from localstack.utils.common import to_str, json_safe, clone\n+from localstack.utils.common import to_str, json_safe, clone, timestamp_millis\n from localstack.utils.analytics import event_publisher\n from localstack.services.awslambda import lambda_api\n from localstack.services.generic_proxy import ProxyListener\n@@ -32,7 +31,7 @@\n consumer = clone(data)\n consumer['ConsumerStatus'] = 'ACTIVE'\n consumer['ConsumerARN'] = '%s/consumer/%s' % (data['StreamARN'], data['ConsumerName'])\n- consumer['ConsumerCreationTimestamp'] = datetime.now()\n+ consumer['ConsumerCreationTimestamp'] = timestamp_millis()\n consumer = json_safe(consumer)\n STREAM_CONSUMERS.append(consumer)\n return {'Consumer': consumer}\n@@ -85,7 +84,9 @@\n elif action == ACTION_PUT_RECORD:\n response_body = json.loads(to_str(response.content))\n event_record = {\n+ 'approximateArrivalTimestamp': timestamp_millis(),\n 'data': data['Data'],\n+ 'encryptionType': 'NONE',\n 'partitionKey': data['PartitionKey'],\n 'sequenceNumber': response_body.get('SequenceNumber')\n }\n@@ -101,7 +102,9 @@\n for index in range(0, len(records)):\n record = records[index]\n event_record = {\n+ 'approximateArrivalTimestamp': timestamp_millis(),\n 'data': record['Data'],\n+ 'encryptionType': 'NONE',\n 'partitionKey': record['PartitionKey'],\n 'sequenceNumber': response_records[index].get('SequenceNumber')\n }\n", "issue": "Kinesis stream records do not offer AWS-defined optional values\n<!-- Love localstack? Please consider supporting our collective:\r\n\ud83d\udc49 https://opencollective.com/localstack/donate -->\r\n\r\n# Type of request: This is a ...\r\n\r\n[ ] bug report\r\n[x] feature request\r\n\r\n# Detailed description\r\n\r\nKinesis does not populate/offer to populate all potential values in a record. I have a lambda being triggered by kinesis events. The records being passed in are only those required by AWS, but there are no optional values being used.\r\n\r\n## Expected behavior\r\n\r\nAn event's kinesis object should be configurable to look as follows\r\n```\r\n{ \r\n \"ApproximateArrivalTimestamp\": number,\r\n \"Data\": blob,\r\n \"EncryptionType\": \"string\",\r\n \"PartitionKey\": \"string\",\r\n \"SequenceNumber\": \"string\"\r\n}\r\n```\r\n\r\n## Actual behavior\r\n\r\nIn its current state, only required values are present.\r\n```\r\n{ \r\n \"Data\": blob,\r\n \"PartitionKey\": \"string\",\r\n \"SequenceNumber\": \"string\"\r\n}\r\n```\r\n\r\n# Steps to reproduce\r\n\r\n## Command used to start LocalStack\r\n\r\n`docker-compose up` with given `docker-compose.yml`\r\n\r\n## Client code (AWS SDK code snippet, or sequence of \"awslocal\" commands)\r\n\r\nThe source of this issue/feature request lies in the kinesis listener. 
AWS documentation of a kinesis stream record lists all values here: https://docs.aws.amazon.com/kinesis/latest/APIReference/API_Record.html\r\n\r\nWe are only returning the required values.\r\nhttps://github.com/localstack/localstack/blob/78031dd65da9394f8b1b020be01ef02c63c433ee/localstack/services/kinesis/kinesis_listener.py#L85-L94\r\n\r\nIf there is no implicit reason for not offering optional values, I'm more than happy to submit a PR.\r\n\n", "before_files": [{"content": "import json\nimport random\nfrom datetime import datetime\nfrom requests.models import Response\nfrom localstack import config\nfrom localstack.utils.common import to_str, json_safe, clone\nfrom localstack.utils.analytics import event_publisher\nfrom localstack.services.awslambda import lambda_api\nfrom localstack.services.generic_proxy import ProxyListener\n\n# action headers\nACTION_PREFIX = 'Kinesis_20131202'\nACTION_PUT_RECORD = '%s.PutRecord' % ACTION_PREFIX\nACTION_PUT_RECORDS = '%s.PutRecords' % ACTION_PREFIX\nACTION_LIST_STREAMS = '%s.ListStreams' % ACTION_PREFIX\nACTION_CREATE_STREAM = '%s.CreateStream' % ACTION_PREFIX\nACTION_DELETE_STREAM = '%s.DeleteStream' % ACTION_PREFIX\nACTION_UPDATE_SHARD_COUNT = '%s.UpdateShardCount' % ACTION_PREFIX\n\n# list of stream consumer details\nSTREAM_CONSUMERS = []\n\n\nclass ProxyListenerKinesis(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n global STREAM_CONSUMERS\n data = json.loads(to_str(data or '{}'))\n action = headers.get('X-Amz-Target')\n\n if action == '%s.RegisterStreamConsumer' % ACTION_PREFIX:\n consumer = clone(data)\n consumer['ConsumerStatus'] = 'ACTIVE'\n consumer['ConsumerARN'] = '%s/consumer/%s' % (data['StreamARN'], data['ConsumerName'])\n consumer['ConsumerCreationTimestamp'] = datetime.now()\n consumer = json_safe(consumer)\n STREAM_CONSUMERS.append(consumer)\n return {'Consumer': consumer}\n elif action == '%s.DeregisterStreamConsumer' % ACTION_PREFIX:\n def consumer_matches(c):\n stream_arn = data.get('StreamARN')\n cons_name = data.get('ConsumerName')\n cons_arn = data.get('ConsumerARN')\n return (c.get('ConsumerARN') == cons_arn or\n (c.get('StreamARN') == stream_arn and c.get('ConsumerName') == cons_name))\n STREAM_CONSUMERS = [c for c in STREAM_CONSUMERS if not consumer_matches(c)]\n return {}\n elif action == '%s.ListStreamConsumers' % ACTION_PREFIX:\n result = {\n 'Consumers': [c for c in STREAM_CONSUMERS if c.get('StreamARN') == data.get('StreamARN')]\n }\n return result\n elif action == '%s.DescribeStreamConsumer' % ACTION_PREFIX:\n consumer_arn = data.get('ConsumerARN') or data['ConsumerName']\n consumer_name = data.get('ConsumerName') or data['ConsumerARN']\n result = {\n 'ConsumerDescription': {\n 'ConsumerARN': consumer_arn,\n # 'ConsumerCreationTimestamp': number,\n 'ConsumerName': consumer_name,\n 'ConsumerStatus': 'ACTIVE',\n 'StreamARN': data.get('StreamARN')\n }\n }\n return result\n\n if random.random() < config.KINESIS_ERROR_PROBABILITY:\n action = headers.get('X-Amz-Target')\n if action in [ACTION_PUT_RECORD, ACTION_PUT_RECORDS]:\n return kinesis_error_response(data, action)\n return True\n\n def return_response(self, method, path, data, headers, response):\n action = headers.get('X-Amz-Target')\n data = json.loads(to_str(data or '{}'))\n\n records = []\n if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM):\n event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM if action == ACTION_CREATE_STREAM\n else event_publisher.EVENT_KINESIS_DELETE_STREAM)\n payload = {'n': 
event_publisher.get_hash(data.get('StreamName'))}\n if action == ACTION_CREATE_STREAM:\n payload['s'] = data.get('ShardCount')\n event_publisher.fire_event(event_type, payload=payload)\n elif action == ACTION_PUT_RECORD:\n response_body = json.loads(to_str(response.content))\n event_record = {\n 'data': data['Data'],\n 'partitionKey': data['PartitionKey'],\n 'sequenceNumber': response_body.get('SequenceNumber')\n }\n event_records = [event_record]\n stream_name = data['StreamName']\n lambda_api.process_kinesis_records(event_records, stream_name)\n elif action == ACTION_PUT_RECORDS:\n event_records = []\n response_body = json.loads(to_str(response.content))\n if 'Records' in response_body:\n response_records = response_body['Records']\n records = data['Records']\n for index in range(0, len(records)):\n record = records[index]\n event_record = {\n 'data': record['Data'],\n 'partitionKey': record['PartitionKey'],\n 'sequenceNumber': response_records[index].get('SequenceNumber')\n }\n event_records.append(event_record)\n stream_name = data['StreamName']\n lambda_api.process_kinesis_records(event_records, stream_name)\n elif action == ACTION_UPDATE_SHARD_COUNT:\n # Currently kinesalite, which backs the Kinesis implementation for localstack, does\n # not support UpdateShardCount:\n # https://github.com/mhart/kinesalite/issues/61\n #\n # [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it\n # applies Kinesis resources. A Terraform run fails when this is not present.\n #\n # The code that follows just returns a successful response, bypassing the 400\n # response that kinesalite returns.\n #\n response = Response()\n response.status_code = 200\n content = {\n 'CurrentShardCount': 1,\n 'StreamName': data['StreamName'],\n 'TargetShardCount': data['TargetShardCount']\n }\n response.encoding = 'UTF-8'\n response._content = json.dumps(content)\n return response\n\n\n# instantiate listener\nUPDATE_KINESIS = ProxyListenerKinesis()\n\n\ndef kinesis_error_response(data, action):\n error_response = Response()\n\n if action == ACTION_PUT_RECORD:\n error_response.status_code = 400\n content = {\n 'ErrorCode': 'ProvisionedThroughputExceededException',\n 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'\n }\n else:\n error_response.status_code = 200\n content = {'FailedRecordCount': 1, 'Records': []}\n for record in data.get('Records', []):\n content['Records'].append({\n 'ErrorCode': 'ProvisionedThroughputExceededException',\n 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'\n })\n\n error_response._content = json.dumps(content)\n return error_response\n", "path": "localstack/services/kinesis/kinesis_listener.py"}]}
| 2,738 | 429 |
gh_patches_debug_10304
|
rasdani/github-patches
|
git_diff
|
google__openhtf-393
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Nicer failure mode for misuse of StoreInModule.
See #389.
> ...it's nonsensical to have no dots, but something one might accidentally do if you forget to do the %s/name business, maybe throw in a quick format check on the inputs of add_argument() to fail-fast rather than raise an obscure error here?
> @grybmadsci
Document util/argv.py
There are no docs on how to use this module.
</issue>
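
For context, the snippet below is a minimal sketch of the misuse the quoted comment describes, assuming a hypothetical top-level module named `config` (the name is made up; any dotless module path triggers the same failure).

```python
from openhtf.util import argv

parser = argv.ModuleParser()
parser.add_argument('--override-value', action=argv.StoreInModule,
                    default=0, target='config.DEFAULT_VALUE')

# __init__ splits off 'DEFAULT_VALUE', leaving self._tgt_mod == 'config' with no dot.
# When the flag is used, __call__ runs `base, mod = self._tgt_mod.rsplit('.', 1)`
# and raises an obscure ValueError about unpacking instead of a clear format error.
parser.parse_args(['--override-value', '5'])
```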
<code>
[start of openhtf/util/argv.py]
1 """Utilities for handling command line arguments.
2
3 StoreInModule:
4 Enables emulating a gflags-esque API (flag affects global value), but one
5 doesn't necessarily need to use flags to set values.
6
7 Example usage:
8 DEFAULT_VALUE = 0
9 ARG_PARSER = argv.ModuleParser()
10 ARG_PARSER.add_argument(
11 '--override-value', action=argv.StoreInModule,
12 default=DEFAULT_VALUE, target='%s.DEFAULT_VALUE' % __name__)
13
14 Then in an entry point (main() function), use that parser as a parent:
15 parser = argparse.ArgumentParser(parents=[other_module.ARG_PARSER])
16 parser.parse_args()
17 """
18
19 import argparse
20
21
22 def ModuleParser():
23 return argparse.ArgumentParser(add_help=False)
24
25
26 class StoreInModule(argparse.Action):
27
28 def __init__(self, *args, **kwargs):
29 self._tgt_mod, self._tgt_attr = kwargs.pop('target').rsplit('.', 1)
30 proxy_cls = kwargs.pop('proxy', None)
31 if proxy_cls is not None:
32 self._proxy = proxy_cls(*args, **kwargs)
33 super(StoreInModule, self).__init__(*args, **kwargs)
34
35 def __call__(self, parser, namespace, values, option_string=None):
36 if hasattr(self, '_proxy'):
37 values = self._proxy(parser, namespace, values)
38 base, mod = self._tgt_mod.rsplit('.', 1)
39 module = getattr(__import__(base, fromlist=[mod]), mod)
40 setattr(module, self._tgt_attr, values)
41
42
[end of openhtf/util/argv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/openhtf/util/argv.py b/openhtf/util/argv.py
--- a/openhtf/util/argv.py
+++ b/openhtf/util/argv.py
@@ -35,7 +35,10 @@
def __call__(self, parser, namespace, values, option_string=None):
if hasattr(self, '_proxy'):
values = self._proxy(parser, namespace, values)
- base, mod = self._tgt_mod.rsplit('.', 1)
- module = getattr(__import__(base, fromlist=[mod]), mod)
+ if '.' in self._tgt_mod:
+ base, mod = self._tgt_mod.rsplit('.', 1)
+ module = getattr(__import__(base, fromlist=[mod]), mod)
+ else:
+ module = __import__(self._tgt_mod)
setattr(module, self._tgt_attr, values)
|
{"golden_diff": "diff --git a/openhtf/util/argv.py b/openhtf/util/argv.py\n--- a/openhtf/util/argv.py\n+++ b/openhtf/util/argv.py\n@@ -35,7 +35,10 @@\n def __call__(self, parser, namespace, values, option_string=None):\n if hasattr(self, '_proxy'):\n values = self._proxy(parser, namespace, values)\n- base, mod = self._tgt_mod.rsplit('.', 1)\n- module = getattr(__import__(base, fromlist=[mod]), mod)\n+ if '.' in self._tgt_mod:\n+ base, mod = self._tgt_mod.rsplit('.', 1)\n+ module = getattr(__import__(base, fromlist=[mod]), mod)\n+ else:\n+ module = __import__(self._tgt_mod)\n setattr(module, self._tgt_attr, values)\n", "issue": "Nicer failure mode for misuse of StoreInModule.\nSee #389.\n\n> ...it's nonsensical to have no dots, but something one might accidentally do if you forget to do the %s/name business, maybe throw in a quick format check on the inputs of add_argument() to fail-fast rather than raise an obscure error here?\n> @grybmadsci\n\nDocument util/argv.py\nThere are no docs on how to use this module.\n\n", "before_files": [{"content": "\"\"\"Utilities for handling command line arguments.\n\nStoreInModule:\n Enables emulating a gflags-esque API (flag affects global value), but one\n doesn't necessarily need to use flags to set values.\n \n Example usage:\n DEFAULT_VALUE = 0\n ARG_PARSER = argv.ModuleParser()\n ARG_PARSER.add_argument(\n '--override-value', action=argv.StoreInModule,\n default=DEFAULT_VALUE, target='%s.DEFAULT_VALUE' % __name__)\n\n Then in an entry point (main() function), use that parser as a parent:\n parser = argparse.ArgumentParser(parents=[other_module.ARG_PARSER])\n parser.parse_args()\n\"\"\"\n\nimport argparse\n\n\ndef ModuleParser():\n return argparse.ArgumentParser(add_help=False)\n\n\nclass StoreInModule(argparse.Action):\n\n def __init__(self, *args, **kwargs):\n self._tgt_mod, self._tgt_attr = kwargs.pop('target').rsplit('.', 1)\n proxy_cls = kwargs.pop('proxy', None)\n if proxy_cls is not None:\n self._proxy = proxy_cls(*args, **kwargs)\n super(StoreInModule, self).__init__(*args, **kwargs)\n\n def __call__(self, parser, namespace, values, option_string=None):\n if hasattr(self, '_proxy'):\n values = self._proxy(parser, namespace, values)\n base, mod = self._tgt_mod.rsplit('.', 1)\n module = getattr(__import__(base, fromlist=[mod]), mod)\n setattr(module, self._tgt_attr, values)\n\n", "path": "openhtf/util/argv.py"}]}
| 1,039 | 193 |
gh_patches_debug_8626
|
rasdani/github-patches
|
git_diff
|
holoviz__panel-2539
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
panel serve --autoreload does not work consistently with imported modules
#### ALL software version info
- python: 3.9.2
- holoviews: 1.14.4
- jupyter_bokeh: 3.0.2
- ipython 7.22.0
- jupyterlab: 3.0.16
- pyviz-comms: 2.0.1
- MacOS: 11.4
- VSCode Insiders:
- Version: 1.59.0-insider
- Commit: f6df685c62da50886f7540cbf768ed4333d58bea
- Date: 2021-07-13T05:15:28.368Z
- Electron: 12.0.13
- Chrome: 89.0.4389.128
- Node.js: 14.16.0
- V8: 8.9.255.25-electron.0
- OS: Darwin x64 20.5.0
#### Description of expected behavior and the observed behavior
`panel serve --autoreload` does not trigger a reload when saving changes to an imported module.
However, if I first save the actual served module (triggering a reload), *then* subsequent saves of imported modules will also trigger reloads. It's as if the "registry" of imports is not populated when you first start serving, and only becomes populated after the first reload.
#### Complete, minimal, self-contained example code that reproduces the issue
```
# test_panel.py
import holoviews as hv
import panel as pn
data = [('a', 10), ('b', 7)]
def test():
return pn.panel(hv.Bars(data))
# test.py
import test_panel
test_panel.test().servable()
```
Try `panel serve --autoreload` with `test.py`. While serving, change the data values in `test_panel.py` and save. No reload.
Try the same thing again, but this time save `test.py` immediately after you begin serving (you don't have to change anything, just save it to trigger a server reload). Now if you change the values in `test_panel.py`, it will reload.
</issue>
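
As a stopgap while the registration bug is open, the `watch()` helper defined in the listing below can be used to register the imported module's file explicitly, so edits to it are picked up even before the first reload repopulates the module registry. This is a sketch based only on the code shown here, not a documented workaround.

```python
# test.py - workaround sketch (names taken from the listing below and the reproduction above)
from panel.io.reload import watch

import test_panel  # the imported module from the reproduction above

watch(test_panel.__file__)   # add test_panel.py to the watch list up front
test_panel.test().servable()
```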
<code>
[start of panel/io/reload.py]
1 import fnmatch
2 import os
3 import sys
4 import types
5
6 from contextlib import contextmanager
7 from functools import partial
8
9 from .callbacks import PeriodicCallback
10 from .state import state
11
12 _watched_files = set()
13 _modules = set()
14 _callbacks = {}
15
16 # List of paths to ignore
17 DEFAULT_FOLDER_BLACKLIST = [
18 "**/.*",
19 "**/anaconda",
20 "**/anaconda2",
21 "**/anaconda3",
22 "**/dist-packages",
23 "**/miniconda",
24 "**/miniconda2",
25 "**/miniconda3",
26 "**/node_modules",
27 "**/pyenv",
28 "**/site-packages",
29 "**/venv",
30 "**/virtualenv",
31 ]
32
33
34 def in_blacklist(filepath):
35 return any(
36 file_is_in_folder_glob(filepath, blacklisted_folder)
37 for blacklisted_folder in DEFAULT_FOLDER_BLACKLIST
38 )
39
40 def file_is_in_folder_glob(filepath, folderpath_glob):
41 """
42 Test whether a file is in some folder with globbing support.
43
44 Parameters
45 ----------
46 filepath : str
47 A file path.
48 folderpath_glob: str
49 A path to a folder that may include globbing.
50 """
51 # Make the glob always end with "/*" so we match files inside subfolders of
52 # folderpath_glob.
53 if not folderpath_glob.endswith("*"):
54 if folderpath_glob.endswith("/"):
55 folderpath_glob += "*"
56 else:
57 folderpath_glob += "/*"
58
59 file_dir = os.path.dirname(filepath) + "/"
60 return fnmatch.fnmatch(file_dir, folderpath_glob)
61
62 def autoreload_watcher():
63 """
64 Installs a periodic callback which checks for changes in watched
65 files and sys.modules.
66 """
67 cb = partial(_reload_on_update, {})
68 _callbacks[state.curdoc] = pcb = PeriodicCallback(callback=cb)
69 pcb.start()
70
71 def watch(filename):
72 """
73 Add a file to the watch list.
74
75 All imported modules are watched by default.
76 """
77 _watched_files.add(filename)
78
79 @contextmanager
80 def record_modules():
81 """
82 Records modules which are currently imported.
83 """
84 modules = set(sys.modules)
85 yield
86 if _modules:
87 return
88 for module_name in set(sys.modules).difference(modules):
89 if module_name.startswith('bokeh_app'):
90 continue
91 module = sys.modules[module_name]
92 try:
93 spec = getattr(module, "__spec__", None)
94 if spec is None:
95 filepath = getattr(module, "__file__", None)
96 if filepath is None: # no user
97 continue
98 else:
99 filepath = spec.origin
100
101 filepath = os.path.abspath(filepath)
102
103 if filepath is None or in_blacklist(filepath):
104 continue
105
106 if not os.path.isfile(filepath): # e.g. built-in
107 continue
108 _modules.add(module_name)
109 except Exception:
110 continue
111
112 def _reload(module=None):
113 if module is not None:
114 for module in _modules:
115 del sys.modules[module]
116 if state.curdoc in _callbacks:
117 _callbacks[state.curdoc].stop()
118 del _callbacks[state.curdoc]
119 if state.location:
120 state.location.reload = True
121
122 def _check_file(modify_times, path, module=None):
123 try:
124 modified = os.stat(path).st_mtime
125 except Exception:
126 return
127 if path not in modify_times:
128 modify_times[path] = modified
129 return
130 if modify_times[path] != modified:
131 _reload(module)
132 modify_times[path] = modified
133
134 def _reload_on_update(modify_times):
135 for module_name in _modules:
136 # Some modules play games with sys.modules (e.g. email/__init__.py
137 # in the standard library), and occasionally this can cause strange
138 # failures in getattr. Just ignore anything that's not an ordinary
139 # module.
140 if not module_name in sys.modules:
141 continue
142 module = sys.modules[module_name]
143 if not isinstance(module, types.ModuleType):
144 continue
145 path = getattr(module, "__file__", None)
146 if not path:
147 continue
148 if path.endswith(".pyc") or path.endswith(".pyo"):
149 path = path[:-1]
150 _check_file(modify_times, path, module_name)
151 for path in _watched_files:
152 _check_file(modify_times, path)
153
[end of panel/io/reload.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/panel/io/reload.py b/panel/io/reload.py
--- a/panel/io/reload.py
+++ b/panel/io/reload.py
@@ -113,11 +113,12 @@
if module is not None:
for module in _modules:
del sys.modules[module]
- if state.curdoc in _callbacks:
- _callbacks[state.curdoc].stop()
- del _callbacks[state.curdoc]
- if state.location:
- state.location.reload = True
+ for cb in _callbacks.values():
+ cb.stop()
+ _callbacks.clear()
+ state.location.reload = True
+ for loc in state._locations.values():
+ loc.reload = True
def _check_file(modify_times, path, module=None):
try:
|
{"golden_diff": "diff --git a/panel/io/reload.py b/panel/io/reload.py\n--- a/panel/io/reload.py\n+++ b/panel/io/reload.py\n@@ -113,11 +113,12 @@\n if module is not None:\n for module in _modules:\n del sys.modules[module]\n- if state.curdoc in _callbacks:\n- _callbacks[state.curdoc].stop()\n- del _callbacks[state.curdoc]\n- if state.location:\n- state.location.reload = True\n+ for cb in _callbacks.values():\n+ cb.stop()\n+ _callbacks.clear()\n+ state.location.reload = True\n+ for loc in state._locations.values():\n+ loc.reload = True\n \n def _check_file(modify_times, path, module=None):\n try:\n", "issue": "panel serve --autoreload does not work consistently with imported modules\n#### ALL software version info\r\n\r\n- python: 3.9.2\r\n - holoviews: 1.14.4\r\n - jupyter_bokeh: 3.0.2\r\n - ipython 7.22.0\r\n - jupyterlab: 3.0.16\r\n - pyviz-comms: 2.0.1\r\n\r\n- MacOS: 11.4\r\n\r\n- VSCode Insiders: \r\n - Version: 1.59.0-insider\r\n - Commit: f6df685c62da50886f7540cbf768ed4333d58bea\r\n - Date: 2021-07-13T05:15:28.368Z\r\n - Electron: 12.0.13\r\n - Chrome: 89.0.4389.128\r\n - Node.js: 14.16.0\r\n - V8: 8.9.255.25-electron.0\r\n - OS: Darwin x64 20.5.0\r\n\r\n#### Description of expected behavior and the observed behavior\r\n\r\n`panel --autoreload` does not trigger a reload when saving changes to an imported module.\r\n\r\nHowever, if I first save the actual served module (triggering a reload), *then* subsequent saves of imported modules will also trigger reloads. It's as if the \"registry\" of imports is not populated when you first start serving, and only becomes populated after the first reload.\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n\r\n```\r\n# test_panel.py\r\nimport holoviews as hv\r\nimport panel as pn\r\n\r\ndata = [('a', 10), ('b', 7)]\r\ndef test():\r\n return pn.panel(hv.Bars(data))\r\n\r\n# test.py\r\nimport test_panel\r\ntest_panel.test().servable()\r\n```\r\n\r\nTry `panel serve --autoreload` with `test.py`. While serving, change the data values in `test_panel.py` and save. No reload.\r\n\r\nTry the same thing again, but this time save `test.py` immediately after you begin serving (you don't have to change anytime, just save it to trigger a server reload). 
Now if you change the values in `test_panel.py`, it will reload.\n", "before_files": [{"content": "import fnmatch\nimport os\nimport sys\nimport types\n\nfrom contextlib import contextmanager\nfrom functools import partial\n\nfrom .callbacks import PeriodicCallback\nfrom .state import state\n\n_watched_files = set()\n_modules = set()\n_callbacks = {}\n\n# List of paths to ignore\nDEFAULT_FOLDER_BLACKLIST = [\n \"**/.*\",\n \"**/anaconda\",\n \"**/anaconda2\",\n \"**/anaconda3\",\n \"**/dist-packages\",\n \"**/miniconda\",\n \"**/miniconda2\",\n \"**/miniconda3\",\n \"**/node_modules\",\n \"**/pyenv\",\n \"**/site-packages\",\n \"**/venv\",\n \"**/virtualenv\",\n]\n\n\ndef in_blacklist(filepath):\n return any(\n file_is_in_folder_glob(filepath, blacklisted_folder)\n for blacklisted_folder in DEFAULT_FOLDER_BLACKLIST\n )\n\ndef file_is_in_folder_glob(filepath, folderpath_glob):\n \"\"\"\n Test whether a file is in some folder with globbing support.\n\n Parameters\n ----------\n filepath : str\n A file path.\n folderpath_glob: str\n A path to a folder that may include globbing.\n \"\"\"\n # Make the glob always end with \"/*\" so we match files inside subfolders of\n # folderpath_glob.\n if not folderpath_glob.endswith(\"*\"):\n if folderpath_glob.endswith(\"/\"):\n folderpath_glob += \"*\"\n else:\n folderpath_glob += \"/*\"\n\n file_dir = os.path.dirname(filepath) + \"/\"\n return fnmatch.fnmatch(file_dir, folderpath_glob)\n\ndef autoreload_watcher():\n \"\"\"\n Installs a periodic callback which checks for changes in watched\n files and sys.modules.\n \"\"\"\n cb = partial(_reload_on_update, {})\n _callbacks[state.curdoc] = pcb = PeriodicCallback(callback=cb)\n pcb.start()\n\ndef watch(filename):\n \"\"\"\n Add a file to the watch list.\n\n All imported modules are watched by default.\n \"\"\"\n _watched_files.add(filename)\n\n@contextmanager\ndef record_modules():\n \"\"\"\n Records modules which are currently imported.\n \"\"\"\n modules = set(sys.modules)\n yield\n if _modules:\n return\n for module_name in set(sys.modules).difference(modules):\n if module_name.startswith('bokeh_app'):\n continue\n module = sys.modules[module_name]\n try:\n spec = getattr(module, \"__spec__\", None)\n if spec is None:\n filepath = getattr(module, \"__file__\", None)\n if filepath is None: # no user\n continue\n else:\n filepath = spec.origin\n\n filepath = os.path.abspath(filepath)\n\n if filepath is None or in_blacklist(filepath):\n continue\n\n if not os.path.isfile(filepath): # e.g. built-in\n continue\n _modules.add(module_name)\n except Exception:\n continue\n\ndef _reload(module=None):\n if module is not None:\n for module in _modules:\n del sys.modules[module]\n if state.curdoc in _callbacks:\n _callbacks[state.curdoc].stop()\n del _callbacks[state.curdoc]\n if state.location:\n state.location.reload = True\n\ndef _check_file(modify_times, path, module=None):\n try:\n modified = os.stat(path).st_mtime\n except Exception:\n return\n if path not in modify_times:\n modify_times[path] = modified\n return\n if modify_times[path] != modified:\n _reload(module)\n modify_times[path] = modified\n\ndef _reload_on_update(modify_times):\n for module_name in _modules:\n # Some modules play games with sys.modules (e.g. email/__init__.py\n # in the standard library), and occasionally this can cause strange\n # failures in getattr. 
Just ignore anything that's not an ordinary\n # module.\n if not module_name in sys.modules:\n continue\n module = sys.modules[module_name]\n if not isinstance(module, types.ModuleType):\n continue\n path = getattr(module, \"__file__\", None)\n if not path:\n continue\n if path.endswith(\".pyc\") or path.endswith(\".pyo\"):\n path = path[:-1]\n _check_file(modify_times, path, module_name)\n for path in _watched_files:\n _check_file(modify_times, path)\n", "path": "panel/io/reload.py"}]}
| 2,372 | 177 |
gh_patches_debug_45174
|
rasdani/github-patches
|
git_diff
|
pyodide__pyodide-1215
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Performance issues with buffer conversions from python to javascript
Hello @hoodmane, thank you for the magnificent APIs (deep/shallowCopyToJavaScript) that would eliminate the heavy cost of big array conversion from Python to JavaScript. I think the shallowCopy version is designed for memory address reference instead of bulk content copying. But I find the performance not as good as I had imagined, as shown in the following:

It takes about 1~7 seconds for shallowCopyToJavascript() to complete the memory address reference and maybe some necessary metadata copy, I guess. However, that is not adequate for real-time computation. Any suggestions for better conversion performance?
_Originally posted by @daoxian in https://github.com/iodide-project/pyodide/issues/1167#issuecomment-774488338_
</issue>
<code>
[start of docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py]
1 from docutils import nodes
2 from docutils.parsers.rst import Directive, Parser as RstParser
3 from docutils.statemachine import StringList
4 from docutils.utils import new_document
5
6 from collections import OrderedDict
7
8 from sphinx import addnodes
9 from sphinx.util import rst
10 from sphinx.util.docutils import switch_source_input
11 from sphinx.ext.autosummary import autosummary_table, extract_summary
12
13 from sphinx_js.jsdoc import Analyzer as JsAnalyzer
14 from sphinx_js.ir import Function
15 from sphinx_js.parsers import path_and_formal_params, PathVisitor
16 from sphinx_js.renderers import AutoFunctionRenderer, AutoAttributeRenderer
17
18
19 class PyodideAnalyzer:
20 """JsDoc automatically instantiates the JsAnalyzer. Rather than subclassing
21 or monkey patching it, we use composition (see getattr impl).
22
23 The main extra thing we do is reorganize the doclets based on our globals /
24 functions / attributes scheme. This we use to subdivide the sections in our
25 summary. We store these in the "js_docs" field which is the only field that
26 we access later.
27 """
28
29 def __init__(self, analyzer: JsAnalyzer) -> None:
30 self.inner = analyzer
31 self.create_js_doclets()
32
33 def __getattr__(self, key):
34 return getattr(self.inner, key)
35
36 def longname_to_path(self, name):
37 """Convert the longname field produced by jsdoc to a path appropriate to use
38 with _sphinxjs_analyzer.get_object. Based on:
39 https://github.com/mozilla/sphinx-js/blob/3.1/sphinx_js/jsdoc.py#L181
40 """
41 return PathVisitor().visit(path_and_formal_params["path"].parse(name))
42
43 def get_object_from_json(self, json):
44 """Look up the JsDoc IR object corresponding to this object. We use the
45 "kind" field to decide whether the object is a "function" or an
46 "attribute". We use longname_to_path to convert the path into a list of
47 path components which JsAnalyzer.get_object requires.
48 """
49 path = self.longname_to_path(json["longname"])
50 kind = "function" if json["kind"] == "function" else "attribute"
51 obj = self.inner.get_object(path, kind)
52 obj.kind = kind
53 return obj
54
55 def create_js_doclets(self):
56 """Search through the doclets generated by JsDoc and categorize them by
57 summary section. Skip docs labeled as "@private".
58 """
59
60 def get_val():
61 return OrderedDict([["attribute", []], ["function", []]])
62
63 self.js_docs = {key: get_val() for key in ["globals", "pyodide", "PyProxy"]}
64 items = {"PyProxy": []}
65 for (key, group) in self._doclets_by_class.items():
66 key = [x for x in key if "/" not in x]
67 if key[-1] == "globalThis":
68 items["globals"] = group
69 if key[0] == "pyodide." and key[-1] == "Module":
70 items["pyodide"] = group
71 if key[0] == "pyproxy.":
72 items["PyProxy"] += group
73
74 for key, value in items.items():
75 for json in value:
76 if json.get("access", None) == "private":
77 continue
78 obj = self.get_object_from_json(json)
79 if obj.name[0] == '"' and obj.name[-1] == '"':
80 obj.name = "[" + obj.name[1:-1] + "]"
81 self.js_docs[key][obj.kind].append(obj)
82
83
84 def get_jsdoc_content_directive(app):
85 """These directives need to close over app """
86
87 class JsDocContent(Directive):
88 """A directive that just dumps a summary table in place. There are no
89 options, it only prints the one thing, we control the behavior from
90 here
91 """
92
93 required_arguments = 1
94
95 def get_rst(self, obj):
96 """Grab the appropriate renderer and render us to rst.
97 JsDoc also has an AutoClassRenderer which may be useful in the future."""
98 if isinstance(obj, Function):
99 renderer = AutoFunctionRenderer
100 else:
101 renderer = AutoAttributeRenderer
102 return renderer(self, app, arguments=["dummy"]).rst(
103 [obj.name], obj, use_short_name=False
104 )
105
106 def get_rst_for_group(self, objects):
107 return [self.get_rst(obj) for obj in objects]
108
109 def parse_rst(self, rst):
110 """We produce a bunch of rst but directives are supposed to output
111 docutils trees. This is a helper that converts the rst to docutils.
112 """
113 settings = self.state.document.settings
114 doc = new_document("", settings)
115 RstParser().parse(rst, doc)
116 return doc.children
117
118 def run(self):
119 module = self.arguments[0]
120 values = app._sphinxjs_analyzer.js_docs[module]
121 rst = []
122 rst.append([f".. js:module:: {module}"])
123 for group in values.values():
124 rst.append(self.get_rst_for_group(group))
125 joined_rst = "\n\n".join(["\n\n".join(r) for r in rst])
126 return self.parse_rst(joined_rst)
127
128 return JsDocContent
129
130
131 def get_jsdoc_summary_directive(app):
132 class JsDocSummary(Directive):
133 """A directive that just dumps the Js API docs in place. There are no
134 options, it only prints the one thing, we control the behavior from
135 here
136 """
137
138 required_arguments = 1
139
140 def run(self):
141 result = []
142 module = self.arguments[0]
143 value = app._sphinxjs_analyzer.js_docs[module]
144 for group_name, group_objects in value.items():
145 if not group_objects:
146 continue
147 result.append(self.format_heading(group_name.title() + "s:"))
148 table_items = self.get_summary_table(module, group_objects)
149 table_markup = self.format_table(table_items)
150 result.extend(table_markup)
151 return result
152
153 def format_heading(self, text):
154 """Make a section heading. This corresponds to the rst: "**Heading:**"
155 autodocsumm uses headings like that, so this will match that style.
156 """
157 heading = nodes.paragraph("")
158 strong = nodes.strong("")
159 strong.append(nodes.Text(text))
160 heading.append(strong)
161 return heading
162
163 def extract_summary(self, descr):
164 """Wrapper around autosummary extract_summary that is easier to use.
165 It seems like colons need escaping for some reason.
166 """
167 colon_esc = "esccolon\\\xafhoa:"
168 return extract_summary(
169 [descr.replace(":", colon_esc)], self.state.document
170 ).replace(colon_esc, ":")
171
172 def get_sig(self, obj):
173 """If the object is a function, get its signature (as figured by JsDoc)"""
174 if isinstance(obj, Function):
175 return AutoFunctionRenderer(
176 self, app, arguments=["dummy"]
177 )._formal_params(obj)
178 else:
179 return ""
180
181 def get_summary_row(self, pkgname, obj):
182 """Get the summary table row for obj.
183
184 The output is designed to be input to format_table. The link name
185 needs to be set up so that :any:`link_name` makes a link to the
186 actual api docs for this object.
187 """
188 sig = self.get_sig(obj)
189 display_name = obj.name
190 summary = self.extract_summary(obj.description)
191 link_name = pkgname + "." + display_name
192 return (display_name, sig, summary, link_name)
193
194 def get_summary_table(self, pkgname, group):
195 """Get the data for a summary table. Return value is set up to be an
196 argument of format_table.
197 """
198 return [self.get_summary_row(pkgname, obj) for obj in group]
199
200 # This following method is copied almost verbatim from autosummary
201 # (where it is called get_table).
202 #
203 # We have to change the value of one string: qualifier = 'obj ==>
204 # qualifier = 'any'
205 # https://github.com/sphinx-doc/sphinx/blob/3.x/sphinx/ext/autosummary/__init__.py#L392
206 def format_table(self, items):
207 """Generate a proper list of table nodes for autosummary:: directive.
208
209 *items* is a list produced by :meth:`get_items`.
210 """
211 table_spec = addnodes.tabular_col_spec()
212 table_spec["spec"] = r"\X{1}{2}\X{1}{2}"
213
214 table = autosummary_table("")
215 real_table = nodes.table("", classes=["longtable"])
216 table.append(real_table)
217 group = nodes.tgroup("", cols=2)
218 real_table.append(group)
219 group.append(nodes.colspec("", colwidth=10))
220 group.append(nodes.colspec("", colwidth=90))
221 body = nodes.tbody("")
222 group.append(body)
223
224 def append_row(*column_texts: str) -> None:
225 row = nodes.row("")
226 source, line = self.state_machine.get_source_and_line()
227 for text in column_texts:
228 node = nodes.paragraph("")
229 vl = StringList()
230 vl.append(text, "%s:%d:<autosummary>" % (source, line))
231 with switch_source_input(self.state, vl):
232 self.state.nested_parse(vl, 0, node)
233 try:
234 if isinstance(node[0], nodes.paragraph):
235 node = node[0]
236 except IndexError:
237 pass
238 row.append(nodes.entry("", node))
239 body.append(row)
240
241 for name, sig, summary, real_name in items:
242 qualifier = "any" # <== Only thing changed from autosummary version
243 if "nosignatures" not in self.options:
244 col1 = ":%s:`%s <%s>`\\ %s" % (
245 qualifier,
246 name,
247 real_name,
248 rst.escape(sig),
249 )
250 else:
251 col1 = ":%s:`%s <%s>`" % (qualifier, name, real_name)
252 col2 = summary
253 append_row(col1, col2)
254
255 return [table_spec, table]
256
257 return JsDocSummary
258
[end of docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py b/docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py
--- a/docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py
+++ b/docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py
@@ -4,6 +4,7 @@
from docutils.utils import new_document
from collections import OrderedDict
+import re
from sphinx import addnodes
from sphinx.util import rst
@@ -11,9 +12,13 @@
from sphinx.ext.autosummary import autosummary_table, extract_summary
from sphinx_js.jsdoc import Analyzer as JsAnalyzer
-from sphinx_js.ir import Function
+from sphinx_js.ir import Class, Function
from sphinx_js.parsers import path_and_formal_params, PathVisitor
-from sphinx_js.renderers import AutoFunctionRenderer, AutoAttributeRenderer
+from sphinx_js.renderers import (
+ AutoFunctionRenderer,
+ AutoAttributeRenderer,
+ AutoClassRenderer,
+)
class PyodideAnalyzer:
@@ -47,7 +52,12 @@
path components which JsAnalyzer.get_object requires.
"""
path = self.longname_to_path(json["longname"])
- kind = "function" if json["kind"] == "function" else "attribute"
+ if json["kind"] == "function":
+ kind = "function"
+ elif json["kind"] == "class":
+ kind = "class"
+ else:
+ kind = "attribute"
obj = self.inner.get_object(path, kind)
obj.kind = kind
return obj
@@ -58,12 +68,16 @@
"""
def get_val():
- return OrderedDict([["attribute", []], ["function", []]])
+ return OrderedDict([["attribute", []], ["function", []], ["class", []]])
self.js_docs = {key: get_val() for key in ["globals", "pyodide", "PyProxy"]}
items = {"PyProxy": []}
for (key, group) in self._doclets_by_class.items():
key = [x for x in key if "/" not in x]
+ if key[-1] == "PyBuffer":
+ # PyBuffer stuff is documented as a class. Would be nice to have
+ # a less ad hoc way to deal with this...
+ continue
if key[-1] == "globalThis":
items["globals"] = group
if key[0] == "pyodide." and key[-1] == "Module":
@@ -76,7 +90,13 @@
if json.get("access", None) == "private":
continue
obj = self.get_object_from_json(json)
+ if isinstance(obj, Class):
+ # sphinx-jsdoc messes up array types. Fix them.
+ for x in obj.members:
+ if hasattr(x, "type"):
+ x.type = re.sub("Array\.<([a-zA-Z_0-9]*)>", r"\1[]", x.type)
if obj.name[0] == '"' and obj.name[-1] == '"':
+ # sphinx-jsdoc messes up Symbol attributes. Fix them.
obj.name = "[" + obj.name[1:-1] + "]"
self.js_docs[key][obj.kind].append(obj)
@@ -97,11 +117,13 @@
JsDoc also has an AutoClassRenderer which may be useful in the future."""
if isinstance(obj, Function):
renderer = AutoFunctionRenderer
+ elif isinstance(obj, Class):
+ renderer = AutoClassRenderer
else:
renderer = AutoAttributeRenderer
- return renderer(self, app, arguments=["dummy"]).rst(
- [obj.name], obj, use_short_name=False
- )
+ return renderer(
+ self, app, arguments=["dummy"], options={"members": ["*"]}
+ ).rst([obj.name], obj, use_short_name=False)
def get_rst_for_group(self, objects):
return [self.get_rst(obj) for obj in objects]
@@ -144,6 +166,9 @@
for group_name, group_objects in value.items():
if not group_objects:
continue
+ if group_name == "class":
+ # Plural of class is "classes" not "classs"
+ group_name += "e"
result.append(self.format_heading(group_name.title() + "s:"))
table_items = self.get_summary_table(module, group_objects)
table_markup = self.format_table(table_items)
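
One piece of the patch that is easy to miss is the regex that rewrites jsdoc-style array types before rendering. The standalone sketch below shows its effect; the example type string is made up.

```python
import re

# Same substitution as in the patch: "Array.<T>" becomes "T[]".
print(re.sub(r"Array\.<([a-zA-Z_0-9]*)>", r"\1[]", "Array.<Uint8Array>"))
# -> Uint8Array[]
```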
|
{"golden_diff": "diff --git a/docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py b/docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py\n--- a/docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py\n+++ b/docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py\n@@ -4,6 +4,7 @@\n from docutils.utils import new_document\n \n from collections import OrderedDict\n+import re\n \n from sphinx import addnodes\n from sphinx.util import rst\n@@ -11,9 +12,13 @@\n from sphinx.ext.autosummary import autosummary_table, extract_summary\n \n from sphinx_js.jsdoc import Analyzer as JsAnalyzer\n-from sphinx_js.ir import Function\n+from sphinx_js.ir import Class, Function\n from sphinx_js.parsers import path_and_formal_params, PathVisitor\n-from sphinx_js.renderers import AutoFunctionRenderer, AutoAttributeRenderer\n+from sphinx_js.renderers import (\n+ AutoFunctionRenderer,\n+ AutoAttributeRenderer,\n+ AutoClassRenderer,\n+)\n \n \n class PyodideAnalyzer:\n@@ -47,7 +52,12 @@\n path components which JsAnalyzer.get_object requires.\n \"\"\"\n path = self.longname_to_path(json[\"longname\"])\n- kind = \"function\" if json[\"kind\"] == \"function\" else \"attribute\"\n+ if json[\"kind\"] == \"function\":\n+ kind = \"function\"\n+ elif json[\"kind\"] == \"class\":\n+ kind = \"class\"\n+ else:\n+ kind = \"attribute\"\n obj = self.inner.get_object(path, kind)\n obj.kind = kind\n return obj\n@@ -58,12 +68,16 @@\n \"\"\"\n \n def get_val():\n- return OrderedDict([[\"attribute\", []], [\"function\", []]])\n+ return OrderedDict([[\"attribute\", []], [\"function\", []], [\"class\", []]])\n \n self.js_docs = {key: get_val() for key in [\"globals\", \"pyodide\", \"PyProxy\"]}\n items = {\"PyProxy\": []}\n for (key, group) in self._doclets_by_class.items():\n key = [x for x in key if \"/\" not in x]\n+ if key[-1] == \"PyBuffer\":\n+ # PyBuffer stuff is documented as a class. Would be nice to have\n+ # a less ad hoc way to deal with this...\n+ continue\n if key[-1] == \"globalThis\":\n items[\"globals\"] = group\n if key[0] == \"pyodide.\" and key[-1] == \"Module\":\n@@ -76,7 +90,13 @@\n if json.get(\"access\", None) == \"private\":\n continue\n obj = self.get_object_from_json(json)\n+ if isinstance(obj, Class):\n+ # sphinx-jsdoc messes up array types. Fix them.\n+ for x in obj.members:\n+ if hasattr(x, \"type\"):\n+ x.type = re.sub(\"Array\\.<([a-zA-Z_0-9]*)>\", r\"\\1[]\", x.type)\n if obj.name[0] == '\"' and obj.name[-1] == '\"':\n+ # sphinx-jsdoc messes up Symbol attributes. 
Fix them.\n obj.name = \"[\" + obj.name[1:-1] + \"]\"\n self.js_docs[key][obj.kind].append(obj)\n \n@@ -97,11 +117,13 @@\n JsDoc also has an AutoClassRenderer which may be useful in the future.\"\"\"\n if isinstance(obj, Function):\n renderer = AutoFunctionRenderer\n+ elif isinstance(obj, Class):\n+ renderer = AutoClassRenderer\n else:\n renderer = AutoAttributeRenderer\n- return renderer(self, app, arguments=[\"dummy\"]).rst(\n- [obj.name], obj, use_short_name=False\n- )\n+ return renderer(\n+ self, app, arguments=[\"dummy\"], options={\"members\": [\"*\"]}\n+ ).rst([obj.name], obj, use_short_name=False)\n \n def get_rst_for_group(self, objects):\n return [self.get_rst(obj) for obj in objects]\n@@ -144,6 +166,9 @@\n for group_name, group_objects in value.items():\n if not group_objects:\n continue\n+ if group_name == \"class\":\n+ # Plural of class is \"classes\" not \"classs\"\n+ group_name += \"e\"\n result.append(self.format_heading(group_name.title() + \"s:\"))\n table_items = self.get_summary_table(module, group_objects)\n table_markup = self.format_table(table_items)\n", "issue": "Performance issues with buffer conversions from python to javascript\nHello @hoodmane, thank you for the magnificent APIs (deep/shallowCopyToJavaScript) that would eliminate the heavy cost of big array conversion from python to Javascript. I think the shallowCopy version is designed for memory address reference instead of bulks of content copy. But I find the perfomance not so good as I've imagined, which is displayed as the following:\r\n\r\n\r\nIt takes about 1~7 seconds for shallowCopyToJavascript() to complete the memory address reference and maybe some necessary meta data copy I guess. However, it's not adequate for a realtime computation. Any suggestions for better conversion performance?\r\n\r\n_Originally posted by @daoxian in https://github.com/iodide-project/pyodide/issues/1167#issuecomment-774488338_\n", "before_files": [{"content": "from docutils import nodes\nfrom docutils.parsers.rst import Directive, Parser as RstParser\nfrom docutils.statemachine import StringList\nfrom docutils.utils import new_document\n\nfrom collections import OrderedDict\n\nfrom sphinx import addnodes\nfrom sphinx.util import rst\nfrom sphinx.util.docutils import switch_source_input\nfrom sphinx.ext.autosummary import autosummary_table, extract_summary\n\nfrom sphinx_js.jsdoc import Analyzer as JsAnalyzer\nfrom sphinx_js.ir import Function\nfrom sphinx_js.parsers import path_and_formal_params, PathVisitor\nfrom sphinx_js.renderers import AutoFunctionRenderer, AutoAttributeRenderer\n\n\nclass PyodideAnalyzer:\n \"\"\"JsDoc automatically instantiates the JsAnalyzer. Rather than subclassing\n or monkey patching it, we use composition (see getattr impl).\n\n The main extra thing we do is reorganize the doclets based on our globals /\n functions / attributes scheme. This we use to subdivide the sections in our\n summary. We store these in the \"js_docs\" field which is the only field that\n we access later.\n \"\"\"\n\n def __init__(self, analyzer: JsAnalyzer) -> None:\n self.inner = analyzer\n self.create_js_doclets()\n\n def __getattr__(self, key):\n return getattr(self.inner, key)\n\n def longname_to_path(self, name):\n \"\"\"Convert the longname field produced by jsdoc to a path appropriate to use\n with _sphinxjs_analyzer.get_object. 
Based on:\n https://github.com/mozilla/sphinx-js/blob/3.1/sphinx_js/jsdoc.py#L181\n \"\"\"\n return PathVisitor().visit(path_and_formal_params[\"path\"].parse(name))\n\n def get_object_from_json(self, json):\n \"\"\"Look up the JsDoc IR object corresponding to this object. We use the\n \"kind\" field to decide whether the object is a \"function\" or an\n \"attribute\". We use longname_to_path to convert the path into a list of\n path components which JsAnalyzer.get_object requires.\n \"\"\"\n path = self.longname_to_path(json[\"longname\"])\n kind = \"function\" if json[\"kind\"] == \"function\" else \"attribute\"\n obj = self.inner.get_object(path, kind)\n obj.kind = kind\n return obj\n\n def create_js_doclets(self):\n \"\"\"Search through the doclets generated by JsDoc and categorize them by\n summary section. Skip docs labeled as \"@private\".\n \"\"\"\n\n def get_val():\n return OrderedDict([[\"attribute\", []], [\"function\", []]])\n\n self.js_docs = {key: get_val() for key in [\"globals\", \"pyodide\", \"PyProxy\"]}\n items = {\"PyProxy\": []}\n for (key, group) in self._doclets_by_class.items():\n key = [x for x in key if \"/\" not in x]\n if key[-1] == \"globalThis\":\n items[\"globals\"] = group\n if key[0] == \"pyodide.\" and key[-1] == \"Module\":\n items[\"pyodide\"] = group\n if key[0] == \"pyproxy.\":\n items[\"PyProxy\"] += group\n\n for key, value in items.items():\n for json in value:\n if json.get(\"access\", None) == \"private\":\n continue\n obj = self.get_object_from_json(json)\n if obj.name[0] == '\"' and obj.name[-1] == '\"':\n obj.name = \"[\" + obj.name[1:-1] + \"]\"\n self.js_docs[key][obj.kind].append(obj)\n\n\ndef get_jsdoc_content_directive(app):\n \"\"\"These directives need to close over app \"\"\"\n\n class JsDocContent(Directive):\n \"\"\"A directive that just dumps a summary table in place. There are no\n options, it only prints the one thing, we control the behavior from\n here\n \"\"\"\n\n required_arguments = 1\n\n def get_rst(self, obj):\n \"\"\"Grab the appropriate renderer and render us to rst.\n JsDoc also has an AutoClassRenderer which may be useful in the future.\"\"\"\n if isinstance(obj, Function):\n renderer = AutoFunctionRenderer\n else:\n renderer = AutoAttributeRenderer\n return renderer(self, app, arguments=[\"dummy\"]).rst(\n [obj.name], obj, use_short_name=False\n )\n\n def get_rst_for_group(self, objects):\n return [self.get_rst(obj) for obj in objects]\n\n def parse_rst(self, rst):\n \"\"\"We produce a bunch of rst but directives are supposed to output\n docutils trees. This is a helper that converts the rst to docutils.\n \"\"\"\n settings = self.state.document.settings\n doc = new_document(\"\", settings)\n RstParser().parse(rst, doc)\n return doc.children\n\n def run(self):\n module = self.arguments[0]\n values = app._sphinxjs_analyzer.js_docs[module]\n rst = []\n rst.append([f\".. js:module:: {module}\"])\n for group in values.values():\n rst.append(self.get_rst_for_group(group))\n joined_rst = \"\\n\\n\".join([\"\\n\\n\".join(r) for r in rst])\n return self.parse_rst(joined_rst)\n\n return JsDocContent\n\n\ndef get_jsdoc_summary_directive(app):\n class JsDocSummary(Directive):\n \"\"\"A directive that just dumps the Js API docs in place. 
There are no\n options, it only prints the one thing, we control the behavior from\n here\n \"\"\"\n\n required_arguments = 1\n\n def run(self):\n result = []\n module = self.arguments[0]\n value = app._sphinxjs_analyzer.js_docs[module]\n for group_name, group_objects in value.items():\n if not group_objects:\n continue\n result.append(self.format_heading(group_name.title() + \"s:\"))\n table_items = self.get_summary_table(module, group_objects)\n table_markup = self.format_table(table_items)\n result.extend(table_markup)\n return result\n\n def format_heading(self, text):\n \"\"\"Make a section heading. This corresponds to the rst: \"**Heading:**\"\n autodocsumm uses headings like that, so this will match that style.\n \"\"\"\n heading = nodes.paragraph(\"\")\n strong = nodes.strong(\"\")\n strong.append(nodes.Text(text))\n heading.append(strong)\n return heading\n\n def extract_summary(self, descr):\n \"\"\"Wrapper around autosummary extract_summary that is easier to use.\n It seems like colons need escaping for some reason.\n \"\"\"\n colon_esc = \"esccolon\\\\\\xafhoa:\"\n return extract_summary(\n [descr.replace(\":\", colon_esc)], self.state.document\n ).replace(colon_esc, \":\")\n\n def get_sig(self, obj):\n \"\"\"If the object is a function, get its signature (as figured by JsDoc)\"\"\"\n if isinstance(obj, Function):\n return AutoFunctionRenderer(\n self, app, arguments=[\"dummy\"]\n )._formal_params(obj)\n else:\n return \"\"\n\n def get_summary_row(self, pkgname, obj):\n \"\"\"Get the summary table row for obj.\n\n The output is designed to be input to format_table. The link name\n needs to be set up so that :any:`link_name` makes a link to the\n actual api docs for this object.\n \"\"\"\n sig = self.get_sig(obj)\n display_name = obj.name\n summary = self.extract_summary(obj.description)\n link_name = pkgname + \".\" + display_name\n return (display_name, sig, summary, link_name)\n\n def get_summary_table(self, pkgname, group):\n \"\"\"Get the data for a summary table. 
Return value is set up to be an\n argument of format_table.\n \"\"\"\n return [self.get_summary_row(pkgname, obj) for obj in group]\n\n # This following method is copied almost verbatim from autosummary\n # (where it is called get_table).\n #\n # We have to change the value of one string: qualifier = 'obj ==>\n # qualifier = 'any'\n # https://github.com/sphinx-doc/sphinx/blob/3.x/sphinx/ext/autosummary/__init__.py#L392\n def format_table(self, items):\n \"\"\"Generate a proper list of table nodes for autosummary:: directive.\n\n *items* is a list produced by :meth:`get_items`.\n \"\"\"\n table_spec = addnodes.tabular_col_spec()\n table_spec[\"spec\"] = r\"\\X{1}{2}\\X{1}{2}\"\n\n table = autosummary_table(\"\")\n real_table = nodes.table(\"\", classes=[\"longtable\"])\n table.append(real_table)\n group = nodes.tgroup(\"\", cols=2)\n real_table.append(group)\n group.append(nodes.colspec(\"\", colwidth=10))\n group.append(nodes.colspec(\"\", colwidth=90))\n body = nodes.tbody(\"\")\n group.append(body)\n\n def append_row(*column_texts: str) -> None:\n row = nodes.row(\"\")\n source, line = self.state_machine.get_source_and_line()\n for text in column_texts:\n node = nodes.paragraph(\"\")\n vl = StringList()\n vl.append(text, \"%s:%d:<autosummary>\" % (source, line))\n with switch_source_input(self.state, vl):\n self.state.nested_parse(vl, 0, node)\n try:\n if isinstance(node[0], nodes.paragraph):\n node = node[0]\n except IndexError:\n pass\n row.append(nodes.entry(\"\", node))\n body.append(row)\n\n for name, sig, summary, real_name in items:\n qualifier = \"any\" # <== Only thing changed from autosummary version\n if \"nosignatures\" not in self.options:\n col1 = \":%s:`%s <%s>`\\\\ %s\" % (\n qualifier,\n name,\n real_name,\n rst.escape(sig),\n )\n else:\n col1 = \":%s:`%s <%s>`\" % (qualifier, name, real_name)\n col2 = summary\n append_row(col1, col2)\n\n return [table_spec, table]\n\n return JsDocSummary\n", "path": "docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py"}]}
| 3,710 | 1,015 |
gh_patches_debug_39557
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-1093
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a show_versions function
Similar to `pandas.show_versions()` and `sklearn.show_versions()`, it would be nice to have such a function for GeoPandas. We can probably base the code on those examples.
It could list the versions of the direct python dependencies, but also try to show the versions of the underlying GEOS / GDAL libraries.
</issue>
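For illustration, a minimal sketch of how the underlying GEOS / GDAL / PROJ versions could be collected. It assumes the pyproj, shapely and fiona wrappers expose these attributes (recent releases do); a full implementation would also report the data directories and degrade gracefully per missing dependency.

```python
# Sketch only, not the final implementation.
def _get_c_lib_versions():
    info = {}
    try:
        import pyproj
        info["PROJ"] = pyproj.proj_version_str
    except Exception:
        info["PROJ"] = None
    try:
        import shapely._buildcfg
        # version of GEOS that shapely was built against
        info["GEOS"] = "{}.{}.{}".format(*shapely._buildcfg.geos_version)
    except Exception:
        info["GEOS"] = None
    try:
        import fiona
        # GDAL release used by fiona
        info["GDAL"] = fiona.env.get_gdal_release_name()
    except Exception:
        info["GDAL"] = None
    return info
```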
<code>
[start of geopandas/tools/_show_versions.py]
1 import platform
2 import sys
3 import importlib
4
5
6 def _get_sys_info():
7 """System information
8
9 Returns
10 -------
11 sys_info : dict
12 system and Python version information
13 """
14 python = sys.version.replace('\n', ' ')
15
16 blob = [
17 ("python", python),
18 ('executable', sys.executable),
19 ("machine", platform.platform()),
20 ]
21
22 return dict(blob)
23
24
25 def _get_deps_info():
26 """Overview of the installed version of main dependencies
27
28 Returns
29 -------
30 deps_info: dict
31 version information on relevant Python libraries
32 """
33 deps = [
34 "geopandas",
35 "pandas",
36 "fiona",
37 "osgeo.gdal",
38 "numpy",
39 "shapely",
40 "rtree",
41 "pyproj",
42 "matplotlib",
43 "mapclassify",
44 "pysal",
45 "geopy",
46 "psycopg2",
47 "descartes"
48 ]
49
50 def get_version(module):
51 return module.__version__
52
53 deps_info = {}
54
55 for modname in deps:
56 try:
57 if modname in sys.modules:
58 mod = sys.modules[modname]
59 else:
60 mod = importlib.import_module(modname)
61 ver = get_version(mod)
62 deps_info[modname] = ver
63 except ImportError:
64 deps_info[modname] = None
65 except AttributeError:
66 deps_info[modname] = None
67
68 return deps_info
69
70
71 def show_versions():
72 """
73 Print system information and installed module versions.
74
75 Example
76 -------
77 > python -c "import geopandas; geopandas.show_versions()"
78 """
79 sys_info = _get_sys_info()
80 deps_info = _get_deps_info()
81
82 maxlen = max(len(x) for x in deps_info)
83 tpl = "{{k:<{maxlen}}}: {{stat}}".format(maxlen=maxlen)
84 print("\nSYSTEM INFO")
85 print("-----------")
86 for k, stat in sys_info.items():
87 print(tpl.format(k=k, stat=stat))
88 print("\nPYTHON DEPENDENCIES")
89 print("-------------------")
90 for k, stat in deps_info.items():
91 print(tpl.format(k=k, stat=stat))
92
[end of geopandas/tools/_show_versions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/geopandas/tools/_show_versions.py b/geopandas/tools/_show_versions.py
--- a/geopandas/tools/_show_versions.py
+++ b/geopandas/tools/_show_versions.py
@@ -22,6 +22,53 @@
return dict(blob)
+def _get_C_info():
+ """Information on system PROJ, GDAL, GEOS
+ Returns
+ -------
+ c_info: dict
+ system PROJ information
+ """
+ try:
+ import pyproj
+ from pyproj.exceptions import DataDirError
+ proj = pyproj.proj_version_str
+ try:
+ proj_dir = pyproj.datadir.get_data_dir()
+ except DataDirError:
+ proj_dir = None
+ except Exception:
+ proj = None
+ proj_dir = None
+
+ try:
+ import shapely._buildcfg
+ geos = '{}.{}.{}'.format(*shapely._buildcfg.geos_version)
+ geos_dir = shapely._buildcfg.geos_library_path
+ except Exception:
+ geos = None
+ geos_dir = None
+
+ try:
+ import fiona
+ gdal = fiona.env.get_gdal_release_name()
+ gdal_dir = fiona.env.GDALDataFinder().search()
+ except Exception:
+ gdal = None
+ gdal_dir = None
+
+ blob = [
+ ("GEOS", geos),
+ ("GEOS lib", geos_dir),
+ ("GDAL", gdal),
+ ("GDAL dir", gdal_dir),
+ ("PROJ", proj),
+ ("PROJ data dir", proj_dir)
+ ]
+
+ return dict(blob)
+
+
def _get_deps_info():
"""Overview of the installed version of main dependencies
@@ -34,7 +81,6 @@
"geopandas",
"pandas",
"fiona",
- "osgeo.gdal",
"numpy",
"shapely",
"rtree",
@@ -43,8 +89,7 @@
"mapclassify",
"pysal",
"geopy",
- "psycopg2",
- "descartes"
+ "psycopg2"
]
def get_version(module):
@@ -60,9 +105,7 @@
mod = importlib.import_module(modname)
ver = get_version(mod)
deps_info[modname] = ver
- except ImportError:
- deps_info[modname] = None
- except AttributeError:
+ except Exception:
deps_info[modname] = None
return deps_info
@@ -78,6 +121,7 @@
"""
sys_info = _get_sys_info()
deps_info = _get_deps_info()
+ proj_info = _get_C_info()
maxlen = max(len(x) for x in deps_info)
tpl = "{{k:<{maxlen}}}: {{stat}}".format(maxlen=maxlen)
@@ -85,6 +129,10 @@
print("-----------")
for k, stat in sys_info.items():
print(tpl.format(k=k, stat=stat))
+ print("\nGEOS, GDAL, PROJ INFO")
+ print("---------------------")
+ for k, stat in proj_info.items():
+ print(tpl.format(k=k, stat=stat))
print("\nPYTHON DEPENDENCIES")
print("-------------------")
for k, stat in deps_info.items():
|
{"golden_diff": "diff --git a/geopandas/tools/_show_versions.py b/geopandas/tools/_show_versions.py\n--- a/geopandas/tools/_show_versions.py\n+++ b/geopandas/tools/_show_versions.py\n@@ -22,6 +22,53 @@\n return dict(blob)\n \n \n+def _get_C_info():\n+ \"\"\"Information on system PROJ, GDAL, GEOS\n+ Returns\n+ -------\n+ c_info: dict\n+ system PROJ information\n+ \"\"\"\n+ try:\n+ import pyproj\n+ from pyproj.exceptions import DataDirError\n+ proj = pyproj.proj_version_str\n+ try:\n+ proj_dir = pyproj.datadir.get_data_dir()\n+ except DataDirError:\n+ proj_dir = None\n+ except Exception:\n+ proj = None\n+ proj_dir = None\n+\n+ try:\n+ import shapely._buildcfg\n+ geos = '{}.{}.{}'.format(*shapely._buildcfg.geos_version)\n+ geos_dir = shapely._buildcfg.geos_library_path\n+ except Exception:\n+ geos = None\n+ geos_dir = None\n+\n+ try:\n+ import fiona\n+ gdal = fiona.env.get_gdal_release_name()\n+ gdal_dir = fiona.env.GDALDataFinder().search()\n+ except Exception:\n+ gdal = None\n+ gdal_dir = None\n+\n+ blob = [\n+ (\"GEOS\", geos),\n+ (\"GEOS lib\", geos_dir),\n+ (\"GDAL\", gdal),\n+ (\"GDAL dir\", gdal_dir),\n+ (\"PROJ\", proj),\n+ (\"PROJ data dir\", proj_dir)\n+ ]\n+\n+ return dict(blob)\n+\n+\n def _get_deps_info():\n \"\"\"Overview of the installed version of main dependencies\n \n@@ -34,7 +81,6 @@\n \"geopandas\",\n \"pandas\",\n \"fiona\",\n- \"osgeo.gdal\",\n \"numpy\",\n \"shapely\",\n \"rtree\",\n@@ -43,8 +89,7 @@\n \"mapclassify\",\n \"pysal\",\n \"geopy\",\n- \"psycopg2\",\n- \"descartes\"\n+ \"psycopg2\"\n ]\n \n def get_version(module):\n@@ -60,9 +105,7 @@\n mod = importlib.import_module(modname)\n ver = get_version(mod)\n deps_info[modname] = ver\n- except ImportError:\n- deps_info[modname] = None\n- except AttributeError:\n+ except Exception:\n deps_info[modname] = None\n \n return deps_info\n@@ -78,6 +121,7 @@\n \"\"\"\n sys_info = _get_sys_info()\n deps_info = _get_deps_info()\n+ proj_info = _get_C_info()\n \n maxlen = max(len(x) for x in deps_info)\n tpl = \"{{k:<{maxlen}}}: {{stat}}\".format(maxlen=maxlen)\n@@ -85,6 +129,10 @@\n print(\"-----------\")\n for k, stat in sys_info.items():\n print(tpl.format(k=k, stat=stat))\n+ print(\"\\nGEOS, GDAL, PROJ INFO\")\n+ print(\"---------------------\")\n+ for k, stat in proj_info.items():\n+ print(tpl.format(k=k, stat=stat))\n print(\"\\nPYTHON DEPENDENCIES\")\n print(\"-------------------\")\n for k, stat in deps_info.items():\n", "issue": "Add a show_versions function\nSimilarly to `pandas.show_versions()` and `sklearn.show_versions()`, it would be nice to have a function like that for GeoPandas. We can probably base the code on those examples. 
\r\n\r\nIt could list the versions of the direct python dependencies, but also try to show the versions of the underlying GEOS / GDAL libraries.\n", "before_files": [{"content": "import platform\nimport sys\nimport importlib\n\n\ndef _get_sys_info():\n \"\"\"System information\n\n Returns\n -------\n sys_info : dict\n system and Python version information\n \"\"\"\n python = sys.version.replace('\\n', ' ')\n\n blob = [\n (\"python\", python),\n ('executable', sys.executable),\n (\"machine\", platform.platform()),\n ]\n\n return dict(blob)\n\n\ndef _get_deps_info():\n \"\"\"Overview of the installed version of main dependencies\n\n Returns\n -------\n deps_info: dict\n version information on relevant Python libraries\n \"\"\"\n deps = [\n \"geopandas\",\n \"pandas\",\n \"fiona\",\n \"osgeo.gdal\",\n \"numpy\",\n \"shapely\",\n \"rtree\",\n \"pyproj\",\n \"matplotlib\",\n \"mapclassify\",\n \"pysal\",\n \"geopy\",\n \"psycopg2\",\n \"descartes\"\n ]\n\n def get_version(module):\n return module.__version__\n\n deps_info = {}\n\n for modname in deps:\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n ver = get_version(mod)\n deps_info[modname] = ver\n except ImportError:\n deps_info[modname] = None\n except AttributeError:\n deps_info[modname] = None\n\n return deps_info\n\n\ndef show_versions():\n \"\"\"\n Print system information and installed module versions.\n\n Example\n -------\n > python -c \"import geopandas; geopandas.show_versions()\"\n \"\"\"\n sys_info = _get_sys_info()\n deps_info = _get_deps_info()\n\n maxlen = max(len(x) for x in deps_info)\n tpl = \"{{k:<{maxlen}}}: {{stat}}\".format(maxlen=maxlen)\n print(\"\\nSYSTEM INFO\")\n print(\"-----------\")\n for k, stat in sys_info.items():\n print(tpl.format(k=k, stat=stat))\n print(\"\\nPYTHON DEPENDENCIES\")\n print(\"-------------------\")\n for k, stat in deps_info.items():\n print(tpl.format(k=k, stat=stat))\n", "path": "geopandas/tools/_show_versions.py"}]}
| 1,279 | 795 |
gh_patches_debug_35337
|
rasdani/github-patches
|
git_diff
|
MycroftAI__mycroft-core-2659
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Skill keeps reloading indefinitely if there are files with a future date
Due to a temporary problem on my system, I ended up with a skill whose `__init__.py` modification time was set in the future. This caused that skill to be constantly reloaded by mycroft-core and unloaded just after the loading completed. It took some hours of debugging to understand that this was actually the problem.
Perhaps skills containing files with a modification date in the future should simply be prevented from loading, with a debug log entry explaining why?
</issue>
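A minimal sketch of the suggested guard, using only the standard library (the helper and variable names are illustrative, not the project's): any file whose modification time is ahead of the current clock is reported, so the loader can log it once and skip the reload instead of looping.

```python
import os
from time import time


def files_with_future_mtime(paths):
    """Return the subset of paths whose mtime lies in the future."""
    now = time()
    return [path for path in paths if os.path.getmtime(path) > now]


# The skill loader could then do something like:
#   bad = files_with_future_mtime(all_files)
#   if bad:
#       LOG.error("Not reloading skill, bad modification times: %s", bad)
```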
<code>
[start of mycroft/skills/skill_loader.py]
1 # Copyright 2019 Mycroft AI Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 """Periodically run by skill manager to load skills into memory."""
16 import gc
17 import importlib
18 import os
19 from os.path import dirname
20 import sys
21 from time import time
22
23 from mycroft.configuration import Configuration
24 from mycroft.messagebus import Message
25 from mycroft.skills.settings import save_settings
26 from mycroft.util.log import LOG
27
28 from .settings import SettingsMetaUploader
29
30 SKILL_MAIN_MODULE = '__init__.py'
31
32
33 def remove_submodule_refs(module_name):
34 """Ensure submodules are reloaded by removing the refs from sys.modules.
35
36 Python import system puts a reference for each module in the sys.modules
37 dictionary to bypass loading if a module is already in memory. To make
38 sure skills are completely reloaded these references are deleted.
39
40 Arguments:
41 module_name: name of skill module.
42 """
43 submodules = []
44 LOG.debug('Skill module'.format(module_name))
45 # Collect found submodules
46 for m in sys.modules:
47 if m.startswith(module_name + '.'):
48 submodules.append(m)
49 # Remove all references them to in sys.modules
50 for m in submodules:
51 LOG.debug('Removing sys.modules ref for {}'.format(m))
52 del(sys.modules[m])
53
54
55 def load_skill_module(path, skill_id):
56 """Load a skill module
57
58 This function handles the differences between python 3.4 and 3.5+ as well
59 as makes sure the module is inserted into the sys.modules dict.
60
61 Arguments:
62 path: Path to the skill main file (__init__.py)
63 skill_id: skill_id used as skill identifier in the module list
64 """
65 module_name = skill_id.replace('.', '_')
66
67 remove_submodule_refs(module_name)
68
69 spec = importlib.util.spec_from_file_location(module_name, path)
70 mod = importlib.util.module_from_spec(spec)
71 sys.modules[module_name] = mod
72 spec.loader.exec_module(mod)
73 return mod
74
75
76 def _get_last_modified_time(path):
77 """Get the last modified date of the most recently updated file in a path.
78
79 Exclude compiled python files, hidden directories and the settings.json
80 file.
81
82 Arguments:
83 path: skill directory to check
84
85 Returns:
86 int: time of last change
87 """
88 all_files = []
89 for root_dir, dirs, files in os.walk(path):
90 dirs[:] = [d for d in dirs if not d.startswith('.')]
91 for f in files:
92 ignore_file = (
93 f.endswith('.pyc') or
94 f == 'settings.json' or
95 f.startswith('.') or
96 f.endswith('.qmlc')
97 )
98 if not ignore_file:
99 all_files.append(os.path.join(root_dir, f))
100
101 # check files of interest in the skill root directory
102 if all_files:
103 return max(os.path.getmtime(f) for f in all_files)
104 else:
105 return 0
106
107
108 class SkillLoader:
109 def __init__(self, bus, skill_directory):
110 self.bus = bus
111 self.skill_directory = skill_directory
112 self.skill_id = os.path.basename(skill_directory)
113 self.load_attempted = False
114 self.loaded = False
115 self.last_modified = 0
116 self.last_loaded = 0
117 self.instance = None
118 self.active = True
119 self.config = Configuration.get()
120
121 @property
122 def is_blacklisted(self):
123 """Boolean value representing whether or not a skill is blacklisted."""
124 blacklist = self.config['skills'].get('blacklisted_skills', [])
125 if self.skill_id in blacklist:
126 return True
127 else:
128 return False
129
130 def reload_needed(self):
131 """Load an unloaded skill or reload unloaded/changed skill.
132
133 Returns:
134 bool: if the skill was loaded/reloaded
135 """
136 try:
137 self.last_modified = _get_last_modified_time(self.skill_directory)
138 except FileNotFoundError as e:
139 LOG.error('Failed to get last_modification time '
140 '({})'.format(repr(e)))
141 self.last_modified = self.last_loaded
142
143 modified = self.last_modified > self.last_loaded
144
145 # create local reference to avoid threading issues
146 instance = self.instance
147
148 reload_allowed = (
149 self.active and
150 (instance is None or instance.reload_skill)
151 )
152 return modified and reload_allowed
153
154 def reload(self):
155 LOG.info('ATTEMPTING TO RELOAD SKILL: ' + self.skill_id)
156 if self.instance:
157 self._unload()
158 return self._load()
159
160 def load(self):
161 LOG.info('ATTEMPTING TO LOAD SKILL: ' + self.skill_id)
162 return self._load()
163
164 def _unload(self):
165 """Remove listeners and stop threads before loading"""
166 self._execute_instance_shutdown()
167 if self.config.get("debug", False):
168 self._garbage_collect()
169 self.loaded = False
170 self._emit_skill_shutdown_event()
171
172 def unload(self):
173 if self.instance:
174 self._execute_instance_shutdown()
175 self.loaded = False
176
177 def activate(self):
178 self.active = True
179 self.load()
180
181 def deactivate(self):
182 self.active = False
183 self.unload()
184
185 def _execute_instance_shutdown(self):
186 """Call the shutdown method of the skill being reloaded."""
187 try:
188 self.instance.default_shutdown()
189 except Exception:
190 log_msg = 'An error occurred while shutting down {}'
191 LOG.exception(log_msg.format(self.instance.name))
192 else:
193 LOG.info('Skill {} shut down successfully'.format(self.skill_id))
194
195 def _garbage_collect(self):
196 """Invoke Python garbage collector to remove false references"""
197 gc.collect()
198 # Remove two local references that are known
199 refs = sys.getrefcount(self.instance) - 2
200 if refs > 0:
201 log_msg = (
202 "After shutdown of {} there are still {} references "
203 "remaining. The skill won't be cleaned from memory."
204 )
205 LOG.warning(log_msg.format(self.instance.name, refs))
206
207 def _emit_skill_shutdown_event(self):
208 message = Message(
209 "mycroft.skills.shutdown",
210 data=dict(path=self.skill_directory, id=self.skill_id)
211 )
212 self.bus.emit(message)
213
214 def _load(self):
215 self._prepare_for_load()
216 if self.is_blacklisted:
217 self._skip_load()
218 else:
219 skill_module = self._load_skill_source()
220 if skill_module and self._create_skill_instance(skill_module):
221 self._check_for_first_run()
222 self.loaded = True
223
224 self.last_loaded = time()
225 self._communicate_load_status()
226 if self.loaded:
227 self._prepare_settings_meta()
228 return self.loaded
229
230 def _prepare_settings_meta(self):
231 settings_meta = SettingsMetaUploader(self.skill_directory,
232 self.instance.name)
233 self.instance.settings_meta = settings_meta
234
235 def _prepare_for_load(self):
236 self.load_attempted = True
237 self.loaded = False
238 self.instance = None
239
240 def _skip_load(self):
241 log_msg = 'Skill {} is blacklisted - it will not be loaded'
242 LOG.info(log_msg.format(self.skill_id))
243
244 def _load_skill_source(self):
245 """Use Python's import library to load a skill's source code."""
246 main_file_path = os.path.join(self.skill_directory, SKILL_MAIN_MODULE)
247 if not os.path.exists(main_file_path):
248 error_msg = 'Failed to load {} due to a missing file.'
249 LOG.error(error_msg.format(self.skill_id))
250 else:
251 try:
252 skill_module = load_skill_module(main_file_path, self.skill_id)
253 except Exception as e:
254 LOG.exception('Failed to load skill: '
255 '{} ({})'.format(self.skill_id, repr(e)))
256 else:
257 module_is_skill = (
258 hasattr(skill_module, 'create_skill') and
259 callable(skill_module.create_skill)
260 )
261 if module_is_skill:
262 return skill_module
263 return None # Module wasn't loaded
264
265 def _create_skill_instance(self, skill_module):
266 """Use v2 skills framework to create the skill."""
267 try:
268 self.instance = skill_module.create_skill()
269 except Exception as e:
270 log_msg = 'Skill __init__ failed with {}'
271 LOG.exception(log_msg.format(repr(e)))
272 self.instance = None
273
274 if self.instance:
275 self.instance.skill_id = self.skill_id
276 self.instance.bind(self.bus)
277 try:
278 self.instance.load_data_files()
279 # Set up intent handlers
280 # TODO: can this be a public method?
281 self.instance._register_decorated()
282 self.instance.register_resting_screen()
283 self.instance.initialize()
284 except Exception as e:
285 # If an exception occurs, make sure to clean up the skill
286 self.instance.default_shutdown()
287 self.instance = None
288 log_msg = 'Skill initialization failed with {}'
289 LOG.exception(log_msg.format(repr(e)))
290
291 return self.instance is not None
292
293 def _check_for_first_run(self):
294 """The very first time a skill is run, speak the intro."""
295 first_run = self.instance.settings.get(
296 "__mycroft_skill_firstrun",
297 True
298 )
299 if first_run:
300 LOG.info("First run of " + self.skill_id)
301 self.instance.settings["__mycroft_skill_firstrun"] = False
302 save_settings(self.instance.settings_write_path,
303 self.instance.settings)
304 intro = self.instance.get_intro_message()
305 if intro:
306 self.instance.speak(intro)
307
308 def _communicate_load_status(self):
309 if self.loaded:
310 message = Message(
311 'mycroft.skills.loaded',
312 data=dict(
313 path=self.skill_directory,
314 id=self.skill_id,
315 name=self.instance.name,
316 modified=self.last_modified
317 )
318 )
319 self.bus.emit(message)
320 LOG.info('Skill {} loaded successfully'.format(self.skill_id))
321 else:
322 message = Message(
323 'mycroft.skills.loading_failure',
324 data=dict(path=self.skill_directory, id=self.skill_id)
325 )
326 self.bus.emit(message)
327 LOG.error('Skill {} failed to load'.format(self.skill_id))
328
[end of mycroft/skills/skill_loader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mycroft/skills/skill_loader.py b/mycroft/skills/skill_loader.py
--- a/mycroft/skills/skill_loader.py
+++ b/mycroft/skills/skill_loader.py
@@ -73,6 +73,19 @@
return mod
+def _bad_mod_times(mod_times):
+ """Return all entries with modification time in the future.
+
+ Arguments:
+ mod_times (dict): dict mapping file paths to modification times.
+
+ Returns:
+ List of files with bad modification times.
+ """
+ current_time = time()
+ return [path for path in mod_times if mod_times[path] > current_time]
+
+
def _get_last_modified_time(path):
"""Get the last modified date of the most recently updated file in a path.
@@ -99,6 +112,11 @@
all_files.append(os.path.join(root_dir, f))
# check files of interest in the skill root directory
+ mod_times = {f: os.path.getmtime(f) for f in all_files}
+ # Ensure modification times are valid
+ bad_times = _bad_mod_times(mod_times)
+ if bad_times:
+ raise OSError('{} had bad modification times'.format(bad_times))
if all_files:
return max(os.path.getmtime(f) for f in all_files)
else:
@@ -118,6 +136,8 @@
self.active = True
self.config = Configuration.get()
+ self.modtime_error_log_written = False
+
@property
def is_blacklisted(self):
"""Boolean value representing whether or not a skill is blacklisted."""
@@ -135,10 +155,14 @@
"""
try:
self.last_modified = _get_last_modified_time(self.skill_directory)
- except FileNotFoundError as e:
- LOG.error('Failed to get last_modification time '
- '({})'.format(repr(e)))
+ except OSError as err:
self.last_modified = self.last_loaded
+ if not self.modtime_error_log_written:
+ self.modtime_error_log_written = True
+ LOG.error('Failed to get last_modification time '
+ '({})'.format(repr(err)))
+ else:
+ self.modtime_error_log_written = False
modified = self.last_modified > self.last_loaded
|
{"golden_diff": "diff --git a/mycroft/skills/skill_loader.py b/mycroft/skills/skill_loader.py\n--- a/mycroft/skills/skill_loader.py\n+++ b/mycroft/skills/skill_loader.py\n@@ -73,6 +73,19 @@\n return mod\n \n \n+def _bad_mod_times(mod_times):\n+ \"\"\"Return all entries with modification time in the future.\n+\n+ Arguments:\n+ mod_times (dict): dict mapping file paths to modification times.\n+\n+ Returns:\n+ List of files with bad modification times.\n+ \"\"\"\n+ current_time = time()\n+ return [path for path in mod_times if mod_times[path] > current_time]\n+\n+\n def _get_last_modified_time(path):\n \"\"\"Get the last modified date of the most recently updated file in a path.\n \n@@ -99,6 +112,11 @@\n all_files.append(os.path.join(root_dir, f))\n \n # check files of interest in the skill root directory\n+ mod_times = {f: os.path.getmtime(f) for f in all_files}\n+ # Ensure modification times are valid\n+ bad_times = _bad_mod_times(mod_times)\n+ if bad_times:\n+ raise OSError('{} had bad modification times'.format(bad_times))\n if all_files:\n return max(os.path.getmtime(f) for f in all_files)\n else:\n@@ -118,6 +136,8 @@\n self.active = True\n self.config = Configuration.get()\n \n+ self.modtime_error_log_written = False\n+\n @property\n def is_blacklisted(self):\n \"\"\"Boolean value representing whether or not a skill is blacklisted.\"\"\"\n@@ -135,10 +155,14 @@\n \"\"\"\n try:\n self.last_modified = _get_last_modified_time(self.skill_directory)\n- except FileNotFoundError as e:\n- LOG.error('Failed to get last_modification time '\n- '({})'.format(repr(e)))\n+ except OSError as err:\n self.last_modified = self.last_loaded\n+ if not self.modtime_error_log_written:\n+ self.modtime_error_log_written = True\n+ LOG.error('Failed to get last_modification time '\n+ '({})'.format(repr(err)))\n+ else:\n+ self.modtime_error_log_written = False\n \n modified = self.last_modified > self.last_loaded\n", "issue": "skill keep reloading indefinitely if there are files with a future date\ndue to a temporary problem in my system, i ended up with a skill with __init.py modification time set in the future. this caused that skill to constantly be reloaded my mycroft core, and unloaded just after the loading was complted. 
took some hours of debug to understand this was actually the problem.\r\n\r\nperhaps skills with files with modification date from the future should just be stopped from loading and have a debug log about it?\n", "before_files": [{"content": "# Copyright 2019 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"Periodically run by skill manager to load skills into memory.\"\"\"\nimport gc\nimport importlib\nimport os\nfrom os.path import dirname\nimport sys\nfrom time import time\n\nfrom mycroft.configuration import Configuration\nfrom mycroft.messagebus import Message\nfrom mycroft.skills.settings import save_settings\nfrom mycroft.util.log import LOG\n\nfrom .settings import SettingsMetaUploader\n\nSKILL_MAIN_MODULE = '__init__.py'\n\n\ndef remove_submodule_refs(module_name):\n \"\"\"Ensure submodules are reloaded by removing the refs from sys.modules.\n\n Python import system puts a reference for each module in the sys.modules\n dictionary to bypass loading if a module is already in memory. To make\n sure skills are completely reloaded these references are deleted.\n\n Arguments:\n module_name: name of skill module.\n \"\"\"\n submodules = []\n LOG.debug('Skill module'.format(module_name))\n # Collect found submodules\n for m in sys.modules:\n if m.startswith(module_name + '.'):\n submodules.append(m)\n # Remove all references them to in sys.modules\n for m in submodules:\n LOG.debug('Removing sys.modules ref for {}'.format(m))\n del(sys.modules[m])\n\n\ndef load_skill_module(path, skill_id):\n \"\"\"Load a skill module\n\n This function handles the differences between python 3.4 and 3.5+ as well\n as makes sure the module is inserted into the sys.modules dict.\n\n Arguments:\n path: Path to the skill main file (__init__.py)\n skill_id: skill_id used as skill identifier in the module list\n \"\"\"\n module_name = skill_id.replace('.', '_')\n\n remove_submodule_refs(module_name)\n\n spec = importlib.util.spec_from_file_location(module_name, path)\n mod = importlib.util.module_from_spec(spec)\n sys.modules[module_name] = mod\n spec.loader.exec_module(mod)\n return mod\n\n\ndef _get_last_modified_time(path):\n \"\"\"Get the last modified date of the most recently updated file in a path.\n\n Exclude compiled python files, hidden directories and the settings.json\n file.\n\n Arguments:\n path: skill directory to check\n\n Returns:\n int: time of last change\n \"\"\"\n all_files = []\n for root_dir, dirs, files in os.walk(path):\n dirs[:] = [d for d in dirs if not d.startswith('.')]\n for f in files:\n ignore_file = (\n f.endswith('.pyc') or\n f == 'settings.json' or\n f.startswith('.') or\n f.endswith('.qmlc')\n )\n if not ignore_file:\n all_files.append(os.path.join(root_dir, f))\n\n # check files of interest in the skill root directory\n if all_files:\n return max(os.path.getmtime(f) for f in all_files)\n else:\n return 0\n\n\nclass SkillLoader:\n def __init__(self, bus, skill_directory):\n self.bus = bus\n self.skill_directory = skill_directory\n self.skill_id = 
os.path.basename(skill_directory)\n self.load_attempted = False\n self.loaded = False\n self.last_modified = 0\n self.last_loaded = 0\n self.instance = None\n self.active = True\n self.config = Configuration.get()\n\n @property\n def is_blacklisted(self):\n \"\"\"Boolean value representing whether or not a skill is blacklisted.\"\"\"\n blacklist = self.config['skills'].get('blacklisted_skills', [])\n if self.skill_id in blacklist:\n return True\n else:\n return False\n\n def reload_needed(self):\n \"\"\"Load an unloaded skill or reload unloaded/changed skill.\n\n Returns:\n bool: if the skill was loaded/reloaded\n \"\"\"\n try:\n self.last_modified = _get_last_modified_time(self.skill_directory)\n except FileNotFoundError as e:\n LOG.error('Failed to get last_modification time '\n '({})'.format(repr(e)))\n self.last_modified = self.last_loaded\n\n modified = self.last_modified > self.last_loaded\n\n # create local reference to avoid threading issues\n instance = self.instance\n\n reload_allowed = (\n self.active and\n (instance is None or instance.reload_skill)\n )\n return modified and reload_allowed\n\n def reload(self):\n LOG.info('ATTEMPTING TO RELOAD SKILL: ' + self.skill_id)\n if self.instance:\n self._unload()\n return self._load()\n\n def load(self):\n LOG.info('ATTEMPTING TO LOAD SKILL: ' + self.skill_id)\n return self._load()\n\n def _unload(self):\n \"\"\"Remove listeners and stop threads before loading\"\"\"\n self._execute_instance_shutdown()\n if self.config.get(\"debug\", False):\n self._garbage_collect()\n self.loaded = False\n self._emit_skill_shutdown_event()\n\n def unload(self):\n if self.instance:\n self._execute_instance_shutdown()\n self.loaded = False\n\n def activate(self):\n self.active = True\n self.load()\n\n def deactivate(self):\n self.active = False\n self.unload()\n\n def _execute_instance_shutdown(self):\n \"\"\"Call the shutdown method of the skill being reloaded.\"\"\"\n try:\n self.instance.default_shutdown()\n except Exception:\n log_msg = 'An error occurred while shutting down {}'\n LOG.exception(log_msg.format(self.instance.name))\n else:\n LOG.info('Skill {} shut down successfully'.format(self.skill_id))\n\n def _garbage_collect(self):\n \"\"\"Invoke Python garbage collector to remove false references\"\"\"\n gc.collect()\n # Remove two local references that are known\n refs = sys.getrefcount(self.instance) - 2\n if refs > 0:\n log_msg = (\n \"After shutdown of {} there are still {} references \"\n \"remaining. 
The skill won't be cleaned from memory.\"\n )\n LOG.warning(log_msg.format(self.instance.name, refs))\n\n def _emit_skill_shutdown_event(self):\n message = Message(\n \"mycroft.skills.shutdown\",\n data=dict(path=self.skill_directory, id=self.skill_id)\n )\n self.bus.emit(message)\n\n def _load(self):\n self._prepare_for_load()\n if self.is_blacklisted:\n self._skip_load()\n else:\n skill_module = self._load_skill_source()\n if skill_module and self._create_skill_instance(skill_module):\n self._check_for_first_run()\n self.loaded = True\n\n self.last_loaded = time()\n self._communicate_load_status()\n if self.loaded:\n self._prepare_settings_meta()\n return self.loaded\n\n def _prepare_settings_meta(self):\n settings_meta = SettingsMetaUploader(self.skill_directory,\n self.instance.name)\n self.instance.settings_meta = settings_meta\n\n def _prepare_for_load(self):\n self.load_attempted = True\n self.loaded = False\n self.instance = None\n\n def _skip_load(self):\n log_msg = 'Skill {} is blacklisted - it will not be loaded'\n LOG.info(log_msg.format(self.skill_id))\n\n def _load_skill_source(self):\n \"\"\"Use Python's import library to load a skill's source code.\"\"\"\n main_file_path = os.path.join(self.skill_directory, SKILL_MAIN_MODULE)\n if not os.path.exists(main_file_path):\n error_msg = 'Failed to load {} due to a missing file.'\n LOG.error(error_msg.format(self.skill_id))\n else:\n try:\n skill_module = load_skill_module(main_file_path, self.skill_id)\n except Exception as e:\n LOG.exception('Failed to load skill: '\n '{} ({})'.format(self.skill_id, repr(e)))\n else:\n module_is_skill = (\n hasattr(skill_module, 'create_skill') and\n callable(skill_module.create_skill)\n )\n if module_is_skill:\n return skill_module\n return None # Module wasn't loaded\n\n def _create_skill_instance(self, skill_module):\n \"\"\"Use v2 skills framework to create the skill.\"\"\"\n try:\n self.instance = skill_module.create_skill()\n except Exception as e:\n log_msg = 'Skill __init__ failed with {}'\n LOG.exception(log_msg.format(repr(e)))\n self.instance = None\n\n if self.instance:\n self.instance.skill_id = self.skill_id\n self.instance.bind(self.bus)\n try:\n self.instance.load_data_files()\n # Set up intent handlers\n # TODO: can this be a public method?\n self.instance._register_decorated()\n self.instance.register_resting_screen()\n self.instance.initialize()\n except Exception as e:\n # If an exception occurs, make sure to clean up the skill\n self.instance.default_shutdown()\n self.instance = None\n log_msg = 'Skill initialization failed with {}'\n LOG.exception(log_msg.format(repr(e)))\n\n return self.instance is not None\n\n def _check_for_first_run(self):\n \"\"\"The very first time a skill is run, speak the intro.\"\"\"\n first_run = self.instance.settings.get(\n \"__mycroft_skill_firstrun\",\n True\n )\n if first_run:\n LOG.info(\"First run of \" + self.skill_id)\n self.instance.settings[\"__mycroft_skill_firstrun\"] = False\n save_settings(self.instance.settings_write_path,\n self.instance.settings)\n intro = self.instance.get_intro_message()\n if intro:\n self.instance.speak(intro)\n\n def _communicate_load_status(self):\n if self.loaded:\n message = Message(\n 'mycroft.skills.loaded',\n data=dict(\n path=self.skill_directory,\n id=self.skill_id,\n name=self.instance.name,\n modified=self.last_modified\n )\n )\n self.bus.emit(message)\n LOG.info('Skill {} loaded successfully'.format(self.skill_id))\n else:\n message = Message(\n 'mycroft.skills.loading_failure',\n 
data=dict(path=self.skill_directory, id=self.skill_id)\n )\n self.bus.emit(message)\n LOG.error('Skill {} failed to load'.format(self.skill_id))\n", "path": "mycroft/skills/skill_loader.py"}]}
| 3,805 | 519 |
gh_patches_debug_26059
|
rasdani/github-patches
|
git_diff
|
DDMAL__CantusDB-192
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make a test case to test the permissions
We have implemented a lot of different restrictions on views. Create a unit test to automate the process of verifying them.
</issue>
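A hedged sketch of what such an automated check could look like for the sequence edit view; the URL, the user-creation arguments and the expected status codes are placeholders that would need to match the project's actual routing, user model and fixtures.

```python
from django.contrib.auth import get_user_model
from django.test import TestCase


class SequenceEditPermissionTest(TestCase):
    def test_anonymous_user_is_redirected(self):
        # LoginRequiredMixin should send unauthenticated requests to the
        # login page rather than serving the edit form.
        response = self.client.get("/edit-sequence/1")
        self.assertEqual(response.status_code, 302)

    def test_user_without_privilege_is_denied(self):
        # Assumes a stock Django user model. A logged-in user who belongs
        # to none of the privileged groups should fail test_func; the exact
        # denied status (403 vs 404) depends on whether the view looks up
        # the sequence before or after the permission check.
        user = get_user_model().objects.create_user(username="visitor", password="pw")
        self.client.force_login(user)
        response = self.client.get("/edit-sequence/1")
        self.assertIn(response.status_code, (403, 404))
```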
<code>
[start of django/cantusdb_project/main_app/views/sequence.py]
1 from django.views.generic import DetailView, ListView, UpdateView
2 from main_app.models import Sequence
3 from django.db.models import Q
4 from main_app.forms import SequenceEditForm
5 from django.contrib.auth.mixins import LoginRequiredMixin
6 from django.contrib import messages
7 from django.contrib.auth.mixins import UserPassesTestMixin
8 from django.core.exceptions import PermissionDenied
9 from django.http import Http404
10
11
12
13 class SequenceDetailView(DetailView):
14 """
15 Displays a single Sequence object. Accessed with ``sequences/<int:pk>``
16 """
17
18 model = Sequence
19 context_object_name = "sequence"
20 template_name = "sequence_detail.html"
21
22 def get_context_data(self, **kwargs):
23
24 # if the sequence's source isn't public, only logged-in users should be able to view the sequence's detail page
25 sequence = self.get_object()
26 source = sequence.source
27 if (source.public is False) and (not self.request.user.is_authenticated):
28 raise PermissionDenied()
29
30 context = super().get_context_data(**kwargs)
31 context["concordances"] = Sequence.objects.filter(
32 cantus_id=self.get_object().cantus_id
33 ).order_by("siglum")
34 return context
35
36
37 class SequenceListView(ListView):
38 """
39 Displays a list of Sequence objects. Accessed with ``sequences/``
40 """
41
42 model = Sequence
43 paginate_by = 100
44 context_object_name = "sequences"
45 template_name = "sequence_list.html"
46
47 def get_queryset(self):
48 queryset = super().get_queryset()
49 q_obj_filter = Q(source__visible=True)
50 q_obj_filter &= Q(source__public=True)
51
52 if self.request.GET.get("incipit"):
53 incipit = self.request.GET.get("incipit")
54 q_obj_filter &= Q(incipit__icontains=incipit)
55 if self.request.GET.get("siglum"):
56 siglum = self.request.GET.get("siglum")
57 q_obj_filter &= Q(siglum__icontains=siglum)
58 if self.request.GET.get("cantus_id"):
59 cantus_id = self.request.GET.get("cantus_id")
60 q_obj_filter &= Q(cantus_id__icontains=cantus_id)
61
62 return queryset.filter(q_obj_filter).order_by("siglum", "sequence")
63
64 class SequenceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
65 template_name = "sequence_edit.html"
66 model = Sequence
67 form_class = SequenceEditForm
68 pk_url_kwarg = "sequence_id"
69
70 def form_valid(self, form):
71 messages.success(
72 self.request,
73 "Sequence updated successfully!",
74 )
75 return super().form_valid(form)
76
77 def test_func(self):
78 user = self.request.user
79 sequence_id = self.kwargs.get(self.pk_url_kwarg)
80 try:
81 sequence = Sequence.objects.get(id=sequence_id)
82 except:
83 raise Http404("This sequence does not exist")
84 # find the source of this sequence
85 source = sequence.source
86 # checks if the user is an editor or a proofreader,
87 # and if the user is given privilege to edit this source and thus, it's sequences
88 is_editor_proofreader = user.groups.filter(Q(name="editor")|Q(name="proofreader")).exists()
89 can_edit_sequences_in_source = user.sources_user_can_edit.filter(id=source.id)
90 # checks if the user is a project manager (they should have the privilege to edit any sequence)
91 is_project_manager = user.groups.filter(name="project manager").exists()
92 # checks if the user is a contributor,
93 # and if the user is the creator of this source
94 # (they should only have the privilege to edit sequences in a source they have created)
95 is_contributor = user.groups.filter(name="contributor").exists()
96
97 if ((is_editor_proofreader and can_edit_sequences_in_source)
98 or (is_project_manager)
99 or (is_contributor and source.created_by == user)):
100 return True
101 else:
102 return False
103
[end of django/cantusdb_project/main_app/views/sequence.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/django/cantusdb_project/main_app/views/sequence.py b/django/cantusdb_project/main_app/views/sequence.py
--- a/django/cantusdb_project/main_app/views/sequence.py
+++ b/django/cantusdb_project/main_app/views/sequence.py
@@ -76,27 +76,10 @@
def test_func(self):
user = self.request.user
- sequence_id = self.kwargs.get(self.pk_url_kwarg)
- try:
- sequence = Sequence.objects.get(id=sequence_id)
- except:
- raise Http404("This sequence does not exist")
- # find the source of this sequence
- source = sequence.source
- # checks if the user is an editor or a proofreader,
- # and if the user is given privilege to edit this source and thus, it's sequences
- is_editor_proofreader = user.groups.filter(Q(name="editor")|Q(name="proofreader")).exists()
- can_edit_sequences_in_source = user.sources_user_can_edit.filter(id=source.id)
# checks if the user is a project manager (they should have the privilege to edit any sequence)
is_project_manager = user.groups.filter(name="project manager").exists()
- # checks if the user is a contributor,
- # and if the user is the creator of this source
- # (they should only have the privilege to edit sequences in a source they have created)
- is_contributor = user.groups.filter(name="contributor").exists()
-
- if ((is_editor_proofreader and can_edit_sequences_in_source)
- or (is_project_manager)
- or (is_contributor and source.created_by == user)):
+
+ if is_project_manager:
return True
else:
return False
|
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/sequence.py b/django/cantusdb_project/main_app/views/sequence.py\n--- a/django/cantusdb_project/main_app/views/sequence.py\n+++ b/django/cantusdb_project/main_app/views/sequence.py\n@@ -76,27 +76,10 @@\n \n def test_func(self):\n user = self.request.user\n- sequence_id = self.kwargs.get(self.pk_url_kwarg)\n- try:\n- sequence = Sequence.objects.get(id=sequence_id)\n- except:\n- raise Http404(\"This sequence does not exist\")\n- # find the source of this sequence\n- source = sequence.source\n- # checks if the user is an editor or a proofreader,\n- # and if the user is given privilege to edit this source and thus, it's sequences\n- is_editor_proofreader = user.groups.filter(Q(name=\"editor\")|Q(name=\"proofreader\")).exists()\n- can_edit_sequences_in_source = user.sources_user_can_edit.filter(id=source.id)\n # checks if the user is a project manager (they should have the privilege to edit any sequence)\n is_project_manager = user.groups.filter(name=\"project manager\").exists()\n- # checks if the user is a contributor,\n- # and if the user is the creator of this source \n- # (they should only have the privilege to edit sequences in a source they have created)\n- is_contributor = user.groups.filter(name=\"contributor\").exists()\n-\n- if ((is_editor_proofreader and can_edit_sequences_in_source) \n- or (is_project_manager) \n- or (is_contributor and source.created_by == user)):\n+\n+ if is_project_manager:\n return True\n else:\n return False\n", "issue": "make a test case to test the permissions\nwe have implemented a lot of different restrictions to views. create a unit test to automate the testing process.\n", "before_files": [{"content": "from django.views.generic import DetailView, ListView, UpdateView\nfrom main_app.models import Sequence\nfrom django.db.models import Q\nfrom main_app.forms import SequenceEditForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404\n\n\n\nclass SequenceDetailView(DetailView):\n \"\"\"\n Displays a single Sequence object. Accessed with ``sequences/<int:pk>``\n \"\"\"\n\n model = Sequence\n context_object_name = \"sequence\"\n template_name = \"sequence_detail.html\"\n\n def get_context_data(self, **kwargs):\n\n # if the sequence's source isn't public, only logged-in users should be able to view the sequence's detail page\n sequence = self.get_object()\n source = sequence.source\n if (source.public is False) and (not self.request.user.is_authenticated):\n raise PermissionDenied()\n \n context = super().get_context_data(**kwargs)\n context[\"concordances\"] = Sequence.objects.filter(\n cantus_id=self.get_object().cantus_id\n ).order_by(\"siglum\")\n return context\n\n\nclass SequenceListView(ListView):\n \"\"\"\n Displays a list of Sequence objects. 
Accessed with ``sequences/``\n \"\"\"\n\n model = Sequence\n paginate_by = 100\n context_object_name = \"sequences\"\n template_name = \"sequence_list.html\"\n\n def get_queryset(self):\n queryset = super().get_queryset()\n q_obj_filter = Q(source__visible=True)\n q_obj_filter &= Q(source__public=True)\n\n if self.request.GET.get(\"incipit\"):\n incipit = self.request.GET.get(\"incipit\")\n q_obj_filter &= Q(incipit__icontains=incipit)\n if self.request.GET.get(\"siglum\"):\n siglum = self.request.GET.get(\"siglum\")\n q_obj_filter &= Q(siglum__icontains=siglum)\n if self.request.GET.get(\"cantus_id\"):\n cantus_id = self.request.GET.get(\"cantus_id\")\n q_obj_filter &= Q(cantus_id__icontains=cantus_id)\n\n return queryset.filter(q_obj_filter).order_by(\"siglum\", \"sequence\")\n\nclass SequenceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n template_name = \"sequence_edit.html\"\n model = Sequence\n form_class = SequenceEditForm\n pk_url_kwarg = \"sequence_id\"\n\n def form_valid(self, form):\n messages.success(\n self.request,\n \"Sequence updated successfully!\",\n )\n return super().form_valid(form)\n\n def test_func(self):\n user = self.request.user\n sequence_id = self.kwargs.get(self.pk_url_kwarg)\n try:\n sequence = Sequence.objects.get(id=sequence_id)\n except:\n raise Http404(\"This sequence does not exist\")\n # find the source of this sequence\n source = sequence.source\n # checks if the user is an editor or a proofreader,\n # and if the user is given privilege to edit this source and thus, it's sequences\n is_editor_proofreader = user.groups.filter(Q(name=\"editor\")|Q(name=\"proofreader\")).exists()\n can_edit_sequences_in_source = user.sources_user_can_edit.filter(id=source.id)\n # checks if the user is a project manager (they should have the privilege to edit any sequence)\n is_project_manager = user.groups.filter(name=\"project manager\").exists()\n # checks if the user is a contributor,\n # and if the user is the creator of this source \n # (they should only have the privilege to edit sequences in a source they have created)\n is_contributor = user.groups.filter(name=\"contributor\").exists()\n\n if ((is_editor_proofreader and can_edit_sequences_in_source) \n or (is_project_manager) \n or (is_contributor and source.created_by == user)):\n return True\n else:\n return False\n", "path": "django/cantusdb_project/main_app/views/sequence.py"}]}
| 1,643 | 392 |
gh_patches_debug_17751
|
rasdani/github-patches
|
git_diff
|
lutris__lutris-3930
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Runners can be configured when not yet installed, but not after removal
### Bug description
When you go into the Preferences dialog, Runners section, all the runners may be configured even if not installed. But if you remove one, it's configure button disappears.
### How to Reproduce
Go into Preferences, select "Runners". Now, pick a runner and remove it. The configure button for that runner will disappear.
I think that is correct, but notice that all the other uninstalled runners still have configure buttons. See the media attached. That can't be right.
### Expected behavior
I expect that the configure button will appear only for installed runners in the first place.
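
One way to get that consistency, sketched here purely as an illustration (upstream could just as well decide to always allow configuration, installed or not), is to derive the button state from the runner in a single helper that both the constructor and the install/remove signal handlers call:

```python
# Illustrative helper for RunnerBox; not the project's actual fix.
def _update_configure_button(self):
    if self.runner.is_installed():
        self.configure_button.show()
    else:
        self.configure_button.hide()
```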
### Log output
```shell
INFO 2021-12-24 07:12:16,550 [startup.check_driver:53]:Using NVIDIA drivers 470.86 for x86_64
INFO 2021-12-24 07:12:16,550 [startup.check_driver:57]:GPU: NVIDIA GeForce RTX 3060
INFO 2021-12-24 07:12:16,550 [startup.check_driver:73]:GPU: 10DE:2503 196E:1377 (nvidia drivers)
DEBUG 2021-12-24 07:12:16,725 [lutriswindow.update_store:451]:Showing 25 games
DEBUG 2021-12-24 07:12:20,443 [application.show_window:259]:Showing window <class 'lutris.gui.config.preferences_dialog.PreferencesDialog'>{}
DEBUG 2021-12-24 07:12:26,667 [application.on_app_window_destroyed:276]:Removed window <class 'lutris.gui.config.preferences_dialog.PreferencesDialog'>{}
INFO 2021-12-24 07:12:28,505 [application.do_shutdown:631]:Shutting down Lutris
Hmm, using the master branch to repro produces less logging!
```
### System Information
```shell
[System]
OS: Pop!_OS 21.10 impish
Arch: x86_64
Kernel: 5.15.8-76051508-generic
Desktop: GNOME
Display Server: x11
[CPU]
Vendor: AuthenticAMD
Model: AMD Ryzen 7 5800X 8-Core Processor
Physical cores: 8
Logical cores: 16
[Memory]
RAM: 31.4 GB
Swap: 0.0 GB
[Graphics]
Vendor: NVIDIA Corporation
OpenGL Renderer: NVIDIA GeForce RTX 3060/PCIe/SSE2
OpenGL Version: 4.6.0 NVIDIA 470.86
OpenGL Core: 4.6.0 NVIDIA 470.86
OpenGL ES: OpenGL ES 3.2 NVIDIA 470.86
Vulkan: Supported
```
### Media (optional)

### Checklist:
- [X] I'm not asking for support with a game or the wine runner.
- [X] I have followed the above mentioned guides and have all the graphics and wine dependencies installed.
- [X] I have checked for existing issues that describe my problem prior to opening this one.
- [X] I understand that improperly formatted bug reports may be closed without explanation.
</issue>
<code>
[start of lutris/gui/config/runner_box.py]
1 from gettext import gettext as _
2
3 from gi.repository import GObject, Gtk
4
5 from lutris import runners
6 from lutris.gui.config.runner import RunnerConfigDialog
7 from lutris.gui.dialogs import ErrorDialog, QuestionDialog
8 from lutris.gui.dialogs.download import simple_downloader
9 from lutris.gui.dialogs.runner_install import RunnerInstallDialog
10 from lutris.gui.widgets.utils import ICON_SIZE, get_icon
11 from lutris.util.log import logger
12
13
14 class RunnerBox(Gtk.Box):
15 __gsignals__ = {
16 "runner-installed": (GObject.SIGNAL_RUN_FIRST, None, ()),
17 "runner-removed": (GObject.SIGNAL_RUN_FIRST, None, ()),
18 }
19
20 def __init__(self, runner_name):
21 super().__init__(visible=True)
22
23 self.connect("runner-installed", self.on_runner_installed)
24 self.connect("runner-removed", self.on_runner_removed)
25
26 self.set_margin_bottom(12)
27 self.set_margin_top(12)
28 self.set_margin_left(12)
29 self.set_margin_right(12)
30 self.runner = runners.import_runner(runner_name)()
31 icon = get_icon(self.runner.name, icon_format='pixbuf', size=ICON_SIZE)
32 if icon:
33 runner_icon = Gtk.Image(visible=True)
34 runner_icon.set_from_pixbuf(icon)
35 else:
36 runner_icon = Gtk.Image.new_from_icon_name("package-x-generic-symbolic", Gtk.IconSize.DND)
37 runner_icon.show()
38 runner_icon.set_margin_right(12)
39 self.pack_start(runner_icon, False, True, 6)
40
41 self.runner_label_box = Gtk.VBox(visible=True)
42 self.runner_label_box.set_margin_top(12)
43
44 runner_label = Gtk.Label(visible=True)
45 runner_label.set_alignment(0, 0.5)
46 runner_label.set_markup("<b>%s</b>" % self.runner.human_name)
47 self.runner_label_box.pack_start(runner_label, False, False, 0)
48
49 desc_label = Gtk.Label(visible=True)
50 desc_label.set_alignment(0, 0.5)
51 desc_label.set_text(self.runner.description)
52 self.runner_label_box.pack_start(desc_label, False, False, 0)
53
54 self.pack_start(self.runner_label_box, True, True, 0)
55
56 self.configure_button = Gtk.Button.new_from_icon_name("preferences-system-symbolic", Gtk.IconSize.BUTTON)
57 self.configure_button.set_margin_right(12)
58 self.configure_button.connect("clicked", self.on_configure_clicked)
59 self.configure_button.show()
60 self.pack_start(self.configure_button, False, False, 0)
61 if not self.runner.is_installed():
62 self.runner_label_box.set_sensitive(False)
63 self.action_alignment = Gtk.Alignment.new(0.5, 0.5, 0, 0)
64 self.action_alignment.show()
65 self.action_alignment.add(self.get_action_button())
66 self.pack_start(self.action_alignment, False, False, 0)
67
68 def get_action_button(self):
69 """Return a install or remove button"""
70 if self.runner.multiple_versions:
71 _button = Gtk.Button.new_from_icon_name("preferences-other-symbolic", Gtk.IconSize.BUTTON)
72 _button.get_style_context().add_class("circular")
73 _button.connect("clicked", self.on_versions_clicked)
74 else:
75 if self.runner.is_installed():
76 _button = Gtk.Button.new_from_icon_name("edit-delete-symbolic", Gtk.IconSize.BUTTON)
77 _button.get_style_context().add_class("circular")
78 _button.connect("clicked", self.on_remove_clicked)
79 else:
80 _button = Gtk.Button.new_from_icon_name("system-software-install-symbolic", Gtk.IconSize.BUTTON)
81 _button.get_style_context().add_class("circular")
82 _button.connect("clicked", self.on_install_clicked)
83 _button.show()
84 return _button
85
86 def on_versions_clicked(self, widget):
87 RunnerInstallDialog(
88 _("Manage %s versions") % self.runner.name,
89 None,
90 self.runner.name
91 )
92 # connect a runner-installed signal from the above dialog?
93
94 def on_install_clicked(self, widget):
95 """Install a runner."""
96 logger.debug("Install of %s requested", self.runner)
97 try:
98 self.runner.install(downloader=simple_downloader)
99 except (
100 runners.RunnerInstallationError,
101 runners.NonInstallableRunnerError,
102 ) as ex:
103 logger.error(ex)
104 ErrorDialog(ex.message)
105 return
106 if self.runner.is_installed():
107 self.emit("runner-installed")
108 else:
109 logger.error("Runner failed to install")
110
111 def on_configure_clicked(self, widget):
112 RunnerConfigDialog(self.runner)
113
114 def on_remove_clicked(self, widget):
115 dialog = QuestionDialog(
116 {
117 "title": _("Do you want to uninstall %s?") % self.runner.human_name,
118 "question": _("This will remove <b>%s</b> and all associated data." % self.runner.human_name)
119
120 }
121 )
122 if Gtk.ResponseType.YES == dialog.result:
123 self.runner.uninstall()
124 self.emit("runner-removed")
125
126 def on_runner_installed(self, widget):
127 """Called after the runnner is installed"""
128 self.runner_label_box.set_sensitive(True)
129 self.configure_button.show()
130 self.action_alignment.get_children()[0].destroy()
131 self.action_alignment.add(self.get_action_button())
132
133 def on_runner_removed(self, widget):
134 """Called after the runner is removed"""
135 self.runner_label_box.set_sensitive(False)
136 self.configure_button.hide()
137 self.action_alignment.get_children()[0].destroy()
138 self.action_alignment.add(self.get_action_button())
139
[end of lutris/gui/config/runner_box.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lutris/gui/config/runner_box.py b/lutris/gui/config/runner_box.py
--- a/lutris/gui/config/runner_box.py
+++ b/lutris/gui/config/runner_box.py
@@ -126,13 +126,11 @@
def on_runner_installed(self, widget):
"""Called after the runnner is installed"""
self.runner_label_box.set_sensitive(True)
- self.configure_button.show()
self.action_alignment.get_children()[0].destroy()
self.action_alignment.add(self.get_action_button())
def on_runner_removed(self, widget):
"""Called after the runner is removed"""
self.runner_label_box.set_sensitive(False)
- self.configure_button.hide()
self.action_alignment.get_children()[0].destroy()
self.action_alignment.add(self.get_action_button())
|
{"golden_diff": "diff --git a/lutris/gui/config/runner_box.py b/lutris/gui/config/runner_box.py\n--- a/lutris/gui/config/runner_box.py\n+++ b/lutris/gui/config/runner_box.py\n@@ -126,13 +126,11 @@\n def on_runner_installed(self, widget):\n \"\"\"Called after the runnner is installed\"\"\"\n self.runner_label_box.set_sensitive(True)\n- self.configure_button.show()\n self.action_alignment.get_children()[0].destroy()\n self.action_alignment.add(self.get_action_button())\n \n def on_runner_removed(self, widget):\n \"\"\"Called after the runner is removed\"\"\"\n self.runner_label_box.set_sensitive(False)\n- self.configure_button.hide()\n self.action_alignment.get_children()[0].destroy()\n self.action_alignment.add(self.get_action_button())\n", "issue": "Runners can be configured when not yet installed, but not after removal\n### Bug description\n\nWhen you go into the Preferences dialog, Runners section, all the runners may be configured even if not installed. But if you remove one, it's configure button disappears.\n\n### How to Reproduce\n\nGo into Preferences, select \"Runners\". Now, pick a runner and remove it. The configure button for that runner will disappear.\r\n\r\nI think that is correct- but notice that all the other uninstalled runners have configure buttons. See the media attached. That can't be right.\n\n### Expected behavior\n\nI expect that the configure button will appear only for installed runners in the first place.\n\n### Log output\n\n```shell\nINFO 2021-12-24 07:12:16,550 [startup.check_driver:53]:Using NVIDIA drivers 470.86 for x86_64\r\nINFO 2021-12-24 07:12:16,550 [startup.check_driver:57]:GPU: NVIDIA GeForce RTX 3060\r\nINFO 2021-12-24 07:12:16,550 [startup.check_driver:73]:GPU: 10DE:2503 196E:1377 (nvidia drivers)\r\nDEBUG 2021-12-24 07:12:16,725 [lutriswindow.update_store:451]:Showing 25 games\r\nDEBUG 2021-12-24 07:12:20,443 [application.show_window:259]:Showing window <class 'lutris.gui.config.preferences_dialog.PreferencesDialog'>{}\r\nDEBUG 2021-12-24 07:12:26,667 [application.on_app_window_destroyed:276]:Removed window <class 'lutris.gui.config.preferences_dialog.PreferencesDialog'>{}\r\nINFO 2021-12-24 07:12:28,505 [application.do_shutdown:631]:Shutting down Lutris\r\n\r\nHmm, using the master branch to repro produces less logging!\n```\n\n\n### System Information\n\n```shell\n[System]\r\nOS: Pop!_OS 21.10 impish\r\nArch: x86_64\r\nKernel: 5.15.8-76051508-generic\r\nDesktop: GNOME\r\nDisplay Server: x11\r\n\r\n[CPU]\r\nVendor: AuthenticAMD\r\nModel: AMD Ryzen 7 5800X 8-Core Processor\r\nPhysical cores: 8\r\nLogical cores: 16\r\n\r\n[Memory]\r\nRAM: 31.4 GB\r\nSwap: 0.0 GB\r\n\r\n[Graphics]\r\nVendor: NVIDIA Corporation\r\nOpenGL Renderer: NVIDIA GeForce RTX 3060/PCIe/SSE2\r\nOpenGL Version: 4.6.0 NVIDIA 470.86\r\nOpenGL Core: 4.6.0 NVIDIA 470.86\r\nOpenGL ES: OpenGL ES 3.2 NVIDIA 470.86\r\nVulkan: Supported\n```\n\n\n### Media (optional)\n\n\r\n\n\n### Checklist:\n\n- [X] I'm not asking for support with a game or the wine runner.\n- [X] I have followed the above mentioned guides and have all the graphics and wine dependencies installed.\n- [X] I have checked for existing issues that describe my problem prior to opening this one.\n- [X] I understand that improperly formatted bug reports may be closed without explanation.\n", "before_files": [{"content": "from gettext import gettext as _\n\nfrom gi.repository import GObject, Gtk\n\nfrom lutris import runners\nfrom lutris.gui.config.runner import RunnerConfigDialog\nfrom lutris.gui.dialogs import ErrorDialog, 
QuestionDialog\nfrom lutris.gui.dialogs.download import simple_downloader\nfrom lutris.gui.dialogs.runner_install import RunnerInstallDialog\nfrom lutris.gui.widgets.utils import ICON_SIZE, get_icon\nfrom lutris.util.log import logger\n\n\nclass RunnerBox(Gtk.Box):\n __gsignals__ = {\n \"runner-installed\": (GObject.SIGNAL_RUN_FIRST, None, ()),\n \"runner-removed\": (GObject.SIGNAL_RUN_FIRST, None, ()),\n }\n\n def __init__(self, runner_name):\n super().__init__(visible=True)\n\n self.connect(\"runner-installed\", self.on_runner_installed)\n self.connect(\"runner-removed\", self.on_runner_removed)\n\n self.set_margin_bottom(12)\n self.set_margin_top(12)\n self.set_margin_left(12)\n self.set_margin_right(12)\n self.runner = runners.import_runner(runner_name)()\n icon = get_icon(self.runner.name, icon_format='pixbuf', size=ICON_SIZE)\n if icon:\n runner_icon = Gtk.Image(visible=True)\n runner_icon.set_from_pixbuf(icon)\n else:\n runner_icon = Gtk.Image.new_from_icon_name(\"package-x-generic-symbolic\", Gtk.IconSize.DND)\n runner_icon.show()\n runner_icon.set_margin_right(12)\n self.pack_start(runner_icon, False, True, 6)\n\n self.runner_label_box = Gtk.VBox(visible=True)\n self.runner_label_box.set_margin_top(12)\n\n runner_label = Gtk.Label(visible=True)\n runner_label.set_alignment(0, 0.5)\n runner_label.set_markup(\"<b>%s</b>\" % self.runner.human_name)\n self.runner_label_box.pack_start(runner_label, False, False, 0)\n\n desc_label = Gtk.Label(visible=True)\n desc_label.set_alignment(0, 0.5)\n desc_label.set_text(self.runner.description)\n self.runner_label_box.pack_start(desc_label, False, False, 0)\n\n self.pack_start(self.runner_label_box, True, True, 0)\n\n self.configure_button = Gtk.Button.new_from_icon_name(\"preferences-system-symbolic\", Gtk.IconSize.BUTTON)\n self.configure_button.set_margin_right(12)\n self.configure_button.connect(\"clicked\", self.on_configure_clicked)\n self.configure_button.show()\n self.pack_start(self.configure_button, False, False, 0)\n if not self.runner.is_installed():\n self.runner_label_box.set_sensitive(False)\n self.action_alignment = Gtk.Alignment.new(0.5, 0.5, 0, 0)\n self.action_alignment.show()\n self.action_alignment.add(self.get_action_button())\n self.pack_start(self.action_alignment, False, False, 0)\n\n def get_action_button(self):\n \"\"\"Return a install or remove button\"\"\"\n if self.runner.multiple_versions:\n _button = Gtk.Button.new_from_icon_name(\"preferences-other-symbolic\", Gtk.IconSize.BUTTON)\n _button.get_style_context().add_class(\"circular\")\n _button.connect(\"clicked\", self.on_versions_clicked)\n else:\n if self.runner.is_installed():\n _button = Gtk.Button.new_from_icon_name(\"edit-delete-symbolic\", Gtk.IconSize.BUTTON)\n _button.get_style_context().add_class(\"circular\")\n _button.connect(\"clicked\", self.on_remove_clicked)\n else:\n _button = Gtk.Button.new_from_icon_name(\"system-software-install-symbolic\", Gtk.IconSize.BUTTON)\n _button.get_style_context().add_class(\"circular\")\n _button.connect(\"clicked\", self.on_install_clicked)\n _button.show()\n return _button\n\n def on_versions_clicked(self, widget):\n RunnerInstallDialog(\n _(\"Manage %s versions\") % self.runner.name,\n None,\n self.runner.name\n )\n # connect a runner-installed signal from the above dialog?\n\n def on_install_clicked(self, widget):\n \"\"\"Install a runner.\"\"\"\n logger.debug(\"Install of %s requested\", self.runner)\n try:\n self.runner.install(downloader=simple_downloader)\n except (\n runners.RunnerInstallationError,\n 
runners.NonInstallableRunnerError,\n ) as ex:\n logger.error(ex)\n ErrorDialog(ex.message)\n return\n if self.runner.is_installed():\n self.emit(\"runner-installed\")\n else:\n logger.error(\"Runner failed to install\")\n\n def on_configure_clicked(self, widget):\n RunnerConfigDialog(self.runner)\n\n def on_remove_clicked(self, widget):\n dialog = QuestionDialog(\n {\n \"title\": _(\"Do you want to uninstall %s?\") % self.runner.human_name,\n \"question\": _(\"This will remove <b>%s</b> and all associated data.\" % self.runner.human_name)\n\n }\n )\n if Gtk.ResponseType.YES == dialog.result:\n self.runner.uninstall()\n self.emit(\"runner-removed\")\n\n def on_runner_installed(self, widget):\n \"\"\"Called after the runnner is installed\"\"\"\n self.runner_label_box.set_sensitive(True)\n self.configure_button.show()\n self.action_alignment.get_children()[0].destroy()\n self.action_alignment.add(self.get_action_button())\n\n def on_runner_removed(self, widget):\n \"\"\"Called after the runner is removed\"\"\"\n self.runner_label_box.set_sensitive(False)\n self.configure_button.hide()\n self.action_alignment.get_children()[0].destroy()\n self.action_alignment.add(self.get_action_button())\n", "path": "lutris/gui/config/runner_box.py"}]}
| 2,952 | 177 |
gh_patches_debug_38134
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-536
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LaunchConfigurationEBSEncryption gives false-positives, due to not checking snapshot_id
In the relevant `aws_launch_configuration` Terraform docs https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_configuration
```
encrypted - (Optional) Whether the volume should be encrypted or not.
Do not use this option if you are using snapshot_id as the encrypted flag will be determined by the snapshot.
(Default: false).
```
The relevant part is **Do not use this option if you are using snapshot_id as the encrypted flag will be determined by the snapshot.** `snapshot_id` is not taken into account in the current check:
https://github.com/bridgecrewio/checkov/blob/d67ec380daf81b889e83d4d7c7d33f490525e899/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py#L16-L34
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class SecurityGroupRuleDescription(BaseResourceCheck):
6 def __init__(self):
7 name = "Ensure every security groups rule has a description"
8 id = "CKV_AWS_23"
9 supported_resource = ['aws_security_group', 'aws_security_group_rule', 'aws_db_security_group',
10 'aws_elasticache_security_group', 'aws_redshift_security_group']
11 categories = [CheckCategories.NETWORKING]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)
13
14 def scan_resource_conf(self, conf):
15 """
16 Looks for description at security group rules :
17 https://www.terraform.io/docs/providers/aws/r/security_group.html
18 :param conf: aws_security_group configuration
19 :return: <CheckResult>
20 """
21 if 'description' in conf.keys():
22 if conf['description']:
23 return CheckResult.PASSED
24 egress_result = self.check_rule(rule_type='egress', conf=conf)
25 ingress_result = self.check_rule(rule_type='ingress', conf=conf)
26 if egress_result == CheckResult.PASSED and ingress_result == CheckResult.PASSED:
27 return CheckResult.PASSED
28 return CheckResult.FAILED
29
30 def check_rule(self, rule_type, conf):
31 if rule_type in conf.keys():
32 for rule in conf[rule_type]:
33 if isinstance(rule, dict):
34 if 'description' not in rule.keys() or not rule['description']:
35 return CheckResult.FAILED
36 return CheckResult.PASSED
37
38
39 check = SecurityGroupRuleDescription()
40
[end of checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py]
[start of checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3
4
5 class LaunchConfigurationEBSEncryption(BaseResourceValueCheck):
6 def __init__(self):
7 name = "Ensure all data stored in the Launch configuration EBS is securely encrypted"
8 id = "CKV_AWS_8"
9 supported_resources = ['aws_launch_configuration', 'aws_instance']
10 categories = [CheckCategories.ENCRYPTION]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def get_inspected_key(self):
14 return "*_block_device/[0]/encrypted"
15
16 def scan_resource_conf(self, conf):
17 """
18 Looks for encryption configuration at launch configuration:
19 https://www.terraform.io/docs/providers/aws/r/launch_configuration.html or https://www.terraform.io/docs/providers/aws/d/instance.html
20 :param conf: aws_launch_configuration configuration
21 :return: <CheckResult>
22 """
23 have_root_block = 0
24 for key in conf.keys():
25 if "block_device" in key and "ephemeral" not in key:
26 if isinstance(conf[key][0], dict) and conf[key][0].get("encrypted") != [True]:
27 return CheckResult.FAILED
28 if "root_block_device" in key:
29 # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.
30 have_root_block = 1
31 if have_root_block == 0:
32 return CheckResult.FAILED
33
34 return CheckResult.PASSED
35
36
37 check = LaunchConfigurationEBSEncryption()
38
[end of checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py]
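To make the false positive concrete, here is a hypothetical conf dict, shaped like the parsed HCL that `scan_resource_conf` receives, that the check above marks FAILED even though encryption is governed by the snapshot:

```python
# Hypothetical input shaped like the parsed configuration this check receives;
# resource values are illustrative only.
conf = {
    "image_id": ["ami-0123456789abcdef0"],
    "instance_type": ["t3.micro"],
    "root_block_device": [
        {
            "snapshot_id": ["snap-0abc123"],  # encryption follows the snapshot
            "volume_size": [50],
            # no "encrypted" key on purpose
        }
    ],
}
# With the code above, conf["root_block_device"][0].get("encrypted") != [True]
# evaluates to True, so the resource is reported FAILED despite the snapshot-backed volume.
```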
[start of checkov/common/runners/runner_registry.py]
1 import json
2 import logging
3 from abc import abstractmethod
4
5
6 class RunnerRegistry(object):
7 runners = []
8 scan_reports = []
9 banner = ""
10
11 def __init__(self, banner, runner_filter, *runners):
12 self.logger = logging.getLogger(__name__)
13 self.runner_filter = runner_filter
14 self.runners = runners
15 self.banner = banner
16 self.filter_runner_framework()
17 self.scan_reports = []
18
19 @abstractmethod
20 def extract_entity_details(self, entity):
21 raise NotImplementedError()
22
23 def run(self, root_folder=None, external_checks_dir=None, files=None, guidelines={}):
24 for runner in self.runners:
25 scan_report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files,
26 runner_filter=self.runner_filter)
27 RunnerRegistry.enrich_report_with_guidelines(scan_report, guidelines)
28 self.scan_reports.append(scan_report)
29 return self.scan_reports
30
31 def print_reports(self, scan_reports, args):
32 if args.output != "json" and args.output != "junitxml" and args.output != "github_failed_only":
33 print(f"{self.banner}\n")
34 exit_codes = []
35 report_jsons = []
36 for report in scan_reports:
37 if not report.is_empty():
38 if args.output == "json":
39 report_jsons.append(report.get_dict())
40 elif args.output == "junitxml":
41 report.print_junit_xml()
42 elif args.output == 'github_failed_only':
43 report.print_failed_github_md()
44 else:
45 report.print_console(is_quiet=args.quiet)
46 exit_codes.append(report.get_exit_code(args.soft_fail))
47 if args.output == "json":
48 if len(report_jsons) == 1:
49 print(json.dumps(report_jsons[0], indent=4))
50 else:
51 print(json.dumps(report_jsons, indent=4))
52 exit_code = 1 if 1 in exit_codes else 0
53 exit(exit_code)
54
55 def filter_runner_framework(self):
56 if self.runner_filter.framework == 'all':
57 return
58 for runner in self.runners:
59 if runner.check_type == self.runner_filter.framework:
60 self.runners = [runner]
61 return
62
63 @staticmethod
64 def enrich_report_with_guidelines(scan_report, guidelines):
65 for record in scan_report.failed_checks + scan_report.passed_checks + scan_report.skipped_checks:
66 if record.check_id in guidelines:
67 record.set_guideline(guidelines[record.check_id])
68
[end of checkov/common/runners/runner_registry.py]
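As a readability aside, the chained `!=` comparisons in `print_reports` can be expressed as a single membership test; a hedged sketch of that rewrite, behaviour unchanged:

```python
# Sketch only: equivalent to the chained comparisons in print_reports above.
if args.output not in ("json", "junitxml", "github_failed_only"):
    print(f"{self.banner}\n")
```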
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checkov/common/runners/runner_registry.py b/checkov/common/runners/runner_registry.py
--- a/checkov/common/runners/runner_registry.py
+++ b/checkov/common/runners/runner_registry.py
@@ -29,7 +29,11 @@
return self.scan_reports
def print_reports(self, scan_reports, args):
- if args.output != "json" and args.output != "junitxml" and args.output != "github_failed_only":
+ if args.output not in (
+ "github_failed_only",
+ "json",
+ "junitxml",
+ ):
print(f"{self.banner}\n")
exit_codes = []
report_jsons = []
diff --git a/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py b/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py
--- a/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py
+++ b/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py
@@ -20,15 +20,25 @@
:param conf: aws_launch_configuration configuration
:return: <CheckResult>
"""
- have_root_block = 0
for key in conf.keys():
- if "block_device" in key and "ephemeral" not in key:
- if isinstance(conf[key][0], dict) and conf[key][0].get("encrypted") != [True]:
+ if (
+ "block_device" in key
+ and
+ "ephemeral" not in key
+ ):
+ if (
+ isinstance(conf[key][0], dict)
+ and
+ conf[key][0].get("encrypted") != [True]
+ and
+ # If present, the encrypted flag will be determined by the snapshot
+ # Note: checkov does not know if snapshot is encrypted, so we default to PASSED
+ not conf[key][0].get("snapshot_id")
+ ):
return CheckResult.FAILED
- if "root_block_device" in key:
- # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.
- have_root_block = 1
- if have_root_block == 0:
+
+ # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.
+ if "root_block_device" not in conf.keys():
return CheckResult.FAILED
return CheckResult.PASSED
diff --git a/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py b/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py
--- a/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py
+++ b/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py
@@ -6,8 +6,13 @@
def __init__(self):
name = "Ensure every security groups rule has a description"
id = "CKV_AWS_23"
- supported_resource = ['aws_security_group', 'aws_security_group_rule', 'aws_db_security_group',
- 'aws_elasticache_security_group', 'aws_redshift_security_group']
+ supported_resource = [
+ 'aws_security_group',
+ 'aws_security_group_rule',
+ 'aws_db_security_group',
+ 'aws_elasticache_security_group',
+ 'aws_redshift_security_group',
+ ]
categories = [CheckCategories.NETWORKING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)
|
{"golden_diff": "diff --git a/checkov/common/runners/runner_registry.py b/checkov/common/runners/runner_registry.py\n--- a/checkov/common/runners/runner_registry.py\n+++ b/checkov/common/runners/runner_registry.py\n@@ -29,7 +29,11 @@\n return self.scan_reports\n \n def print_reports(self, scan_reports, args):\n- if args.output != \"json\" and args.output != \"junitxml\" and args.output != \"github_failed_only\":\n+ if args.output not in (\n+ \"github_failed_only\",\n+ \"json\",\n+ \"junitxml\",\n+ ):\n print(f\"{self.banner}\\n\")\n exit_codes = []\n report_jsons = []\ndiff --git a/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py b/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py\n--- a/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py\n+++ b/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py\n@@ -20,15 +20,25 @@\n :param conf: aws_launch_configuration configuration\n :return: <CheckResult>\n \"\"\"\n- have_root_block = 0\n for key in conf.keys():\n- if \"block_device\" in key and \"ephemeral\" not in key:\n- if isinstance(conf[key][0], dict) and conf[key][0].get(\"encrypted\") != [True]:\n+ if (\n+ \"block_device\" in key\n+ and\n+ \"ephemeral\" not in key\n+ ):\n+ if (\n+ isinstance(conf[key][0], dict)\n+ and\n+ conf[key][0].get(\"encrypted\") != [True]\n+ and\n+ # If present, the encrypted flag will be determined by the snapshot\n+ # Note: checkov does not know if snapshot is encrypted, so we default to PASSED\n+ not conf[key][0].get(\"snapshot_id\")\n+ ):\n return CheckResult.FAILED\n- if \"root_block_device\" in key:\n- # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.\n- have_root_block = 1\n- if have_root_block == 0: \n+\n+ # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.\n+ if \"root_block_device\" not in conf.keys():\n return CheckResult.FAILED\n \n return CheckResult.PASSED\ndiff --git a/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py b/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py\n--- a/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py\n+++ b/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py\n@@ -6,8 +6,13 @@\n def __init__(self):\n name = \"Ensure every security groups rule has a description\"\n id = \"CKV_AWS_23\"\n- supported_resource = ['aws_security_group', 'aws_security_group_rule', 'aws_db_security_group',\n- 'aws_elasticache_security_group', 'aws_redshift_security_group']\n+ supported_resource = [\n+ 'aws_security_group',\n+ 'aws_security_group_rule',\n+ 'aws_db_security_group',\n+ 'aws_elasticache_security_group',\n+ 'aws_redshift_security_group',\n+ ]\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)\n", "issue": "LaunchConfigurationEBSEncryption gives false-positives, due to not checking snapshot_id\nIn the relevant `aws_launch_configuration` Terraform docs https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_configuration\r\n\r\n```\r\nencrypted - (Optional) Whether the volume should be encrypted or not. 
\r\nDo not use this option if you are using snapshot_id as the encrypted flag will be determined by the snapshot.\r\n(Default: false).\r\n```\r\nThe relevant part is **Do not use this option if you are using snapshot_id as the encrypted flag will be determined by the snapshot.** `snapshot_id` is not taken into account in the current check:\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/d67ec380daf81b889e83d4d7c7d33f490525e899/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py#L16-L34\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass SecurityGroupRuleDescription(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure every security groups rule has a description\"\n id = \"CKV_AWS_23\"\n supported_resource = ['aws_security_group', 'aws_security_group_rule', 'aws_db_security_group',\n 'aws_elasticache_security_group', 'aws_redshift_security_group']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for description at security group rules :\n https://www.terraform.io/docs/providers/aws/r/security_group.html\n :param conf: aws_security_group configuration\n :return: <CheckResult>\n \"\"\"\n if 'description' in conf.keys():\n if conf['description']:\n return CheckResult.PASSED\n egress_result = self.check_rule(rule_type='egress', conf=conf)\n ingress_result = self.check_rule(rule_type='ingress', conf=conf)\n if egress_result == CheckResult.PASSED and ingress_result == CheckResult.PASSED:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n def check_rule(self, rule_type, conf):\n if rule_type in conf.keys():\n for rule in conf[rule_type]:\n if isinstance(rule, dict):\n if 'description' not in rule.keys() or not rule['description']:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = SecurityGroupRuleDescription()\n", "path": "checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py"}, {"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass LaunchConfigurationEBSEncryption(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure all data stored in the Launch configuration EBS is securely encrypted\"\n id = \"CKV_AWS_8\"\n supported_resources = ['aws_launch_configuration', 'aws_instance']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"*_block_device/[0]/encrypted\"\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for encryption configuration at launch configuration:\n https://www.terraform.io/docs/providers/aws/r/launch_configuration.html or https://www.terraform.io/docs/providers/aws/d/instance.html\n :param conf: aws_launch_configuration configuration\n :return: <CheckResult>\n \"\"\"\n have_root_block = 0\n for key in conf.keys():\n if \"block_device\" in key and \"ephemeral\" not in key:\n if isinstance(conf[key][0], dict) and conf[key][0].get(\"encrypted\") != [True]:\n return CheckResult.FAILED\n if \"root_block_device\" in key:\n # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.\n 
have_root_block = 1\n if have_root_block == 0: \n return CheckResult.FAILED\n\n return CheckResult.PASSED\n\n\ncheck = LaunchConfigurationEBSEncryption()\n", "path": "checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py"}, {"content": "import json\nimport logging\nfrom abc import abstractmethod\n\n\nclass RunnerRegistry(object):\n runners = []\n scan_reports = []\n banner = \"\"\n\n def __init__(self, banner, runner_filter, *runners):\n self.logger = logging.getLogger(__name__)\n self.runner_filter = runner_filter\n self.runners = runners\n self.banner = banner\n self.filter_runner_framework()\n self.scan_reports = []\n\n @abstractmethod\n def extract_entity_details(self, entity):\n raise NotImplementedError()\n\n def run(self, root_folder=None, external_checks_dir=None, files=None, guidelines={}):\n for runner in self.runners:\n scan_report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files,\n runner_filter=self.runner_filter)\n RunnerRegistry.enrich_report_with_guidelines(scan_report, guidelines)\n self.scan_reports.append(scan_report)\n return self.scan_reports\n\n def print_reports(self, scan_reports, args):\n if args.output != \"json\" and args.output != \"junitxml\" and args.output != \"github_failed_only\":\n print(f\"{self.banner}\\n\")\n exit_codes = []\n report_jsons = []\n for report in scan_reports:\n if not report.is_empty():\n if args.output == \"json\":\n report_jsons.append(report.get_dict())\n elif args.output == \"junitxml\":\n report.print_junit_xml()\n elif args.output == 'github_failed_only':\n report.print_failed_github_md()\n else:\n report.print_console(is_quiet=args.quiet)\n exit_codes.append(report.get_exit_code(args.soft_fail))\n if args.output == \"json\":\n if len(report_jsons) == 1:\n print(json.dumps(report_jsons[0], indent=4))\n else:\n print(json.dumps(report_jsons, indent=4))\n exit_code = 1 if 1 in exit_codes else 0\n exit(exit_code)\n\n def filter_runner_framework(self):\n if self.runner_filter.framework == 'all':\n return\n for runner in self.runners:\n if runner.check_type == self.runner_filter.framework:\n self.runners = [runner]\n return\n\n @staticmethod\n def enrich_report_with_guidelines(scan_report, guidelines):\n for record in scan_report.failed_checks + scan_report.passed_checks + scan_report.skipped_checks:\n if record.check_id in guidelines:\n record.set_guideline(guidelines[record.check_id])\n", "path": "checkov/common/runners/runner_registry.py"}]}
| 2,340 | 804 |
gh_patches_debug_12143
|
rasdani/github-patches
|
git_diff
|
google__turbinia-294
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Programatically setting config file
I am trying to write tests for the dftimewolf turbinia module. Loading the configuration on a system that doesn't have one will fail, so I am trying to manually feed in a test config data to see if the module behaves as expected.
I tried setting the `TURBINIA_CONFIG_PATH` environment variable, but this just *adds* the path the list of possible config paths. This would work in a pristine test environment, but it will break in my dev setup where I already have a production turbinia config file set up.
What do you think of giving `TURBINIA_CONFIG_PATH` environment variable precedence over the other potential config locations?
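One way to express the requested precedence, sketched against the `LoadConfig` flow quoted below (illustrative only; the constant names follow that module and the maintainers may choose a different behaviour):

```python
import itertools
import os


# Hypothetical resolution order: if TURBINIA_CONFIG_PATH is set, search only those
# directories; otherwise fall back to the built-in CONFIGPATH list.
def candidate_config_files(configpath, configfiles, envvar="TURBINIA_CONFIG_PATH"):
    if envvar in os.environ:
        configpath = os.environ[envvar].split(":")
    for directory, filename in itertools.product(configpath, configfiles):
        yield os.path.join(directory, filename)
```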
</issue>
<code>
[start of turbinia/config/__init__.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2016 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Basic Turbinia config."""
16
17 from __future__ import unicode_literals
18
19 import imp
20 import itertools
21 import logging
22 import os
23 import sys
24
25 log = logging.getLogger('turbinia')
26
27 # Look for config files with these names
28 CONFIGFILES = ['.turbiniarc', 'turbinia.conf', 'turbinia_config.py']
29 # Look in homedir first, then /etc/turbinia, and finally in the source
30 # config dir for config files
31 CONFIGPATH = [
32 os.path.expanduser('~'),
33 '/etc/turbinia',
34 os.path.dirname(os.path.abspath(__file__))]
35 # Config vars that we expect to exist in the configuration
36 CONFIGVARS = [
37 # Turbinia Config
38 'TASK_MANAGER',
39 'LOG_FILE',
40 'LOCK_FILE',
41 'OUTPUT_DIR',
42 'SLEEP_TIME',
43 'SINGLE_RUN',
44 'MOUNT_DIR_PREFIX',
45 'SHARED_FILESYSTEM',
46 # TODO(aarontp): Move this to the recipe config when it's available.
47 'DEBUG_TASKS',
48 # GCE CONFIG
49 'PROJECT',
50 'ZONE',
51 'TURBINIA_REGION',
52 'BUCKET_NAME',
53 'PSQ_TOPIC',
54 'PUBSUB_TOPIC',
55 'GCS_OUTPUT_PATH',
56 'STATE_MANAGER',
57 'INSTANCE_ID',
58 # REDIS CONFIG
59 'REDIS_HOST',
60 'REDIS_PORT',
61 'REDIS_DB',
62 # Celery config
63 'CELERY_BROKER',
64 'CELERY_BACKEND',
65 'KOMBU_BROKER',
66 'KOMBU_CHANNEL',
67 'KOMBU_DURABLE',]
68 # Environment variable to look for path data in
69 ENVCONFIGVAR = 'TURBINIA_CONFIG_PATH'
70
71 CONFIG = None
72
73
74 class TurbiniaConfigException(Exception):
75 """Exception for Turbinia configuration."""
76 pass
77
78
79 def LoadConfig():
80 """Finds Turbinia config file and loads it."""
81 # TODO(aarontp): Find way to not require global var here. Maybe a singleton
82 # pattern on the config class.
83 # pylint: disable=global-statement
84 global CONFIG
85 if CONFIG:
86 return CONFIG
87
88 if ENVCONFIGVAR in os.environ:
89 CONFIGPATH.extend(os.environ[ENVCONFIGVAR].split(':'))
90
91 config_file = None
92 # Load first file found
93 for _dir, _file in itertools.product(CONFIGPATH, CONFIGFILES):
94 if os.path.exists(os.path.join(_dir, _file)):
95 config_file = os.path.join(_dir, _file)
96 break
97
98 if config_file is None:
99 raise TurbiniaConfigException('No config files found')
100
101 log.info('Loading config from {0:s}'.format(config_file))
102 _config = imp.load_source('config', config_file)
103 _config.configSource = config_file
104 ValidateAndSetConfig(_config)
105 CONFIG = _config
106 return _config
107
108
109 def ValidateAndSetConfig(_config):
110 """Makes sure that the config has the vars loaded and set in the module."""
111 # TODO(aarontp): Allow for non-mandatory config options
112 for var in CONFIGVARS:
113 if not hasattr(_config, var):
114 raise TurbiniaConfigException(
115 'No config attribute {0:s}:{1:s}'.format(_config.configSource, var))
116 if getattr(_config, var) is None:
117 raise TurbiniaConfigException(
118 'Config attribute {0:s}:{1:s} is not set'.format(
119 _config.configSource, var))
120
121 # Set the attribute in the current module
122 setattr(sys.modules[__name__], var, getattr(_config, var))
123
[end of turbinia/config/__init__.py]
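For the testing scenario described in the issue, usage would look roughly like the following sketch, which assumes the environment variable takes precedence; `make_test_config` is a hypothetical placeholder for whatever writes the test config file:

```python
# Hedged sketch of the intended test usage; make_test_config() is a hypothetical
# helper that writes a complete turbinia config module into a temp directory.
import os

from turbinia import config

test_dir = make_test_config()                  # hypothetical fixture
os.environ["TURBINIA_CONFIG_PATH"] = test_dir  # should win over ~/.turbiniarc etc.
config.CONFIG = None                           # force LoadConfig() to re-read
cfg = config.LoadConfig()
assert cfg.configSource.startswith(test_dir)
```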
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/turbinia/config/__init__.py b/turbinia/config/__init__.py
--- a/turbinia/config/__init__.py
+++ b/turbinia/config/__init__.py
@@ -85,12 +85,16 @@
if CONFIG:
return CONFIG
+ # If the environment variable is set, take precedence over the pre-defined
+ # CONFIGPATHs.
+ configpath = CONFIGPATH
if ENVCONFIGVAR in os.environ:
- CONFIGPATH.extend(os.environ[ENVCONFIGVAR].split(':'))
+ configpath = os.environ[ENVCONFIGVAR].split(':')
+
config_file = None
# Load first file found
- for _dir, _file in itertools.product(CONFIGPATH, CONFIGFILES):
+ for _dir, _file in itertools.product(configpath, CONFIGFILES):
if os.path.exists(os.path.join(_dir, _file)):
config_file = os.path.join(_dir, _file)
break
|
{"golden_diff": "diff --git a/turbinia/config/__init__.py b/turbinia/config/__init__.py\n--- a/turbinia/config/__init__.py\n+++ b/turbinia/config/__init__.py\n@@ -85,12 +85,16 @@\n if CONFIG:\n return CONFIG\n \n+ # If the environment variable is set, take precedence over the pre-defined\n+ # CONFIGPATHs.\n+ configpath = CONFIGPATH\n if ENVCONFIGVAR in os.environ:\n- CONFIGPATH.extend(os.environ[ENVCONFIGVAR].split(':'))\n+ configpath = os.environ[ENVCONFIGVAR].split(':')\n+\n \n config_file = None\n # Load first file found\n- for _dir, _file in itertools.product(CONFIGPATH, CONFIGFILES):\n+ for _dir, _file in itertools.product(configpath, CONFIGFILES):\n if os.path.exists(os.path.join(_dir, _file)):\n config_file = os.path.join(_dir, _file)\n break\n", "issue": "Programatically setting config file\nI am trying to write tests for the dftimewolf turbinia module. Loading the configuration on a system that doesn't have one will fail, so I am trying to manually feed in a test config data to see if the module behaves as expected.\r\n\r\nI tried setting the `TURBINIA_CONFIG_PATH` environment variable, but this just *adds* the path the list of possible config paths. This would work in a pristine test environment, but it will break in my dev setup where I already have a production turbinia config file set up.\r\n\r\nWhat do you think of giving `TURBINIA_CONFIG_PATH` environment variable precedence over the other potential config locations?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Basic Turbinia config.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport imp\nimport itertools\nimport logging\nimport os\nimport sys\n\nlog = logging.getLogger('turbinia')\n\n# Look for config files with these names\nCONFIGFILES = ['.turbiniarc', 'turbinia.conf', 'turbinia_config.py']\n# Look in homedir first, then /etc/turbinia, and finally in the source\n# config dir for config files\nCONFIGPATH = [\n os.path.expanduser('~'),\n '/etc/turbinia',\n os.path.dirname(os.path.abspath(__file__))]\n# Config vars that we expect to exist in the configuration\nCONFIGVARS = [\n # Turbinia Config\n 'TASK_MANAGER',\n 'LOG_FILE',\n 'LOCK_FILE',\n 'OUTPUT_DIR',\n 'SLEEP_TIME',\n 'SINGLE_RUN',\n 'MOUNT_DIR_PREFIX',\n 'SHARED_FILESYSTEM',\n # TODO(aarontp): Move this to the recipe config when it's available.\n 'DEBUG_TASKS',\n # GCE CONFIG\n 'PROJECT',\n 'ZONE',\n 'TURBINIA_REGION',\n 'BUCKET_NAME',\n 'PSQ_TOPIC',\n 'PUBSUB_TOPIC',\n 'GCS_OUTPUT_PATH',\n 'STATE_MANAGER',\n 'INSTANCE_ID',\n # REDIS CONFIG\n 'REDIS_HOST',\n 'REDIS_PORT',\n 'REDIS_DB',\n # Celery config\n 'CELERY_BROKER',\n 'CELERY_BACKEND',\n 'KOMBU_BROKER',\n 'KOMBU_CHANNEL',\n 'KOMBU_DURABLE',]\n# Environment variable to look for path data in\nENVCONFIGVAR = 'TURBINIA_CONFIG_PATH'\n\nCONFIG = None\n\n\nclass TurbiniaConfigException(Exception):\n \"\"\"Exception for Turbinia configuration.\"\"\"\n pass\n\n\ndef LoadConfig():\n \"\"\"Finds Turbinia config file 
and loads it.\"\"\"\n # TODO(aarontp): Find way to not require global var here. Maybe a singleton\n # pattern on the config class.\n # pylint: disable=global-statement\n global CONFIG\n if CONFIG:\n return CONFIG\n\n if ENVCONFIGVAR in os.environ:\n CONFIGPATH.extend(os.environ[ENVCONFIGVAR].split(':'))\n\n config_file = None\n # Load first file found\n for _dir, _file in itertools.product(CONFIGPATH, CONFIGFILES):\n if os.path.exists(os.path.join(_dir, _file)):\n config_file = os.path.join(_dir, _file)\n break\n\n if config_file is None:\n raise TurbiniaConfigException('No config files found')\n\n log.info('Loading config from {0:s}'.format(config_file))\n _config = imp.load_source('config', config_file)\n _config.configSource = config_file\n ValidateAndSetConfig(_config)\n CONFIG = _config\n return _config\n\n\ndef ValidateAndSetConfig(_config):\n \"\"\"Makes sure that the config has the vars loaded and set in the module.\"\"\"\n # TODO(aarontp): Allow for non-mandatory config options\n for var in CONFIGVARS:\n if not hasattr(_config, var):\n raise TurbiniaConfigException(\n 'No config attribute {0:s}:{1:s}'.format(_config.configSource, var))\n if getattr(_config, var) is None:\n raise TurbiniaConfigException(\n 'Config attribute {0:s}:{1:s} is not set'.format(\n _config.configSource, var))\n\n # Set the attribute in the current module\n setattr(sys.modules[__name__], var, getattr(_config, var))\n", "path": "turbinia/config/__init__.py"}]}
| 1,877 | 220 |
gh_patches_debug_171
|
rasdani/github-patches
|
git_diff
|
HypothesisWorks__hypothesis-563
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
External pull requests currently fail the deploy task
The build on #536 is currently failing because the decryption is trying to run and it doesn't have access to the decryption environment variables because it comes from @Zac-HD's fork rather than the main repo.
The solution is just to have that task skip for external pull requests I think.
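A minimal sketch of that skip, using the `TRAVIS_SECURE_ENV_VARS` variable Travis sets; the exact guard is an assumption, and any reliable signal that secrets are unavailable would do:

```python
import os
import sys

# Hypothetical guard near the top of scripts/deploy.py: external pull requests have
# no decryption secrets, so exit successfully instead of failing the build.
if os.environ.get("TRAVIS_SECURE_ENV_VARS", None) != "true":
    print("Skipping deploy: secure environment variables are unavailable.")
    sys.exit(0)
```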
</issue>
<code>
[start of scripts/deploy.py]
1 #!/usr/bin/env python
2
3 # coding=utf-8
4 #
5 # This file is part of Hypothesis, which may be found at
6 # https://github.com/HypothesisWorks/hypothesis-python
7 #
8 # Most of this work is copyright (C) 2013-2017 David R. MacIver
9 # ([email protected]), but it contains contributions by others. See
10 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
11 # consult the git log if you need to determine who owns an individual
12 # contribution.
13 #
14 # This Source Code Form is subject to the terms of the Mozilla Public License,
15 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
16 # obtain one at http://mozilla.org/MPL/2.0/.
17 #
18 # END HEADER
19
20 from __future__ import division, print_function, absolute_import
21
22 import os
23 import sys
24 import random
25 import shutil
26 import subprocess
27 from time import time, sleep
28
29 import hypothesistooling as tools
30
31 sys.path.append(os.path.dirname(__file__)) # noqa
32
33
34 DIST = os.path.join(tools.ROOT, 'dist')
35
36
37 PENDING_STATUS = ('started', 'created')
38
39
40 if __name__ == '__main__':
41
42 print('Decrypting secrets')
43
44 # We'd normally avoid the use of shell=True, but this is more or less
45 # intended as an opaque string that was given to us by Travis that happens
46 # to be a shell command that we run, and there are a number of good reasons
47 # this particular instance is harmless and would be high effort to
48 # convert (principally: Lack of programmatic generation of the string and
49 # extensive use of environment variables in it), so we're making an
50 # exception here.
51 subprocess.check_call(
52 'openssl aes-256-cbc -K $encrypted_39cb4cc39a80_key '
53 '-iv $encrypted_39cb4cc39a80_iv -in secrets.tar.enc '
54 '-out secrets.tar -d',
55 shell=True
56 )
57
58 subprocess.check_call([
59 'tar', '-xvf', 'secrets.tar',
60 ])
61
62 last_release = tools.latest_version()
63
64 print('Current version: %s. Latest released version: %s' % (
65 tools.__version__, last_release
66 ))
67
68 print('Building an sdist...')
69
70 if os.path.exists(DIST):
71 shutil.rmtree(DIST)
72
73 subprocess.check_output([
74 sys.executable, 'setup.py', 'sdist', '--dist-dir', DIST,
75 ])
76
77 if not tools.on_master():
78 print('Not deploying due to not being on master')
79 sys.exit(0)
80
81 if not tools.has_source_changes(last_release):
82 print('Not deploying due to no source changes')
83 sys.exit(0)
84
85 start_time = time()
86
87 prev_pending = None
88
89 # We time out after an hour, which is a stupidly long time and it should
90 # never actually take that long: A full Travis run only takes about 20-30
91 # minutes! This is really just here as a guard in case something goes
92 # wrong and we're not paying attention so as to not be too mean to Travis..
93 while time() <= start_time + 60 * 60:
94 jobs = tools.build_jobs()
95
96 failed_jobs = [
97 (k, v)
98 for k, vs in jobs.items()
99 if k not in PENDING_STATUS + ('passed',)
100 for v in vs
101 ]
102
103 if failed_jobs:
104 print('Failing this due to failure of jobs %s' % (
105 ', '.join('%s(%s)' % (s, j) for j, s in failed_jobs),
106 ))
107 sys.exit(1)
108 else:
109 pending = [j for s in PENDING_STATUS for j in jobs.get(s, ())]
110 try:
111 # This allows us to test the deploy job for a build locally.
112 pending.remove('deploy')
113 except ValueError:
114 pass
115 if pending:
116 still_pending = set(pending)
117 if prev_pending is None:
118 print('Waiting for the following jobs to complete:')
119 for p in sorted(still_pending):
120 print(' * %s' % (p,))
121 print()
122 else:
123 completed = prev_pending - still_pending
124 if completed:
125 print('%s completed since last check.' % (
126 ', '.join(sorted(completed)),))
127 prev_pending = still_pending
128 naptime = 10.0 * (2 + random.random())
129 print('Waiting %.2fs for %d more job%s to complete' % (
130 naptime, len(pending), 's' if len(pending) > 1 else '',))
131 sleep(naptime)
132 else:
133 break
134 else:
135 print("We've been waiting for an hour. That seems bad. Failing now.")
136 sys.exit(1)
137
138 print('Looks good to release!')
139 print('Now uploading to pypi.')
140
141 subprocess.check_output([
142 sys.executable, '-m', 'twine', 'upload',
143 '--config-file', './.pypirc',
144 os.path.join(DIST, '*'),
145 ])
146
147 print('Release seems good. Pushing the tag now.')
148
149 tools.create_tag()
150 sys.exit(0)
151
[end of scripts/deploy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/deploy.py b/scripts/deploy.py
--- a/scripts/deploy.py
+++ b/scripts/deploy.py
@@ -38,6 +38,8 @@
if __name__ == '__main__':
+ if os.environ.get('TRAVIS_SECURE_ENV_VARS', None) != 'true':
+ sys.exit(0)
print('Decrypting secrets')
|
{"golden_diff": "diff --git a/scripts/deploy.py b/scripts/deploy.py\n--- a/scripts/deploy.py\n+++ b/scripts/deploy.py\n@@ -38,6 +38,8 @@\n \n \n if __name__ == '__main__':\n+ if os.environ.get('TRAVIS_SECURE_ENV_VARS', None) != 'true':\n+ sys.exit(0)\n \n print('Decrypting secrets')\n", "issue": "External pull requests currently fail the deploy task\nThe build on #536 is currently failing because the decryption is trying to run and it doesn't have access to the decryption environment variables because it comes from @Zac-HD's fork rather than the main repo.\r\n\r\nThe solution is just to have that task skip for external pull requests I think.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2017 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\nimport random\nimport shutil\nimport subprocess\nfrom time import time, sleep\n\nimport hypothesistooling as tools\n\nsys.path.append(os.path.dirname(__file__)) # noqa\n\n\nDIST = os.path.join(tools.ROOT, 'dist')\n\n\nPENDING_STATUS = ('started', 'created')\n\n\nif __name__ == '__main__':\n\n print('Decrypting secrets')\n\n # We'd normally avoid the use of shell=True, but this is more or less\n # intended as an opaque string that was given to us by Travis that happens\n # to be a shell command that we run, and there are a number of good reasons\n # this particular instance is harmless and would be high effort to\n # convert (principally: Lack of programmatic generation of the string and\n # extensive use of environment variables in it), so we're making an\n # exception here.\n subprocess.check_call(\n 'openssl aes-256-cbc -K $encrypted_39cb4cc39a80_key '\n '-iv $encrypted_39cb4cc39a80_iv -in secrets.tar.enc '\n '-out secrets.tar -d',\n shell=True\n )\n\n subprocess.check_call([\n 'tar', '-xvf', 'secrets.tar',\n ])\n\n last_release = tools.latest_version()\n\n print('Current version: %s. Latest released version: %s' % (\n tools.__version__, last_release\n ))\n\n print('Building an sdist...')\n\n if os.path.exists(DIST):\n shutil.rmtree(DIST)\n\n subprocess.check_output([\n sys.executable, 'setup.py', 'sdist', '--dist-dir', DIST,\n ])\n\n if not tools.on_master():\n print('Not deploying due to not being on master')\n sys.exit(0)\n\n if not tools.has_source_changes(last_release):\n print('Not deploying due to no source changes')\n sys.exit(0)\n\n start_time = time()\n\n prev_pending = None\n\n # We time out after an hour, which is a stupidly long time and it should\n # never actually take that long: A full Travis run only takes about 20-30\n # minutes! 
This is really just here as a guard in case something goes\n # wrong and we're not paying attention so as to not be too mean to Travis..\n while time() <= start_time + 60 * 60:\n jobs = tools.build_jobs()\n\n failed_jobs = [\n (k, v)\n for k, vs in jobs.items()\n if k not in PENDING_STATUS + ('passed',)\n for v in vs\n ]\n\n if failed_jobs:\n print('Failing this due to failure of jobs %s' % (\n ', '.join('%s(%s)' % (s, j) for j, s in failed_jobs),\n ))\n sys.exit(1)\n else:\n pending = [j for s in PENDING_STATUS for j in jobs.get(s, ())]\n try:\n # This allows us to test the deploy job for a build locally.\n pending.remove('deploy')\n except ValueError:\n pass\n if pending:\n still_pending = set(pending)\n if prev_pending is None:\n print('Waiting for the following jobs to complete:')\n for p in sorted(still_pending):\n print(' * %s' % (p,))\n print()\n else:\n completed = prev_pending - still_pending\n if completed:\n print('%s completed since last check.' % (\n ', '.join(sorted(completed)),))\n prev_pending = still_pending\n naptime = 10.0 * (2 + random.random())\n print('Waiting %.2fs for %d more job%s to complete' % (\n naptime, len(pending), 's' if len(pending) > 1 else '',))\n sleep(naptime)\n else:\n break\n else:\n print(\"We've been waiting for an hour. That seems bad. Failing now.\")\n sys.exit(1)\n\n print('Looks good to release!')\n print('Now uploading to pypi.')\n\n subprocess.check_output([\n sys.executable, '-m', 'twine', 'upload',\n '--config-file', './.pypirc',\n os.path.join(DIST, '*'),\n ])\n\n print('Release seems good. Pushing the tag now.')\n\n tools.create_tag()\n sys.exit(0)\n", "path": "scripts/deploy.py"}]}
| 2,128 | 83 |
gh_patches_debug_5678
|
rasdani/github-patches
|
git_diff
|
stephenmcd__mezzanine-1954
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Small typo in mezzanine/core/auth_backends.py
Should read verification rather than verficiation.
</issue>
<code>
[start of mezzanine/core/auth_backends.py]
1 from __future__ import unicode_literals
2
3 from django.contrib.auth import get_user_model
4 from django.contrib.auth.backends import ModelBackend
5 from django.contrib.auth.tokens import default_token_generator
6 from django.db.models import Q
7 from django.utils.http import base36_to_int
8
9
10 User = get_user_model()
11
12
13 class MezzanineBackend(ModelBackend):
14 """
15 Extends Django's ``ModelBackend`` to allow login via username,
16 email, or verification token.
17
18 Args are either ``username`` and ``password``, or ``uidb36``
19 and ``token``. In either case, ``is_active`` can also be given.
20
21 For login, is_active is not given, so that the login form can
22 raise a specific error for inactive users.
23 For password reset, True is given for is_active.
24 For signup verficiation, False is given for is_active.
25 """
26
27 def authenticate(self, *args, **kwargs):
28 if kwargs:
29 username = kwargs.pop("username", None)
30 if username:
31 username_or_email = Q(username=username) | Q(email=username)
32 password = kwargs.pop("password", None)
33 try:
34 user = User.objects.get(username_or_email, **kwargs)
35 except User.DoesNotExist:
36 pass
37 else:
38 if user.check_password(password):
39 return user
40 else:
41 if 'uidb36' not in kwargs:
42 return
43 kwargs["id"] = base36_to_int(kwargs.pop("uidb36"))
44 token = kwargs.pop("token")
45 try:
46 user = User.objects.get(**kwargs)
47 except User.DoesNotExist:
48 pass
49 else:
50 if default_token_generator.check_token(user, token):
51 return user
52
[end of mezzanine/core/auth_backends.py]
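For reference, the two call shapes the docstring above describes look roughly like this; values are illustrative and a configured Django project is assumed:

```python
# Illustrative only; uidb36/token values normally come from Django's token machinery.
from mezzanine.core.auth_backends import MezzanineBackend

backend = MezzanineBackend()

# Username-or-email login (is_active omitted so the login form can report inactive users):
user = backend.authenticate(username="alice@example.com", password="s3cret")

# Token-based lookup, e.g. signup verification (is_active=False) or password reset (True):
user = backend.authenticate(uidb36="1a", token="abc123-deadbeef", is_active=True)
```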
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mezzanine/core/auth_backends.py b/mezzanine/core/auth_backends.py
--- a/mezzanine/core/auth_backends.py
+++ b/mezzanine/core/auth_backends.py
@@ -21,7 +21,7 @@
For login, is_active is not given, so that the login form can
raise a specific error for inactive users.
For password reset, True is given for is_active.
- For signup verficiation, False is given for is_active.
+ For signup verification, False is given for is_active.
"""
def authenticate(self, *args, **kwargs):
|
{"golden_diff": "diff --git a/mezzanine/core/auth_backends.py b/mezzanine/core/auth_backends.py\n--- a/mezzanine/core/auth_backends.py\n+++ b/mezzanine/core/auth_backends.py\n@@ -21,7 +21,7 @@\n For login, is_active is not given, so that the login form can\n raise a specific error for inactive users.\n For password reset, True is given for is_active.\n- For signup verficiation, False is given for is_active.\n+ For signup verification, False is given for is_active.\n \"\"\"\n \n def authenticate(self, *args, **kwargs):\n", "issue": "Small typo in mezzanine/core/auth_backends.py\nShould read verification rather than verficiation.\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.backends import ModelBackend\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.db.models import Q\nfrom django.utils.http import base36_to_int\n\n\nUser = get_user_model()\n\n\nclass MezzanineBackend(ModelBackend):\n \"\"\"\n Extends Django's ``ModelBackend`` to allow login via username,\n email, or verification token.\n\n Args are either ``username`` and ``password``, or ``uidb36``\n and ``token``. In either case, ``is_active`` can also be given.\n\n For login, is_active is not given, so that the login form can\n raise a specific error for inactive users.\n For password reset, True is given for is_active.\n For signup verficiation, False is given for is_active.\n \"\"\"\n\n def authenticate(self, *args, **kwargs):\n if kwargs:\n username = kwargs.pop(\"username\", None)\n if username:\n username_or_email = Q(username=username) | Q(email=username)\n password = kwargs.pop(\"password\", None)\n try:\n user = User.objects.get(username_or_email, **kwargs)\n except User.DoesNotExist:\n pass\n else:\n if user.check_password(password):\n return user\n else:\n if 'uidb36' not in kwargs:\n return\n kwargs[\"id\"] = base36_to_int(kwargs.pop(\"uidb36\"))\n token = kwargs.pop(\"token\")\n try:\n user = User.objects.get(**kwargs)\n except User.DoesNotExist:\n pass\n else:\n if default_token_generator.check_token(user, token):\n return user\n", "path": "mezzanine/core/auth_backends.py"}]}
| 1,024 | 137 |
gh_patches_debug_30239
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__pytorch-lightning-3261
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
auto_scale_batch_size won't reset current_epoch
## 🐛 Bug
When `auto_scale_batch_size` is enabled, the model is initially trained with varying batch sizes. When training begins, `trainer.current_epoch` equals 1 instead of 0.
### To Reproduce
Either observe the progress bar or use a simple callback to track the epoch number, once with `auto_scale_batch_size` enabled and once with `auto_scale_batch_size` disabled.
```
from pytorch_lightning import Callback
class PrintCallback(Callback):
def __init__(self):
self.observed_epochs = []
def on_train_epoch_start(self, trainer, pl_module):
print(f'Current Epoch: {trainer.current_epoch}')
self.observed_epochs.append(trainer.current_epoch)
```
</issue>
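For context, the eventual fix (see the golden diff further down in this record) extends the save/reset/restore pattern that `batch_size_scaling.py` already applies to other trainer attributes so that it also covers `current_epoch`. Below is a minimal, self-contained sketch of that pattern; the `FakeTrainer` stand-in is illustrative only, not pytorch-lightning API:
```
class FakeTrainer:
    # Stand-in for pytorch_lightning.Trainer, only so the sketch runs on its own.
    current_epoch = 0
    max_steps = -1

def dump_params(trainer, names):
    # Snapshot the attributes the tuner will temporarily overwrite.
    return {name: getattr(trainer, name) for name in names}

def restore_params(trainer, saved):
    # Put the trainer back exactly as it was before the scaling trials.
    for name, value in saved.items():
        setattr(trainer, name, value)

trainer = FakeTrainer()
saved = dump_params(trainer, ["current_epoch", "max_steps"])
trainer.max_steps = 3           # take only a few steps per trial
trainer.current_epoch += 1      # simulate the side effect of one scaling trial
restore_params(trainer, saved)  # current_epoch is 0 again, so real training starts at epoch 0
```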
<code>
[start of pytorch_lightning/tuner/batch_size_scaling.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License
14 import os
15 from pytorch_lightning.core.lightning import LightningModule
16 from pytorch_lightning.utilities.data import has_len
17 from pytorch_lightning.utilities.parsing import lightning_hasattr, lightning_getattr, lightning_setattr
18 from pytorch_lightning.utilities import rank_zero_warn
19 from pytorch_lightning.utilities.exceptions import MisconfigurationException
20 from pytorch_lightning.utilities.memory import is_oom_error, garbage_collection_cuda
21 from pytorch_lightning.loggers.base import DummyLogger
22 from pytorch_lightning import _logger as log
23 from typing import Optional, Tuple
24
25
26 def scale_batch_size(trainer,
27 model: LightningModule,
28 mode: str = 'power',
29 steps_per_trial: int = 3,
30 init_val: int = 2,
31 max_trials: int = 25,
32 batch_arg_name: str = 'batch_size',
33 **fit_kwargs):
34 r"""
35 Will iteratively try to find the largest batch size for a given model
36 that does not give an out of memory (OOM) error.
37
38 Args:
39 trainer: The Trainer
40 model: Model to fit.
41
42 mode: string setting the search mode. Either `power` or `binsearch`.
43 If mode is `power` we keep multiplying the batch size by 2, until
44 we get an OOM error. If mode is 'binsearch', we will initially
45 also keep multiplying by 2 and after encountering an OOM error
46 do a binary search between the last successful batch size and the
47 batch size that failed.
48
49 steps_per_trial: number of steps to run with a given batch size.
50 Idealy 1 should be enough to test if a OOM error occurs,
51 however in practise a few are needed
52
53 init_val: initial batch size to start the search with
54
55 max_trials: max number of increase in batch size done before
56 algorithm is terminated
57
58 batch_arg_name: name of the attribute that stores the batch size.
59 It is expected that the user has provided a model or datamodule that has a hyperparameter
60 with that name. We will look for this attribute name in the following places
61
62 - `model`
63 - `model.hparams`
64 - `model.datamodule`
65 - `trainer.datamodule` (the datamodule passed to the tune method)
66
67 **fit_kwargs: remaining arguments to be passed to .fit(), e.g., dataloader
68 or datamodule.
69 """
70 if not lightning_hasattr(model, batch_arg_name):
71 raise MisconfigurationException(
72 f'Field {batch_arg_name} not found in both `model` and `model.hparams`')
73 if hasattr(model, batch_arg_name) and hasattr(model, "hparams") and batch_arg_name in model.hparams:
74 rank_zero_warn(
75 f'Field `model.{batch_arg_name}` and `model.hparams.{batch_arg_name}` are mutually exclusive!'
76 f' `model.{batch_arg_name}` will be used as the initial batch size for scaling.'
77 f' If this is not the intended behavior, please remove either one.'
78 )
79
80 if hasattr(model.train_dataloader, 'patch_loader_code'):
81 raise MisconfigurationException('The batch scaling feature cannot be used with dataloaders'
82 ' passed directly to `.fit()`. Please disable the feature or'
83 ' incorporate the dataloader into the model.')
84
85 # Arguments we adjust during the batch size finder, save for restoring
86 __scale_batch_dump_params(trainer)
87
88 # Set to values that are required by the algorithm
89 __scale_batch_reset_params(trainer, model, steps_per_trial)
90
91 # Save initial model, that is loaded after batch size is found
92 save_path = os.path.join(trainer.default_root_dir, 'temp_model.ckpt')
93 trainer.save_checkpoint(str(save_path))
94
95 if trainer.progress_bar_callback:
96 trainer.progress_bar_callback.disable()
97
98 # Initially we just double in size until an OOM is encountered
99 new_size = _adjust_batch_size(trainer, value=init_val) # initially set to init_val
100 if mode == 'power':
101 new_size = _run_power_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs)
102 elif mode == 'binsearch':
103 new_size = _run_binsearch_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs)
104 else:
105 raise ValueError('mode in method `scale_batch_size` can only be `power` or `binsearch')
106
107 garbage_collection_cuda()
108 log.info(f'Finished batch size finder, will continue with full run using batch size {new_size}')
109
110 # Restore initial state of model
111 trainer.checkpoint_connector.restore(str(save_path), on_gpu=trainer.on_gpu)
112 os.remove(save_path)
113
114 # Finish by resetting variables so trainer is ready to fit model
115 __scale_batch_restore_params(trainer)
116 if trainer.progress_bar_callback:
117 trainer.progress_bar_callback.enable()
118
119 return new_size
120
121
122 def __scale_batch_dump_params(trainer):
123 # Prevent going into infinite loop
124 trainer.__dumped_params = {
125 'auto_lr_find': trainer.auto_lr_find,
126 'max_steps': trainer.max_steps,
127 'weights_summary': trainer.weights_summary,
128 'logger': trainer.logger,
129 'callbacks': trainer.callbacks,
130 'checkpoint_callback': trainer.checkpoint_callback,
131 'early_stop_callback': trainer.early_stop_callback,
132 'auto_scale_batch_size': trainer.auto_scale_batch_size,
133 'limit_train_batches': trainer.limit_train_batches,
134 'model': trainer.model,
135 }
136
137
138 def __scale_batch_reset_params(trainer, model, steps_per_trial):
139 trainer.auto_scale_batch_size = None # prevent recursion
140 trainer.auto_lr_find = False # avoid lr find being called multiple times
141 trainer.max_steps = steps_per_trial # take few steps
142 trainer.weights_summary = None # not needed before full run
143 trainer.logger = DummyLogger()
144 trainer.callbacks = [] # not needed before full run
145 trainer.checkpoint_callback = False # required for saving
146 trainer.early_stop_callback = None
147 trainer.limit_train_batches = 1.0
148 trainer.optimizers, trainer.schedulers = [], [] # required for saving
149 trainer.model = model # required for saving
150
151
152 def __scale_batch_restore_params(trainer):
153 trainer.auto_lr_find = trainer.__dumped_params['auto_lr_find']
154 trainer.max_steps = trainer.__dumped_params['max_steps']
155 trainer.weights_summary = trainer.__dumped_params['weights_summary']
156 trainer.logger = trainer.__dumped_params['logger']
157 trainer.callbacks = trainer.__dumped_params['callbacks']
158 trainer.checkpoint_callback = trainer.__dumped_params['checkpoint_callback']
159 trainer.auto_scale_batch_size = trainer.__dumped_params['auto_scale_batch_size']
160 trainer.early_stop_callback = trainer.__dumped_params['early_stop_callback']
161 trainer.limit_train_batches = trainer.__dumped_params['limit_train_batches']
162 trainer.model = trainer.__dumped_params['model']
163 del trainer.__dumped_params
164
165
166 def _run_power_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs):
167 """ Batch scaling mode where the size is doubled at each iteration until an
168 OOM error is encountered. """
169 for _ in range(max_trials):
170 garbage_collection_cuda()
171 trainer.global_step = 0 # reset after each try
172 try:
173 # Try fit
174 trainer.fit(model, **fit_kwargs)
175 # Double in size
176 new_size, changed = _adjust_batch_size(trainer, batch_arg_name, factor=2.0, desc='succeeded')
177 except RuntimeError as exception:
178 # Only these errors should trigger an adjustment
179 if is_oom_error(exception):
180 # If we fail in power mode, half the size and return
181 garbage_collection_cuda()
182 new_size, _ = _adjust_batch_size(trainer, batch_arg_name, factor=0.5, desc='failed')
183 break
184 else:
185 raise # some other error not memory related
186
187 if not changed:
188 break
189 return new_size
190
191
192 def _run_binsearch_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs):
193 """ Batch scaling mode where the size is initially is doubled at each iteration
194 until an OOM error is encountered. Hereafter, the batch size is further
195 refined using a binary search """
196 high = None
197 count = 0
198 while True:
199 garbage_collection_cuda()
200 trainer.global_step = 0 # reset after each try
201 try:
202 # Try fit
203 trainer.fit(model, **fit_kwargs)
204 count += 1
205 if count > max_trials:
206 break
207 # Double in size
208 low = new_size
209 if high:
210 if high - low <= 1:
211 break
212 midval = (high + low) // 2
213 new_size, changed = _adjust_batch_size(trainer, batch_arg_name, value=midval, desc='succeeded')
214 else:
215 new_size, changed = _adjust_batch_size(trainer, batch_arg_name, factor=2.0, desc='succeeded')
216
217 if not changed:
218 break
219
220 except RuntimeError as exception:
221 # Only these errors should trigger an adjustment
222 if is_oom_error(exception):
223 # If we fail in power mode, half the size and return
224 garbage_collection_cuda()
225 high = new_size
226 midval = (high + low) // 2
227 new_size, _ = _adjust_batch_size(trainer, value=midval, desc='failed')
228 if high - low <= 1:
229 break
230 else:
231 raise # some other error not memory related
232
233 return new_size
234
235
236 def _adjust_batch_size(trainer,
237 batch_arg_name: str = 'batch_size',
238 factor: float = 1.0,
239 value: Optional[int] = None,
240 desc: str = None) -> Tuple[int, bool]:
241 """ Helper function for adjusting the batch size.
242
243 Args:
244 trainer: instance of pytorch_lightning.Trainer
245
246 batch_arg_name: name of the field where batch_size is stored.
247
248 factor: value which the old batch size is multiplied by to get the
249 new batch size
250
251 value: if a value is given, will override the batch size with this value.
252 Note that the value of `factor` will not have an effect in this case
253
254 desc: either `succeeded` or `failed`. Used purely for logging
255
256 Returns:
257 The new batch size for the next trial and a bool that signals whether the
258 new value is different than the previous batch size.
259 """
260 model = trainer.get_model()
261 batch_size = lightning_getattr(model, batch_arg_name)
262 new_size = value if value is not None else int(batch_size * factor)
263 if desc:
264 log.info(f'Batch size {batch_size} {desc}, trying batch size {new_size}')
265
266 if not _is_valid_batch_size(new_size, trainer.train_dataloader):
267 new_size = min(new_size, len(trainer.train_dataloader.dataset))
268
269 changed = new_size != batch_size
270 lightning_setattr(model, batch_arg_name, new_size)
271 return new_size, changed
272
273
274 def _is_valid_batch_size(current_size, dataloader):
275 return not has_len(dataloader) or current_size <= len(dataloader)
276
[end of pytorch_lightning/tuner/batch_size_scaling.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pytorch_lightning/tuner/batch_size_scaling.py b/pytorch_lightning/tuner/batch_size_scaling.py
--- a/pytorch_lightning/tuner/batch_size_scaling.py
+++ b/pytorch_lightning/tuner/batch_size_scaling.py
@@ -123,6 +123,7 @@
# Prevent going into infinite loop
trainer.__dumped_params = {
'auto_lr_find': trainer.auto_lr_find,
+ 'current_epoch': trainer.current_epoch,
'max_steps': trainer.max_steps,
'weights_summary': trainer.weights_summary,
'logger': trainer.logger,
@@ -138,6 +139,7 @@
def __scale_batch_reset_params(trainer, model, steps_per_trial):
trainer.auto_scale_batch_size = None # prevent recursion
trainer.auto_lr_find = False # avoid lr find being called multiple times
+ trainer.current_epoch = 0
trainer.max_steps = steps_per_trial # take few steps
trainer.weights_summary = None # not needed before full run
trainer.logger = DummyLogger()
@@ -151,6 +153,7 @@
def __scale_batch_restore_params(trainer):
trainer.auto_lr_find = trainer.__dumped_params['auto_lr_find']
+ trainer.current_epoch = trainer.__dumped_params['current_epoch']
trainer.max_steps = trainer.__dumped_params['max_steps']
trainer.weights_summary = trainer.__dumped_params['weights_summary']
trainer.logger = trainer.__dumped_params['logger']
|
{"golden_diff": "diff --git a/pytorch_lightning/tuner/batch_size_scaling.py b/pytorch_lightning/tuner/batch_size_scaling.py\n--- a/pytorch_lightning/tuner/batch_size_scaling.py\n+++ b/pytorch_lightning/tuner/batch_size_scaling.py\n@@ -123,6 +123,7 @@\n # Prevent going into infinite loop\n trainer.__dumped_params = {\n 'auto_lr_find': trainer.auto_lr_find,\n+ 'current_epoch': trainer.current_epoch,\n 'max_steps': trainer.max_steps,\n 'weights_summary': trainer.weights_summary,\n 'logger': trainer.logger,\n@@ -138,6 +139,7 @@\n def __scale_batch_reset_params(trainer, model, steps_per_trial):\n trainer.auto_scale_batch_size = None # prevent recursion\n trainer.auto_lr_find = False # avoid lr find being called multiple times\n+ trainer.current_epoch = 0\n trainer.max_steps = steps_per_trial # take few steps\n trainer.weights_summary = None # not needed before full run\n trainer.logger = DummyLogger()\n@@ -151,6 +153,7 @@\n \n def __scale_batch_restore_params(trainer):\n trainer.auto_lr_find = trainer.__dumped_params['auto_lr_find']\n+ trainer.current_epoch = trainer.__dumped_params['current_epoch']\n trainer.max_steps = trainer.__dumped_params['max_steps']\n trainer.weights_summary = trainer.__dumped_params['weights_summary']\n trainer.logger = trainer.__dumped_params['logger']\n", "issue": "auto_scale_batch_size won't reset current_epoch\n## \ud83d\udc1b Bug\r\n\r\nWhen `auto_scale_batch_size` is enabled, the model is initially trained with varying batch sizes. When training begins, `trainer.current_epoch` equals 1 instead of 0.\r\n\r\n### To Reproduce\r\n\r\nEither observe the progress bar or use a simple callback to track the epoch number, once with `auto_scale_batch_size` enabled and once with `auto_scale_batch_size` disabled.\r\n\r\n```\r\nfrom pytorch_lightning import Callback\r\n\r\nclass PrintCallback(Callback):\r\n \r\n def __init__(self):\r\n self.observed_epochs = []\r\n \r\n def on_train_epoch_start(self, trainer, pl_module):\r\n print(f'Current Epoch: {trainer.current_epoch}')\r\n self.observed_epochs.append(trainer.current_epoch)\r\n\r\n```\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\nimport os\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom pytorch_lightning.utilities.data import has_len\nfrom pytorch_lightning.utilities.parsing import lightning_hasattr, lightning_getattr, lightning_setattr\nfrom pytorch_lightning.utilities import rank_zero_warn\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.memory import is_oom_error, garbage_collection_cuda\nfrom pytorch_lightning.loggers.base import DummyLogger\nfrom pytorch_lightning import _logger as log\nfrom typing import Optional, Tuple\n\n\ndef scale_batch_size(trainer,\n model: LightningModule,\n mode: str = 'power',\n steps_per_trial: int = 3,\n init_val: int = 2,\n max_trials: int = 25,\n batch_arg_name: str = 'batch_size',\n **fit_kwargs):\n r\"\"\"\n Will iteratively try 
to find the largest batch size for a given model\n that does not give an out of memory (OOM) error.\n\n Args:\n trainer: The Trainer\n model: Model to fit.\n\n mode: string setting the search mode. Either `power` or `binsearch`.\n If mode is `power` we keep multiplying the batch size by 2, until\n we get an OOM error. If mode is 'binsearch', we will initially\n also keep multiplying by 2 and after encountering an OOM error\n do a binary search between the last successful batch size and the\n batch size that failed.\n\n steps_per_trial: number of steps to run with a given batch size.\n Idealy 1 should be enough to test if a OOM error occurs,\n however in practise a few are needed\n\n init_val: initial batch size to start the search with\n\n max_trials: max number of increase in batch size done before\n algorithm is terminated\n\n batch_arg_name: name of the attribute that stores the batch size.\n It is expected that the user has provided a model or datamodule that has a hyperparameter\n with that name. We will look for this attribute name in the following places\n\n - `model`\n - `model.hparams`\n - `model.datamodule`\n - `trainer.datamodule` (the datamodule passed to the tune method)\n\n **fit_kwargs: remaining arguments to be passed to .fit(), e.g., dataloader\n or datamodule.\n \"\"\"\n if not lightning_hasattr(model, batch_arg_name):\n raise MisconfigurationException(\n f'Field {batch_arg_name} not found in both `model` and `model.hparams`')\n if hasattr(model, batch_arg_name) and hasattr(model, \"hparams\") and batch_arg_name in model.hparams:\n rank_zero_warn(\n f'Field `model.{batch_arg_name}` and `model.hparams.{batch_arg_name}` are mutually exclusive!'\n f' `model.{batch_arg_name}` will be used as the initial batch size for scaling.'\n f' If this is not the intended behavior, please remove either one.'\n )\n\n if hasattr(model.train_dataloader, 'patch_loader_code'):\n raise MisconfigurationException('The batch scaling feature cannot be used with dataloaders'\n ' passed directly to `.fit()`. 
Please disable the feature or'\n ' incorporate the dataloader into the model.')\n\n # Arguments we adjust during the batch size finder, save for restoring\n __scale_batch_dump_params(trainer)\n\n # Set to values that are required by the algorithm\n __scale_batch_reset_params(trainer, model, steps_per_trial)\n\n # Save initial model, that is loaded after batch size is found\n save_path = os.path.join(trainer.default_root_dir, 'temp_model.ckpt')\n trainer.save_checkpoint(str(save_path))\n\n if trainer.progress_bar_callback:\n trainer.progress_bar_callback.disable()\n\n # Initially we just double in size until an OOM is encountered\n new_size = _adjust_batch_size(trainer, value=init_val) # initially set to init_val\n if mode == 'power':\n new_size = _run_power_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs)\n elif mode == 'binsearch':\n new_size = _run_binsearch_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs)\n else:\n raise ValueError('mode in method `scale_batch_size` can only be `power` or `binsearch')\n\n garbage_collection_cuda()\n log.info(f'Finished batch size finder, will continue with full run using batch size {new_size}')\n\n # Restore initial state of model\n trainer.checkpoint_connector.restore(str(save_path), on_gpu=trainer.on_gpu)\n os.remove(save_path)\n\n # Finish by resetting variables so trainer is ready to fit model\n __scale_batch_restore_params(trainer)\n if trainer.progress_bar_callback:\n trainer.progress_bar_callback.enable()\n\n return new_size\n\n\ndef __scale_batch_dump_params(trainer):\n # Prevent going into infinite loop\n trainer.__dumped_params = {\n 'auto_lr_find': trainer.auto_lr_find,\n 'max_steps': trainer.max_steps,\n 'weights_summary': trainer.weights_summary,\n 'logger': trainer.logger,\n 'callbacks': trainer.callbacks,\n 'checkpoint_callback': trainer.checkpoint_callback,\n 'early_stop_callback': trainer.early_stop_callback,\n 'auto_scale_batch_size': trainer.auto_scale_batch_size,\n 'limit_train_batches': trainer.limit_train_batches,\n 'model': trainer.model,\n }\n\n\ndef __scale_batch_reset_params(trainer, model, steps_per_trial):\n trainer.auto_scale_batch_size = None # prevent recursion\n trainer.auto_lr_find = False # avoid lr find being called multiple times\n trainer.max_steps = steps_per_trial # take few steps\n trainer.weights_summary = None # not needed before full run\n trainer.logger = DummyLogger()\n trainer.callbacks = [] # not needed before full run\n trainer.checkpoint_callback = False # required for saving\n trainer.early_stop_callback = None\n trainer.limit_train_batches = 1.0\n trainer.optimizers, trainer.schedulers = [], [] # required for saving\n trainer.model = model # required for saving\n\n\ndef __scale_batch_restore_params(trainer):\n trainer.auto_lr_find = trainer.__dumped_params['auto_lr_find']\n trainer.max_steps = trainer.__dumped_params['max_steps']\n trainer.weights_summary = trainer.__dumped_params['weights_summary']\n trainer.logger = trainer.__dumped_params['logger']\n trainer.callbacks = trainer.__dumped_params['callbacks']\n trainer.checkpoint_callback = trainer.__dumped_params['checkpoint_callback']\n trainer.auto_scale_batch_size = trainer.__dumped_params['auto_scale_batch_size']\n trainer.early_stop_callback = trainer.__dumped_params['early_stop_callback']\n trainer.limit_train_batches = trainer.__dumped_params['limit_train_batches']\n trainer.model = trainer.__dumped_params['model']\n del trainer.__dumped_params\n\n\ndef _run_power_scaling(trainer, model, 
new_size, batch_arg_name, max_trials, **fit_kwargs):\n \"\"\" Batch scaling mode where the size is doubled at each iteration until an\n OOM error is encountered. \"\"\"\n for _ in range(max_trials):\n garbage_collection_cuda()\n trainer.global_step = 0 # reset after each try\n try:\n # Try fit\n trainer.fit(model, **fit_kwargs)\n # Double in size\n new_size, changed = _adjust_batch_size(trainer, batch_arg_name, factor=2.0, desc='succeeded')\n except RuntimeError as exception:\n # Only these errors should trigger an adjustment\n if is_oom_error(exception):\n # If we fail in power mode, half the size and return\n garbage_collection_cuda()\n new_size, _ = _adjust_batch_size(trainer, batch_arg_name, factor=0.5, desc='failed')\n break\n else:\n raise # some other error not memory related\n\n if not changed:\n break\n return new_size\n\n\ndef _run_binsearch_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs):\n \"\"\" Batch scaling mode where the size is initially is doubled at each iteration\n until an OOM error is encountered. Hereafter, the batch size is further\n refined using a binary search \"\"\"\n high = None\n count = 0\n while True:\n garbage_collection_cuda()\n trainer.global_step = 0 # reset after each try\n try:\n # Try fit\n trainer.fit(model, **fit_kwargs)\n count += 1\n if count > max_trials:\n break\n # Double in size\n low = new_size\n if high:\n if high - low <= 1:\n break\n midval = (high + low) // 2\n new_size, changed = _adjust_batch_size(trainer, batch_arg_name, value=midval, desc='succeeded')\n else:\n new_size, changed = _adjust_batch_size(trainer, batch_arg_name, factor=2.0, desc='succeeded')\n\n if not changed:\n break\n\n except RuntimeError as exception:\n # Only these errors should trigger an adjustment\n if is_oom_error(exception):\n # If we fail in power mode, half the size and return\n garbage_collection_cuda()\n high = new_size\n midval = (high + low) // 2\n new_size, _ = _adjust_batch_size(trainer, value=midval, desc='failed')\n if high - low <= 1:\n break\n else:\n raise # some other error not memory related\n\n return new_size\n\n\ndef _adjust_batch_size(trainer,\n batch_arg_name: str = 'batch_size',\n factor: float = 1.0,\n value: Optional[int] = None,\n desc: str = None) -> Tuple[int, bool]:\n \"\"\" Helper function for adjusting the batch size.\n\n Args:\n trainer: instance of pytorch_lightning.Trainer\n\n batch_arg_name: name of the field where batch_size is stored.\n\n factor: value which the old batch size is multiplied by to get the\n new batch size\n\n value: if a value is given, will override the batch size with this value.\n Note that the value of `factor` will not have an effect in this case\n\n desc: either `succeeded` or `failed`. 
Used purely for logging\n\n Returns:\n The new batch size for the next trial and a bool that signals whether the\n new value is different than the previous batch size.\n \"\"\"\n model = trainer.get_model()\n batch_size = lightning_getattr(model, batch_arg_name)\n new_size = value if value is not None else int(batch_size * factor)\n if desc:\n log.info(f'Batch size {batch_size} {desc}, trying batch size {new_size}')\n\n if not _is_valid_batch_size(new_size, trainer.train_dataloader):\n new_size = min(new_size, len(trainer.train_dataloader.dataset))\n\n changed = new_size != batch_size\n lightning_setattr(model, batch_arg_name, new_size)\n return new_size, changed\n\n\ndef _is_valid_batch_size(current_size, dataloader):\n return not has_len(dataloader) or current_size <= len(dataloader)\n", "path": "pytorch_lightning/tuner/batch_size_scaling.py"}]}
| 4,035 | 330 |
gh_patches_debug_794
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-3650
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tifffile: try to use the one in the user's install first
Should we try importing tifffile before using the one we versioned?
</issue>
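For reference, a minimal sketch of the "prefer the user's installed package, fall back to the bundled copy" import pattern the issue is asking about (module paths are illustrative; the vendored copy lives under `skimage.external` in this codebase):
```
try:
    # Use the tifffile the user installed themselves, if available.
    from tifffile import TiffFile, imsave, parse_kwargs
except ImportError:
    # Otherwise fall back to the copy bundled with scikit-image.
    from skimage.external.tifffile import TiffFile, imsave, parse_kwargs
```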
<code>
[start of skimage/io/_plugins/tifffile_plugin.py]
1 from ...external.tifffile import TiffFile, imsave, parse_kwargs
2
3
4 def imread(fname, dtype=None, **kwargs):
5 """Load a tiff image from file.
6
7 Parameters
8 ----------
9 fname : str or file
10 File name or file-like-object.
11 dtype : numpy dtype object or string specifier
12 Specifies data type of array elements (Not currently used).
13 kwargs : keyword pairs, optional
14 Additional keyword arguments to pass through (see ``tifffile``'s
15 ``imread`` function).
16
17 Notes
18 -----
19 Provided by Christophe Golhke's tifffile.py [1]_, and supports many
20 advanced image types including multi-page and floating point.
21
22 References
23 ----------
24 .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py
25
26 """
27
28 if 'img_num' in kwargs:
29 kwargs['key'] = kwargs.pop('img_num')
30
31 # parse_kwargs will extract keyword arguments intended for the TiffFile
32 # class and remove them from the kwargs dictionary in-place
33 tiff_keys = ['multifile', 'multifile_close', 'pages', 'fastij', 'is_ome']
34 kwargs_tiff = parse_kwargs(kwargs, *tiff_keys)
35
36 # read and return tiff as numpy array
37 with TiffFile(fname, **kwargs_tiff) as tif:
38 return tif.asarray(**kwargs)
39
[end of skimage/io/_plugins/tifffile_plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py
--- a/skimage/io/_plugins/tifffile_plugin.py
+++ b/skimage/io/_plugins/tifffile_plugin.py
@@ -1,4 +1,7 @@
-from ...external.tifffile import TiffFile, imsave, parse_kwargs
+try:
+ from tifffile import TiffFile, imsave, parse_kwargs
+except ImportError:
+ from ...external.tifffile import TiffFile, imsave, parse_kwargs
def imread(fname, dtype=None, **kwargs):
|
{"golden_diff": "diff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py\n--- a/skimage/io/_plugins/tifffile_plugin.py\n+++ b/skimage/io/_plugins/tifffile_plugin.py\n@@ -1,4 +1,7 @@\n-from ...external.tifffile import TiffFile, imsave, parse_kwargs\n+try:\n+ from tifffile import TiffFile, imsave, parse_kwargs\n+except ImportError:\n+ from ...external.tifffile import TiffFile, imsave, parse_kwargs\n \n \n def imread(fname, dtype=None, **kwargs):\n", "issue": "tifffile: try to use the one in the user's install first\nShould we try importing tifffile before using the one we versionned it?\n", "before_files": [{"content": "from ...external.tifffile import TiffFile, imsave, parse_kwargs\n\n\ndef imread(fname, dtype=None, **kwargs):\n \"\"\"Load a tiff image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n dtype : numpy dtype object or string specifier\n Specifies data type of array elements (Not currently used).\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imread`` function).\n\n Notes\n -----\n Provided by Christophe Golhke's tifffile.py [1]_, and supports many\n advanced image types including multi-page and floating point.\n\n References\n ----------\n .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py\n\n \"\"\"\n\n if 'img_num' in kwargs:\n kwargs['key'] = kwargs.pop('img_num')\n\n # parse_kwargs will extract keyword arguments intended for the TiffFile \n # class and remove them from the kwargs dictionary in-place\n tiff_keys = ['multifile', 'multifile_close', 'pages', 'fastij', 'is_ome']\n kwargs_tiff = parse_kwargs(kwargs, *tiff_keys)\n\n # read and return tiff as numpy array\n with TiffFile(fname, **kwargs_tiff) as tif:\n return tif.asarray(**kwargs)\n", "path": "skimage/io/_plugins/tifffile_plugin.py"}]}
| 962 | 142 |
gh_patches_debug_6145
|
rasdani/github-patches
|
git_diff
|
cisagov__manage.get.gov-1743
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Submission dates for fixture domain requests are blank
### Current Behavior
For applications generated by fixtures, submission dates are not showing because they are not set to begin with

### Expected Behavior
Submission dates are shown
### Steps to Reproduce
1. Logon as Domain Manager with an account associated with fixtures
2. View Domain request table
3. Date submitted column is blank (for any requests from fixtures)
### Environment
Development sandbox
### Additional Context
Per Alysia, submission date should be added to fixtures for the fake data
### Issue Links
_No response_
</issue>
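For reference, Faker can generate a plausible date string for the missing field; a minimal sketch (assigning the result to the domain application's `submission_date` inside the fixture helper is the kind of change implied here):
```
from faker import Faker

fake = Faker()
submission_date = fake.date()  # 'YYYY-MM-DD' string, e.g. '2021-06-15'
print(submission_date)
```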
<code>
[start of src/registrar/fixtures_applications.py]
1 import logging
2 import random
3 from faker import Faker
4
5 from registrar.models import (
6 User,
7 DomainApplication,
8 DraftDomain,
9 Contact,
10 Website,
11 )
12
13 fake = Faker()
14 logger = logging.getLogger(__name__)
15
16
17 class DomainApplicationFixture:
18 """
19 Load domain applications into the database.
20
21 Make sure this class' `load` method is called from `handle`
22 in management/commands/load.py, then use `./manage.py load`
23 to run this code.
24 """
25
26 # any fields not specified here will be filled in with fake data or defaults
27 # NOTE BENE: each fixture must have `organization_name` for uniqueness!
28 # Here is a more complete example as a template:
29 # {
30 # "status": "started",
31 # "organization_name": "Example - Just started",
32 # "organization_type": "federal",
33 # "federal_agency": None,
34 # "federal_type": None,
35 # "address_line1": None,
36 # "address_line2": None,
37 # "city": None,
38 # "state_territory": None,
39 # "zipcode": None,
40 # "urbanization": None,
41 # "purpose": None,
42 # "anything_else": None,
43 # "is_policy_acknowledged": None,
44 # "authorizing_official": None,
45 # "submitter": None,
46 # "other_contacts": [],
47 # "current_websites": [],
48 # "alternative_domains": [],
49 # },
50 DA = [
51 {
52 "status": DomainApplication.ApplicationStatus.STARTED,
53 "organization_name": "Example - Finished but not submitted",
54 },
55 {
56 "status": DomainApplication.ApplicationStatus.SUBMITTED,
57 "organization_name": "Example - Submitted but pending investigation",
58 },
59 {
60 "status": DomainApplication.ApplicationStatus.IN_REVIEW,
61 "organization_name": "Example - In investigation",
62 },
63 {
64 "status": DomainApplication.ApplicationStatus.IN_REVIEW,
65 "organization_name": "Example - Approved",
66 },
67 {
68 "status": DomainApplication.ApplicationStatus.WITHDRAWN,
69 "organization_name": "Example - Withdrawn",
70 },
71 {
72 "status": DomainApplication.ApplicationStatus.ACTION_NEEDED,
73 "organization_name": "Example - Action needed",
74 },
75 {
76 "status": "rejected",
77 "organization_name": "Example - Rejected",
78 },
79 ]
80
81 @classmethod
82 def fake_contact(cls):
83 return {
84 "first_name": fake.first_name(),
85 "middle_name": None,
86 "last_name": fake.last_name(),
87 "title": fake.job(),
88 "email": fake.ascii_safe_email(),
89 "phone": "201-555-5555",
90 }
91
92 @classmethod
93 def fake_dot_gov(cls):
94 return f"{fake.slug()}.gov"
95
96 @classmethod
97 def _set_non_foreign_key_fields(cls, da: DomainApplication, app: dict):
98 """Helper method used by `load`."""
99 da.status = app["status"] if "status" in app else "started"
100 da.organization_type = app["organization_type"] if "organization_type" in app else "federal"
101 da.federal_agency = (
102 app["federal_agency"]
103 if "federal_agency" in app
104 # Random choice of agency for selects, used as placeholders for testing.
105 else random.choice(DomainApplication.AGENCIES) # nosec
106 )
107
108 da.federal_type = (
109 app["federal_type"]
110 if "federal_type" in app
111 else random.choice(["executive", "judicial", "legislative"]) # nosec
112 )
113 da.address_line1 = app["address_line1"] if "address_line1" in app else fake.street_address()
114 da.address_line2 = app["address_line2"] if "address_line2" in app else None
115 da.city = app["city"] if "city" in app else fake.city()
116 da.state_territory = app["state_territory"] if "state_territory" in app else fake.state_abbr()
117 da.zipcode = app["zipcode"] if "zipcode" in app else fake.postalcode()
118 da.urbanization = app["urbanization"] if "urbanization" in app else None
119 da.purpose = app["purpose"] if "purpose" in app else fake.paragraph()
120 da.anything_else = app["anything_else"] if "anything_else" in app else None
121 da.is_policy_acknowledged = app["is_policy_acknowledged"] if "is_policy_acknowledged" in app else True
122
123 @classmethod
124 def _set_foreign_key_fields(cls, da: DomainApplication, app: dict, user: User):
125 """Helper method used by `load`."""
126 if not da.investigator:
127 da.investigator = User.objects.get(username=user.username) if "investigator" in app else None
128
129 if not da.authorizing_official:
130 if "authorizing_official" in app and app["authorizing_official"] is not None:
131 da.authorizing_official, _ = Contact.objects.get_or_create(**app["authorizing_official"])
132 else:
133 da.authorizing_official = Contact.objects.create(**cls.fake_contact())
134
135 if not da.submitter:
136 if "submitter" in app and app["submitter"] is not None:
137 da.submitter, _ = Contact.objects.get_or_create(**app["submitter"])
138 else:
139 da.submitter = Contact.objects.create(**cls.fake_contact())
140
141 if not da.requested_domain:
142 if "requested_domain" in app and app["requested_domain"] is not None:
143 da.requested_domain, _ = DraftDomain.objects.get_or_create(name=app["requested_domain"])
144 else:
145 da.requested_domain = DraftDomain.objects.create(name=cls.fake_dot_gov())
146
147 @classmethod
148 def _set_many_to_many_relations(cls, da: DomainApplication, app: dict):
149 """Helper method used by `load`."""
150 if "other_contacts" in app:
151 for contact in app["other_contacts"]:
152 da.other_contacts.add(Contact.objects.get_or_create(**contact)[0])
153 elif not da.other_contacts.exists():
154 other_contacts = [
155 Contact.objects.create(**cls.fake_contact()) for _ in range(random.randint(0, 3)) # nosec
156 ]
157 da.other_contacts.add(*other_contacts)
158
159 if "current_websites" in app:
160 for website in app["current_websites"]:
161 da.current_websites.add(Website.objects.get_or_create(website=website)[0])
162 elif not da.current_websites.exists():
163 current_websites = [
164 Website.objects.create(website=fake.uri()) for _ in range(random.randint(0, 3)) # nosec
165 ]
166 da.current_websites.add(*current_websites)
167
168 if "alternative_domains" in app:
169 for domain in app["alternative_domains"]:
170 da.alternative_domains.add(Website.objects.get_or_create(website=domain)[0])
171 elif not da.alternative_domains.exists():
172 alternative_domains = [
173 Website.objects.create(website=cls.fake_dot_gov()) for _ in range(random.randint(0, 3)) # nosec
174 ]
175 da.alternative_domains.add(*alternative_domains)
176
177 @classmethod
178 def load(cls):
179 """Creates domain applications for each user in the database."""
180 logger.info("Going to load %s domain applications" % len(cls.DA))
181 try:
182 users = list(User.objects.all()) # force evaluation to catch db errors
183 except Exception as e:
184 logger.warning(e)
185 return
186
187 for user in users:
188 logger.debug("Loading domain applications for %s" % user)
189 for app in cls.DA:
190 try:
191 da, _ = DomainApplication.objects.get_or_create(
192 creator=user,
193 organization_name=app["organization_name"],
194 )
195 cls._set_non_foreign_key_fields(da, app)
196 cls._set_foreign_key_fields(da, app, user)
197 da.save()
198 cls._set_many_to_many_relations(da, app)
199 except Exception as e:
200 logger.warning(e)
201
202
203 class DomainFixture(DomainApplicationFixture):
204
205 """Create one domain and permissions on it for each user."""
206
207 @classmethod
208 def load(cls):
209 try:
210 users = list(User.objects.all()) # force evaluation to catch db errors
211 except Exception as e:
212 logger.warning(e)
213 return
214
215 for user in users:
216 # approve one of each users in review status domains
217 application = DomainApplication.objects.filter(
218 creator=user, status=DomainApplication.ApplicationStatus.IN_REVIEW
219 ).last()
220 logger.debug(f"Approving {application} for {user}")
221
222 # We don't want fixtures sending out real emails to
223 # fake email addresses, so we just skip that and log it instead
224 application.approve(send_email=False)
225 application.save()
226
[end of src/registrar/fixtures_applications.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/registrar/fixtures_applications.py b/src/registrar/fixtures_applications.py
--- a/src/registrar/fixtures_applications.py
+++ b/src/registrar/fixtures_applications.py
@@ -104,7 +104,7 @@
# Random choice of agency for selects, used as placeholders for testing.
else random.choice(DomainApplication.AGENCIES) # nosec
)
-
+ da.submission_date = fake.date()
da.federal_type = (
app["federal_type"]
if "federal_type" in app
|
{"golden_diff": "diff --git a/src/registrar/fixtures_applications.py b/src/registrar/fixtures_applications.py\n--- a/src/registrar/fixtures_applications.py\n+++ b/src/registrar/fixtures_applications.py\n@@ -104,7 +104,7 @@\n # Random choice of agency for selects, used as placeholders for testing.\n else random.choice(DomainApplication.AGENCIES) # nosec\n )\n-\n+ da.submission_date = fake.date()\n da.federal_type = (\n app[\"federal_type\"]\n if \"federal_type\" in app\n", "issue": "Submission dates for fixture domain requests are blank\n### Current Behavior\r\n\r\nFor applications generated by fixtures, submission dates are not showing because they are not set to begin with\r\n\r\n\r\n\r\n\r\n### Expected Behavior\r\n\r\nSubmission dates are shown\r\n\r\n### Steps to Reproduce\r\n\r\n1. Logon as Domain Manager with an account associated with fixtures\r\n2. View Domain request table\r\n3. Date submitted column is blank (for any requests from fixtures)\r\n\r\n\r\n### Environment\r\n\r\nDevelopment sandbox\r\n\r\n### Additional Context\r\n\r\nPer Alysia, submission date should be added to fixtures for the fake data\r\n\r\n### Issue Links\r\n\r\n_No response_\n", "before_files": [{"content": "import logging\nimport random\nfrom faker import Faker\n\nfrom registrar.models import (\n User,\n DomainApplication,\n DraftDomain,\n Contact,\n Website,\n)\n\nfake = Faker()\nlogger = logging.getLogger(__name__)\n\n\nclass DomainApplicationFixture:\n \"\"\"\n Load domain applications into the database.\n\n Make sure this class' `load` method is called from `handle`\n in management/commands/load.py, then use `./manage.py load`\n to run this code.\n \"\"\"\n\n # any fields not specified here will be filled in with fake data or defaults\n # NOTE BENE: each fixture must have `organization_name` for uniqueness!\n # Here is a more complete example as a template:\n # {\n # \"status\": \"started\",\n # \"organization_name\": \"Example - Just started\",\n # \"organization_type\": \"federal\",\n # \"federal_agency\": None,\n # \"federal_type\": None,\n # \"address_line1\": None,\n # \"address_line2\": None,\n # \"city\": None,\n # \"state_territory\": None,\n # \"zipcode\": None,\n # \"urbanization\": None,\n # \"purpose\": None,\n # \"anything_else\": None,\n # \"is_policy_acknowledged\": None,\n # \"authorizing_official\": None,\n # \"submitter\": None,\n # \"other_contacts\": [],\n # \"current_websites\": [],\n # \"alternative_domains\": [],\n # },\n DA = [\n {\n \"status\": DomainApplication.ApplicationStatus.STARTED,\n \"organization_name\": \"Example - Finished but not submitted\",\n },\n {\n \"status\": DomainApplication.ApplicationStatus.SUBMITTED,\n \"organization_name\": \"Example - Submitted but pending investigation\",\n },\n {\n \"status\": DomainApplication.ApplicationStatus.IN_REVIEW,\n \"organization_name\": \"Example - In investigation\",\n },\n {\n \"status\": DomainApplication.ApplicationStatus.IN_REVIEW,\n \"organization_name\": \"Example - Approved\",\n },\n {\n \"status\": DomainApplication.ApplicationStatus.WITHDRAWN,\n \"organization_name\": \"Example - Withdrawn\",\n },\n {\n \"status\": DomainApplication.ApplicationStatus.ACTION_NEEDED,\n \"organization_name\": \"Example - Action needed\",\n },\n {\n \"status\": \"rejected\",\n \"organization_name\": \"Example - Rejected\",\n },\n ]\n\n @classmethod\n def fake_contact(cls):\n return {\n \"first_name\": fake.first_name(),\n \"middle_name\": None,\n \"last_name\": fake.last_name(),\n \"title\": fake.job(),\n \"email\": 
fake.ascii_safe_email(),\n \"phone\": \"201-555-5555\",\n }\n\n @classmethod\n def fake_dot_gov(cls):\n return f\"{fake.slug()}.gov\"\n\n @classmethod\n def _set_non_foreign_key_fields(cls, da: DomainApplication, app: dict):\n \"\"\"Helper method used by `load`.\"\"\"\n da.status = app[\"status\"] if \"status\" in app else \"started\"\n da.organization_type = app[\"organization_type\"] if \"organization_type\" in app else \"federal\"\n da.federal_agency = (\n app[\"federal_agency\"]\n if \"federal_agency\" in app\n # Random choice of agency for selects, used as placeholders for testing.\n else random.choice(DomainApplication.AGENCIES) # nosec\n )\n\n da.federal_type = (\n app[\"federal_type\"]\n if \"federal_type\" in app\n else random.choice([\"executive\", \"judicial\", \"legislative\"]) # nosec\n )\n da.address_line1 = app[\"address_line1\"] if \"address_line1\" in app else fake.street_address()\n da.address_line2 = app[\"address_line2\"] if \"address_line2\" in app else None\n da.city = app[\"city\"] if \"city\" in app else fake.city()\n da.state_territory = app[\"state_territory\"] if \"state_territory\" in app else fake.state_abbr()\n da.zipcode = app[\"zipcode\"] if \"zipcode\" in app else fake.postalcode()\n da.urbanization = app[\"urbanization\"] if \"urbanization\" in app else None\n da.purpose = app[\"purpose\"] if \"purpose\" in app else fake.paragraph()\n da.anything_else = app[\"anything_else\"] if \"anything_else\" in app else None\n da.is_policy_acknowledged = app[\"is_policy_acknowledged\"] if \"is_policy_acknowledged\" in app else True\n\n @classmethod\n def _set_foreign_key_fields(cls, da: DomainApplication, app: dict, user: User):\n \"\"\"Helper method used by `load`.\"\"\"\n if not da.investigator:\n da.investigator = User.objects.get(username=user.username) if \"investigator\" in app else None\n\n if not da.authorizing_official:\n if \"authorizing_official\" in app and app[\"authorizing_official\"] is not None:\n da.authorizing_official, _ = Contact.objects.get_or_create(**app[\"authorizing_official\"])\n else:\n da.authorizing_official = Contact.objects.create(**cls.fake_contact())\n\n if not da.submitter:\n if \"submitter\" in app and app[\"submitter\"] is not None:\n da.submitter, _ = Contact.objects.get_or_create(**app[\"submitter\"])\n else:\n da.submitter = Contact.objects.create(**cls.fake_contact())\n\n if not da.requested_domain:\n if \"requested_domain\" in app and app[\"requested_domain\"] is not None:\n da.requested_domain, _ = DraftDomain.objects.get_or_create(name=app[\"requested_domain\"])\n else:\n da.requested_domain = DraftDomain.objects.create(name=cls.fake_dot_gov())\n\n @classmethod\n def _set_many_to_many_relations(cls, da: DomainApplication, app: dict):\n \"\"\"Helper method used by `load`.\"\"\"\n if \"other_contacts\" in app:\n for contact in app[\"other_contacts\"]:\n da.other_contacts.add(Contact.objects.get_or_create(**contact)[0])\n elif not da.other_contacts.exists():\n other_contacts = [\n Contact.objects.create(**cls.fake_contact()) for _ in range(random.randint(0, 3)) # nosec\n ]\n da.other_contacts.add(*other_contacts)\n\n if \"current_websites\" in app:\n for website in app[\"current_websites\"]:\n da.current_websites.add(Website.objects.get_or_create(website=website)[0])\n elif not da.current_websites.exists():\n current_websites = [\n Website.objects.create(website=fake.uri()) for _ in range(random.randint(0, 3)) # nosec\n ]\n da.current_websites.add(*current_websites)\n\n if \"alternative_domains\" in app:\n for domain in 
app[\"alternative_domains\"]:\n da.alternative_domains.add(Website.objects.get_or_create(website=domain)[0])\n elif not da.alternative_domains.exists():\n alternative_domains = [\n Website.objects.create(website=cls.fake_dot_gov()) for _ in range(random.randint(0, 3)) # nosec\n ]\n da.alternative_domains.add(*alternative_domains)\n\n @classmethod\n def load(cls):\n \"\"\"Creates domain applications for each user in the database.\"\"\"\n logger.info(\"Going to load %s domain applications\" % len(cls.DA))\n try:\n users = list(User.objects.all()) # force evaluation to catch db errors\n except Exception as e:\n logger.warning(e)\n return\n\n for user in users:\n logger.debug(\"Loading domain applications for %s\" % user)\n for app in cls.DA:\n try:\n da, _ = DomainApplication.objects.get_or_create(\n creator=user,\n organization_name=app[\"organization_name\"],\n )\n cls._set_non_foreign_key_fields(da, app)\n cls._set_foreign_key_fields(da, app, user)\n da.save()\n cls._set_many_to_many_relations(da, app)\n except Exception as e:\n logger.warning(e)\n\n\nclass DomainFixture(DomainApplicationFixture):\n\n \"\"\"Create one domain and permissions on it for each user.\"\"\"\n\n @classmethod\n def load(cls):\n try:\n users = list(User.objects.all()) # force evaluation to catch db errors\n except Exception as e:\n logger.warning(e)\n return\n\n for user in users:\n # approve one of each users in review status domains\n application = DomainApplication.objects.filter(\n creator=user, status=DomainApplication.ApplicationStatus.IN_REVIEW\n ).last()\n logger.debug(f\"Approving {application} for {user}\")\n\n # We don't want fixtures sending out real emails to\n # fake email addresses, so we just skip that and log it instead\n application.approve(send_email=False)\n application.save()\n", "path": "src/registrar/fixtures_applications.py"}]}
| 3,237 | 123 |
gh_patches_debug_54541
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-1148
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support jinja expression statements
## Feature
### Feature description
http://jinja.pocoo.org/docs/2.10/extensions/#expression-statement
Presently, we hack expressions with:
```
{% set _ = my_dict.update({"a": 1, "b": 2}) %}
```
Instead, the jinja expression statement will make it possible to write:
```
{% do my_dict.update({"a": 1, "b": 2}) %}
```
This is a minor difference, but it will make jinja sql more readable and idiomatic.
### Who will this benefit?
jinja writers
</issue>
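For reference, `{% do %}` is provided by Jinja's built-in expression-statement extension (`jinja2.ext.do`); a minimal standalone sketch, independent of dbt's own environment setup:
```
import jinja2

# Enabling the extension lets templates call methods purely for their side effects.
env = jinja2.Environment(extensions=["jinja2.ext.do"])
template = env.from_string('{% set d = {} %}{% do d.update({"a": 1, "b": 2}) %}{{ d }}')
print(template.render())  # {'a': 1, 'b': 2}
```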
<code>
[start of dbt/clients/jinja.py]
1 import codecs
2 import linecache
3 import os
4
5 import jinja2
6 import jinja2._compat
7 import jinja2.ext
8 import jinja2.nodes
9 import jinja2.parser
10 import jinja2.sandbox
11
12 import dbt.compat
13 import dbt.exceptions
14
15 from dbt.node_types import NodeType
16 from dbt.utils import AttrDict
17
18 from dbt.logger import GLOBAL_LOGGER as logger # noqa
19
20
21 class MacroFuzzParser(jinja2.parser.Parser):
22 def parse_macro(self):
23 node = jinja2.nodes.Macro(lineno=next(self.stream).lineno)
24
25 # modified to fuzz macros defined in the same file. this way
26 # dbt can understand the stack of macros being called.
27 # - @cmcarthur
28 node.name = dbt.utils.get_dbt_macro_name(
29 self.parse_assign_target(name_only=True).name)
30
31 self.parse_signature(node)
32 node.body = self.parse_statements(('name:endmacro',),
33 drop_needle=True)
34 return node
35
36
37 class MacroFuzzEnvironment(jinja2.sandbox.SandboxedEnvironment):
38 def _parse(self, source, name, filename):
39 return MacroFuzzParser(
40 self, source, name,
41 jinja2._compat.encode_filename(filename)
42 ).parse()
43
44 def _compile(self, source, filename):
45 """Override jinja's compilation to stash the rendered source inside
46 the python linecache for debugging.
47 """
48 if filename == '<template>':
49 # make a better filename
50 filename = 'dbt-{}'.format(
51 codecs.encode(os.urandom(12), 'hex').decode('ascii')
52 )
53 # encode, though I don't think this matters
54 filename = jinja2._compat.encode_filename(filename)
55 # put ourselves in the cache using the 'lazycache' method
56 linecache.cache[filename] = (lambda: source,)
57
58 return super(MacroFuzzEnvironment, self)._compile(source, filename)
59
60
61 class TemplateCache(object):
62
63 def __init__(self):
64 self.file_cache = {}
65
66 def get_node_template(self, node):
67 key = (node['package_name'], node['original_file_path'])
68
69 if key in self.file_cache:
70 return self.file_cache[key]
71
72 template = get_template(
73 string=node.get('raw_sql'),
74 ctx={},
75 node=node
76 )
77 self.file_cache[key] = template
78
79 return template
80
81 def clear(self):
82 self.file_cache.clear()
83
84
85 template_cache = TemplateCache()
86
87
88 def macro_generator(node):
89 def apply_context(context):
90 def call(*args, **kwargs):
91 name = node.get('name')
92 template = template_cache.get_node_template(node)
93 module = template.make_module(context, False, context)
94
95 if node['resource_type'] == NodeType.Operation:
96 macro = module.__dict__[dbt.utils.get_dbt_operation_name(name)]
97 else:
98 macro = module.__dict__[dbt.utils.get_dbt_macro_name(name)]
99 module.__dict__.update(context)
100
101 try:
102 return macro(*args, **kwargs)
103 except dbt.exceptions.MacroReturn as e:
104 return e.value
105 except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e:
106 dbt.exceptions.raise_compiler_error(str(e), node)
107 except dbt.exceptions.CompilationException as e:
108 e.stack.append(node)
109 raise e
110
111 return call
112 return apply_context
113
114
115 class MaterializationExtension(jinja2.ext.Extension):
116 tags = ['materialization']
117
118 def parse(self, parser):
119 node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)
120 materialization_name = \
121 parser.parse_assign_target(name_only=True).name
122
123 adapter_name = 'default'
124 node.args = []
125 node.defaults = []
126
127 while parser.stream.skip_if('comma'):
128 target = parser.parse_assign_target(name_only=True)
129
130 if target.name == 'default':
131 pass
132
133 elif target.name == 'adapter':
134 parser.stream.expect('assign')
135 value = parser.parse_expression()
136 adapter_name = value.value
137
138 else:
139 dbt.exceptions.invalid_materialization_argument(
140 materialization_name, target.name)
141
142 node.name = dbt.utils.get_materialization_macro_name(
143 materialization_name, adapter_name)
144
145 node.body = parser.parse_statements(('name:endmaterialization',),
146 drop_needle=True)
147
148 return node
149
150
151 class OperationExtension(jinja2.ext.Extension):
152 tags = ['operation']
153
154 def parse(self, parser):
155 node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)
156 operation_name = \
157 parser.parse_assign_target(name_only=True).name
158
159 node.args = []
160 node.defaults = []
161
162 while parser.stream.skip_if('comma'):
163 target = parser.parse_assign_target(name_only=True)
164
165 node.name = dbt.utils.get_operation_macro_name(operation_name)
166
167 node.body = parser.parse_statements(('name:endoperation',),
168 drop_needle=True)
169
170 return node
171
172
173 class DocumentationExtension(jinja2.ext.Extension):
174 tags = ['docs']
175
176 def parse(self, parser):
177 node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)
178 docs_name = parser.parse_assign_target(name_only=True).name
179
180 node.args = []
181 node.defaults = []
182 node.name = dbt.utils.get_docs_macro_name(docs_name)
183 node.body = parser.parse_statements(('name:enddocs',),
184 drop_needle=True)
185 return node
186
187
188 def _is_dunder_name(name):
189 return name.startswith('__') and name.endswith('__')
190
191
192 def create_macro_capture_env(node):
193
194 class ParserMacroCapture(jinja2.Undefined):
195 """
196 This class sets up the parser to capture macros.
197 """
198 def __init__(self, hint=None, obj=None, name=None, exc=None):
199 super(ParserMacroCapture, self).__init__(hint=hint, name=name)
200 self.node = node
201 self.name = name
202 self.package_name = node.get('package_name')
203 # jinja uses these for safety, so we have to override them.
204 # see https://github.com/pallets/jinja/blob/master/jinja2/sandbox.py#L332-L339 # noqa
205 self.unsafe_callable = False
206 self.alters_data = False
207
208 def __deepcopy__(self, memo):
209 path = os.path.join(self.node.get('root_path'),
210 self.node.get('original_file_path'))
211
212 logger.debug(
213 'dbt encountered an undefined variable, "{}" in node {}.{} '
214 '(source path: {})'
215 .format(self.name, self.node.get('package_name'),
216 self.node.get('name'), path))
217
218 # match jinja's message
219 dbt.exceptions.raise_compiler_error(
220 "{!r} is undefined".format(self.name),
221 node=self.node
222 )
223
224 def __getitem__(self, name):
225 # Propagate the undefined value if a caller accesses this as if it
226 # were a dictionary
227 return self
228
229 def __getattr__(self, name):
230 if name == 'name' or _is_dunder_name(name):
231 raise AttributeError(
232 "'{}' object has no attribute '{}'"
233 .format(type(self).__name__, name)
234 )
235
236 self.package_name = self.name
237 self.name = name
238
239 return self
240
241 def __call__(self, *args, **kwargs):
242 return True
243
244 return ParserMacroCapture
245
246
247 def get_environment(node=None, capture_macros=False):
248 args = {
249 'extensions': []
250 }
251
252 if capture_macros:
253 args['undefined'] = create_macro_capture_env(node)
254
255 args['extensions'].append(MaterializationExtension)
256 args['extensions'].append(OperationExtension)
257 args['extensions'].append(DocumentationExtension)
258
259 return MacroFuzzEnvironment(**args)
260
261
262 def parse(string):
263 try:
264 return get_environment().parse(dbt.compat.to_string(string))
265
266 except (jinja2.exceptions.TemplateSyntaxError,
267 jinja2.exceptions.UndefinedError) as e:
268 e.translated = False
269 dbt.exceptions.raise_compiler_error(str(e))
270
271
272 def get_template(string, ctx, node=None, capture_macros=False):
273 try:
274 env = get_environment(node, capture_macros)
275
276 template_source = dbt.compat.to_string(string)
277 return env.from_string(template_source, globals=ctx)
278
279 except (jinja2.exceptions.TemplateSyntaxError,
280 jinja2.exceptions.UndefinedError) as e:
281 e.translated = False
282 dbt.exceptions.raise_compiler_error(str(e), node)
283
284
285 def render_template(template, ctx, node=None):
286 try:
287 return template.render(ctx)
288
289 except (jinja2.exceptions.TemplateSyntaxError,
290 jinja2.exceptions.UndefinedError) as e:
291 e.translated = False
292 dbt.exceptions.raise_compiler_error(str(e), node)
293
294
295 def get_rendered(string, ctx, node=None,
296 capture_macros=False):
297 template = get_template(string, ctx, node,
298 capture_macros=capture_macros)
299
300 return render_template(template, ctx, node)
301
302
303 def undefined_error(msg):
304 raise jinja2.exceptions.UndefinedError(msg)
305
[end of dbt/clients/jinja.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dbt/clients/jinja.py b/dbt/clients/jinja.py
--- a/dbt/clients/jinja.py
+++ b/dbt/clients/jinja.py
@@ -246,7 +246,7 @@
def get_environment(node=None, capture_macros=False):
args = {
- 'extensions': []
+ 'extensions': ['jinja2.ext.do']
}
if capture_macros:
|
{"golden_diff": "diff --git a/dbt/clients/jinja.py b/dbt/clients/jinja.py\n--- a/dbt/clients/jinja.py\n+++ b/dbt/clients/jinja.py\n@@ -246,7 +246,7 @@\n \n def get_environment(node=None, capture_macros=False):\n args = {\n- 'extensions': []\n+ 'extensions': ['jinja2.ext.do']\n }\n \n if capture_macros:\n", "issue": "Support jinja expression statements\n## Feature\r\n\r\n### Feature description\r\nhttp://jinja.pocoo.org/docs/2.10/extensions/#expression-statement\r\n\r\nPresently, we hack expressions with:\r\n```\r\n{% set _ = my_dict.update({\"a\": 1, \"b\": 2}) %}\r\n```\r\n\r\nInstead, the jinja expression statement will make it possible to write:\r\n\r\n```\r\n{% do my_dict.update({\"a\": 1, \"b\": 2}) %}\r\n```\r\n\r\nThis is a minor difference, but it will make jinja sql more readable and idiomatic.\r\n\r\n### Who will this benefit?\r\njinja writers\n", "before_files": [{"content": "import codecs\nimport linecache\nimport os\n\nimport jinja2\nimport jinja2._compat\nimport jinja2.ext\nimport jinja2.nodes\nimport jinja2.parser\nimport jinja2.sandbox\n\nimport dbt.compat\nimport dbt.exceptions\n\nfrom dbt.node_types import NodeType\nfrom dbt.utils import AttrDict\n\nfrom dbt.logger import GLOBAL_LOGGER as logger # noqa\n\n\nclass MacroFuzzParser(jinja2.parser.Parser):\n def parse_macro(self):\n node = jinja2.nodes.Macro(lineno=next(self.stream).lineno)\n\n # modified to fuzz macros defined in the same file. this way\n # dbt can understand the stack of macros being called.\n # - @cmcarthur\n node.name = dbt.utils.get_dbt_macro_name(\n self.parse_assign_target(name_only=True).name)\n\n self.parse_signature(node)\n node.body = self.parse_statements(('name:endmacro',),\n drop_needle=True)\n return node\n\n\nclass MacroFuzzEnvironment(jinja2.sandbox.SandboxedEnvironment):\n def _parse(self, source, name, filename):\n return MacroFuzzParser(\n self, source, name,\n jinja2._compat.encode_filename(filename)\n ).parse()\n\n def _compile(self, source, filename):\n \"\"\"Override jinja's compilation to stash the rendered source inside\n the python linecache for debugging.\n \"\"\"\n if filename == '<template>':\n # make a better filename\n filename = 'dbt-{}'.format(\n codecs.encode(os.urandom(12), 'hex').decode('ascii')\n )\n # encode, though I don't think this matters\n filename = jinja2._compat.encode_filename(filename)\n # put ourselves in the cache using the 'lazycache' method\n linecache.cache[filename] = (lambda: source,)\n\n return super(MacroFuzzEnvironment, self)._compile(source, filename)\n\n\nclass TemplateCache(object):\n\n def __init__(self):\n self.file_cache = {}\n\n def get_node_template(self, node):\n key = (node['package_name'], node['original_file_path'])\n\n if key in self.file_cache:\n return self.file_cache[key]\n\n template = get_template(\n string=node.get('raw_sql'),\n ctx={},\n node=node\n )\n self.file_cache[key] = template\n\n return template\n\n def clear(self):\n self.file_cache.clear()\n\n\ntemplate_cache = TemplateCache()\n\n\ndef macro_generator(node):\n def apply_context(context):\n def call(*args, **kwargs):\n name = node.get('name')\n template = template_cache.get_node_template(node)\n module = template.make_module(context, False, context)\n\n if node['resource_type'] == NodeType.Operation:\n macro = module.__dict__[dbt.utils.get_dbt_operation_name(name)]\n else:\n macro = module.__dict__[dbt.utils.get_dbt_macro_name(name)]\n module.__dict__.update(context)\n\n try:\n return macro(*args, **kwargs)\n except dbt.exceptions.MacroReturn as e:\n return e.value\n 
except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e:\n dbt.exceptions.raise_compiler_error(str(e), node)\n except dbt.exceptions.CompilationException as e:\n e.stack.append(node)\n raise e\n\n return call\n return apply_context\n\n\nclass MaterializationExtension(jinja2.ext.Extension):\n tags = ['materialization']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n materialization_name = \\\n parser.parse_assign_target(name_only=True).name\n\n adapter_name = 'default'\n node.args = []\n node.defaults = []\n\n while parser.stream.skip_if('comma'):\n target = parser.parse_assign_target(name_only=True)\n\n if target.name == 'default':\n pass\n\n elif target.name == 'adapter':\n parser.stream.expect('assign')\n value = parser.parse_expression()\n adapter_name = value.value\n\n else:\n dbt.exceptions.invalid_materialization_argument(\n materialization_name, target.name)\n\n node.name = dbt.utils.get_materialization_macro_name(\n materialization_name, adapter_name)\n\n node.body = parser.parse_statements(('name:endmaterialization',),\n drop_needle=True)\n\n return node\n\n\nclass OperationExtension(jinja2.ext.Extension):\n tags = ['operation']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n operation_name = \\\n parser.parse_assign_target(name_only=True).name\n\n node.args = []\n node.defaults = []\n\n while parser.stream.skip_if('comma'):\n target = parser.parse_assign_target(name_only=True)\n\n node.name = dbt.utils.get_operation_macro_name(operation_name)\n\n node.body = parser.parse_statements(('name:endoperation',),\n drop_needle=True)\n\n return node\n\n\nclass DocumentationExtension(jinja2.ext.Extension):\n tags = ['docs']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n docs_name = parser.parse_assign_target(name_only=True).name\n\n node.args = []\n node.defaults = []\n node.name = dbt.utils.get_docs_macro_name(docs_name)\n node.body = parser.parse_statements(('name:enddocs',),\n drop_needle=True)\n return node\n\n\ndef _is_dunder_name(name):\n return name.startswith('__') and name.endswith('__')\n\n\ndef create_macro_capture_env(node):\n\n class ParserMacroCapture(jinja2.Undefined):\n \"\"\"\n This class sets up the parser to capture macros.\n \"\"\"\n def __init__(self, hint=None, obj=None, name=None, exc=None):\n super(ParserMacroCapture, self).__init__(hint=hint, name=name)\n self.node = node\n self.name = name\n self.package_name = node.get('package_name')\n # jinja uses these for safety, so we have to override them.\n # see https://github.com/pallets/jinja/blob/master/jinja2/sandbox.py#L332-L339 # noqa\n self.unsafe_callable = False\n self.alters_data = False\n\n def __deepcopy__(self, memo):\n path = os.path.join(self.node.get('root_path'),\n self.node.get('original_file_path'))\n\n logger.debug(\n 'dbt encountered an undefined variable, \"{}\" in node {}.{} '\n '(source path: {})'\n .format(self.name, self.node.get('package_name'),\n self.node.get('name'), path))\n\n # match jinja's message\n dbt.exceptions.raise_compiler_error(\n \"{!r} is undefined\".format(self.name),\n node=self.node\n )\n\n def __getitem__(self, name):\n # Propagate the undefined value if a caller accesses this as if it\n # were a dictionary\n return self\n\n def __getattr__(self, name):\n if name == 'name' or _is_dunder_name(name):\n raise AttributeError(\n \"'{}' object has no attribute '{}'\"\n .format(type(self).__name__, name)\n )\n\n self.package_name = 
self.name\n self.name = name\n\n return self\n\n def __call__(self, *args, **kwargs):\n return True\n\n return ParserMacroCapture\n\n\ndef get_environment(node=None, capture_macros=False):\n args = {\n 'extensions': []\n }\n\n if capture_macros:\n args['undefined'] = create_macro_capture_env(node)\n\n args['extensions'].append(MaterializationExtension)\n args['extensions'].append(OperationExtension)\n args['extensions'].append(DocumentationExtension)\n\n return MacroFuzzEnvironment(**args)\n\n\ndef parse(string):\n try:\n return get_environment().parse(dbt.compat.to_string(string))\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e))\n\n\ndef get_template(string, ctx, node=None, capture_macros=False):\n try:\n env = get_environment(node, capture_macros)\n\n template_source = dbt.compat.to_string(string)\n return env.from_string(template_source, globals=ctx)\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e), node)\n\n\ndef render_template(template, ctx, node=None):\n try:\n return template.render(ctx)\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e), node)\n\n\ndef get_rendered(string, ctx, node=None,\n capture_macros=False):\n template = get_template(string, ctx, node,\n capture_macros=capture_macros)\n\n return render_template(template, ctx, node)\n\n\ndef undefined_error(msg):\n raise jinja2.exceptions.UndefinedError(msg)\n", "path": "dbt/clients/jinja.py"}]}
| 3,483 | 95 |
gh_patches_debug_1461
|
rasdani/github-patches
|
git_diff
|
kartoza__prj.app-346
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Display thumbnails in a modal window when we click on fullscreen
We can see a lot of GIF in the QGIS changelog. These thumbnails are too small to see so I have to click on the button to see it fullscreen. For now, it redirects to the GIF url like http://changelog.qgis.org/media/images/entries/53f72a9cf1bf32d73eb5174c37e54c60002b9707.gif
The user needs to use the "previous" button in the web browser to come back to the changelog.
It would be better to implement a javascript modal window to show the GIF and to stay on the URL http://changelog.qgis.org/en/qgis/version/2.16.0/
</issue>
<code>
[start of django_project/core/settings/project.py]
1 # coding=utf-8
2
3 """Project level settings.
4
5 Adjust these values as needed but don't commit passwords etc. to any public
6 repository!
7 """
8
9 import os # noqa
10 from django.utils.translation import ugettext_lazy as _
11 from .utils import absolute_path
12 from .contrib import * # noqa
13
14 # Project apps
15 INSTALLED_APPS += (
16 'base',
17 'changes',
18 'github_issue',
19 'vota',
20 )
21
22 # Due to profile page does not available,
23 # this will redirect to home page after login
24 LOGIN_REDIRECT_URL = '/'
25
26 # How many versions to list in each project box
27 PROJECT_VERSION_LIST_SIZE = 10
28
29 # Set debug to false for production
30 DEBUG = TEMPLATE_DEBUG = False
31
32 SOUTH_TESTS_MIGRATE = False
33
34
35 # Set languages which want to be translated
36 LANGUAGES = (
37 ('en', _('English')),
38 ('af', _('Afrikaans')),
39 ('id', _('Indonesian')),
40 ('ko', _('Korean')),
41 )
42
43 # Set storage path for the translation files
44 LOCALE_PATHS = (absolute_path('locale'),)
45
46
47 MIDDLEWARE_CLASSES = (
48 # For nav bar generation
49 'core.custom_middleware.NavContextMiddleware',
50 ) + MIDDLEWARE_CLASSES
51
52 # Project specific javascript files to be pipelined
53 # For third party libs like jquery should go in contrib.py
54 PIPELINE_JS['project'] = {
55 'source_filenames': (
56 'js/csrf-ajax.js',
57 'js/changelog.js',
58 'js/github-issue.js'
59 ),
60 'output_filename': 'js/project.js',
61 }
62
63 # Project specific css files to be pipelined
64 # For third party libs like bootstrap should go in contrib.py
65 PIPELINE_CSS['project'] = {
66 'source_filenames': (
67 'css/changelog.css',
68 'css/form.css',
69 'css/fonts.css'
70 ),
71 'output_filename': 'css/project.css',
72 'extra_context': {
73 'media': 'screen, projection',
74 },
75 }
76
[end of django_project/core/settings/project.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/django_project/core/settings/project.py b/django_project/core/settings/project.py
--- a/django_project/core/settings/project.py
+++ b/django_project/core/settings/project.py
@@ -55,7 +55,8 @@
'source_filenames': (
'js/csrf-ajax.js',
'js/changelog.js',
- 'js/github-issue.js'
+ 'js/github-issue.js',
+ 'js/entry.js',
),
'output_filename': 'js/project.js',
}
|
{"golden_diff": "diff --git a/django_project/core/settings/project.py b/django_project/core/settings/project.py\n--- a/django_project/core/settings/project.py\n+++ b/django_project/core/settings/project.py\n@@ -55,7 +55,8 @@\n 'source_filenames': (\n 'js/csrf-ajax.js',\n 'js/changelog.js',\n- 'js/github-issue.js'\n+ 'js/github-issue.js',\n+ 'js/entry.js',\n ),\n 'output_filename': 'js/project.js',\n }\n", "issue": "Display thumbnails in a modal window when we click on fullscreen\nWe can see a lot of GIF in the QGIS changelog. These thumbnails are too small to see so I have to click on the button to see it fullscreen. For now, it redirects to the GIF url like http://changelog.qgis.org/media/images/entries/53f72a9cf1bf32d73eb5174c37e54c60002b9707.gif\nThe user needs to use the \"previous\" button in the web browser to come back to the changelog.\n\nIt would be better to implement a javascript modal window to show the GIF and to stay on the URL http://changelog.qgis.org/en/qgis/version/2.16.0/\n\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Project level settings.\n\nAdjust these values as needed but don't commit passwords etc. to any public\nrepository!\n\"\"\"\n\nimport os # noqa\nfrom django.utils.translation import ugettext_lazy as _\nfrom .utils import absolute_path\nfrom .contrib import * # noqa\n\n# Project apps\nINSTALLED_APPS += (\n 'base',\n 'changes',\n 'github_issue',\n 'vota',\n)\n\n# Due to profile page does not available,\n# this will redirect to home page after login\nLOGIN_REDIRECT_URL = '/'\n\n# How many versions to list in each project box\nPROJECT_VERSION_LIST_SIZE = 10\n\n# Set debug to false for production\nDEBUG = TEMPLATE_DEBUG = False\n\nSOUTH_TESTS_MIGRATE = False\n\n\n# Set languages which want to be translated\nLANGUAGES = (\n ('en', _('English')),\n ('af', _('Afrikaans')),\n ('id', _('Indonesian')),\n ('ko', _('Korean')),\n)\n\n# Set storage path for the translation files\nLOCALE_PATHS = (absolute_path('locale'),)\n\n\nMIDDLEWARE_CLASSES = (\n # For nav bar generation\n 'core.custom_middleware.NavContextMiddleware',\n) + MIDDLEWARE_CLASSES\n\n# Project specific javascript files to be pipelined\n# For third party libs like jquery should go in contrib.py\nPIPELINE_JS['project'] = {\n 'source_filenames': (\n 'js/csrf-ajax.js',\n 'js/changelog.js',\n 'js/github-issue.js'\n ),\n 'output_filename': 'js/project.js',\n}\n\n# Project specific css files to be pipelined\n# For third party libs like bootstrap should go in contrib.py\nPIPELINE_CSS['project'] = {\n 'source_filenames': (\n 'css/changelog.css',\n 'css/form.css',\n 'css/fonts.css'\n ),\n 'output_filename': 'css/project.css',\n 'extra_context': {\n 'media': 'screen, projection',\n },\n}\n", "path": "django_project/core/settings/project.py"}]}
| 1,279 | 110 |
gh_patches_debug_50751
|
rasdani/github-patches
|
git_diff
|
pantsbuild__pants-16113
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pants poetry-based lockfiles fail to include hashes.
This was detected in a unit test in the Pants repo, but is a wider problem for all versions of Pants that support generating lockfiles using Poetry.
The proximal cause is this announcement from PyPI:
https://discuss.python.org/t/backwards-incompatible-change-to-pypi-json-api/17154
And the root cause is this Poetry code:
https://github.com/python-poetry/poetry/blob/bce13c14f73060b3abbb791dea585d8fde26eaef/poetry/repositories/pypi_repository.py#L272-L283
There was a Poetry fix released and backported to the 1.1. branch here:
https://github.com/python-poetry/poetry/pull/5973
Users can fix with 2 steps:
1. Update Pants config
```toml
[poetry]
# N.B.: Works around issue described at https://github.com/pantsbuild/pants/issues/16111
# Undo once on a Pants with this version or greater as the default.
version = "poetry==1.1.14"
```
2. Clear Poetry caches with `rm -rf ~/.cache/pypoetry` on Linux and `rm -rf ~/Library/Caches/pypoetry` on Mac.
This issue tracks bumping Pants default to this fixed Poetry version.
</issue>
<code>
[start of src/python/pants/backend/python/subsystems/poetry.py]
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 from collections import defaultdict
7 from dataclasses import dataclass
8 from textwrap import dedent
9 from typing import Any, Iterable, Sequence
10
11 import toml
12 from pkg_resources import Requirement
13
14 from pants.backend.python.subsystems.python_tool_base import PythonToolRequirementsBase
15 from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
16 from pants.engine.fs import FileContent
17
18 # ----------------------------------------------------------------------------------------
19 # Subsystem
20 # ----------------------------------------------------------------------------------------
21
22
23 class PoetrySubsystem(PythonToolRequirementsBase):
24 options_scope = "poetry"
25 help = "Used to generate lockfiles for third-party Python dependencies."
26
27 default_version = "poetry==1.1.8"
28
29 register_interpreter_constraints = True
30 default_interpreter_constraints = ["CPython>=3.7,<4"]
31
32
33 # We must monkeypatch Poetry to include `setuptools` and `wheel` in the lockfile. This was fixed
34 # in Poetry 1.2. See https://github.com/python-poetry/poetry/issues/1584.
35 # WONTFIX(#12314): only use this custom launcher if using Poetry 1.1..
36 POETRY_LAUNCHER = FileContent(
37 "__pants_poetry_launcher.py",
38 dedent(
39 """\
40 from poetry.console import main
41 from poetry.puzzle.provider import Provider
42
43 Provider.UNSAFE_PACKAGES = set()
44 main()
45 """
46 ).encode(),
47 )
48
49
50 # ----------------------------------------------------------------------------------------
51 # Parsing
52 # ----------------------------------------------------------------------------------------
53
54 _HEADER = {
55 "name": "pants-lockfile-generation",
56 "version": "0.1.0",
57 "description": "",
58 "authors": ["pantsbuild"],
59 }
60
61
62 def create_pyproject_toml(
63 requirements: Iterable[str], interpreter_constraints: InterpreterConstraints
64 ) -> str:
65 return toml.dumps(create_pyproject_toml_as_dict(requirements, interpreter_constraints))
66
67
68 def create_pyproject_toml_as_dict(
69 raw_requirements: Iterable[str], interpreter_constraints: InterpreterConstraints
70 ) -> dict:
71 python_constraint = {"python": interpreter_constraints.to_poetry_constraint()}
72 project_name_to_poetry_deps = defaultdict(list)
73 for raw_req in raw_requirements:
74 # WONTFIX(#12314): add error handling.
75 req = Requirement.parse(raw_req)
76 poetry_dep = PoetryDependency.from_requirement(req)
77 project_name_to_poetry_deps[req.project_name].append(poetry_dep)
78
79 deps = {
80 project_name: PoetryDependency.to_pyproject_toml_metadata(poetry_deps)
81 for project_name, poetry_deps in project_name_to_poetry_deps.items()
82 }
83 return {"tool": {"poetry": {**_HEADER, "dependencies": {**python_constraint, **deps}}}}
84
85
86 @dataclass(frozen=True)
87 class PoetryDependency:
88 name: str
89 version: str | None
90 extras: tuple[str, ...] = ()
91 markers: str | None = None
92
93 @classmethod
94 def from_requirement(cls, requirement: Requirement) -> PoetryDependency:
95 return PoetryDependency(
96 requirement.project_name,
97 version=str(requirement.specifier) or None, # type: ignore[attr-defined]
98 extras=tuple(sorted(requirement.extras)),
99 markers=str(requirement.marker) if requirement.marker else None,
100 )
101
102 @classmethod
103 def to_pyproject_toml_metadata(
104 cls, deps: Sequence[PoetryDependency]
105 ) -> dict[str, Any] | list[dict[str, Any]]:
106 def convert_dep(dep: PoetryDependency) -> dict[str, Any]:
107 metadata: dict[str, Any] = {"version": dep.version or "*"}
108 if dep.extras:
109 metadata["extras"] = dep.extras
110 if dep.markers:
111 metadata["markers"] = dep.markers
112 return metadata
113
114 if not deps:
115 raise AssertionError("Must have at least one element!")
116 if len(deps) == 1:
117 return convert_dep(deps[0])
118
119 entries = []
120 name = deps[0].name
121 for dep in deps:
122 if dep.name != name:
123 raise AssertionError(f"All elements must have the same project name. Given: {deps}")
124 entries.append(convert_dep(dep))
125 return entries
126
[end of src/python/pants/backend/python/subsystems/poetry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/python/pants/backend/python/subsystems/poetry.py b/src/python/pants/backend/python/subsystems/poetry.py
--- a/src/python/pants/backend/python/subsystems/poetry.py
+++ b/src/python/pants/backend/python/subsystems/poetry.py
@@ -24,7 +24,7 @@
options_scope = "poetry"
help = "Used to generate lockfiles for third-party Python dependencies."
- default_version = "poetry==1.1.8"
+ default_version = "poetry==1.1.14"
register_interpreter_constraints = True
default_interpreter_constraints = ["CPython>=3.7,<4"]
|
{"golden_diff": "diff --git a/src/python/pants/backend/python/subsystems/poetry.py b/src/python/pants/backend/python/subsystems/poetry.py\n--- a/src/python/pants/backend/python/subsystems/poetry.py\n+++ b/src/python/pants/backend/python/subsystems/poetry.py\n@@ -24,7 +24,7 @@\n options_scope = \"poetry\"\n help = \"Used to generate lockfiles for third-party Python dependencies.\"\n \n- default_version = \"poetry==1.1.8\"\n+ default_version = \"poetry==1.1.14\"\n \n register_interpreter_constraints = True\n default_interpreter_constraints = [\"CPython>=3.7,<4\"]\n", "issue": "Pants poetry-based lockfiles fail to include hashes.\nThis was detected in a unit test in the Pants repo, but is a wider problem for all versions of Pants that support generating lockfiles using Poetry.\r\n\r\nThe proximal cause is this announcement from PyPI:\r\n https://discuss.python.org/t/backwards-incompatible-change-to-pypi-json-api/17154\r\n\r\nAnd the root cause is this Poetry code:\r\n https://github.com/python-poetry/poetry/blob/bce13c14f73060b3abbb791dea585d8fde26eaef/poetry/repositories/pypi_repository.py#L272-L283\r\n\r\nThere was a Poetry fix released and backported to the 1.1. branch here:\r\n https://github.com/python-poetry/poetry/pull/5973\r\n\r\nUsers can fix with 2 steps:\r\n1. Update Pants config\r\n```toml\r\n[poetry]\r\n# N.B.: Works around issue described at https://github.com/pantsbuild/pants/issues/16111\r\n# Undo once on a Pants with this version or greater as the default.\r\nversion = \"poetry==1.1.14\"\r\n```\r\n2. Clear Poetry caches with `rm -rf ~/.cache/pypoetry` on Linux and `rm -rf ~/Library/Caches/pypoetry` on Mac.\r\n\r\nThis issue tracks bumping Pants default to this fixed Poetry version.\r\n\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom collections import defaultdict\nfrom dataclasses import dataclass\nfrom textwrap import dedent\nfrom typing import Any, Iterable, Sequence\n\nimport toml\nfrom pkg_resources import Requirement\n\nfrom pants.backend.python.subsystems.python_tool_base import PythonToolRequirementsBase\nfrom pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\nfrom pants.engine.fs import FileContent\n\n# ----------------------------------------------------------------------------------------\n# Subsystem\n# ----------------------------------------------------------------------------------------\n\n\nclass PoetrySubsystem(PythonToolRequirementsBase):\n options_scope = \"poetry\"\n help = \"Used to generate lockfiles for third-party Python dependencies.\"\n\n default_version = \"poetry==1.1.8\"\n\n register_interpreter_constraints = True\n default_interpreter_constraints = [\"CPython>=3.7,<4\"]\n\n\n# We must monkeypatch Poetry to include `setuptools` and `wheel` in the lockfile. This was fixed\n# in Poetry 1.2. 
See https://github.com/python-poetry/poetry/issues/1584.\n# WONTFIX(#12314): only use this custom launcher if using Poetry 1.1..\nPOETRY_LAUNCHER = FileContent(\n \"__pants_poetry_launcher.py\",\n dedent(\n \"\"\"\\\n from poetry.console import main\n from poetry.puzzle.provider import Provider\n\n Provider.UNSAFE_PACKAGES = set()\n main()\n \"\"\"\n ).encode(),\n)\n\n\n# ----------------------------------------------------------------------------------------\n# Parsing\n# ----------------------------------------------------------------------------------------\n\n_HEADER = {\n \"name\": \"pants-lockfile-generation\",\n \"version\": \"0.1.0\",\n \"description\": \"\",\n \"authors\": [\"pantsbuild\"],\n}\n\n\ndef create_pyproject_toml(\n requirements: Iterable[str], interpreter_constraints: InterpreterConstraints\n) -> str:\n return toml.dumps(create_pyproject_toml_as_dict(requirements, interpreter_constraints))\n\n\ndef create_pyproject_toml_as_dict(\n raw_requirements: Iterable[str], interpreter_constraints: InterpreterConstraints\n) -> dict:\n python_constraint = {\"python\": interpreter_constraints.to_poetry_constraint()}\n project_name_to_poetry_deps = defaultdict(list)\n for raw_req in raw_requirements:\n # WONTFIX(#12314): add error handling.\n req = Requirement.parse(raw_req)\n poetry_dep = PoetryDependency.from_requirement(req)\n project_name_to_poetry_deps[req.project_name].append(poetry_dep)\n\n deps = {\n project_name: PoetryDependency.to_pyproject_toml_metadata(poetry_deps)\n for project_name, poetry_deps in project_name_to_poetry_deps.items()\n }\n return {\"tool\": {\"poetry\": {**_HEADER, \"dependencies\": {**python_constraint, **deps}}}}\n\n\n@dataclass(frozen=True)\nclass PoetryDependency:\n name: str\n version: str | None\n extras: tuple[str, ...] = ()\n markers: str | None = None\n\n @classmethod\n def from_requirement(cls, requirement: Requirement) -> PoetryDependency:\n return PoetryDependency(\n requirement.project_name,\n version=str(requirement.specifier) or None, # type: ignore[attr-defined]\n extras=tuple(sorted(requirement.extras)),\n markers=str(requirement.marker) if requirement.marker else None,\n )\n\n @classmethod\n def to_pyproject_toml_metadata(\n cls, deps: Sequence[PoetryDependency]\n ) -> dict[str, Any] | list[dict[str, Any]]:\n def convert_dep(dep: PoetryDependency) -> dict[str, Any]:\n metadata: dict[str, Any] = {\"version\": dep.version or \"*\"}\n if dep.extras:\n metadata[\"extras\"] = dep.extras\n if dep.markers:\n metadata[\"markers\"] = dep.markers\n return metadata\n\n if not deps:\n raise AssertionError(\"Must have at least one element!\")\n if len(deps) == 1:\n return convert_dep(deps[0])\n\n entries = []\n name = deps[0].name\n for dep in deps:\n if dep.name != name:\n raise AssertionError(f\"All elements must have the same project name. Given: {deps}\")\n entries.append(convert_dep(dep))\n return entries\n", "path": "src/python/pants/backend/python/subsystems/poetry.py"}]}
| 2,068 | 152 |
gh_patches_debug_13638
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-2503
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Current `tox` configuration ends up testing old code
For example:
``` bash
$ rm -r .tox/
$ tox -e system-tests --notest
GLOB sdist-make: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/setup.py
system-tests create: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/.tox/system-tests
system-tests inst: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/.tox/dist/google-cloud-0.20.0.zip
system-tests installed: -f file:///home/tseaver/.pip/wheels,enum34==1.1.6,future==0.15.2,futures==3.0.5,gapic-google-logging-v2==0.10.1,gapic-google-pubsub-v1==0.10.1,google-cloud==0.20.0,google-cloud-bigquery==0.20.0,google-cloud-bigtable==0.20.0,google-cloud-core==0.20.0,google-cloud-datastore==0.20.0,google-cloud-dns==0.20.0,google-cloud-error-reporting==0.20.0,google-cloud-happybase==0.20.0,google-cloud-language==0.20.0,google-cloud-logging==0.20.0,google-cloud-monitoring==0.20.0,google-cloud-pubsub==0.20.0,google-cloud-resource-manager==0.20.0,google-cloud-storage==0.20.0,google-cloud-translate==0.20.0,google-cloud-vision==0.20.0,google-gax==0.14.1,googleapis-common-protos==1.3.5,grpc-google-iam-v1==0.10.1,grpc-google-logging-v2==0.10.1,grpc-google-pubsub-v1==0.10.1,grpcio==1.0.0,httplib2==0.9.2,oauth2client==3.0.0,ply==3.8,protobuf==3.1.0.post1,pyasn1==0.1.9,pyasn1-modules==0.0.8,rsa==3.4.2,six==1.10.0
___________________________________ summary ____________________________________
system-tests: skipped tests
congratulations :)
$ diff -ru .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/ storage/google/cloud/storage/ --exclude="*.pyc"
diff -ru '--exclude=*.pyc' .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/blob.py storage/google/cloud/storage/blob.py
--- .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/blob.py 2016-10-05 18:15:48.724796000 -0400
+++ storage/google/cloud/storage/blob.py 2016-10-05 18:02:55.872830411 -0400
@@ -655,6 +655,32 @@
self.acl.all().grant_read()
self.acl.save(client=client)
+ def compose(self, sources, client=None):
+ """Concatenate source blobs into this one.
+
+ :type sources: list of :class:`Blob`
+ :param sources: blobs whose contents will be composed into this blob.
+
+ :type client: :class:`~google.cloud.storage.client.Client` or
+ ``NoneType``
+ :param client: Optional. The client to use. If not passed, falls back
+ to the ``client`` stored on the blob's bucket.
+
+ :raises: :exc:`ValueError` if this blob does not have its
+ :attr:`content_type` set.
+ """
+ if self.content_type is None:
+ raise ValueError("Destination 'content_type' not set.")
+ client = self._require_client(client)
+ request = {
+ 'sourceObjects': [{'name': source.name} for source in sources],
+ 'destination': self._properties.copy(),
+ }
+ api_response = client.connection.api_request(
+ method='POST', path=self.path + '/compose', data=request,
+ _target_object=self)
+ self._set_properties(api_response)
+
cache_control = _scalar_property('cacheControl')
"""HTTP 'Cache-Control' header for this object.
```
Somehow, the tarball / wheel is being cached.
</issue>
<code>
[start of scripts/pycodestyle_on_repo.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Custom script to run pycodestyle on google-cloud codebase.
16
17 This runs pycodestyle as a script via subprocess but only runs it on the
18 .py files that are checked in to the repository.
19 """
20
21
22 import os
23 import subprocess
24 import sys
25
26 from script_utils import get_affected_files
27
28
29 def main():
30 """Run pycodestyle on all Python files in the repository."""
31 git_root = subprocess.check_output(
32 ['git', 'rev-parse', '--show-toplevel']).strip()
33 os.chdir(git_root)
34 candidates, _ = get_affected_files()
35 python_files = [
36 candidate for candidate in candidates if candidate.endswith('.py')]
37
38 pycodestyle_command = ['pycodestyle'] + python_files
39 status_code = subprocess.call(pycodestyle_command)
40 sys.exit(status_code)
41
42
43 if __name__ == '__main__':
44 main()
45
[end of scripts/pycodestyle_on_repo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/pycodestyle_on_repo.py b/scripts/pycodestyle_on_repo.py
--- a/scripts/pycodestyle_on_repo.py
+++ b/scripts/pycodestyle_on_repo.py
@@ -19,6 +19,8 @@
"""
+from __future__ import print_function
+
import os
import subprocess
import sys
@@ -35,9 +37,12 @@
python_files = [
candidate for candidate in candidates if candidate.endswith('.py')]
- pycodestyle_command = ['pycodestyle'] + python_files
- status_code = subprocess.call(pycodestyle_command)
- sys.exit(status_code)
+ if not python_files:
+ print('No Python files to lint, exiting.')
+ else:
+ pycodestyle_command = ['pycodestyle'] + python_files
+ status_code = subprocess.call(pycodestyle_command)
+ sys.exit(status_code)
if __name__ == '__main__':
|
{"golden_diff": "diff --git a/scripts/pycodestyle_on_repo.py b/scripts/pycodestyle_on_repo.py\n--- a/scripts/pycodestyle_on_repo.py\n+++ b/scripts/pycodestyle_on_repo.py\n@@ -19,6 +19,8 @@\n \"\"\"\n \n \n+from __future__ import print_function\n+\n import os\n import subprocess\n import sys\n@@ -35,9 +37,12 @@\n python_files = [\n candidate for candidate in candidates if candidate.endswith('.py')]\n \n- pycodestyle_command = ['pycodestyle'] + python_files\n- status_code = subprocess.call(pycodestyle_command)\n- sys.exit(status_code)\n+ if not python_files:\n+ print('No Python files to lint, exiting.')\n+ else:\n+ pycodestyle_command = ['pycodestyle'] + python_files\n+ status_code = subprocess.call(pycodestyle_command)\n+ sys.exit(status_code)\n \n \n if __name__ == '__main__':\n", "issue": "Current `tox` configuration ends up testing old code\nFor example:\n\n``` bash\n$ rm -r .tox/\n$ tox -e system-tests --notest\nGLOB sdist-make: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/setup.py\nsystem-tests create: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/.tox/system-tests\nsystem-tests inst: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/.tox/dist/google-cloud-0.20.0.zip\nsystem-tests installed: -f file:///home/tseaver/.pip/wheels,enum34==1.1.6,future==0.15.2,futures==3.0.5,gapic-google-logging-v2==0.10.1,gapic-google-pubsub-v1==0.10.1,google-cloud==0.20.0,google-cloud-bigquery==0.20.0,google-cloud-bigtable==0.20.0,google-cloud-core==0.20.0,google-cloud-datastore==0.20.0,google-cloud-dns==0.20.0,google-cloud-error-reporting==0.20.0,google-cloud-happybase==0.20.0,google-cloud-language==0.20.0,google-cloud-logging==0.20.0,google-cloud-monitoring==0.20.0,google-cloud-pubsub==0.20.0,google-cloud-resource-manager==0.20.0,google-cloud-storage==0.20.0,google-cloud-translate==0.20.0,google-cloud-vision==0.20.0,google-gax==0.14.1,googleapis-common-protos==1.3.5,grpc-google-iam-v1==0.10.1,grpc-google-logging-v2==0.10.1,grpc-google-pubsub-v1==0.10.1,grpcio==1.0.0,httplib2==0.9.2,oauth2client==3.0.0,ply==3.8,protobuf==3.1.0.post1,pyasn1==0.1.9,pyasn1-modules==0.0.8,rsa==3.4.2,six==1.10.0\n___________________________________ summary ____________________________________\n system-tests: skipped tests\n congratulations :)\n$ diff -ru .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/ storage/google/cloud/storage/ --exclude=\"*.pyc\"\ndiff -ru '--exclude=*.pyc' .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/blob.py storage/google/cloud/storage/blob.py\n--- .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/blob.py 2016-10-05 18:15:48.724796000 -0400\n+++ storage/google/cloud/storage/blob.py 2016-10-05 18:02:55.872830411 -0400\n@@ -655,6 +655,32 @@\n self.acl.all().grant_read()\n self.acl.save(client=client)\n\n+ def compose(self, sources, client=None):\n+ \"\"\"Concatenate source blobs into this one.\n+\n+ :type sources: list of :class:`Blob`\n+ :param sources: blobs whose contents will be composed into this blob.\n+\n+ :type client: :class:`~google.cloud.storage.client.Client` or\n+ ``NoneType``\n+ :param client: Optional. The client to use. 
If not passed, falls back\n+ to the ``client`` stored on the blob's bucket.\n+\n+ :raises: :exc:`ValueError` if this blob does not have its\n+ :attr:`content_type` set.\n+ \"\"\"\n+ if self.content_type is None:\n+ raise ValueError(\"Destination 'content_type' not set.\")\n+ client = self._require_client(client)\n+ request = {\n+ 'sourceObjects': [{'name': source.name} for source in sources],\n+ 'destination': self._properties.copy(),\n+ }\n+ api_response = client.connection.api_request(\n+ method='POST', path=self.path + '/compose', data=request,\n+ _target_object=self)\n+ self._set_properties(api_response)\n+\n cache_control = _scalar_property('cacheControl')\n \"\"\"HTTP 'Cache-Control' header for this object.\n```\n\nSomehow, the tarball / wheel is being cached.\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Custom script to run pycodestyle on google-cloud codebase.\n\nThis runs pycodestyle as a script via subprocess but only runs it on the\n.py files that are checked in to the repository.\n\"\"\"\n\n\nimport os\nimport subprocess\nimport sys\n\nfrom script_utils import get_affected_files\n\n\ndef main():\n \"\"\"Run pycodestyle on all Python files in the repository.\"\"\"\n git_root = subprocess.check_output(\n ['git', 'rev-parse', '--show-toplevel']).strip()\n os.chdir(git_root)\n candidates, _ = get_affected_files()\n python_files = [\n candidate for candidate in candidates if candidate.endswith('.py')]\n\n pycodestyle_command = ['pycodestyle'] + python_files\n status_code = subprocess.call(pycodestyle_command)\n sys.exit(status_code)\n\n\nif __name__ == '__main__':\n main()\n", "path": "scripts/pycodestyle_on_repo.py"}]}
| 1,980 | 210 |
gh_patches_debug_8101
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-1983
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
empty WARNING message in scrapy.core.downloader.tls (1.1.0rc4/master)
Sometimes I'm getting empty warnings now, on 1.1.0rc4 and master branch.
(at least on rc3 as well)
```
2016-05-07 00:33:46 [scrapy.core.downloader.tls] WARNING:
2016-05-07 00:33:47 [scrapy.core.downloader.tls] WARNING:
2016-05-07 00:33:48 [scrapy.core.downloader.tls] WARNING:
```
It happens in a broad linkcheck crawl; so I couldn't pinpoint what URLs might be responsible for that, at this time. The only other observation so far is, that it doesn't happen on a cache-replayed run (which might be obvious, as there is no TLS there).
</issue>
<code>
[start of scrapy/core/downloader/tls.py]
1 import logging
2 from OpenSSL import SSL
3
4
5 logger = logging.getLogger(__name__)
6
7 METHOD_SSLv3 = 'SSLv3'
8 METHOD_TLS = 'TLS'
9 METHOD_TLSv10 = 'TLSv1.0'
10 METHOD_TLSv11 = 'TLSv1.1'
11 METHOD_TLSv12 = 'TLSv1.2'
12
13 openssl_methods = {
14 METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)
15 METHOD_SSLv3: SSL.SSLv3_METHOD, # SSL 3 (NOT recommended)
16 METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only
17 METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only
18 METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only
19 }
20
21 # ClientTLSOptions requires a recent-enough version of Twisted
22 try:
23
24 # taken from twisted/twisted/internet/_sslverify.py
25 try:
26 from OpenSSL.SSL import SSL_CB_HANDSHAKE_DONE, SSL_CB_HANDSHAKE_START
27 except ImportError:
28 SSL_CB_HANDSHAKE_START = 0x10
29 SSL_CB_HANDSHAKE_DONE = 0x20
30
31 from twisted.internet._sslverify import (ClientTLSOptions,
32 _maybeSetHostNameIndication,
33 verifyHostname,
34 VerificationError)
35
36 class ScrapyClientTLSOptions(ClientTLSOptions):
37 # same as Twisted's ClientTLSOptions,
38 # except that VerificationError is caught
39 # and doesn't close the connection
40 def _identityVerifyingInfoCallback(self, connection, where, ret):
41 if where & SSL_CB_HANDSHAKE_START:
42 _maybeSetHostNameIndication(connection, self._hostnameBytes)
43 elif where & SSL_CB_HANDSHAKE_DONE:
44 try:
45 verifyHostname(connection, self._hostnameASCII)
46 except VerificationError as e:
47 logger.warning(e)
48
49 except ImportError:
50 # ImportError should not matter for older Twisted versions
51 # as the above is not used in the fallback ScrapyClientContextFactory
52 pass
53
[end of scrapy/core/downloader/tls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scrapy/core/downloader/tls.py b/scrapy/core/downloader/tls.py
--- a/scrapy/core/downloader/tls.py
+++ b/scrapy/core/downloader/tls.py
@@ -44,7 +44,9 @@
try:
verifyHostname(connection, self._hostnameASCII)
except VerificationError as e:
- logger.warning(e)
+ logger.warning(
+ 'Remote certificate is not valid for hostname "{}"; {}'.format(
+ self._hostnameASCII, e))
except ImportError:
# ImportError should not matter for older Twisted versions
|
{"golden_diff": "diff --git a/scrapy/core/downloader/tls.py b/scrapy/core/downloader/tls.py\n--- a/scrapy/core/downloader/tls.py\n+++ b/scrapy/core/downloader/tls.py\n@@ -44,7 +44,9 @@\n try:\n verifyHostname(connection, self._hostnameASCII)\n except VerificationError as e:\n- logger.warning(e)\n+ logger.warning(\n+ 'Remote certificate is not valid for hostname \"{}\"; {}'.format(\n+ self._hostnameASCII, e))\n \n except ImportError:\n # ImportError should not matter for older Twisted versions\n", "issue": "empty WARNING message in scrapy.core.downloader.tls (1.1.0rc4/master)\nSometimes I'm getting empty warnings now, on 1.1.0rc4 and master branch.\n(at least on rc3 as well)\n\n```\n2016-05-07 00:33:46 [scrapy.core.downloader.tls] WARNING: \n2016-05-07 00:33:47 [scrapy.core.downloader.tls] WARNING: \n2016-05-07 00:33:48 [scrapy.core.downloader.tls] WARNING: \n```\n\nIt happens in a broad linkcheck crawl; so I couldn't pinpoint what URLs might be responsible for that, at this time. The only other observation so far is, that it doesn't happen on a cache-replayed run (which might be obvious, as there is no TLS there).\n\n", "before_files": [{"content": "import logging\nfrom OpenSSL import SSL\n\n\nlogger = logging.getLogger(__name__)\n\nMETHOD_SSLv3 = 'SSLv3'\nMETHOD_TLS = 'TLS'\nMETHOD_TLSv10 = 'TLSv1.0'\nMETHOD_TLSv11 = 'TLSv1.1'\nMETHOD_TLSv12 = 'TLSv1.2'\n\nopenssl_methods = {\n METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)\n METHOD_SSLv3: SSL.SSLv3_METHOD, # SSL 3 (NOT recommended)\n METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only\n METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only\n METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only\n}\n\n# ClientTLSOptions requires a recent-enough version of Twisted\ntry:\n\n # taken from twisted/twisted/internet/_sslverify.py\n try:\n from OpenSSL.SSL import SSL_CB_HANDSHAKE_DONE, SSL_CB_HANDSHAKE_START\n except ImportError:\n SSL_CB_HANDSHAKE_START = 0x10\n SSL_CB_HANDSHAKE_DONE = 0x20\n\n from twisted.internet._sslverify import (ClientTLSOptions,\n _maybeSetHostNameIndication,\n verifyHostname,\n VerificationError)\n\n class ScrapyClientTLSOptions(ClientTLSOptions):\n # same as Twisted's ClientTLSOptions,\n # except that VerificationError is caught\n # and doesn't close the connection\n def _identityVerifyingInfoCallback(self, connection, where, ret):\n if where & SSL_CB_HANDSHAKE_START:\n _maybeSetHostNameIndication(connection, self._hostnameBytes)\n elif where & SSL_CB_HANDSHAKE_DONE:\n try:\n verifyHostname(connection, self._hostnameASCII)\n except VerificationError as e:\n logger.warning(e)\n\nexcept ImportError:\n # ImportError should not matter for older Twisted versions\n # as the above is not used in the fallback ScrapyClientContextFactory\n pass\n", "path": "scrapy/core/downloader/tls.py"}]}
| 1,319 | 127 |
gh_patches_debug_7730
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-3737
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SecureDrop backups from previous versions don't work if database migration has occurred
## Description
Backup restore (https://github.com/freedomofpress/securedrop/blob/develop/install_files/ansible-base/roles/restore/files/restore.py) script does not apply database migrations, and as such breaks the application upon backup restore. Manual workaround that appears to have no side-effects is to run `sudo dpkg-reconfigure securedrop-app-code` on the app server.
## Steps to Reproduce
1. Install SecureDrop 0.8.0
2. `securedrop-admin backup`
3. Upgrade to 0.9.0 (or higher)
4. `securedrop-admin restore`
5. Observe source and journalist interface return error 500
## Expected Behavior
The application should be operational.
## Actual Behavior
The source and journalist interfaces return error 500s.
## Comments
Running `sudo dpkg-reconfigure securedrop-app-code` calls the postinst script which will apply migration. Based on my testing, this seems to work reliably.
</issue>
<code>
[start of install_files/ansible-base/roles/restore/files/restore.py]
1 #!/usr/bin/python2.7
2 """
3 This script and backup archive should be copied to the App server and run by
4 the Ansible playbook. When run (as root), it restores the contents of the 0.3
5 backup file to the machine it's run on.
6
7 python restore.py sd-backup-TIMESTAMP.tar.gz
8 """
9
10 import os
11 import subprocess
12 import sys
13 import tarfile
14
15
16 def verify_args():
17 usage = """
18 Usage: restore.py <backup file>
19
20 <backup file> Path to a SecureDrop 0.3 backup created by backup.py"
21 """
22 if len(sys.argv) != 2:
23 print(usage)
24 sys.exit(1)
25
26 if not os.path.exists(sys.argv[1]):
27 print("<backup file> '{}' not found".format(sys.argv[1]))
28 sys.exit(1)
29
30 if os.geteuid() != 0:
31 print("This program must be run as root!")
32 sys.exit(1)
33
34
35 def main():
36 verify_args()
37
38 with tarfile.open(sys.argv[1], 'r:*') as backup:
39 # This assumes that both the old installation (source of the backup)
40 # and the new installation (destination of the restore) used the
41 # default paths for various locations.
42 backup.extractall(path='/')
43
44 # Reload Tor and the web server so they pick up the new configuration
45 # If the process exits with a non-zero return code, raises an exception.
46 subprocess.check_call(['service', 'apache2', 'restart'])
47 subprocess.check_call(['service', 'tor', 'reload'])
48
49
50 if __name__ == "__main__":
51 main()
52
[end of install_files/ansible-base/roles/restore/files/restore.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/install_files/ansible-base/roles/restore/files/restore.py b/install_files/ansible-base/roles/restore/files/restore.py
--- a/install_files/ansible-base/roles/restore/files/restore.py
+++ b/install_files/ansible-base/roles/restore/files/restore.py
@@ -45,6 +45,8 @@
# If the process exits with a non-zero return code, raises an exception.
subprocess.check_call(['service', 'apache2', 'restart'])
subprocess.check_call(['service', 'tor', 'reload'])
+ # Apply database migrations (if backed-up version < version to restore)
+ subprocess.check_call(['dpkg-reconfigure', 'securedrop-app-code'])
if __name__ == "__main__":
|
{"golden_diff": "diff --git a/install_files/ansible-base/roles/restore/files/restore.py b/install_files/ansible-base/roles/restore/files/restore.py\n--- a/install_files/ansible-base/roles/restore/files/restore.py\n+++ b/install_files/ansible-base/roles/restore/files/restore.py\n@@ -45,6 +45,8 @@\n # If the process exits with a non-zero return code, raises an exception.\n subprocess.check_call(['service', 'apache2', 'restart'])\n subprocess.check_call(['service', 'tor', 'reload'])\n+ # Apply database migrations (if backed-up version < version to restore)\n+ subprocess.check_call(['dpkg-reconfigure', 'securedrop-app-code'])\n \n \n if __name__ == \"__main__\":\n", "issue": "SecureDrop backups from previous versions don't work if database migration has occurred\n## Description\r\n\r\nBackup restore (https://github.com/freedomofpress/securedrop/blob/develop/install_files/ansible-base/roles/restore/files/restore.py) script does not apply database migrations, and as such breaks the application upon backup restore. Manual workaround that appears to have no side-effects is to run `sudo dpkg-reconfigure securedrop-app-code` on the app server.\r\n\r\n## Steps to Reproduce\r\n\r\n1. Install SecureDrop 0.8.0\r\n2. `securedrop-admin backup`\r\n3. Upgrade to 0.9.0 (or higher)\r\n4. `securedrop-admin restore`\r\n5. Observe source and journalist interface return error 500\r\n\r\n## Expected Behavior\r\n\r\nThe application should be operational.\r\n\r\n## Actual Behavior\r\n\r\nThe source and journalist interfaces return error 500s.\r\n\r\n## Comments\r\n\r\nRunning `sudo dpkg-reconfigure securedrop-app-code` calls the postinst script which will apply migration. Based on my testing, this seems to work reliably.\n", "before_files": [{"content": "#!/usr/bin/python2.7\n\"\"\"\nThis script and backup archive should be copied to the App server and run by\nthe Ansible playbook. When run (as root), it restores the contents of the 0.3\nbackup file to the machine it's run on.\n\npython restore.py sd-backup-TIMESTAMP.tar.gz\n\"\"\"\n\nimport os\nimport subprocess\nimport sys\nimport tarfile\n\n\ndef verify_args():\n usage = \"\"\"\nUsage: restore.py <backup file>\n\n <backup file> Path to a SecureDrop 0.3 backup created by backup.py\"\n \"\"\"\n if len(sys.argv) != 2:\n print(usage)\n sys.exit(1)\n\n if not os.path.exists(sys.argv[1]):\n print(\"<backup file> '{}' not found\".format(sys.argv[1]))\n sys.exit(1)\n\n if os.geteuid() != 0:\n print(\"This program must be run as root!\")\n sys.exit(1)\n\n\ndef main():\n verify_args()\n\n with tarfile.open(sys.argv[1], 'r:*') as backup:\n # This assumes that both the old installation (source of the backup)\n # and the new installation (destination of the restore) used the\n # default paths for various locations.\n backup.extractall(path='/')\n\n # Reload Tor and the web server so they pick up the new configuration\n # If the process exits with a non-zero return code, raises an exception.\n subprocess.check_call(['service', 'apache2', 'restart'])\n subprocess.check_call(['service', 'tor', 'reload'])\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "install_files/ansible-base/roles/restore/files/restore.py"}]}
| 1,225 | 162 |
gh_patches_debug_5171
|
rasdani/github-patches
|
git_diff
|
strawberry-graphql__strawberry-2581
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing type annotation in `strawberry.fastapi.BaseContext` causes mypy to trip
## Describe the Bug
I built a custom context based on [the guide in the docs](https://strawberry.rocks/docs/guides/authentication):
```python
from strawberry.fastapi import BaseContext
class CustomContext(BaseContext):
@property
def user(self) -> User:
...
async def get_context() -> CustomContext:
return CustomContext()
```
With that I receive the following mypy error:
```shell
error: Call to untyped function "CustomContext" in typed context [no-untyped-call]
```
For now, I added the following workaround to my code:
```python
class CustomContext(BaseContext):
if typing.TYPE_CHECKING:
def __init__(self) -> None:
pass
...
```
## System Information
- Operating system: macOS Monterey
- Strawberry version (if applicable): `0.158.1`
- FastAPI version (if applicable): `0.92.0`
- mypy version (if applicable): `0.991` (also tested with `1.0.1`)
## Additional Context
I'm happy to provide a PR to address the issue myself.
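For reference, a minimal sketch of the kind of fix this needs in `strawberry/fastapi/context.py`; only the `-> None` return annotation on `__init__` is new, the rest mirrors the class as it currently stands:

```python
from typing import Any, Optional, Union

from starlette.background import BackgroundTasks
from starlette.requests import Request
from starlette.responses import Response
from starlette.websockets import WebSocket


class BaseContext:
    connection_params: Optional[Any] = None

    # Annotating __init__ with "-> None" makes the constructor typed, so
    # mypy stops reporting no-untyped-call when subclasses are instantiated.
    def __init__(self) -> None:
        self.request: Optional[Union[Request, WebSocket]] = None
        self.background_tasks: Optional[BackgroundTasks] = None
        self.response: Optional[Response] = None
```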
</issue>
<code>
[start of strawberry/fastapi/context.py]
1 from typing import Any, Dict, Optional, Union
2
3 from starlette.background import BackgroundTasks
4 from starlette.requests import Request
5 from starlette.responses import Response
6 from starlette.websockets import WebSocket
7
8 CustomContext = Union["BaseContext", Dict[str, Any]]
9 MergedContext = Union[
10 "BaseContext", Dict[str, Union[Any, BackgroundTasks, Request, Response, WebSocket]]
11 ]
12
13
14 class BaseContext:
15 connection_params: Optional[Any] = None
16
17 def __init__(self):
18 self.request: Optional[Union[Request, WebSocket]] = None
19 self.background_tasks: Optional[BackgroundTasks] = None
20 self.response: Optional[Response] = None
21
[end of strawberry/fastapi/context.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/strawberry/fastapi/context.py b/strawberry/fastapi/context.py
--- a/strawberry/fastapi/context.py
+++ b/strawberry/fastapi/context.py
@@ -14,7 +14,7 @@
class BaseContext:
connection_params: Optional[Any] = None
- def __init__(self):
+ def __init__(self) -> None:
self.request: Optional[Union[Request, WebSocket]] = None
self.background_tasks: Optional[BackgroundTasks] = None
self.response: Optional[Response] = None
|
{"golden_diff": "diff --git a/strawberry/fastapi/context.py b/strawberry/fastapi/context.py\n--- a/strawberry/fastapi/context.py\n+++ b/strawberry/fastapi/context.py\n@@ -14,7 +14,7 @@\n class BaseContext:\n connection_params: Optional[Any] = None\n \n- def __init__(self):\n+ def __init__(self) -> None:\n self.request: Optional[Union[Request, WebSocket]] = None\n self.background_tasks: Optional[BackgroundTasks] = None\n self.response: Optional[Response] = None\n", "issue": "Missing type annotation in `strawberry.fastapi.BaseContext` causes mypy to trip\n## Describe the Bug\r\n\r\nI built a custom context based on [the guide in the docs](https://strawberry.rocks/docs/guides/authentication):\r\n\r\n```python\r\nfrom strawberry.fastapi import BaseContext\r\n\r\nclass CustomContext(BaseContext):\r\n @property\r\n def user(self) -> User:\r\n ...\r\n\r\nasync def get_context() -> CustomContext:\r\n return CustomContext()\r\n```\r\n\r\nWith that I receive the following mypy error:\r\n```shell\r\n error: Call to untyped function \"CustomContext\" in typed context [no-untyped-call]\r\n```\r\n\r\nFor now, I added the following workaround to my code:\r\n```python\r\nclass CustomContext(BaseContext):\r\n if typing.TYPE_CHECKING:\r\n def __init__(self) -> None:\r\n pass\r\n \r\n ...\r\n```\r\n\r\n## System Information\r\n\r\n - Operating system: macOS Monterey\r\n - Strawberry version (if applicable): `0.158.1`\r\n - FastAPI version (if applicable): `0.92.0`\r\n - mypy version (if applicable): `0.991` (also tested with `1.0.1`)\r\n\r\n## Additional Context\r\n\r\nI'm happy to provide a PR to address the issue myself.\r\n\n", "before_files": [{"content": "from typing import Any, Dict, Optional, Union\n\nfrom starlette.background import BackgroundTasks\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.websockets import WebSocket\n\nCustomContext = Union[\"BaseContext\", Dict[str, Any]]\nMergedContext = Union[\n \"BaseContext\", Dict[str, Union[Any, BackgroundTasks, Request, Response, WebSocket]]\n]\n\n\nclass BaseContext:\n connection_params: Optional[Any] = None\n\n def __init__(self):\n self.request: Optional[Union[Request, WebSocket]] = None\n self.background_tasks: Optional[BackgroundTasks] = None\n self.response: Optional[Response] = None\n", "path": "strawberry/fastapi/context.py"}]}
| 981 | 128 |
gh_patches_debug_11668
|
rasdani/github-patches
|
git_diff
|
boto__boto-1776
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The attrs parameter to Item.__init__ and table.new_item doesn't work as documented
In dynamodb/table.py, `new_item` is documented to take an `attrs` parameter which will be used to populate the new item.
However, this doesn't work:
```python
In [3]: item = table.new_item('key', attrs={"a":"test"})
In [4]: item.save()
Out[4]: {'ConsumedCapacityUnits': 1.0}
In [5]: read = table.get_item('key')
In [6]: print read
{'key': 'key'}
```
The bug is in `item.py`, in `__init__`: `self._updates` is still `None` while the `attrs` dictionary is being copied, and `__setitem__` skips recording the change when `_updates` is `None`, so the extra attributes are never sent to DynamoDB on `save()`.
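A stripped-down sketch of that mechanism (not the real boto class; hash/range-key handling and the other update actions are omitted):

```python
class Item(dict):
    """Stripped-down model of the bug; hash/range-key handling is omitted."""

    def __init__(self, attrs=None):
        self._updates = None             # still None while attrs are copied...
        for key, value in (attrs or {}).items():
            self[key] = value            # ...so these writes are never queued
        self._updates = {}               # fix: initialise this before the loop

    def __setitem__(self, key, value):
        if self._updates is not None:    # skipped during __init__ in the buggy order
            self._updates[key] = ("PUT", value)
        dict.__setitem__(self, key, value)
```

Moving `self._updates = {}` above the copy loop makes `__setitem__` queue a `PUT` for every attribute passed in `attrs`, so `save()` actually persists them.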
</issue>
<code>
[start of boto/dynamodb/item.py]
1 # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
2 # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining a
5 # copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish, dis-
8 # tribute, sublicense, and/or sell copies of the Software, and to permit
9 # persons to whom the Software is furnished to do so, subject to the fol-
10 # lowing conditions:
11 #
12 # The above copyright notice and this permission notice shall be included
13 # in all copies or substantial portions of the Software.
14 #
15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
17 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
18 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
19 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 # IN THE SOFTWARE.
22 #
23
24 from boto.dynamodb.exceptions import DynamoDBItemError
25
26
27 class Item(dict):
28 """
29 An item in Amazon DynamoDB.
30
31 :ivar hash_key: The HashKey of this item.
32 :ivar range_key: The RangeKey of this item or None if no RangeKey
33 is defined.
34 :ivar hash_key_name: The name of the HashKey associated with this item.
35 :ivar range_key_name: The name of the RangeKey associated with this item.
36 :ivar table: The Table this item belongs to.
37 """
38
39 def __init__(self, table, hash_key=None, range_key=None, attrs=None):
40 self.table = table
41 self._updates = None
42 self._hash_key_name = self.table.schema.hash_key_name
43 self._range_key_name = self.table.schema.range_key_name
44 if attrs == None:
45 attrs = {}
46 if hash_key == None:
47 hash_key = attrs.get(self._hash_key_name, None)
48 self[self._hash_key_name] = hash_key
49 if self._range_key_name:
50 if range_key == None:
51 range_key = attrs.get(self._range_key_name, None)
52 self[self._range_key_name] = range_key
53 for key, value in attrs.items():
54 if key != self._hash_key_name and key != self._range_key_name:
55 self[key] = value
56 self.consumed_units = 0
57 self._updates = {}
58
59 @property
60 def hash_key(self):
61 return self[self._hash_key_name]
62
63 @property
64 def range_key(self):
65 return self.get(self._range_key_name)
66
67 @property
68 def hash_key_name(self):
69 return self._hash_key_name
70
71 @property
72 def range_key_name(self):
73 return self._range_key_name
74
75 def add_attribute(self, attr_name, attr_value):
76 """
77 Queue the addition of an attribute to an item in DynamoDB.
78 This will eventually result in an UpdateItem request being issued
79 with an update action of ADD when the save method is called.
80
81 :type attr_name: str
82 :param attr_name: Name of the attribute you want to alter.
83
84 :type attr_value: int|long|float|set
85 :param attr_value: Value which is to be added to the attribute.
86 """
87 self._updates[attr_name] = ("ADD", attr_value)
88
89 def delete_attribute(self, attr_name, attr_value=None):
90 """
91 Queue the deletion of an attribute from an item in DynamoDB.
92 This call will result in a UpdateItem request being issued
93 with update action of DELETE when the save method is called.
94
95 :type attr_name: str
96 :param attr_name: Name of the attribute you want to alter.
97
98 :type attr_value: set
99 :param attr_value: A set of values to be removed from the attribute.
100 This parameter is optional. If None, the whole attribute is
101 removed from the item.
102 """
103 self._updates[attr_name] = ("DELETE", attr_value)
104
105 def put_attribute(self, attr_name, attr_value):
106 """
107 Queue the putting of an attribute to an item in DynamoDB.
108 This call will result in an UpdateItem request being issued
109 with the update action of PUT when the save method is called.
110
111 :type attr_name: str
112 :param attr_name: Name of the attribute you want to alter.
113
114 :type attr_value: int|long|float|str|set
115 :param attr_value: New value of the attribute.
116 """
117 self._updates[attr_name] = ("PUT", attr_value)
118
119 def save(self, expected_value=None, return_values=None):
120 """
121 Commits pending updates to Amazon DynamoDB.
122
123 :type expected_value: dict
124 :param expected_value: A dictionary of name/value pairs that
125 you expect. This dictionary should have name/value pairs
126 where the name is the name of the attribute and the value is
127 either the value you are expecting or False if you expect
128 the attribute not to exist.
129
130 :type return_values: str
131 :param return_values: Controls the return of attribute name/value pairs
132 before they were updated. Possible values are: None, 'ALL_OLD',
133 'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is
134 specified and the item is overwritten, the content of the old item
135 is returned. If 'ALL_NEW' is specified, then all the attributes of
136 the new version of the item are returned. If 'UPDATED_NEW' is
137 specified, the new versions of only the updated attributes are
138 returned.
139 """
140 return self.table.layer2.update_item(self, expected_value,
141 return_values)
142
143 def delete(self, expected_value=None, return_values=None):
144 """
145 Delete the item from DynamoDB.
146
147 :type expected_value: dict
148 :param expected_value: A dictionary of name/value pairs that
149 you expect. This dictionary should have name/value pairs
150 where the name is the name of the attribute and the value
151 is either the value you are expecting or False if you expect
152 the attribute not to exist.
153
154 :type return_values: str
155 :param return_values: Controls the return of attribute
156 name-value pairs before then were changed. Possible
157 values are: None or 'ALL_OLD'. If 'ALL_OLD' is
158 specified and the item is overwritten, the content
159 of the old item is returned.
160 """
161 return self.table.layer2.delete_item(self, expected_value,
162 return_values)
163
164 def put(self, expected_value=None, return_values=None):
165 """
166 Store a new item or completely replace an existing item
167 in Amazon DynamoDB.
168
169 :type expected_value: dict
170 :param expected_value: A dictionary of name/value pairs that
171 you expect. This dictionary should have name/value pairs
172 where the name is the name of the attribute and the value
173 is either the value you are expecting or False if you expect
174 the attribute not to exist.
175
176 :type return_values: str
177 :param return_values: Controls the return of attribute
178 name-value pairs before then were changed. Possible
179 values are: None or 'ALL_OLD'. If 'ALL_OLD' is
180 specified and the item is overwritten, the content
181 of the old item is returned.
182 """
183 return self.table.layer2.put_item(self, expected_value, return_values)
184
185 def __setitem__(self, key, value):
186 """Overrwrite the setter to instead update the _updates
187 method so this can act like a normal dict"""
188 if self._updates is not None:
189 self.put_attribute(key, value)
190 dict.__setitem__(self, key, value)
191
192 def __delitem__(self, key):
193 """Remove this key from the items"""
194 if self._updates is not None:
195 self.delete_attribute(key)
196 dict.__delitem__(self, key)
197
198 # Allow this item to still be pickled
199 def __getstate__(self):
200 return self.__dict__
201 def __setstate__(self, d):
202 self.__dict__.update(d)
203
[end of boto/dynamodb/item.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/boto/dynamodb/item.py b/boto/dynamodb/item.py
--- a/boto/dynamodb/item.py
+++ b/boto/dynamodb/item.py
@@ -50,11 +50,11 @@
if range_key == None:
range_key = attrs.get(self._range_key_name, None)
self[self._range_key_name] = range_key
+ self._updates = {}
for key, value in attrs.items():
if key != self._hash_key_name and key != self._range_key_name:
self[key] = value
self.consumed_units = 0
- self._updates = {}
@property
def hash_key(self):
|
{"golden_diff": "diff --git a/boto/dynamodb/item.py b/boto/dynamodb/item.py\n--- a/boto/dynamodb/item.py\n+++ b/boto/dynamodb/item.py\n@@ -50,11 +50,11 @@\n if range_key == None:\n range_key = attrs.get(self._range_key_name, None)\n self[self._range_key_name] = range_key\n+ self._updates = {}\n for key, value in attrs.items():\n if key != self._hash_key_name and key != self._range_key_name:\n self[key] = value\n self.consumed_units = 0\n- self._updates = {}\n \n @property\n def hash_key(self):\n", "issue": "attrs parameter to Item.__init__ and table.new_item don't work as documented\nIn dynamodb/table.py, `new_item` is documented to take an `attrs` parameter which will be used to populate the new item.\n\nHowever, this doesn't work:\n`\n In [3]: item = table.new_item('key', attrs={\"a\":\"test\"})\n In [4]: item.save()\n Out[4]: {'ConsumedCapacityUnits': 1.0}\n In [5]: read = table.get_item('key')\n In [6]: print read\n {'key': 'key'}\n`\n\nThe bug is in item.py in `__init__` - `self._updates` is `None` until after the `attrs` dictionary is copied - but `__setitem__` doesn't update the `_updates` dictionary if it is None.\n\n", "before_files": [{"content": "# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\n\nfrom boto.dynamodb.exceptions import DynamoDBItemError\n\n\nclass Item(dict):\n \"\"\"\n An item in Amazon DynamoDB.\n\n :ivar hash_key: The HashKey of this item.\n :ivar range_key: The RangeKey of this item or None if no RangeKey\n is defined.\n :ivar hash_key_name: The name of the HashKey associated with this item.\n :ivar range_key_name: The name of the RangeKey associated with this item.\n :ivar table: The Table this item belongs to.\n \"\"\"\n \n def __init__(self, table, hash_key=None, range_key=None, attrs=None):\n self.table = table\n self._updates = None\n self._hash_key_name = self.table.schema.hash_key_name\n self._range_key_name = self.table.schema.range_key_name\n if attrs == None:\n attrs = {}\n if hash_key == None:\n hash_key = attrs.get(self._hash_key_name, None)\n self[self._hash_key_name] = hash_key\n if self._range_key_name:\n if range_key == None:\n range_key = attrs.get(self._range_key_name, None)\n self[self._range_key_name] = range_key\n for key, value in attrs.items():\n if key != self._hash_key_name and key != self._range_key_name:\n self[key] = value\n self.consumed_units = 0\n self._updates = {}\n\n @property\n def hash_key(self):\n return self[self._hash_key_name]\n\n @property\n def range_key(self):\n return self.get(self._range_key_name)\n\n @property\n def hash_key_name(self):\n return self._hash_key_name\n\n @property\n def range_key_name(self):\n return self._range_key_name\n\n def add_attribute(self, attr_name, attr_value):\n \"\"\"\n Queue the addition of an attribute to an item in DynamoDB.\n This will eventually result in an UpdateItem request being issued\n with an update action of ADD when the save method is called.\n\n :type attr_name: str\n :param attr_name: Name of the attribute you want to alter.\n\n :type attr_value: int|long|float|set\n :param attr_value: Value which is to be added to the attribute.\n \"\"\"\n self._updates[attr_name] = (\"ADD\", attr_value)\n\n def delete_attribute(self, attr_name, attr_value=None):\n \"\"\"\n Queue the deletion of an attribute from an item in DynamoDB.\n This call will result in a UpdateItem request being issued\n with update action of DELETE when the save method is called.\n\n :type attr_name: str\n :param attr_name: Name of the attribute you want to alter.\n\n :type attr_value: set\n :param attr_value: A set of values to be removed from the attribute.\n This parameter is optional. If None, the whole attribute is\n removed from the item.\n \"\"\"\n self._updates[attr_name] = (\"DELETE\", attr_value)\n\n def put_attribute(self, attr_name, attr_value):\n \"\"\"\n Queue the putting of an attribute to an item in DynamoDB.\n This call will result in an UpdateItem request being issued\n with the update action of PUT when the save method is called.\n\n :type attr_name: str\n :param attr_name: Name of the attribute you want to alter.\n\n :type attr_value: int|long|float|str|set\n :param attr_value: New value of the attribute.\n \"\"\"\n self._updates[attr_name] = (\"PUT\", attr_value)\n\n def save(self, expected_value=None, return_values=None):\n \"\"\"\n Commits pending updates to Amazon DynamoDB.\n\n :type expected_value: dict\n :param expected_value: A dictionary of name/value pairs that\n you expect. 
This dictionary should have name/value pairs\n where the name is the name of the attribute and the value is\n either the value you are expecting or False if you expect\n the attribute not to exist.\n\n :type return_values: str\n :param return_values: Controls the return of attribute name/value pairs\n before they were updated. Possible values are: None, 'ALL_OLD',\n 'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is\n specified and the item is overwritten, the content of the old item\n is returned. If 'ALL_NEW' is specified, then all the attributes of\n the new version of the item are returned. If 'UPDATED_NEW' is\n specified, the new versions of only the updated attributes are\n returned.\n \"\"\"\n return self.table.layer2.update_item(self, expected_value,\n return_values)\n\n def delete(self, expected_value=None, return_values=None):\n \"\"\"\n Delete the item from DynamoDB.\n\n :type expected_value: dict\n :param expected_value: A dictionary of name/value pairs that\n you expect. This dictionary should have name/value pairs\n where the name is the name of the attribute and the value\n is either the value you are expecting or False if you expect\n the attribute not to exist.\n\n :type return_values: str\n :param return_values: Controls the return of attribute\n name-value pairs before then were changed. Possible\n values are: None or 'ALL_OLD'. If 'ALL_OLD' is\n specified and the item is overwritten, the content\n of the old item is returned.\n \"\"\"\n return self.table.layer2.delete_item(self, expected_value,\n return_values)\n\n def put(self, expected_value=None, return_values=None):\n \"\"\"\n Store a new item or completely replace an existing item\n in Amazon DynamoDB.\n\n :type expected_value: dict\n :param expected_value: A dictionary of name/value pairs that\n you expect. This dictionary should have name/value pairs\n where the name is the name of the attribute and the value\n is either the value you are expecting or False if you expect\n the attribute not to exist.\n\n :type return_values: str\n :param return_values: Controls the return of attribute\n name-value pairs before then were changed. Possible\n values are: None or 'ALL_OLD'. If 'ALL_OLD' is\n specified and the item is overwritten, the content\n of the old item is returned.\n \"\"\"\n return self.table.layer2.put_item(self, expected_value, return_values)\n\n def __setitem__(self, key, value):\n \"\"\"Overrwrite the setter to instead update the _updates\n method so this can act like a normal dict\"\"\"\n if self._updates is not None:\n self.put_attribute(key, value)\n dict.__setitem__(self, key, value)\n\n def __delitem__(self, key):\n \"\"\"Remove this key from the items\"\"\"\n if self._updates is not None:\n self.delete_attribute(key)\n dict.__delitem__(self, key)\n\n # Allow this item to still be pickled\n def __getstate__(self):\n return self.__dict__\n def __setstate__(self, d):\n self.__dict__.update(d)\n", "path": "boto/dynamodb/item.py"}]}
| 3,046 | 152 |
gh_patches_debug_21603
|
rasdani/github-patches
|
git_diff
|
deepchecks__deepchecks-1190
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG][CV] Model Error Analysis - the exception for task OTHER is different from the other checks
**Describe the bug**
When running an OTHER task, the Model Error Analysis check raises a different exception from the rest of the checks. This needs to be standardized.
**To Reproduce**
Run an "OTHER" task in the model performance suite.
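One way to standardize it, sketched as a change to `ModelErrorAnalysis.initialize_run`; this assumes the shared `Context.assert_task_type` helper that the other vision checks use for task validation:

```python
def initialize_run(self, context: Context):
    """Initialize property and score lists."""
    # Reject unsupported (OTHER) tasks up front with the same shared helper
    # the other checks use, instead of failing later with a one-off error.
    context.assert_task_type(TaskType.CLASSIFICATION, TaskType.OBJECT_DETECTION)
    self._train_properties = defaultdict(list)
    self._test_properties = defaultdict(list)
    self._train_scores = []
    self._test_scores = []
```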
</issue>
<code>
[start of deepchecks/vision/checks/performance/model_error_analysis.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Module containing class performance check."""
12 import typing as t
13 from collections import defaultdict
14
15 import pandas as pd
16 import torch
17
18 from deepchecks.core import CheckResult, DatasetKind
19 from deepchecks.core.errors import DeepchecksValueError
20 from deepchecks.utils.performance.error_model import error_model_display_dataframe, model_error_contribution
21 from deepchecks.utils.single_sample_metrics import per_sample_cross_entropy
22 from deepchecks.vision.utils.image_properties import default_image_properties, validate_properties
23 from deepchecks.vision import TrainTestCheck, Context, Batch
24 from deepchecks.vision.vision_data import TaskType
25 from deepchecks.vision.metrics_utils.iou_utils import per_sample_mean_iou
26
27 __all__ = ['ModelErrorAnalysis']
28
29
30 class ModelErrorAnalysis(TrainTestCheck):
31 """Find the properties that best split the data into segments of high and low model error.
32
33 The check trains a regression model to predict the error of the user's model. Then, the properties scoring the
34 highest feature importance for the error regression model are selected and the distribution of the error vs the
35 property values is plotted. The check results are shown only if the error regression model manages to predict the
36 error well enough.
37
38 Parameters
39 ----------
40 image_properties : List[Dict[str, Any]], default: None
41 List of properties. Replaces the default deepchecks properties.
42 Each property is dictionary with keys 'name' (str), 'method' (Callable) and 'output_type' (str),
43 representing attributes of said method. 'output_type' must be one of 'continuous'/'discrete'
44 max_properties_to_show : int , default: 3
45 maximal number of properties to show error distribution for.
46 min_property_contribution : float , default: 0.15
47 minimum feature importance of a property to the error regression model
48 in order to show the property.
49 min_error_model_score : float , default: 0.5
50 minimum r^2 score of the error regression model for displaying the check.
51 min_segment_size : float , default: 0.05
52 minimal fraction of data that can comprise a weak segment.
53 n_display_samples : int , default: 5_000
54 number of samples to display in scatter plot.
55 random_state : int, default: 42
56 random seed for all check internals.
57 """
58
59 def __init__(self,
60 image_properties: t.List[t.Dict[str, t.Any]] = None,
61 max_properties_to_show: int = 20,
62 min_property_contribution: float = 0.15,
63 min_error_model_score: float = 0.5,
64 min_segment_size: float = 0.05,
65 n_display_samples: int = 5_000,
66 random_state: int = 42,
67 **kwargs):
68 super().__init__(**kwargs)
69 self.random_state = random_state
70 self.min_error_model_score = min_error_model_score
71 self.min_segment_size = min_segment_size
72 self.max_properties_to_show = max_properties_to_show
73 self.min_property_contribution = min_property_contribution
74 self.n_display_samples = n_display_samples
75 self._train_properties = None
76 self._test_properties = None
77 self._train_scores = None
78 self._test_scores = None
79
80 if image_properties is None:
81 self.image_properties = default_image_properties
82 else:
83 validate_properties(image_properties)
84 self.image_properties = image_properties
85
86 def initialize_run(self, context: Context):
87 """Initialize property and score lists."""
88 self._train_properties = defaultdict(list)
89 self._test_properties = defaultdict(list)
90 self._train_scores = []
91 self._test_scores = []
92
93 def update(self, context: Context, batch: Batch, dataset_kind):
94 """Accumulate property data of images and scores."""
95 if dataset_kind == DatasetKind.TRAIN:
96 dataset = context.train
97 properties = self._train_properties
98 scores = self._train_scores
99 elif dataset_kind == DatasetKind.TEST:
100 dataset = context.test
101 properties = self._test_properties
102 scores = self._test_scores
103 else:
104 raise RuntimeError(
105 'Internal Error! Part of code that must '
106 'be unreacheable was reached.'
107 )
108
109 images = batch.images
110 predictions = batch.predictions
111 labels = batch.labels
112
113 for single_property in self.image_properties:
114 properties[single_property['name']].extend(single_property['method'](images))
115
116 if dataset.task_type == TaskType.CLASSIFICATION:
117 def scoring_func(predictions, labels):
118 return per_sample_cross_entropy(labels, predictions)
119 elif dataset.task_type == TaskType.OBJECT_DETECTION:
120 def scoring_func(predictions, labels):
121 return per_sample_mean_iou(predictions, labels)
122 else:
123 raise DeepchecksValueError(f'Unsupported task type {dataset.task_type}')
124
125 if isinstance(predictions, torch.Tensor):
126 predictions = predictions.cpu().detach().numpy()
127 if isinstance(labels, torch.Tensor):
128 labels = labels.cpu().detach().numpy()
129
130 # get score using scoring_function
131 scores.extend(scoring_func(predictions, labels))
132
133 def compute(self, context: Context) -> CheckResult:
134 """Find segments that contribute to model error.
135
136 Returns
137 -------
138 CheckResult:
139 value: dictionary of details for each property segment that split the effect on the error of the model
140 display: plots of results
141 """
142 # build dataframe of properties and scores
143 train_property_df = pd.DataFrame(self._train_properties).dropna(axis=1, how='all')
144 test_property_df = pd.DataFrame(self._test_properties)[train_property_df.columns]
145
146 error_fi, error_model_predicted = \
147 model_error_contribution(train_property_df,
148 self._train_scores,
149 test_property_df,
150 self._test_scores,
151 train_property_df.columns.to_list(),
152 [],
153 min_error_model_score=self.min_error_model_score,
154 random_state=self.random_state)
155
156 display, value = error_model_display_dataframe(error_fi,
157 error_model_predicted,
158 test_property_df,
159 [],
160 self.max_properties_to_show,
161 self.min_property_contribution,
162 self.n_display_samples,
163 self.min_segment_size,
164 self.random_state)
165
166 headnote = """<span>
167 The following graphs show the distribution of error for top properties that are most useful for
168 distinguishing high error samples from low error samples.
169 </span>"""
170 display = [headnote] + display if display else None
171
172 return CheckResult(value, display=display)
173
[end of deepchecks/vision/checks/performance/model_error_analysis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/deepchecks/vision/checks/performance/model_error_analysis.py b/deepchecks/vision/checks/performance/model_error_analysis.py
--- a/deepchecks/vision/checks/performance/model_error_analysis.py
+++ b/deepchecks/vision/checks/performance/model_error_analysis.py
@@ -85,6 +85,7 @@
def initialize_run(self, context: Context):
"""Initialize property and score lists."""
+ context.assert_task_type(TaskType.CLASSIFICATION, TaskType.OBJECT_DETECTION)
self._train_properties = defaultdict(list)
self._test_properties = defaultdict(list)
self._train_scores = []
@@ -120,7 +121,7 @@
def scoring_func(predictions, labels):
return per_sample_mean_iou(predictions, labels)
else:
- raise DeepchecksValueError(f'Unsupported task type {dataset.task_type}')
+ raise DeepchecksValueError(f'Should not reach here! Unsupported task type {dataset.task_type}')
if isinstance(predictions, torch.Tensor):
predictions = predictions.cpu().detach().numpy()
|
{"golden_diff": "diff --git a/deepchecks/vision/checks/performance/model_error_analysis.py b/deepchecks/vision/checks/performance/model_error_analysis.py\n--- a/deepchecks/vision/checks/performance/model_error_analysis.py\n+++ b/deepchecks/vision/checks/performance/model_error_analysis.py\n@@ -85,6 +85,7 @@\n \n def initialize_run(self, context: Context):\n \"\"\"Initialize property and score lists.\"\"\"\n+ context.assert_task_type(TaskType.CLASSIFICATION, TaskType.OBJECT_DETECTION)\n self._train_properties = defaultdict(list)\n self._test_properties = defaultdict(list)\n self._train_scores = []\n@@ -120,7 +121,7 @@\n def scoring_func(predictions, labels):\n return per_sample_mean_iou(predictions, labels)\n else:\n- raise DeepchecksValueError(f'Unsupported task type {dataset.task_type}')\n+ raise DeepchecksValueError(f'Should not reach here! Unsupported task type {dataset.task_type}')\n \n if isinstance(predictions, torch.Tensor):\n predictions = predictions.cpu().detach().numpy()\n", "issue": "[BUG][CV] Model Error Analysis - the exception for task OTHER is different than other checks\n**Describe the bug**\r\nWhen running a OTHER task, the Model Error Analysis exception is different from the rest of the checks. Need to standardize it.\r\n\r\n**To Reproduce**\r\nRun a \"OTHER\" task in the model performance suite. \n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module containing class performance check.\"\"\"\nimport typing as t\nfrom collections import defaultdict\n\nimport pandas as pd\nimport torch\n\nfrom deepchecks.core import CheckResult, DatasetKind\nfrom deepchecks.core.errors import DeepchecksValueError\nfrom deepchecks.utils.performance.error_model import error_model_display_dataframe, model_error_contribution\nfrom deepchecks.utils.single_sample_metrics import per_sample_cross_entropy\nfrom deepchecks.vision.utils.image_properties import default_image_properties, validate_properties\nfrom deepchecks.vision import TrainTestCheck, Context, Batch\nfrom deepchecks.vision.vision_data import TaskType\nfrom deepchecks.vision.metrics_utils.iou_utils import per_sample_mean_iou\n\n__all__ = ['ModelErrorAnalysis']\n\n\nclass ModelErrorAnalysis(TrainTestCheck):\n \"\"\"Find the properties that best split the data into segments of high and low model error.\n\n The check trains a regression model to predict the error of the user's model. Then, the properties scoring the\n highest feature importance for the error regression model are selected and the distribution of the error vs the\n property values is plotted. The check results are shown only if the error regression model manages to predict the\n error well enough.\n\n Parameters\n ----------\n image_properties : List[Dict[str, Any]], default: None\n List of properties. Replaces the default deepchecks properties.\n Each property is dictionary with keys 'name' (str), 'method' (Callable) and 'output_type' (str),\n representing attributes of said method. 
'output_type' must be one of 'continuous'/'discrete'\n max_properties_to_show : int , default: 3\n maximal number of properties to show error distribution for.\n min_property_contribution : float , default: 0.15\n minimum feature importance of a property to the error regression model\n in order to show the property.\n min_error_model_score : float , default: 0.5\n minimum r^2 score of the error regression model for displaying the check.\n min_segment_size : float , default: 0.05\n minimal fraction of data that can comprise a weak segment.\n n_display_samples : int , default: 5_000\n number of samples to display in scatter plot.\n random_state : int, default: 42\n random seed for all check internals.\n \"\"\"\n\n def __init__(self,\n image_properties: t.List[t.Dict[str, t.Any]] = None,\n max_properties_to_show: int = 20,\n min_property_contribution: float = 0.15,\n min_error_model_score: float = 0.5,\n min_segment_size: float = 0.05,\n n_display_samples: int = 5_000,\n random_state: int = 42,\n **kwargs):\n super().__init__(**kwargs)\n self.random_state = random_state\n self.min_error_model_score = min_error_model_score\n self.min_segment_size = min_segment_size\n self.max_properties_to_show = max_properties_to_show\n self.min_property_contribution = min_property_contribution\n self.n_display_samples = n_display_samples\n self._train_properties = None\n self._test_properties = None\n self._train_scores = None\n self._test_scores = None\n\n if image_properties is None:\n self.image_properties = default_image_properties\n else:\n validate_properties(image_properties)\n self.image_properties = image_properties\n\n def initialize_run(self, context: Context):\n \"\"\"Initialize property and score lists.\"\"\"\n self._train_properties = defaultdict(list)\n self._test_properties = defaultdict(list)\n self._train_scores = []\n self._test_scores = []\n\n def update(self, context: Context, batch: Batch, dataset_kind):\n \"\"\"Accumulate property data of images and scores.\"\"\"\n if dataset_kind == DatasetKind.TRAIN:\n dataset = context.train\n properties = self._train_properties\n scores = self._train_scores\n elif dataset_kind == DatasetKind.TEST:\n dataset = context.test\n properties = self._test_properties\n scores = self._test_scores\n else:\n raise RuntimeError(\n 'Internal Error! 
Part of code that must '\n 'be unreacheable was reached.'\n )\n\n images = batch.images\n predictions = batch.predictions\n labels = batch.labels\n\n for single_property in self.image_properties:\n properties[single_property['name']].extend(single_property['method'](images))\n\n if dataset.task_type == TaskType.CLASSIFICATION:\n def scoring_func(predictions, labels):\n return per_sample_cross_entropy(labels, predictions)\n elif dataset.task_type == TaskType.OBJECT_DETECTION:\n def scoring_func(predictions, labels):\n return per_sample_mean_iou(predictions, labels)\n else:\n raise DeepchecksValueError(f'Unsupported task type {dataset.task_type}')\n\n if isinstance(predictions, torch.Tensor):\n predictions = predictions.cpu().detach().numpy()\n if isinstance(labels, torch.Tensor):\n labels = labels.cpu().detach().numpy()\n\n # get score using scoring_function\n scores.extend(scoring_func(predictions, labels))\n\n def compute(self, context: Context) -> CheckResult:\n \"\"\"Find segments that contribute to model error.\n\n Returns\n -------\n CheckResult:\n value: dictionary of details for each property segment that split the effect on the error of the model\n display: plots of results\n \"\"\"\n # build dataframe of properties and scores\n train_property_df = pd.DataFrame(self._train_properties).dropna(axis=1, how='all')\n test_property_df = pd.DataFrame(self._test_properties)[train_property_df.columns]\n\n error_fi, error_model_predicted = \\\n model_error_contribution(train_property_df,\n self._train_scores,\n test_property_df,\n self._test_scores,\n train_property_df.columns.to_list(),\n [],\n min_error_model_score=self.min_error_model_score,\n random_state=self.random_state)\n\n display, value = error_model_display_dataframe(error_fi,\n error_model_predicted,\n test_property_df,\n [],\n self.max_properties_to_show,\n self.min_property_contribution,\n self.n_display_samples,\n self.min_segment_size,\n self.random_state)\n\n headnote = \"\"\"<span>\n The following graphs show the distribution of error for top properties that are most useful for\n distinguishing high error samples from low error samples.\n </span>\"\"\"\n display = [headnote] + display if display else None\n\n return CheckResult(value, display=display)\n", "path": "deepchecks/vision/checks/performance/model_error_analysis.py"}]}
| 2,525 | 233 |
gh_patches_debug_11212
|
rasdani/github-patches
|
git_diff
|
conda__conda-build-2420
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
noarch: python packages strip data_files
In setup.py, `data_files` can be used to deliver support files, especially to `$PREFIX/share`. `noarch: python` [doesn't know how to handle these](https://github.com/conda/conda-build/blob/3.0.23/conda_build/noarch_python.py#L90) and ends up omitting them.
It seems like 'unknown' files in the prefix should be included as-is relative to $PREFIX (exactly as in a non-noarch package) rather than excluded.
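A sketch of that behaviour; `handle_unknown_file` is a hypothetical stand-in for the final `else:` branch of conda-build's `handle_file`, which currently deletes these files:

```python
import logging
from os.path import join

log = logging.getLogger(__name__)

def handle_unknown_file(f, prefix):
    """Keep files that are not site-packages, scripts or Examples.

    Instead of deleting them, leave them where they are relative to
    $PREFIX so that setup.py data_files (for example under share/)
    survive a noarch: python build, exactly as in a non-noarch package.
    """
    log.debug("Don't know how to handle file: %s. Including it as-is.", f)
    return join(prefix, f)  # path is left untouched
```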
</issue>
<code>
[start of conda_build/noarch_python.py]
1 import io
2 import json
3 import locale
4 import logging
5 import os
6 from os.path import basename, dirname, isdir, join, isfile
7 import shutil
8 import sys
9
10 ISWIN = sys.platform.startswith('win')
11
12
13 def _force_dir(dirname):
14 if not isdir(dirname):
15 os.makedirs(dirname)
16
17
18 def _error_exit(exit_message):
19 sys.exit("[noarch_python] %s" % exit_message)
20
21
22 def rewrite_script(fn, prefix):
23 """Take a file from the bin directory and rewrite it into the python-scripts
24 directory with the same permissions after it passes some sanity checks for
25 noarch pacakges"""
26
27 # Load and check the source file for not being a binary
28 src = join(prefix, 'Scripts' if ISWIN else 'bin', fn)
29 with io.open(src, encoding=locale.getpreferredencoding()) as fi:
30 try:
31 data = fi.read()
32 except UnicodeDecodeError: # file is binary
33 _error_exit("Noarch package contains binary script: %s" % fn)
34 src_mode = os.stat(src).st_mode
35 os.unlink(src)
36
37 # Get rid of '-script.py' suffix on Windows
38 if ISWIN and fn.endswith('-script.py'):
39 fn = fn[:-10]
40
41 # Rewrite the file to the python-scripts directory
42 dst_dir = join(prefix, 'python-scripts')
43 _force_dir(dst_dir)
44 dst = join(dst_dir, fn)
45 with open(dst, 'w') as fo:
46 fo.write(data)
47 os.chmod(dst, src_mode)
48 return fn
49
50
51 def handle_file(f, d, prefix):
52 """Process a file for inclusion in a noarch python package.
53 """
54 path = join(prefix, f)
55
56 # Ignore egg-info and pyc files.
57 if f.endswith(('.egg-info', '.pyc', '.pyo')):
58 os.unlink(path)
59
60 # The presence of .so indicated this is not a noarch package
61 elif f.endswith(('.so', '.dll', '.pyd', '.exe', '.dylib')):
62 if f.endswith('.exe') and (isfile(os.path.join(prefix, f[:-4] + '-script.py')) or
63 basename(f[:-4]) in d['python-scripts']):
64 os.unlink(path) # this is an entry point with a matching xx-script.py
65 return
66 _error_exit("Error: Binary library or executable found: %s" % f)
67
68 elif 'site-packages' in f:
69 nsp = join(prefix, 'site-packages')
70 _force_dir(nsp)
71
72 g = f[f.find('site-packages'):]
73 dst = join(prefix, g)
74 dst_dir = dirname(dst)
75 _force_dir(dst_dir)
76 os.rename(path, dst)
77 d['site-packages'].append(g[14:])
78
79 # Treat scripts specially with the logic from above
80 elif f.startswith(('bin/', 'Scripts')):
81 fn = basename(path)
82 fn = rewrite_script(fn, prefix)
83 d['python-scripts'].append(fn)
84
85 # Include examples in the metadata doc
86 elif f.startswith(('Examples/', 'Examples\\')):
87 d['Examples'].append(f[9:])
88 else:
89 log = logging.getLogger(__name__)
90 log.warn("Don't know how to handle file: %s. Omitting it from package." % f)
91 os.unlink(path)
92
93
94 def populate_files(m, files, prefix, entry_point_scripts=None):
95 d = {'dist': m.dist(),
96 'site-packages': [],
97 'python-scripts': [],
98 'Examples': []}
99
100 # Populate site-package, python-scripts, and Examples into above
101 for f in files:
102 handle_file(f, d, prefix)
103
104 # Windows path conversion
105 if ISWIN:
106 for fns in (d['site-packages'], d['Examples']):
107 for i, fn in enumerate(fns):
108 fns[i] = fn.replace('\\', '/')
109
110 if entry_point_scripts:
111 for entry_point in entry_point_scripts:
112 src = join(prefix, entry_point)
113 if os.path.isfile(src):
114 os.unlink(src)
115
116 return d
117
118
119 def transform(m, files, prefix):
120 bin_dir = join(prefix, 'bin')
121 _force_dir(bin_dir)
122
123 scripts_dir = join(prefix, 'Scripts')
124 _force_dir(scripts_dir)
125
126 name = m.name()
127
128 # Create *nix prelink script
129 # Note: it's important to use LF newlines or it wont work if we build on Win
130 with open(join(bin_dir, '.%s-pre-link.sh' % name), 'wb') as fo:
131 fo.write('''\
132 #!/bin/bash
133 $PREFIX/bin/python $SOURCE_DIR/link.py
134 '''.encode('utf-8'))
135
136 # Create windows prelink script (be nice and use Windows newlines)
137 with open(join(scripts_dir, '.%s-pre-link.bat' % name), 'wb') as fo:
138 fo.write('''\
139 @echo off
140 "%PREFIX%\\python.exe" "%SOURCE_DIR%\\link.py"
141 '''.replace('\n', '\r\n').encode('utf-8'))
142
143 d = populate_files(m, files, prefix)
144
145 # Find our way to this directory
146 this_dir = dirname(__file__)
147
148 # copy in windows exe shims if there are any python-scripts
149 if d['python-scripts']:
150 for fn in 'cli-32.exe', 'cli-64.exe':
151 shutil.copyfile(join(this_dir, fn), join(prefix, fn))
152
153 # Read the local _link.py
154 with open(join(this_dir, '_link.py')) as fi:
155 link_code = fi.read()
156
157 # Write the package metadata, and bumper with code for linking
158 with open(join(prefix, 'link.py'), 'w') as fo:
159 fo.write('DATA = ')
160 json.dump(d, fo, indent=2, sort_keys=True)
161 fo.write('\n## END DATA\n\n')
162 fo.write(link_code)
163
[end of conda_build/noarch_python.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conda_build/noarch_python.py b/conda_build/noarch_python.py
--- a/conda_build/noarch_python.py
+++ b/conda_build/noarch_python.py
@@ -85,10 +85,11 @@
# Include examples in the metadata doc
elif f.startswith(('Examples/', 'Examples\\')):
d['Examples'].append(f[9:])
+ # No special treatment for other files
+ # leave them as-is
else:
log = logging.getLogger(__name__)
- log.warn("Don't know how to handle file: %s. Omitting it from package." % f)
- os.unlink(path)
+ log.debug("Don't know how to handle file: %s. Including it as-is." % f)
def populate_files(m, files, prefix, entry_point_scripts=None):
|
{"golden_diff": "diff --git a/conda_build/noarch_python.py b/conda_build/noarch_python.py\n--- a/conda_build/noarch_python.py\n+++ b/conda_build/noarch_python.py\n@@ -85,10 +85,11 @@\n # Include examples in the metadata doc\n elif f.startswith(('Examples/', 'Examples\\\\')):\n d['Examples'].append(f[9:])\n+ # No special treatment for other files\n+ # leave them as-is\n else:\n log = logging.getLogger(__name__)\n- log.warn(\"Don't know how to handle file: %s. Omitting it from package.\" % f)\n- os.unlink(path)\n+ log.debug(\"Don't know how to handle file: %s. Including it as-is.\" % f)\n \n \n def populate_files(m, files, prefix, entry_point_scripts=None):\n", "issue": "noarch: python packages strip data_files\nin setup.py, `data_files` can be used to deliver support files, especially to `$PREFIX/share`. `noarch: python` [doesn't know how to handle these](https://github.com/conda/conda-build/blob/3.0.23/conda_build/noarch_python.py#L90) and ends up omitting them.\r\n\r\nIt seems like 'unknown' files in the prefix should be included as-is relative to $PREFIX (exactly as in a non-noarch package) rather than excluded.\n", "before_files": [{"content": "import io\nimport json\nimport locale\nimport logging\nimport os\nfrom os.path import basename, dirname, isdir, join, isfile\nimport shutil\nimport sys\n\nISWIN = sys.platform.startswith('win')\n\n\ndef _force_dir(dirname):\n if not isdir(dirname):\n os.makedirs(dirname)\n\n\ndef _error_exit(exit_message):\n sys.exit(\"[noarch_python] %s\" % exit_message)\n\n\ndef rewrite_script(fn, prefix):\n \"\"\"Take a file from the bin directory and rewrite it into the python-scripts\n directory with the same permissions after it passes some sanity checks for\n noarch pacakges\"\"\"\n\n # Load and check the source file for not being a binary\n src = join(prefix, 'Scripts' if ISWIN else 'bin', fn)\n with io.open(src, encoding=locale.getpreferredencoding()) as fi:\n try:\n data = fi.read()\n except UnicodeDecodeError: # file is binary\n _error_exit(\"Noarch package contains binary script: %s\" % fn)\n src_mode = os.stat(src).st_mode\n os.unlink(src)\n\n # Get rid of '-script.py' suffix on Windows\n if ISWIN and fn.endswith('-script.py'):\n fn = fn[:-10]\n\n # Rewrite the file to the python-scripts directory\n dst_dir = join(prefix, 'python-scripts')\n _force_dir(dst_dir)\n dst = join(dst_dir, fn)\n with open(dst, 'w') as fo:\n fo.write(data)\n os.chmod(dst, src_mode)\n return fn\n\n\ndef handle_file(f, d, prefix):\n \"\"\"Process a file for inclusion in a noarch python package.\n \"\"\"\n path = join(prefix, f)\n\n # Ignore egg-info and pyc files.\n if f.endswith(('.egg-info', '.pyc', '.pyo')):\n os.unlink(path)\n\n # The presence of .so indicated this is not a noarch package\n elif f.endswith(('.so', '.dll', '.pyd', '.exe', '.dylib')):\n if f.endswith('.exe') and (isfile(os.path.join(prefix, f[:-4] + '-script.py')) or\n basename(f[:-4]) in d['python-scripts']):\n os.unlink(path) # this is an entry point with a matching xx-script.py\n return\n _error_exit(\"Error: Binary library or executable found: %s\" % f)\n\n elif 'site-packages' in f:\n nsp = join(prefix, 'site-packages')\n _force_dir(nsp)\n\n g = f[f.find('site-packages'):]\n dst = join(prefix, g)\n dst_dir = dirname(dst)\n _force_dir(dst_dir)\n os.rename(path, dst)\n d['site-packages'].append(g[14:])\n\n # Treat scripts specially with the logic from above\n elif f.startswith(('bin/', 'Scripts')):\n fn = basename(path)\n fn = rewrite_script(fn, prefix)\n d['python-scripts'].append(fn)\n\n # Include 
examples in the metadata doc\n elif f.startswith(('Examples/', 'Examples\\\\')):\n d['Examples'].append(f[9:])\n else:\n log = logging.getLogger(__name__)\n log.warn(\"Don't know how to handle file: %s. Omitting it from package.\" % f)\n os.unlink(path)\n\n\ndef populate_files(m, files, prefix, entry_point_scripts=None):\n d = {'dist': m.dist(),\n 'site-packages': [],\n 'python-scripts': [],\n 'Examples': []}\n\n # Populate site-package, python-scripts, and Examples into above\n for f in files:\n handle_file(f, d, prefix)\n\n # Windows path conversion\n if ISWIN:\n for fns in (d['site-packages'], d['Examples']):\n for i, fn in enumerate(fns):\n fns[i] = fn.replace('\\\\', '/')\n\n if entry_point_scripts:\n for entry_point in entry_point_scripts:\n src = join(prefix, entry_point)\n if os.path.isfile(src):\n os.unlink(src)\n\n return d\n\n\ndef transform(m, files, prefix):\n bin_dir = join(prefix, 'bin')\n _force_dir(bin_dir)\n\n scripts_dir = join(prefix, 'Scripts')\n _force_dir(scripts_dir)\n\n name = m.name()\n\n # Create *nix prelink script\n # Note: it's important to use LF newlines or it wont work if we build on Win\n with open(join(bin_dir, '.%s-pre-link.sh' % name), 'wb') as fo:\n fo.write('''\\\n #!/bin/bash\n $PREFIX/bin/python $SOURCE_DIR/link.py\n '''.encode('utf-8'))\n\n # Create windows prelink script (be nice and use Windows newlines)\n with open(join(scripts_dir, '.%s-pre-link.bat' % name), 'wb') as fo:\n fo.write('''\\\n @echo off\n \"%PREFIX%\\\\python.exe\" \"%SOURCE_DIR%\\\\link.py\"\n '''.replace('\\n', '\\r\\n').encode('utf-8'))\n\n d = populate_files(m, files, prefix)\n\n # Find our way to this directory\n this_dir = dirname(__file__)\n\n # copy in windows exe shims if there are any python-scripts\n if d['python-scripts']:\n for fn in 'cli-32.exe', 'cli-64.exe':\n shutil.copyfile(join(this_dir, fn), join(prefix, fn))\n\n # Read the local _link.py\n with open(join(this_dir, '_link.py')) as fi:\n link_code = fi.read()\n\n # Write the package metadata, and bumper with code for linking\n with open(join(prefix, 'link.py'), 'w') as fo:\n fo.write('DATA = ')\n json.dump(d, fo, indent=2, sort_keys=True)\n fo.write('\\n## END DATA\\n\\n')\n fo.write(link_code)\n", "path": "conda_build/noarch_python.py"}]}
| 2,340 | 186 |
gh_patches_debug_18850
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-1066
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
opentelemetry-instrument command can cause recursive creation of subprocesses
**Describe your environment**
Python 3.9, Linux.
**Steps to reproduce**
Using `opentelemetry-instrument` with any exporter or instrumentation which invokes a python subprocess **during initialization**. For example, the `opentelemetry-exporter-gcp-trace` exporter may invoke the `gcloud` (written in python) command in a subprocess to get project information and authentication tokens. The subprocess will then try to autoinstrument, creating a recursive loop of subprocesses being created.
**What is the expected behavior?**
Auto-instrumentation should not apply to subprocesses created in the `initialize()` phase of auto-instrumentation. The `PYTHONPATH` environment variable should have the `sitecustomize.py` dirname stripped out at the beginning of `sitecustomize.py`. This would prevent subprocesses from being autoinstrumented during setup, which can cause a loop.
**What is the actual behavior?**
`PYTHONPATH` is correctly stripped later on to avoid this https://github.com/open-telemetry/opentelemetry-python-contrib/blob/e9f83e1292b0fe5f3478c9b23f3b5a5508481e68/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py#L120-L125
However, any subprocesses created in [these lines](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/e9f83e1292b0fe5f3478c9b23f3b5a5508481e68/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py#L114-L117) will cause a loop.
**Additional context**
I can write a repro if necessary.
</issue>
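For illustration, the mitigation the issue asks for — removing the auto-instrumentation directory from `PYTHONPATH` *before* any distro, configurator, or instrumentor code runs — amounts to only a few lines. The sketch below adapts the stripping logic already present in the module shown next; the helper name is hypothetical and the exact placement inside the real package may differ.

```python
# Illustrative sketch, adapted from the module below: strip this module's
# directory from PYTHONPATH *before* distros/configurators run, so a Python
# subprocess spawned during setup does not import sitecustomize again.
from os import environ
from os.path import abspath, dirname, pathsep
from re import sub


def _strip_self_from_pythonpath() -> None:
    # Remove "<directory of this file>" plus an optional trailing separator.
    environ["PYTHONPATH"] = sub(
        rf"{dirname(abspath(__file__))}{pathsep}?",
        "",
        environ.get("PYTHONPATH", ""),
    )
```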
<code>
[start of opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from logging import getLogger
16 from os import environ
17 from os.path import abspath, dirname, pathsep
18 from re import sub
19
20 from pkg_resources import iter_entry_points
21
22 from opentelemetry.instrumentation.dependencies import (
23 get_dist_dependency_conflicts,
24 )
25 from opentelemetry.instrumentation.distro import BaseDistro, DefaultDistro
26 from opentelemetry.instrumentation.environment_variables import (
27 OTEL_PYTHON_DISABLED_INSTRUMENTATIONS,
28 )
29 from opentelemetry.instrumentation.version import __version__
30
31 logger = getLogger(__name__)
32
33
34 def _load_distros() -> BaseDistro:
35 for entry_point in iter_entry_points("opentelemetry_distro"):
36 try:
37 distro = entry_point.load()()
38 if not isinstance(distro, BaseDistro):
39 logger.debug(
40 "%s is not an OpenTelemetry Distro. Skipping",
41 entry_point.name,
42 )
43 continue
44 logger.debug(
45 "Distribution %s will be configured", entry_point.name
46 )
47 return distro
48 except Exception as exc: # pylint: disable=broad-except
49 logger.exception(
50 "Distribution %s configuration failed", entry_point.name
51 )
52 raise exc
53 return DefaultDistro()
54
55
56 def _load_instrumentors(distro):
57 package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, [])
58 if isinstance(package_to_exclude, str):
59 package_to_exclude = package_to_exclude.split(",")
60 # to handle users entering "requests , flask" or "requests, flask" with spaces
61 package_to_exclude = [x.strip() for x in package_to_exclude]
62
63 for entry_point in iter_entry_points("opentelemetry_pre_instrument"):
64 entry_point.load()()
65
66 for entry_point in iter_entry_points("opentelemetry_instrumentor"):
67 if entry_point.name in package_to_exclude:
68 logger.debug(
69 "Instrumentation skipped for library %s", entry_point.name
70 )
71 continue
72
73 try:
74 conflict = get_dist_dependency_conflicts(entry_point.dist)
75 if conflict:
76 logger.debug(
77 "Skipping instrumentation %s: %s",
78 entry_point.name,
79 conflict,
80 )
81 continue
82
83 # tell instrumentation to not run dep checks again as we already did it above
84 distro.load_instrumentor(entry_point, skip_dep_check=True)
85 logger.debug("Instrumented %s", entry_point.name)
86 except Exception as exc: # pylint: disable=broad-except
87 logger.exception("Instrumenting of %s failed", entry_point.name)
88 raise exc
89
90 for entry_point in iter_entry_points("opentelemetry_post_instrument"):
91 entry_point.load()()
92
93
94 def _load_configurators():
95 configured = None
96 for entry_point in iter_entry_points("opentelemetry_configurator"):
97 if configured is not None:
98 logger.warning(
99 "Configuration of %s not loaded, %s already loaded",
100 entry_point.name,
101 configured,
102 )
103 continue
104 try:
105 entry_point.load()().configure(auto_instrumentation_version=__version__) # type: ignore
106 configured = entry_point.name
107 except Exception as exc: # pylint: disable=broad-except
108 logger.exception("Configuration of %s failed", entry_point.name)
109 raise exc
110
111
112 def initialize():
113 try:
114 distro = _load_distros()
115 distro.configure()
116 _load_configurators()
117 _load_instrumentors(distro)
118 except Exception: # pylint: disable=broad-except
119 logger.exception("Failed to auto initialize opentelemetry")
120 finally:
121 environ["PYTHONPATH"] = sub(
122 rf"{dirname(abspath(__file__))}{pathsep}?",
123 "",
124 environ["PYTHONPATH"],
125 )
126
127
128 initialize()
129
[end of opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py
--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py
+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py
@@ -110,6 +110,13 @@
def initialize():
+ # prevents auto-instrumentation of subprocesses if code execs another python process
+ environ["PYTHONPATH"] = sub(
+ rf"{dirname(abspath(__file__))}{pathsep}?",
+ "",
+ environ["PYTHONPATH"],
+ )
+
try:
distro = _load_distros()
distro.configure()
@@ -117,12 +124,6 @@
_load_instrumentors(distro)
except Exception: # pylint: disable=broad-except
logger.exception("Failed to auto initialize opentelemetry")
- finally:
- environ["PYTHONPATH"] = sub(
- rf"{dirname(abspath(__file__))}{pathsep}?",
- "",
- environ["PYTHONPATH"],
- )
initialize()
|
{"golden_diff": "diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\n@@ -110,6 +110,13 @@\n \n \n def initialize():\n+ # prevents auto-instrumentation of subprocesses if code execs another python process\n+ environ[\"PYTHONPATH\"] = sub(\n+ rf\"{dirname(abspath(__file__))}{pathsep}?\",\n+ \"\",\n+ environ[\"PYTHONPATH\"],\n+ )\n+\n try:\n distro = _load_distros()\n distro.configure()\n@@ -117,12 +124,6 @@\n _load_instrumentors(distro)\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to auto initialize opentelemetry\")\n- finally:\n- environ[\"PYTHONPATH\"] = sub(\n- rf\"{dirname(abspath(__file__))}{pathsep}?\",\n- \"\",\n- environ[\"PYTHONPATH\"],\n- )\n \n \n initialize()\n", "issue": "opentelemetry-instrument command can cause recursive creation of subprocesses\n**Describe your environment**\r\n\r\nPython3.9, linux.\r\n\r\n**Steps to reproduce**\r\nUsing `opentelemetry-instrument` with any exporter or instrumentation which invokes a python subprocess **during initialization**. For example, the `opentelemetry-exporter-gcp-trace` exporter may invoke the `gcloud` (written in python) command in a subprocess to get project information and authentication tokens. The subprocess will then try to autoinstrument, creating a recursive loop of subprocesses being created.\r\n\r\n**What is the expected behavior?**\r\nAuto-instrumentation should not apply to subprocesses created in the `initialize()` phase of auto-instrumentation. The `PYTHONPATH` environment variable should have the `sitecustomize.py` dirname stripped out at the beginning of `sitecustomize.py`. 
This would prevent subprocesses from being autoinstrumented during setup, which can cause a loop.\r\n\r\n**What is the actual behavior?**\r\n`PYTHONPATH` is correctly stripped later on to avoid this https://github.com/open-telemetry/opentelemetry-python-contrib/blob/e9f83e1292b0fe5f3478c9b23f3b5a5508481e68/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py#L120-L125\r\n\r\nHowever, any subprocesses created in [these lines](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/e9f83e1292b0fe5f3478c9b23f3b5a5508481e68/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py#L114-L117) will cause a loop.\r\n\r\n**Additional context**\r\nI can write a repro if necessary.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom logging import getLogger\nfrom os import environ\nfrom os.path import abspath, dirname, pathsep\nfrom re import sub\n\nfrom pkg_resources import iter_entry_points\n\nfrom opentelemetry.instrumentation.dependencies import (\n get_dist_dependency_conflicts,\n)\nfrom opentelemetry.instrumentation.distro import BaseDistro, DefaultDistro\nfrom opentelemetry.instrumentation.environment_variables import (\n OTEL_PYTHON_DISABLED_INSTRUMENTATIONS,\n)\nfrom opentelemetry.instrumentation.version import __version__\n\nlogger = getLogger(__name__)\n\n\ndef _load_distros() -> BaseDistro:\n for entry_point in iter_entry_points(\"opentelemetry_distro\"):\n try:\n distro = entry_point.load()()\n if not isinstance(distro, BaseDistro):\n logger.debug(\n \"%s is not an OpenTelemetry Distro. 
Skipping\",\n entry_point.name,\n )\n continue\n logger.debug(\n \"Distribution %s will be configured\", entry_point.name\n )\n return distro\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\n \"Distribution %s configuration failed\", entry_point.name\n )\n raise exc\n return DefaultDistro()\n\n\ndef _load_instrumentors(distro):\n package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, [])\n if isinstance(package_to_exclude, str):\n package_to_exclude = package_to_exclude.split(\",\")\n # to handle users entering \"requests , flask\" or \"requests, flask\" with spaces\n package_to_exclude = [x.strip() for x in package_to_exclude]\n\n for entry_point in iter_entry_points(\"opentelemetry_pre_instrument\"):\n entry_point.load()()\n\n for entry_point in iter_entry_points(\"opentelemetry_instrumentor\"):\n if entry_point.name in package_to_exclude:\n logger.debug(\n \"Instrumentation skipped for library %s\", entry_point.name\n )\n continue\n\n try:\n conflict = get_dist_dependency_conflicts(entry_point.dist)\n if conflict:\n logger.debug(\n \"Skipping instrumentation %s: %s\",\n entry_point.name,\n conflict,\n )\n continue\n\n # tell instrumentation to not run dep checks again as we already did it above\n distro.load_instrumentor(entry_point, skip_dep_check=True)\n logger.debug(\"Instrumented %s\", entry_point.name)\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\"Instrumenting of %s failed\", entry_point.name)\n raise exc\n\n for entry_point in iter_entry_points(\"opentelemetry_post_instrument\"):\n entry_point.load()()\n\n\ndef _load_configurators():\n configured = None\n for entry_point in iter_entry_points(\"opentelemetry_configurator\"):\n if configured is not None:\n logger.warning(\n \"Configuration of %s not loaded, %s already loaded\",\n entry_point.name,\n configured,\n )\n continue\n try:\n entry_point.load()().configure(auto_instrumentation_version=__version__) # type: ignore\n configured = entry_point.name\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\"Configuration of %s failed\", entry_point.name)\n raise exc\n\n\ndef initialize():\n try:\n distro = _load_distros()\n distro.configure()\n _load_configurators()\n _load_instrumentors(distro)\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to auto initialize opentelemetry\")\n finally:\n environ[\"PYTHONPATH\"] = sub(\n rf\"{dirname(abspath(__file__))}{pathsep}?\",\n \"\",\n environ[\"PYTHONPATH\"],\n )\n\n\ninitialize()\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py"}]}
| 2,179 | 281 |
gh_patches_debug_6822
|
rasdani/github-patches
|
git_diff
|
getnikola__nikola-2163
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Nikola does not see plugins listed under DISABLED_PLUGINS
First noticed while running `nikola build`:
`ERROR: _switch to py3:please!. Task dependency 'sitemap' does not exist.`
> polyzen> there's no longer a way to disable the sitemap? i don't see it under `nikola plugin --list-installed` nor `nikola list` unless it's part of render_site
> +ralsina> sitemap at /home/ralsina/Desktop/proyectos/nikola/master/nikola/plugins/task/sitemap/
> +ralsina> I suspect disabled plugins are not listed anymore, try that in some other folder
> polyzen> ah yes
</issue>
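The missing piece is essentially a reporting one: the listing command can read the site's `DISABLED_PLUGINS` setting and print it next to the installed plugins. Below is a rough sketch of such a method, mirroring the `list_installed` method in the command class shown next; the exact output wording is an assumption.

```python
import os


def list_installed(self):
    """List installed plugins, then surface the plugins disabled in conf.py."""
    plugins = []
    for plugin in self.site.plugin_manager.getAllPlugins():
        path = plugin.path
        # Directory plugins get a trailing separator, single-file plugins '.py'.
        path = path + os.sep if os.path.isdir(path) else path + '.py'
        plugins.append((plugin.name, path))

    for name, path in sorted(plugins):
        print('{0} at {1}'.format(name, path))

    # Disabled plugins never reach the plugin manager, so report them
    # directly from the site configuration instead of omitting them.
    print('\nAlso, you have disabled these plugins: {0}'.format(
        self.site.config['DISABLED_PLUGINS']))
    return 0
```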
<code>
[start of nikola/plugins/command/plugin.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2015 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Manage plugins."""
28
29 from __future__ import print_function
30 import io
31 import os
32 import shutil
33 import subprocess
34 import time
35 import requests
36
37 import pygments
38 from pygments.lexers import PythonLexer
39 from pygments.formatters import TerminalFormatter
40
41 from nikola.plugin_categories import Command
42 from nikola import utils
43
44 LOGGER = utils.get_logger('plugin', utils.STDERR_HANDLER)
45
46
47 class CommandPlugin(Command):
48 """Manage plugins."""
49
50 json = None
51 name = "plugin"
52 doc_usage = "[[-u][--user] --install name] | [[-u] [-l |--upgrade|--list-installed] | [--uninstall name]]"
53 doc_purpose = "manage plugins"
54 output_dir = None
55 needs_config = False
56 cmd_options = [
57 {
58 'name': 'install',
59 'short': 'i',
60 'long': 'install',
61 'type': str,
62 'default': '',
63 'help': 'Install a plugin.',
64 },
65 {
66 'name': 'uninstall',
67 'long': 'uninstall',
68 'short': 'r',
69 'type': str,
70 'default': '',
71 'help': 'Uninstall a plugin.'
72 },
73 {
74 'name': 'list',
75 'short': 'l',
76 'long': 'list',
77 'type': bool,
78 'default': False,
79 'help': 'Show list of available plugins.'
80 },
81 {
82 'name': 'url',
83 'short': 'u',
84 'long': 'url',
85 'type': str,
86 'help': "URL for the plugin repository (default: "
87 "https://plugins.getnikola.com/v7/plugins.json)",
88 'default': 'https://plugins.getnikola.com/v7/plugins.json'
89 },
90 {
91 'name': 'user',
92 'long': 'user',
93 'type': bool,
94 'help': "Install user-wide, available for all sites.",
95 'default': False
96 },
97 {
98 'name': 'upgrade',
99 'long': 'upgrade',
100 'type': bool,
101 'help': "Upgrade all installed plugins.",
102 'default': False
103 },
104 {
105 'name': 'list_installed',
106 'long': 'list-installed',
107 'type': bool,
108 'help': "List the installed plugins with their location.",
109 'default': False
110 },
111 ]
112
113 def _execute(self, options, args):
114 """Install plugin into current site."""
115 url = options['url']
116 user_mode = options['user']
117
118 # See the "mode" we need to operate in
119 install = options.get('install')
120 uninstall = options.get('uninstall')
121 upgrade = options.get('upgrade')
122 list_available = options.get('list')
123 list_installed = options.get('list_installed')
124 show_install_notes = options.get('show_install_notes', True)
125 command_count = [bool(x) for x in (
126 install,
127 uninstall,
128 upgrade,
129 list_available,
130 list_installed)].count(True)
131 if command_count > 1 or command_count == 0:
132 print(self.help())
133 return 2
134
135 if options.get('output_dir') is not None:
136 self.output_dir = options.get('output_dir')
137 else:
138 if not self.site.configured and not user_mode and install:
139 LOGGER.notice('No site found, assuming --user')
140 user_mode = True
141
142 if user_mode:
143 self.output_dir = os.path.expanduser('~/.nikola/plugins')
144 else:
145 self.output_dir = 'plugins'
146
147 if list_available:
148 return self.list_available(url)
149 elif list_installed:
150 return self.list_installed()
151 elif upgrade:
152 return self.do_upgrade(url)
153 elif uninstall:
154 return self.do_uninstall(uninstall)
155 elif install:
156 return self.do_install(url, install, show_install_notes)
157
158 def list_available(self, url):
159 """List all available plugins."""
160 data = self.get_json(url)
161 print("Available Plugins:")
162 print("------------------")
163 for plugin in sorted(data.keys()):
164 print(plugin)
165 return 0
166
167 def list_installed(self):
168 """List installed plugins."""
169 plugins = []
170 for plugin in self.site.plugin_manager.getAllPlugins():
171 p = plugin.path
172 if os.path.isdir(p):
173 p = p + os.sep
174 else:
175 p = p + '.py'
176 plugins.append([plugin.name, p])
177
178 plugins.sort()
179 for name, path in plugins:
180 print('{0} at {1}'.format(name, path))
181 return 0
182
183 def do_upgrade(self, url):
184 """Upgrade all installed plugins."""
185 LOGGER.warning('This is not very smart, it just reinstalls some plugins and hopes for the best')
186 data = self.get_json(url)
187 plugins = []
188 for plugin in self.site.plugin_manager.getAllPlugins():
189 p = plugin.path
190 if os.path.isdir(p):
191 p = p + os.sep
192 else:
193 p = p + '.py'
194 if plugin.name in data:
195 plugins.append([plugin.name, p])
196 print('Will upgrade {0} plugins: {1}'.format(len(plugins), ', '.join(n for n, _ in plugins)))
197 for name, path in plugins:
198 print('Upgrading {0}'.format(name))
199 p = path
200 while True:
201 tail, head = os.path.split(path)
202 if head == 'plugins':
203 self.output_dir = path
204 break
205 elif tail == '':
206 LOGGER.error("Can't find the plugins folder for path: {0}".format(p))
207 return 1
208 else:
209 path = tail
210 self.do_install(url, name)
211 return 0
212
213 def do_install(self, url, name, show_install_notes=True):
214 """Download and install a plugin."""
215 data = self.get_json(url)
216 if name in data:
217 utils.makedirs(self.output_dir)
218 url = data[name]
219 LOGGER.info("Downloading '{0}'".format(url))
220 try:
221 zip_data = requests.get(url).content
222 except requests.exceptions.SSLError:
223 LOGGER.warning("SSL error, using http instead of https (press ^C to abort)")
224 time.sleep(1)
225 url = url.replace('https', 'http', 1)
226 zip_data = requests.get(url).content
227
228 zip_file = io.BytesIO()
229 zip_file.write(zip_data)
230 LOGGER.info('Extracting: {0} into {1}/'.format(name, self.output_dir))
231 utils.extract_all(zip_file, self.output_dir)
232 dest_path = os.path.join(self.output_dir, name)
233 else:
234 try:
235 plugin_path = utils.get_plugin_path(name)
236 except:
237 LOGGER.error("Can't find plugin " + name)
238 return 1
239
240 utils.makedirs(self.output_dir)
241 dest_path = os.path.join(self.output_dir, name)
242 if os.path.exists(dest_path):
243 LOGGER.error("{0} is already installed".format(name))
244 return 1
245
246 LOGGER.info('Copying {0} into plugins'.format(plugin_path))
247 shutil.copytree(plugin_path, dest_path)
248
249 reqpath = os.path.join(dest_path, 'requirements.txt')
250 if os.path.exists(reqpath):
251 LOGGER.notice('This plugin has Python dependencies.')
252 LOGGER.info('Installing dependencies with pip...')
253 try:
254 subprocess.check_call(('pip', 'install', '-r', reqpath))
255 except subprocess.CalledProcessError:
256 LOGGER.error('Could not install the dependencies.')
257 print('Contents of the requirements.txt file:\n')
258 with io.open(reqpath, 'r', encoding='utf-8') as fh:
259 print(utils.indent(fh.read(), 4 * ' '))
260 print('You have to install those yourself or through a '
261 'package manager.')
262 else:
263 LOGGER.info('Dependency installation succeeded.')
264 reqnpypath = os.path.join(dest_path, 'requirements-nonpy.txt')
265 if os.path.exists(reqnpypath):
266 LOGGER.notice('This plugin has third-party '
267 'dependencies you need to install '
268 'manually.')
269 print('Contents of the requirements-nonpy.txt file:\n')
270 with io.open(reqnpypath, 'r', encoding='utf-8') as fh:
271 for l in fh.readlines():
272 i, j = l.split('::')
273 print(utils.indent(i.strip(), 4 * ' '))
274 print(utils.indent(j.strip(), 8 * ' '))
275 print()
276
277 print('You have to install those yourself or through a package '
278 'manager.')
279 confpypath = os.path.join(dest_path, 'conf.py.sample')
280 if os.path.exists(confpypath) and show_install_notes:
281 LOGGER.notice('This plugin has a sample config file. Integrate it with yours in order to make this plugin work!')
282 print('Contents of the conf.py.sample file:\n')
283 with io.open(confpypath, 'r', encoding='utf-8') as fh:
284 if self.site.colorful:
285 print(utils.indent(pygments.highlight(
286 fh.read(), PythonLexer(), TerminalFormatter()),
287 4 * ' '))
288 else:
289 print(utils.indent(fh.read(), 4 * ' '))
290 return 0
291
292 def do_uninstall(self, name):
293 """Uninstall a plugin."""
294 for plugin in self.site.plugin_manager.getAllPlugins(): # FIXME: this is repeated thrice
295 p = plugin.path
296 if os.path.isdir(p):
297 p = p + os.sep
298 else:
299 p = os.path.dirname(p)
300 if name == plugin.name: # Uninstall this one
301 LOGGER.warning('About to uninstall plugin: {0}'.format(name))
302 LOGGER.warning('This will delete {0}'.format(p))
303 sure = utils.ask_yesno('Are you sure?')
304 if sure:
305 LOGGER.warning('Removing {0}'.format(p))
306 shutil.rmtree(p)
307 return 0
308 return 1
309 LOGGER.error('Unknown plugin: {0}'.format(name))
310 return 1
311
312 def get_json(self, url):
313 """Download the JSON file with all plugins."""
314 if self.json is None:
315 try:
316 self.json = requests.get(url).json()
317 except requests.exceptions.SSLError:
318 LOGGER.warning("SSL error, using http instead of https (press ^C to abort)")
319 time.sleep(1)
320 url = url.replace('https', 'http', 1)
321 self.json = requests.get(url).json()
322 return self.json
323
[end of nikola/plugins/command/plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nikola/plugins/command/plugin.py b/nikola/plugins/command/plugin.py
--- a/nikola/plugins/command/plugin.py
+++ b/nikola/plugins/command/plugin.py
@@ -176,8 +176,11 @@
plugins.append([plugin.name, p])
plugins.sort()
+ print('Installed Plugins')
+ print('-----------------')
for name, path in plugins:
print('{0} at {1}'.format(name, path))
+ print('\n\nAlso, you have disabled these plugins: {}'.format(self.site.config['DISABLED_PLUGINS']))
return 0
def do_upgrade(self, url):
|
{"golden_diff": "diff --git a/nikola/plugins/command/plugin.py b/nikola/plugins/command/plugin.py\n--- a/nikola/plugins/command/plugin.py\n+++ b/nikola/plugins/command/plugin.py\n@@ -176,8 +176,11 @@\n plugins.append([plugin.name, p])\n \n plugins.sort()\n+ print('Installed Plugins')\n+ print('-----------------')\n for name, path in plugins:\n print('{0} at {1}'.format(name, path))\n+ print('\\n\\nAlso, you have disabled these plugins: {}'.format(self.site.config['DISABLED_PLUGINS']))\n return 0\n \n def do_upgrade(self, url):\n", "issue": "NIkola does not see plugins listed under DISABLED_PLUGINS\nFirst noticed while running `nikola build`:\n`ERROR: _switch to py3:please!. Task dependency 'sitemap' does not exist.`\n\n> polyzen> there's no longer a way to disable the sitemap? i don't see it under `nikola plugin --list-installed` nor `nikola list` unless it's part of render_site\n> +ralsina> sitemap at /home/ralsina/Desktop/proyectos/nikola/master/nikola/plugins/task/sitemap/\n> +ralsina> I suspect disabled plugins are not listed anymore, try that in some other folder\n> polyzen> ah yes\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2015 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Manage plugins.\"\"\"\n\nfrom __future__ import print_function\nimport io\nimport os\nimport shutil\nimport subprocess\nimport time\nimport requests\n\nimport pygments\nfrom pygments.lexers import PythonLexer\nfrom pygments.formatters import TerminalFormatter\n\nfrom nikola.plugin_categories import Command\nfrom nikola import utils\n\nLOGGER = utils.get_logger('plugin', utils.STDERR_HANDLER)\n\n\nclass CommandPlugin(Command):\n \"\"\"Manage plugins.\"\"\"\n\n json = None\n name = \"plugin\"\n doc_usage = \"[[-u][--user] --install name] | [[-u] [-l |--upgrade|--list-installed] | [--uninstall name]]\"\n doc_purpose = \"manage plugins\"\n output_dir = None\n needs_config = False\n cmd_options = [\n {\n 'name': 'install',\n 'short': 'i',\n 'long': 'install',\n 'type': str,\n 'default': '',\n 'help': 'Install a plugin.',\n },\n {\n 'name': 'uninstall',\n 'long': 'uninstall',\n 'short': 'r',\n 'type': str,\n 'default': '',\n 'help': 'Uninstall a plugin.'\n },\n {\n 'name': 'list',\n 'short': 'l',\n 'long': 'list',\n 'type': bool,\n 'default': False,\n 'help': 'Show list of available plugins.'\n },\n {\n 'name': 'url',\n 'short': 'u',\n 'long': 'url',\n 'type': str,\n 'help': \"URL for the plugin repository (default: \"\n \"https://plugins.getnikola.com/v7/plugins.json)\",\n 'default': 'https://plugins.getnikola.com/v7/plugins.json'\n },\n {\n 'name': 'user',\n 'long': 'user',\n 'type': bool,\n 'help': \"Install user-wide, available for all sites.\",\n 'default': False\n },\n {\n 'name': 'upgrade',\n 'long': 'upgrade',\n 'type': bool,\n 'help': \"Upgrade all installed plugins.\",\n 'default': False\n },\n {\n 'name': 'list_installed',\n 'long': 'list-installed',\n 'type': bool,\n 'help': \"List the installed plugins with their location.\",\n 'default': False\n },\n ]\n\n def _execute(self, options, args):\n \"\"\"Install plugin into current site.\"\"\"\n url = options['url']\n user_mode = options['user']\n\n # See the \"mode\" we need to operate in\n install = options.get('install')\n uninstall = options.get('uninstall')\n upgrade = options.get('upgrade')\n list_available = options.get('list')\n list_installed = options.get('list_installed')\n show_install_notes = options.get('show_install_notes', True)\n command_count = [bool(x) for x in (\n install,\n uninstall,\n upgrade,\n list_available,\n list_installed)].count(True)\n if command_count > 1 or command_count == 0:\n print(self.help())\n return 2\n\n if options.get('output_dir') is not None:\n self.output_dir = options.get('output_dir')\n else:\n if not self.site.configured and not user_mode and install:\n LOGGER.notice('No site found, assuming --user')\n user_mode = True\n\n if user_mode:\n self.output_dir = os.path.expanduser('~/.nikola/plugins')\n else:\n self.output_dir = 'plugins'\n\n if list_available:\n return self.list_available(url)\n elif list_installed:\n return self.list_installed()\n elif upgrade:\n return self.do_upgrade(url)\n elif uninstall:\n return self.do_uninstall(uninstall)\n elif install:\n return self.do_install(url, install, show_install_notes)\n\n def list_available(self, url):\n \"\"\"List all available plugins.\"\"\"\n data = self.get_json(url)\n print(\"Available Plugins:\")\n print(\"------------------\")\n for plugin in 
sorted(data.keys()):\n print(plugin)\n return 0\n\n def list_installed(self):\n \"\"\"List installed plugins.\"\"\"\n plugins = []\n for plugin in self.site.plugin_manager.getAllPlugins():\n p = plugin.path\n if os.path.isdir(p):\n p = p + os.sep\n else:\n p = p + '.py'\n plugins.append([plugin.name, p])\n\n plugins.sort()\n for name, path in plugins:\n print('{0} at {1}'.format(name, path))\n return 0\n\n def do_upgrade(self, url):\n \"\"\"Upgrade all installed plugins.\"\"\"\n LOGGER.warning('This is not very smart, it just reinstalls some plugins and hopes for the best')\n data = self.get_json(url)\n plugins = []\n for plugin in self.site.plugin_manager.getAllPlugins():\n p = plugin.path\n if os.path.isdir(p):\n p = p + os.sep\n else:\n p = p + '.py'\n if plugin.name in data:\n plugins.append([plugin.name, p])\n print('Will upgrade {0} plugins: {1}'.format(len(plugins), ', '.join(n for n, _ in plugins)))\n for name, path in plugins:\n print('Upgrading {0}'.format(name))\n p = path\n while True:\n tail, head = os.path.split(path)\n if head == 'plugins':\n self.output_dir = path\n break\n elif tail == '':\n LOGGER.error(\"Can't find the plugins folder for path: {0}\".format(p))\n return 1\n else:\n path = tail\n self.do_install(url, name)\n return 0\n\n def do_install(self, url, name, show_install_notes=True):\n \"\"\"Download and install a plugin.\"\"\"\n data = self.get_json(url)\n if name in data:\n utils.makedirs(self.output_dir)\n url = data[name]\n LOGGER.info(\"Downloading '{0}'\".format(url))\n try:\n zip_data = requests.get(url).content\n except requests.exceptions.SSLError:\n LOGGER.warning(\"SSL error, using http instead of https (press ^C to abort)\")\n time.sleep(1)\n url = url.replace('https', 'http', 1)\n zip_data = requests.get(url).content\n\n zip_file = io.BytesIO()\n zip_file.write(zip_data)\n LOGGER.info('Extracting: {0} into {1}/'.format(name, self.output_dir))\n utils.extract_all(zip_file, self.output_dir)\n dest_path = os.path.join(self.output_dir, name)\n else:\n try:\n plugin_path = utils.get_plugin_path(name)\n except:\n LOGGER.error(\"Can't find plugin \" + name)\n return 1\n\n utils.makedirs(self.output_dir)\n dest_path = os.path.join(self.output_dir, name)\n if os.path.exists(dest_path):\n LOGGER.error(\"{0} is already installed\".format(name))\n return 1\n\n LOGGER.info('Copying {0} into plugins'.format(plugin_path))\n shutil.copytree(plugin_path, dest_path)\n\n reqpath = os.path.join(dest_path, 'requirements.txt')\n if os.path.exists(reqpath):\n LOGGER.notice('This plugin has Python dependencies.')\n LOGGER.info('Installing dependencies with pip...')\n try:\n subprocess.check_call(('pip', 'install', '-r', reqpath))\n except subprocess.CalledProcessError:\n LOGGER.error('Could not install the dependencies.')\n print('Contents of the requirements.txt file:\\n')\n with io.open(reqpath, 'r', encoding='utf-8') as fh:\n print(utils.indent(fh.read(), 4 * ' '))\n print('You have to install those yourself or through a '\n 'package manager.')\n else:\n LOGGER.info('Dependency installation succeeded.')\n reqnpypath = os.path.join(dest_path, 'requirements-nonpy.txt')\n if os.path.exists(reqnpypath):\n LOGGER.notice('This plugin has third-party '\n 'dependencies you need to install '\n 'manually.')\n print('Contents of the requirements-nonpy.txt file:\\n')\n with io.open(reqnpypath, 'r', encoding='utf-8') as fh:\n for l in fh.readlines():\n i, j = l.split('::')\n print(utils.indent(i.strip(), 4 * ' '))\n print(utils.indent(j.strip(), 8 * ' '))\n print()\n\n print('You 
have to install those yourself or through a package '\n 'manager.')\n confpypath = os.path.join(dest_path, 'conf.py.sample')\n if os.path.exists(confpypath) and show_install_notes:\n LOGGER.notice('This plugin has a sample config file. Integrate it with yours in order to make this plugin work!')\n print('Contents of the conf.py.sample file:\\n')\n with io.open(confpypath, 'r', encoding='utf-8') as fh:\n if self.site.colorful:\n print(utils.indent(pygments.highlight(\n fh.read(), PythonLexer(), TerminalFormatter()),\n 4 * ' '))\n else:\n print(utils.indent(fh.read(), 4 * ' '))\n return 0\n\n def do_uninstall(self, name):\n \"\"\"Uninstall a plugin.\"\"\"\n for plugin in self.site.plugin_manager.getAllPlugins(): # FIXME: this is repeated thrice\n p = plugin.path\n if os.path.isdir(p):\n p = p + os.sep\n else:\n p = os.path.dirname(p)\n if name == plugin.name: # Uninstall this one\n LOGGER.warning('About to uninstall plugin: {0}'.format(name))\n LOGGER.warning('This will delete {0}'.format(p))\n sure = utils.ask_yesno('Are you sure?')\n if sure:\n LOGGER.warning('Removing {0}'.format(p))\n shutil.rmtree(p)\n return 0\n return 1\n LOGGER.error('Unknown plugin: {0}'.format(name))\n return 1\n\n def get_json(self, url):\n \"\"\"Download the JSON file with all plugins.\"\"\"\n if self.json is None:\n try:\n self.json = requests.get(url).json()\n except requests.exceptions.SSLError:\n LOGGER.warning(\"SSL error, using http instead of https (press ^C to abort)\")\n time.sleep(1)\n url = url.replace('https', 'http', 1)\n self.json = requests.get(url).json()\n return self.json\n", "path": "nikola/plugins/command/plugin.py"}]}
| 4,058 | 143 |
gh_patches_debug_19558
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmcv-97
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mmcv error
My environment is macOS Mojave 10.14.4, Anaconda 4.4.0, Python 3.6.1.
I directly ran `pip install mmcv` and got:
"Running setup.py clean for mmcv
Failed to build mmcv
Installing collected packages: mmcv
Running setup.py install for mmcv ... error" and :
"In file included from ./mmcv/video/optflow_warp/flow_warp.cpp:1:
./mmcv/video/optflow_warp/flow_warp.hpp:3:10: fatal error: 'iostream' file not found
#include <iostream>"
Anybody help? Thank you very much.
</issue>
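The `'iostream' file not found` error on macOS usually means clang is not being pointed at libc++ while the C++ extension compiles. A common remedy, sketched below against the same extension defined in the `setup.py` that follows, is to pass `-stdlib=libc++` on Darwin only; treat the exact flags as an assumption rather than a confirmed project fix.

```python
# Sketch: add macOS-only C++ flags so clang can locate libc++ headers
# such as <iostream> when building the Cython/C++ extension.
import platform

import numpy
from setuptools import Extension

if platform.system() == 'Darwin':
    extra_compile_args = ['-stdlib=libc++']
    extra_link_args = ['-stdlib=libc++']
else:
    extra_compile_args = []
    extra_link_args = []

flow_warp_ext = Extension(
    name='mmcv._ext',
    sources=[
        './mmcv/video/optflow_warp/flow_warp.cpp',
        './mmcv/video/optflow_warp/flow_warp_module.pyx',
    ],
    include_dirs=[numpy.get_include()],
    language='c++',
    extra_compile_args=extra_compile_args,
    extra_link_args=extra_link_args,
)
```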
<code>
[start of setup.py]
1 import sys
2 from io import open # for Python 2 (identical to builtin in Python 3)
3
4 from setuptools import Extension, find_packages, setup
5
6 import numpy
7 from Cython.Distutils import build_ext
8
9 install_requires = [
10 'numpy>=1.11.1', 'pyyaml', 'six', 'addict', 'requests', 'opencv-python',
11 'Cython'
12 ]
13 if sys.version_info < (3, 3):
14 install_requires.append('backports.shutil_get_terminal_size')
15 if sys.version_info < (3, 4):
16 install_requires.extend(['enum34', 'pathlib'])
17
18
19 def readme():
20 with open('README.rst', encoding='utf-8') as f:
21 content = f.read()
22 return content
23
24
25 def get_version():
26 version_file = 'mmcv/version.py'
27 with open(version_file, 'r', encoding='utf-8') as f:
28 exec(compile(f.read(), version_file, 'exec'))
29 return locals()['__version__']
30
31
32 EXT_MODULES = [
33 Extension(
34 name='mmcv._ext',
35 sources=[
36 './mmcv/video/optflow_warp/flow_warp.cpp',
37 './mmcv/video/optflow_warp/flow_warp_module.pyx'
38 ],
39 include_dirs=[numpy.get_include()],
40 language="c++",
41 ),
42 ]
43
44 setup(
45 name='mmcv',
46 version=get_version(),
47 description='Open MMLab Computer Vision Foundation',
48 long_description=readme(),
49 keywords='computer vision',
50 packages=find_packages(),
51 classifiers=[
52 'Development Status :: 4 - Beta',
53 'License :: OSI Approved :: Apache Software License',
54 'Operating System :: OS Independent',
55 'Programming Language :: Python :: 2',
56 'Programming Language :: Python :: 2.7',
57 'Programming Language :: Python :: 3',
58 'Programming Language :: Python :: 3.4',
59 'Programming Language :: Python :: 3.5',
60 'Programming Language :: Python :: 3.6',
61 'Programming Language :: Python :: 3.7',
62 'Topic :: Utilities',
63 ],
64 url='https://github.com/open-mmlab/mmcv',
65 author='Kai Chen',
66 author_email='[email protected]',
67 setup_requires=['pytest-runner'],
68 tests_require=['pytest'],
69 install_requires=install_requires,
70 ext_modules=EXT_MODULES,
71 cmdclass={'build_ext': build_ext},
72 zip_safe=False)
73
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,6 @@
+import platform
import sys
from io import open # for Python 2 (identical to builtin in Python 3)
-
from setuptools import Extension, find_packages, setup
import numpy
@@ -29,6 +29,13 @@
return locals()['__version__']
+if platform.system() == 'Darwin':
+ extra_compile_args = ['-stdlib=libc++']
+ extra_link_args = ['-stdlib=libc++']
+else:
+ extra_compile_args = []
+ extra_link_args = []
+
EXT_MODULES = [
Extension(
name='mmcv._ext',
@@ -37,7 +44,9 @@
'./mmcv/video/optflow_warp/flow_warp_module.pyx'
],
include_dirs=[numpy.get_include()],
- language="c++",
+ language='c++',
+ extra_compile_args=extra_compile_args,
+ extra_link_args=extra_link_args,
),
]
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,6 +1,6 @@\n+import platform\n import sys\n from io import open # for Python 2 (identical to builtin in Python 3)\n-\n from setuptools import Extension, find_packages, setup\n \n import numpy\n@@ -29,6 +29,13 @@\n return locals()['__version__']\n \n \n+if platform.system() == 'Darwin':\n+ extra_compile_args = ['-stdlib=libc++']\n+ extra_link_args = ['-stdlib=libc++']\n+else:\n+ extra_compile_args = []\n+ extra_link_args = []\n+\n EXT_MODULES = [\n Extension(\n name='mmcv._ext',\n@@ -37,7 +44,9 @@\n './mmcv/video/optflow_warp/flow_warp_module.pyx'\n ],\n include_dirs=[numpy.get_include()],\n- language=\"c++\",\n+ language='c++',\n+ extra_compile_args=extra_compile_args,\n+ extra_link_args=extra_link_args,\n ),\n ]\n", "issue": "mmcv error\nMy environment is macOS Mojave 10.14.4, Anaconda 4.4.0,Python 3.6.1.\r\n I directly use \"pip install mmcv and got:\r\n\"Running setup.py clean for mmcv\r\nFailed to build mmcv\r\nInstalling collected packages: mmcv\r\nRunning setup.py install for mmcv ... error\" and :\r\n\"In file included from ./mmcv/video/optflow_warp/flow_warp.cpp:1:\r\n./mmcv/video/optflow_warp/flow_warp.hpp:3:10: fatal error: 'iostream' file not found\r\n#include <iostream>\"\r\nAnybody help? Thank you very much.\n", "before_files": [{"content": "import sys\nfrom io import open # for Python 2 (identical to builtin in Python 3)\n\nfrom setuptools import Extension, find_packages, setup\n\nimport numpy\nfrom Cython.Distutils import build_ext\n\ninstall_requires = [\n 'numpy>=1.11.1', 'pyyaml', 'six', 'addict', 'requests', 'opencv-python',\n 'Cython'\n]\nif sys.version_info < (3, 3):\n install_requires.append('backports.shutil_get_terminal_size')\nif sys.version_info < (3, 4):\n install_requires.extend(['enum34', 'pathlib'])\n\n\ndef readme():\n with open('README.rst', encoding='utf-8') as f:\n content = f.read()\n return content\n\n\ndef get_version():\n version_file = 'mmcv/version.py'\n with open(version_file, 'r', encoding='utf-8') as f:\n exec(compile(f.read(), version_file, 'exec'))\n return locals()['__version__']\n\n\nEXT_MODULES = [\n Extension(\n name='mmcv._ext',\n sources=[\n './mmcv/video/optflow_warp/flow_warp.cpp',\n './mmcv/video/optflow_warp/flow_warp_module.pyx'\n ],\n include_dirs=[numpy.get_include()],\n language=\"c++\",\n ),\n]\n\nsetup(\n name='mmcv',\n version=get_version(),\n description='Open MMLab Computer Vision Foundation',\n long_description=readme(),\n keywords='computer vision',\n packages=find_packages(),\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Utilities',\n ],\n url='https://github.com/open-mmlab/mmcv',\n author='Kai Chen',\n author_email='[email protected]',\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n install_requires=install_requires,\n ext_modules=EXT_MODULES,\n cmdclass={'build_ext': build_ext},\n zip_safe=False)\n", "path": "setup.py"}]}
| 1,340 | 236 |
gh_patches_debug_19740
|
rasdani/github-patches
|
git_diff
|
tough-dev-school__education-backend-180
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Custom tags in Mailchimp
Make it possible to specify tags on a lead magnet that are passed through to the Mailchimp audience
</issue>
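Put differently, the lead-magnet flow should derive one or more Mailchimp tags from the campaign and forward them when the subscriber is created, so they land on the audience member. A minimal sketch follows; the tag naming convention and the `tags` keyword mirror the change proposed below and are illustrative, not verified against the real `UserCreator` signature.

```python
# Sketch: tag Mailchimp subscribers that arrive through a lead-magnet campaign.
from users.creator import UserCreator  # project helper, as imported in the module below


class LeadCreator:
    def __init__(self, campaign, email, name=None):
        self.campaign = campaign
        self.data = {'name': name, 'email': email}

    @property
    def tags(self):
        # Assumed convention: a single tag derived from the campaign slug.
        return ['{0}-lead-magnet'.format(self.campaign.slug)]

    def _create_user(self):
        # Forward the campaign-derived tags so the subscriber is created
        # in the Mailchimp audience already tagged.
        return UserCreator(
            name=self.data['name'],
            email=self.data['email'],
            subscribe=True,
            tags=self.tags,
        )()
```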
<code>
[start of src/magnets/creator.py]
1 from magnets.models import EmailLeadMagnetCampaign, LeadCampaignLogEntry
2 from users.creator import UserCreator
3 from users.models import User
4
5
6 class LeadCreator:
7 def __init__(self, campaign: EmailLeadMagnetCampaign, email: str, name: str = None):
8 self.data = {
9 'name': name,
10 'email': email,
11 }
12
13 self.campaign = campaign
14
15 def __call__(self):
16 self.user = self._create_user()
17 self._create_log_entry()
18
19 self.campaign.execute(self.user)
20
21 def _create_user(self) -> User:
22 return UserCreator(
23 name=self.data['name'],
24 email=self.data['email'],
25 subscribe=True,
26 )()
27
28 def _create_log_entry(self):
29 LeadCampaignLogEntry.objects.create(
30 user=self.user,
31 campaign=self.campaign,
32 )
33
[end of src/magnets/creator.py]
[start of src/shipping/shipments/course.py]
1 from typing import Optional
2
3 from app.tasks import invite_to_clickmeeting, invite_to_zoomus, send_mail, subscribe_to_mailchimp
4 from products.models import Course
5 from shipping import factory
6 from shipping.shipments.base import BaseShipment
7
8
9 @factory.register(Course)
10 class CourseShipment(BaseShipment):
11 @property
12 def course(self):
13 return self.stuff_to_ship
14
15 def ship(self):
16 self.invite_to_clickmeeting()
17 self.invite_to_zoomus()
18 self.subscribe_to_mailchimp()
19
20 self.send_welcome_letter()
21
22 def subscribe_to_mailchimp(self):
23 if self.course.mailchimp_list_id is not None:
24 subscribe_to_mailchimp.delay(
25 list_id=self.course.mailchimp_list_id,
26 user_id=self.user.pk,
27 tags=[self.course.slug],
28 )
29
30 def invite_to_clickmeeting(self):
31 if self.course.clickmeeting_room_url is not None:
32 invite_to_clickmeeting.delay(
33 room_url=self.course.clickmeeting_room_url,
34 email=self.user.email,
35 )
36
37 def invite_to_zoomus(self):
38 if self.course.zoomus_webinar_id is not None and len(self.course.zoomus_webinar_id):
39 invite_to_zoomus.delay(
40 webinar_id=self.course.zoomus_webinar_id,
41 user_id=self.user.id,
42 )
43
44 def send_welcome_letter(self):
45 if self.welcome_letter_template_id is not None:
46 send_mail.delay(
47 to=self.user.email,
48 template_id=self.welcome_letter_template_id,
49 ctx=self.get_template_context(),
50 disable_antispam=True,
51 )
52
53 def get_template_context(self) -> dict:
54 return {
55 'name': self.course.name,
56 'slug': self.course.slug,
57 'name_genitive': self.course.name_genitive,
58 **self.get_gift_template_context(),
59 }
60
61 @property
62 def welcome_letter_template_id(self) -> Optional[str]:
63 """Get special gift template letter id if order is a gift and it is present"""
64 template_id = self.course.welcome_letter_template_id
65
66 if self.order is not None and self.order.giver is not None: # this is a gift
67 template_id = self.course.gift_welcome_letter_template_id or self.course.welcome_letter_template_id
68
69 if template_id is None or not len(template_id): # fuck this null=True in CharFields
70 return None
71
72 return template_id
73
[end of src/shipping/shipments/course.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/magnets/creator.py b/src/magnets/creator.py
--- a/src/magnets/creator.py
+++ b/src/magnets/creator.py
@@ -23,6 +23,7 @@
name=self.data['name'],
email=self.data['email'],
subscribe=True,
+ tags=self.tags,
)()
def _create_log_entry(self):
@@ -30,3 +31,7 @@
user=self.user,
campaign=self.campaign,
)
+
+ @property
+ def tags(self):
+ return [f'{self.campaign.slug}-lead-magnet']
diff --git a/src/shipping/shipments/course.py b/src/shipping/shipments/course.py
--- a/src/shipping/shipments/course.py
+++ b/src/shipping/shipments/course.py
@@ -24,7 +24,7 @@
subscribe_to_mailchimp.delay(
list_id=self.course.mailchimp_list_id,
user_id=self.user.pk,
- tags=[self.course.slug],
+ tags=[self.course.slug, f'{self.course.slug}-purchased'],
)
def invite_to_clickmeeting(self):
|
{"golden_diff": "diff --git a/src/magnets/creator.py b/src/magnets/creator.py\n--- a/src/magnets/creator.py\n+++ b/src/magnets/creator.py\n@@ -23,6 +23,7 @@\n name=self.data['name'],\n email=self.data['email'],\n subscribe=True,\n+ tags=self.tags,\n )()\n \n def _create_log_entry(self):\n@@ -30,3 +31,7 @@\n user=self.user,\n campaign=self.campaign,\n )\n+\n+ @property\n+ def tags(self):\n+ return [f'{self.campaign.slug}-lead-magnet']\ndiff --git a/src/shipping/shipments/course.py b/src/shipping/shipments/course.py\n--- a/src/shipping/shipments/course.py\n+++ b/src/shipping/shipments/course.py\n@@ -24,7 +24,7 @@\n subscribe_to_mailchimp.delay(\n list_id=self.course.mailchimp_list_id,\n user_id=self.user.pk,\n- tags=[self.course.slug],\n+ tags=[self.course.slug, f'{self.course.slug}-purchased'],\n )\n \n def invite_to_clickmeeting(self):\n", "issue": "\u041a\u0430\u0441\u0442\u043e\u043c\u043d\u044b\u0435 \u0442\u0435\u0433\u0438 \u0432 \u043c\u0435\u0439\u043b\u0447\u0438\u043c\u043f\u0435\n\u0427\u0442\u043e\u0431\u044b \u043c\u043e\u0436\u043d\u043e \u0431\u044b\u043b\u043e \u0432 \u043b\u0438\u0434-\u043c\u0430\u0433\u043d\u0438\u0442\u0435 \u0443\u043a\u0430\u0437\u0430\u0442\u044c \u0442\u0435\u0433\u0438, \u043a\u043e\u0442\u043e\u0440\u044b\u0435 \u043f\u0440\u043e\u0431\u0440\u0430\u0441\u044b\u0432\u0430\u044e\u0442\u0441\u044f \u0432 \u0430\u0443\u0434\u0438\u0442\u043e\u0440\u0438\u044e \u043c\u0435\u0439\u043b\u0447\u0438\u043c\u043f\u0430\n", "before_files": [{"content": "from magnets.models import EmailLeadMagnetCampaign, LeadCampaignLogEntry\nfrom users.creator import UserCreator\nfrom users.models import User\n\n\nclass LeadCreator:\n def __init__(self, campaign: EmailLeadMagnetCampaign, email: str, name: str = None):\n self.data = {\n 'name': name,\n 'email': email,\n }\n\n self.campaign = campaign\n\n def __call__(self):\n self.user = self._create_user()\n self._create_log_entry()\n\n self.campaign.execute(self.user)\n\n def _create_user(self) -> User:\n return UserCreator(\n name=self.data['name'],\n email=self.data['email'],\n subscribe=True,\n )()\n\n def _create_log_entry(self):\n LeadCampaignLogEntry.objects.create(\n user=self.user,\n campaign=self.campaign,\n )\n", "path": "src/magnets/creator.py"}, {"content": "from typing import Optional\n\nfrom app.tasks import invite_to_clickmeeting, invite_to_zoomus, send_mail, subscribe_to_mailchimp\nfrom products.models import Course\nfrom shipping import factory\nfrom shipping.shipments.base import BaseShipment\n\n\[email protected](Course)\nclass CourseShipment(BaseShipment):\n @property\n def course(self):\n return self.stuff_to_ship\n\n def ship(self):\n self.invite_to_clickmeeting()\n self.invite_to_zoomus()\n self.subscribe_to_mailchimp()\n\n self.send_welcome_letter()\n\n def subscribe_to_mailchimp(self):\n if self.course.mailchimp_list_id is not None:\n subscribe_to_mailchimp.delay(\n list_id=self.course.mailchimp_list_id,\n user_id=self.user.pk,\n tags=[self.course.slug],\n )\n\n def invite_to_clickmeeting(self):\n if self.course.clickmeeting_room_url is not None:\n invite_to_clickmeeting.delay(\n room_url=self.course.clickmeeting_room_url,\n email=self.user.email,\n )\n\n def invite_to_zoomus(self):\n if self.course.zoomus_webinar_id is not None and len(self.course.zoomus_webinar_id):\n invite_to_zoomus.delay(\n webinar_id=self.course.zoomus_webinar_id,\n user_id=self.user.id,\n )\n\n def send_welcome_letter(self):\n if self.welcome_letter_template_id is not None:\n send_mail.delay(\n to=self.user.email,\n 
template_id=self.welcome_letter_template_id,\n ctx=self.get_template_context(),\n disable_antispam=True,\n )\n\n def get_template_context(self) -> dict:\n return {\n 'name': self.course.name,\n 'slug': self.course.slug,\n 'name_genitive': self.course.name_genitive,\n **self.get_gift_template_context(),\n }\n\n @property\n def welcome_letter_template_id(self) -> Optional[str]:\n \"\"\"Get special gift template letter id if order is a gift and it is present\"\"\"\n template_id = self.course.welcome_letter_template_id\n\n if self.order is not None and self.order.giver is not None: # this is a gift\n template_id = self.course.gift_welcome_letter_template_id or self.course.welcome_letter_template_id\n\n if template_id is None or not len(template_id): # fuck this null=True in CharFields\n return None\n\n return template_id\n", "path": "src/shipping/shipments/course.py"}]}
| 1,505 | 256 |
gh_patches_debug_23292
|
rasdani/github-patches
|
git_diff
|
PennyLaneAI__pennylane-327
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing plot lines in VQE tutorial
#### Issue description
The second and third plots in the VQE tutorial do not show the line traced by gradient descent; the legends are shown, however.
Is this problem coming from making this file a "run" file?
https://pennylane.readthedocs.io/en/latest/tutorials/pennylane_run_variational_quantum_eigensolver.html#vqe
* *Expected behavior:*
The plots should show the gradient descent on top of the optimization landscape.
</issue>
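For readers reproducing the figures, the plotting pattern at stake is overlaying the optimizer's trajectory on the cost landscape using the same axes; a generic matplotlib sketch with toy data (unrelated to the tutorial's actual cost function) looks like this:

```python
# Generic sketch: draw a cost landscape and overlay an optimizer's path.
# Both calls must target the same Axes, otherwise the trajectory can end
# up on a different figure and silently disappear from the rendered plot.
import matplotlib.pyplot as plt
import numpy as np

xs = np.linspace(-3, 3, 100)
X, Y = np.meshgrid(xs, xs)
Z = (0.1 * np.sin(X) + 0.5 * np.sin(Y)) ** 2                        # toy landscape
path = np.array([[2.5, 2.0], [1.8, 1.2], [1.1, 0.6], [0.4, 0.1]])   # toy GD steps

fig, ax = plt.subplots()
ax.contourf(X, Y, Z, levels=30)
ax.plot(path[:, 0], path[:, 1], 'o--', color='white', label='gradient descent')
ax.legend()
plt.show()
```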
<code>
[start of examples/pennylane_run_variational_quantum_eigensolver.py]
1 r"""
2 .. _vqe:
3
4 Variational quantum eigensolver
5 ===============================
6
7 This example demonstrates the principle of a variational quantum
8 eigensolver (VQE), originally proposed in `Peruzzo et al.
9 (2014) <https://www.nature.com/articles/ncomms5213>`__. To showcase the
10 hybrid computational capabilities of PennyLane, we first train a quantum
11 circuit to minimize the squared energy expectation for a Hamiltonian
12 :math:`H`,
13
14 .. math::
15
16 \langle \psi_v | H | \psi_v \rangle^2 =( 0.1 \langle \psi_{v} | X_2 |
17 \psi_v \rangle + 0.5 \langle \psi_v | Y_2 | \psi_v \rangle )^2.
18
19 Here, :math:`|\psi_v\rangle` is the state
20 obtained after applying a quantum circuit to an initial state
21 :math:`|0\rangle`. The quantum circuit depends on trainable variables
22 :math:`v = \{v_1, v_2\}`, and :math:`X_2`, :math:`Y_2` denote the
23 Pauli-X and Pauli-Y operator acting on the second qubit (*Note: We apply
24 the square to make the optimization landscapes more interesting, but in
25 common applications the cost is directly the energy expectation value*).
26
27 After doing this, we will then turn things around and use a fixed
28 quantum circuit to prepare a state :math:`|\psi\rangle`, but train the coefficients of
29 the Hamiltonian to minimize
30
31 .. math::
32
33 \langle \psi | H | \psi \rangle^2 = (v_1 \langle \psi | X_2 | \psi
34 \rangle + v_2 \langle \psi | Y_2 | \psi \rangle )^2 .
35 """
36
37 ##############################################################################
38 # 1. Optimizing the quantum circuit
39 # ---------------------------------
40 #
41 # Imports
42 # ~~~~~~~
43 #
44 # We begin by importing PennyLane, the PennyLane-wrapped version of NumPy,
45 # and the GradientDescentOptimizer.
46
47 import pennylane as qml
48 from pennylane import numpy as np
49 from pennylane.optimize import GradientDescentOptimizer
50
51 ##############################################################################
52 # We use the default qubit simulator as a device.
53
54 dev = qml.device("default.qubit", wires=2)
55
56 ##############################################################################
57 # Quantum nodes
58 # ~~~~~~~~~~~~~
59 #
60 # The quantum circuit of the variational eigensolver is an ansatz that
61 # defines a manifold of possible quantum states. We use a Hadamard, two
62 # rotations and a CNOT gate to construct our circuit.
63
64
65 def ansatz(var):
66 qml.Rot(0.3, 1.8, 5.4, wires=1)
67 qml.RX(var[0], wires=0)
68 qml.RY(var[1], wires=1)
69 qml.CNOT(wires=[0, 1])
70
71
72 ##############################################################################
73 # A variational eigensolver requires us to evaluate expectations of
74 # different Pauli operators. In this example, the Hamiltonian is expressed
75 # by only two single-qubit Pauli operators, namely the X and Y operator
76 # applied to the first qubit.
77 #
78 # Since these operators will be measured on the same wire, we will need to
79 # create two quantum nodes (one for each operator whose expectation value
80 # we measure), but we can reuse the same device.
81 #
82 # .. note::
83 #
84 # If the Pauli observables were evaluated on different wires, we
85 # could use one quantum node and return a tuple of expectations in only
86 # one quantum node:
87 # ``return qml.expectation.PauliX(0), qml.expectation.PauliY(1)``
88
89
90 @qml.qnode(dev)
91 def circuit_X(var):
92 ansatz(var)
93 return qml.expval(qml.PauliX(1))
94
95
96 @qml.qnode(dev)
97 def circuit_Y(var):
98 ansatz(var)
99 return qml.expval(qml.PauliY(1))
100
101
102 ##############################################################################
103 # Objective
104 # ~~~~~~~~~
105
106 # The cost function to be optimized in VQE is simply a linear combination
107 # of the expectations, which defines the expectation of the Hamiltonian we
108 # are interested in. In our case, we square this cost function to provide
109 # a more interesting landscape with the same minima.
110
111
112 def cost(var):
113 expX = circuit_X(var)
114 expY = circuit_Y(var)
115 return (0.1 * expX + 0.5 * expY) ** 2
116
117
118 ##############################################################################
119 # This cost defines the following landscape:
120 #
121 # *Note: To run the following cell you need the matplotlib library.*
122
123 import matplotlib.pyplot as plt
124 from mpl_toolkits.mplot3d import Axes3D
125 from matplotlib import cm
126 from matplotlib.ticker import MaxNLocator
127
128 fig = plt.figure(figsize=(6, 4))
129 ax = fig.gca(projection="3d")
130
131 X = np.linspace(-3.0, 3.0, 20)
132 Y = np.linspace(-3.0, 3.0, 20)
133 xx, yy = np.meshgrid(X, Y)
134 Z = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))
135 surf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)
136
137 ax.set_xlabel("v1")
138 ax.set_ylabel("v2")
139 ax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune="lower"))
140
141 plt.show()
142
143 ##############################################################################
144 # Optimization
145 # ~~~~~~~~~~~~
146 #
147 # We create a GradientDescentOptimizer and use it to optimize the cost
148 # function.
149
150 opt = GradientDescentOptimizer(0.5)
151
152 var = [0.3, 2.5]
153 var_gd = [var]
154 for it in range(20):
155 var = opt.step(cost, var)
156 var_gd.append(var)
157
158 print(
159 "Cost after step {:5d}: {: .7f} | Variables: [{: .5f},{: .5f}]".format(
160 it + 1, cost(var), var[0], var[1]
161 )
162 )
163
164 ##############################################################################
165 # We can plot the path that the variables took during gradient descent. To
166 # make the plot more clear, we will shorten the range for :math:`v_2`.
167
168 fig = plt.figure(figsize=(6, 4))
169 ax = fig.gca(projection="3d")
170
171 X = np.linspace(-3, np.pi / 2, 20)
172 Y = np.linspace(-3, 3, 20)
173 xx, yy = np.meshgrid(X, Y)
174 Z = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))
175 surf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)
176
177 path_z = [cost(var) + 1e-8 for var in var_gd]
178 path_x = [v[0] for v in var_gd]
179 path_y = [v[1] for v in var_gd]
180 ax.plot(path_x, path_y, path_z, c="green", marker=".", label="graddesc")
181
182 ax.set_xlabel("v1")
183 ax.set_ylabel("v2")
184 ax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune="lower"))
185
186 plt.legend()
187 plt.show()
188
189
190 ##############################################################################
191 # 2. Optimizing the Hamiltonian coefficients
192 # ------------------------------------------
193 #
194 # Instead of optimizing the circuit parameters, we can also use a fixed
195 # circuit,
196
197
198 def ansatz():
199 qml.Rot(0.3, 1.8, 5.4, wires=1)
200 qml.RX(-0.5, wires=0)
201 qml.RY(0.5, wires=1)
202 qml.CNOT(wires=[0, 1])
203
204
205 @qml.qnode(dev)
206 def circuit_X():
207 ansatz()
208 return qml.expval(qml.PauliX(1))
209
210
211 @qml.qnode(dev)
212 def circuit_Y():
213 ansatz()
214 return qml.expval(qml.PauliY(1))
215
216
217 ##############################################################################
218 # and make the classical coefficients that appear in the Hamiltonian the
219 # trainable variables.
220
221
222 def cost(var):
223 expX = circuit_X()
224 expY = circuit_Y()
225 return (var[0] * expX + var[1] * expY) ** 2
226
227
228 opt = GradientDescentOptimizer(0.5)
229
230 var = [0.3, 2.5]
231 var_gd = [var]
232 for it in range(20):
233 var = opt.step(cost, var)
234 var_gd.append(var)
235
236 print(
237 "Cost after step {:5d}: {: .7f} | Variables: [{: .5f},{: .5f}]".format(
238 it + 1, cost(var), var[0], var[1]
239 )
240 )
241
242 ##############################################################################
243 # The landscape has a quadratic shape.
244
245 fig = plt.figure(figsize=(6, 4))
246 ax = fig.gca(projection="3d")
247
248 X = np.linspace(-3, np.pi / 2, 20)
249 Y = np.linspace(-3, 3, 20)
250 xx, yy = np.meshgrid(X, Y)
251 Z = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))
252 surf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)
253
254 path_z = [cost(var) + 1e-8 for var in var_gd]
255 path_x = [v[0] for v in var_gd]
256 path_y = [v[1] for v in var_gd]
257 ax.plot(path_x, path_y, path_z, c="pink", marker=".", label="graddesc")
258
259 ax.set_xlabel("v1")
260 ax.set_ylabel("v2")
261 ax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune="lower"))
262
263 plt.legend()
264 plt.show()
265
266
267 ##############################################################################
268 # 3. Optimizing classical and quantum parameters
269 # ----------------------------------------------
270
271 # Finally, we can optimize *classical* and *quantum* weights together by
272 # combining the two approaches from above.
273
274
275 def ansatz(var):
276
277 qml.Rot(0.3, 1.8, 5.4, wires=1)
278 qml.RX(var[0], wires=0)
279 qml.RY(var[1], wires=1)
280 qml.CNOT(wires=[0, 1])
281
282
283 @qml.qnode(dev)
284 def circuit_X(var):
285 ansatz(var)
286 return qml.expval(qml.PauliX(1))
287
288
289 @qml.qnode(dev)
290 def circuit_Y(var):
291 ansatz(var)
292 return qml.expval(qml.PauliY(1))
293
294
295 def cost(var):
296
297 expX = circuit_X(var)
298 expY = circuit_Y(var)
299
300 return (var[2] * expX + var[3] * expY) ** 2
301
302
303 opt = GradientDescentOptimizer(0.5)
304 var = [0.3, 2.5, 0.3, 2.5]
305
306 for it in range(10):
307 var = opt.step(cost, var)
308 print("Cost after step {:5d}: {: 0.7f}".format(it + 1, cost(var)))
309
[end of examples/pennylane_run_variational_quantum_eigensolver.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/pennylane_run_variational_quantum_eigensolver.py b/examples/pennylane_run_variational_quantum_eigensolver.py
--- a/examples/pennylane_run_variational_quantum_eigensolver.py
+++ b/examples/pennylane_run_variational_quantum_eigensolver.py
@@ -172,12 +172,13 @@
Y = np.linspace(-3, 3, 20)
xx, yy = np.meshgrid(X, Y)
Z = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))
+
surf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)
path_z = [cost(var) + 1e-8 for var in var_gd]
path_x = [v[0] for v in var_gd]
path_y = [v[1] for v in var_gd]
-ax.plot(path_x, path_y, path_z, c="green", marker=".", label="graddesc")
+ax.plot(path_x, path_y, path_z, c="green", marker=".", label="graddesc", zorder=10)
ax.set_xlabel("v1")
ax.set_ylabel("v2")
@@ -254,7 +255,7 @@
path_z = [cost(var) + 1e-8 for var in var_gd]
path_x = [v[0] for v in var_gd]
path_y = [v[1] for v in var_gd]
-ax.plot(path_x, path_y, path_z, c="pink", marker=".", label="graddesc")
+ax.plot(path_x, path_y, path_z, c="pink", marker=".", label="graddesc", zorder=10)
ax.set_xlabel("v1")
ax.set_ylabel("v2")
|
{"golden_diff": "diff --git a/examples/pennylane_run_variational_quantum_eigensolver.py b/examples/pennylane_run_variational_quantum_eigensolver.py\n--- a/examples/pennylane_run_variational_quantum_eigensolver.py\n+++ b/examples/pennylane_run_variational_quantum_eigensolver.py\n@@ -172,12 +172,13 @@\n Y = np.linspace(-3, 3, 20)\n xx, yy = np.meshgrid(X, Y)\n Z = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))\n+\n surf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)\n \n path_z = [cost(var) + 1e-8 for var in var_gd]\n path_x = [v[0] for v in var_gd]\n path_y = [v[1] for v in var_gd]\n-ax.plot(path_x, path_y, path_z, c=\"green\", marker=\".\", label=\"graddesc\")\n+ax.plot(path_x, path_y, path_z, c=\"green\", marker=\".\", label=\"graddesc\", zorder=10)\n \n ax.set_xlabel(\"v1\")\n ax.set_ylabel(\"v2\")\n@@ -254,7 +255,7 @@\n path_z = [cost(var) + 1e-8 for var in var_gd]\n path_x = [v[0] for v in var_gd]\n path_y = [v[1] for v in var_gd]\n-ax.plot(path_x, path_y, path_z, c=\"pink\", marker=\".\", label=\"graddesc\")\n+ax.plot(path_x, path_y, path_z, c=\"pink\", marker=\".\", label=\"graddesc\", zorder=10)\n \n ax.set_xlabel(\"v1\")\n ax.set_ylabel(\"v2\")\n", "issue": "Missing plot lines in VQE tutorial \n#### Issue description\r\n\r\nThe second and third plots in the VQE tutorial do not show the plot line of the gradient descent. The legends are shown, however. \r\n\r\nIs this problem coming from making this file a \"run\" file? \r\n\r\nhttps://pennylane.readthedocs.io/en/latest/tutorials/pennylane_run_variational_quantum_eigensolver.html#vqe\r\n\r\n* *Expected behavior:*\r\n\r\nThe plots should show the gradient descent on top of the optimization landscape. \r\n\r\n\n", "before_files": [{"content": "r\"\"\"\n.. _vqe:\n\nVariational quantum eigensolver\n===============================\n\nThis example demonstrates the principle of a variational quantum\neigensolver (VQE), originally proposed in `Peruzzo et al.\n(2014) <https://www.nature.com/articles/ncomms5213>`__. To showcase the\nhybrid computational capabilities of PennyLane, we first train a quantum\ncircuit to minimize the squared energy expectation for a Hamiltonian\n:math:`H`,\n\n.. math::\n\n \\langle \\psi_v | H | \\psi_v \\rangle^2 =( 0.1 \\langle \\psi_{v} | X_2 |\n \\psi_v \\rangle + 0.5 \\langle \\psi_v | Y_2 | \\psi_v \\rangle )^2.\n\nHere, :math:`|\\psi_v\\rangle` is the state\nobtained after applying a quantum circuit to an initial state\n:math:`|0\\rangle`. The quantum circuit depends on trainable variables\n:math:`v = \\{v_1, v_2\\}`, and :math:`X_2`, :math:`Y_2` denote the\nPauli-X and Pauli-Y operator acting on the second qubit (*Note: We apply\nthe square to make the optimization landscapes more interesting, but in\ncommon applications the cost is directly the energy expectation value*).\n\nAfter doing this, we will then turn things around and use a fixed\nquantum circuit to prepare a state :math:`|\\psi\\rangle`, but train the coefficients of\nthe Hamiltonian to minimize\n\n.. math::\n\n \\langle \\psi | H | \\psi \\rangle^2 = (v_1 \\langle \\psi | X_2 | \\psi\n \\rangle + v_2 \\langle \\psi | Y_2 | \\psi \\rangle )^2 .\n\"\"\"\n\n##############################################################################\n# 1. 
Optimizing the quantum circuit\n# ---------------------------------\n#\n# Imports\n# ~~~~~~~\n#\n# We begin by importing PennyLane, the PennyLane-wrapped version of NumPy,\n# and the GradientDescentOptimizer.\n\nimport pennylane as qml\nfrom pennylane import numpy as np\nfrom pennylane.optimize import GradientDescentOptimizer\n\n##############################################################################\n# We use the default qubit simulator as a device.\n\ndev = qml.device(\"default.qubit\", wires=2)\n\n##############################################################################\n# Quantum nodes\n# ~~~~~~~~~~~~~\n#\n# The quantum circuit of the variational eigensolver is an ansatz that\n# defines a manifold of possible quantum states. We use a Hadamard, two\n# rotations and a CNOT gate to construct our circuit.\n\n\ndef ansatz(var):\n qml.Rot(0.3, 1.8, 5.4, wires=1)\n qml.RX(var[0], wires=0)\n qml.RY(var[1], wires=1)\n qml.CNOT(wires=[0, 1])\n\n\n##############################################################################\n# A variational eigensolver requires us to evaluate expectations of\n# different Pauli operators. In this example, the Hamiltonian is expressed\n# by only two single-qubit Pauli operators, namely the X and Y operator\n# applied to the first qubit.\n#\n# Since these operators will be measured on the same wire, we will need to\n# create two quantum nodes (one for each operator whose expectation value\n# we measure), but we can reuse the same device.\n#\n# .. note::\n#\n# If the Pauli observables were evaluated on different wires, we\n# could use one quantum node and return a tuple of expectations in only\n# one quantum node:\n# ``return qml.expectation.PauliX(0), qml.expectation.PauliY(1)``\n\n\[email protected](dev)\ndef circuit_X(var):\n ansatz(var)\n return qml.expval(qml.PauliX(1))\n\n\[email protected](dev)\ndef circuit_Y(var):\n ansatz(var)\n return qml.expval(qml.PauliY(1))\n\n\n##############################################################################\n# Objective\n# ~~~~~~~~~\n\n# The cost function to be optimized in VQE is simply a linear combination\n# of the expectations, which defines the expectation of the Hamiltonian we\n# are interested in. 
In our case, we square this cost function to provide\n# a more interesting landscape with the same minima.\n\n\ndef cost(var):\n expX = circuit_X(var)\n expY = circuit_Y(var)\n return (0.1 * expX + 0.5 * expY) ** 2\n\n\n##############################################################################\n# This cost defines the following landscape:\n#\n# *Note: To run the following cell you need the matplotlib library.*\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import MaxNLocator\n\nfig = plt.figure(figsize=(6, 4))\nax = fig.gca(projection=\"3d\")\n\nX = np.linspace(-3.0, 3.0, 20)\nY = np.linspace(-3.0, 3.0, 20)\nxx, yy = np.meshgrid(X, Y)\nZ = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))\nsurf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)\n\nax.set_xlabel(\"v1\")\nax.set_ylabel(\"v2\")\nax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune=\"lower\"))\n\nplt.show()\n\n##############################################################################\n# Optimization\n# ~~~~~~~~~~~~\n#\n# We create a GradientDescentOptimizer and use it to optimize the cost\n# function.\n\nopt = GradientDescentOptimizer(0.5)\n\nvar = [0.3, 2.5]\nvar_gd = [var]\nfor it in range(20):\n var = opt.step(cost, var)\n var_gd.append(var)\n\n print(\n \"Cost after step {:5d}: {: .7f} | Variables: [{: .5f},{: .5f}]\".format(\n it + 1, cost(var), var[0], var[1]\n )\n )\n\n##############################################################################\n# We can plot the path that the variables took during gradient descent. To\n# make the plot more clear, we will shorten the range for :math:`v_2`.\n\nfig = plt.figure(figsize=(6, 4))\nax = fig.gca(projection=\"3d\")\n\nX = np.linspace(-3, np.pi / 2, 20)\nY = np.linspace(-3, 3, 20)\nxx, yy = np.meshgrid(X, Y)\nZ = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))\nsurf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)\n\npath_z = [cost(var) + 1e-8 for var in var_gd]\npath_x = [v[0] for v in var_gd]\npath_y = [v[1] for v in var_gd]\nax.plot(path_x, path_y, path_z, c=\"green\", marker=\".\", label=\"graddesc\")\n\nax.set_xlabel(\"v1\")\nax.set_ylabel(\"v2\")\nax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune=\"lower\"))\n\nplt.legend()\nplt.show()\n\n\n##############################################################################\n# 2. 
Optimizing the Hamiltonian coefficients\n# ------------------------------------------\n#\n# Instead of optimizing the circuit parameters, we can also use a fixed\n# circuit,\n\n\ndef ansatz():\n qml.Rot(0.3, 1.8, 5.4, wires=1)\n qml.RX(-0.5, wires=0)\n qml.RY(0.5, wires=1)\n qml.CNOT(wires=[0, 1])\n\n\[email protected](dev)\ndef circuit_X():\n ansatz()\n return qml.expval(qml.PauliX(1))\n\n\[email protected](dev)\ndef circuit_Y():\n ansatz()\n return qml.expval(qml.PauliY(1))\n\n\n##############################################################################\n# and make the classical coefficients that appear in the Hamiltonian the\n# trainable variables.\n\n\ndef cost(var):\n expX = circuit_X()\n expY = circuit_Y()\n return (var[0] * expX + var[1] * expY) ** 2\n\n\nopt = GradientDescentOptimizer(0.5)\n\nvar = [0.3, 2.5]\nvar_gd = [var]\nfor it in range(20):\n var = opt.step(cost, var)\n var_gd.append(var)\n\n print(\n \"Cost after step {:5d}: {: .7f} | Variables: [{: .5f},{: .5f}]\".format(\n it + 1, cost(var), var[0], var[1]\n )\n )\n\n##############################################################################\n# The landscape has a quadratic shape.\n\nfig = plt.figure(figsize=(6, 4))\nax = fig.gca(projection=\"3d\")\n\nX = np.linspace(-3, np.pi / 2, 20)\nY = np.linspace(-3, 3, 20)\nxx, yy = np.meshgrid(X, Y)\nZ = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))\nsurf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)\n\npath_z = [cost(var) + 1e-8 for var in var_gd]\npath_x = [v[0] for v in var_gd]\npath_y = [v[1] for v in var_gd]\nax.plot(path_x, path_y, path_z, c=\"pink\", marker=\".\", label=\"graddesc\")\n\nax.set_xlabel(\"v1\")\nax.set_ylabel(\"v2\")\nax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune=\"lower\"))\n\nplt.legend()\nplt.show()\n\n\n##############################################################################\n# 3. Optimizing classical and quantum parameters\n# ----------------------------------------------\n\n# Finally, we can optimize *classical* and *quantum* weights together by\n# combining the two approaches from above.\n\n\ndef ansatz(var):\n\n qml.Rot(0.3, 1.8, 5.4, wires=1)\n qml.RX(var[0], wires=0)\n qml.RY(var[1], wires=1)\n qml.CNOT(wires=[0, 1])\n\n\[email protected](dev)\ndef circuit_X(var):\n ansatz(var)\n return qml.expval(qml.PauliX(1))\n\n\[email protected](dev)\ndef circuit_Y(var):\n ansatz(var)\n return qml.expval(qml.PauliY(1))\n\n\ndef cost(var):\n\n expX = circuit_X(var)\n expY = circuit_Y(var)\n\n return (var[2] * expX + var[3] * expY) ** 2\n\n\nopt = GradientDescentOptimizer(0.5)\nvar = [0.3, 2.5, 0.3, 2.5]\n\nfor it in range(10):\n var = opt.step(cost, var)\n print(\"Cost after step {:5d}: {: 0.7f}\".format(it + 1, cost(var)))\n", "path": "examples/pennylane_run_variational_quantum_eigensolver.py"}]}
| 4,015 | 426 |
gh_patches_debug_4029 | rasdani/github-patches | git_diff | saleor__saleor-723 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add checkout steps navigation
Right now there is no explicit navigation. Using the browser back button can be dangerous in some cases, and it is not common behavior in ecommerce.
</issue>
<code>
[start of saleor/order/views.py]
1 import logging
2
3 from django.conf import settings
4 from django.contrib import messages, auth
5 from django.db import transaction
6 from django.http import Http404, HttpResponseForbidden
7 from django.shortcuts import get_object_or_404, redirect
8 from django.utils.translation import ugettext as _
9 from django.template.response import TemplateResponse
10 from payments import RedirectNeeded
11
12 from .forms import PaymentDeleteForm, PaymentMethodsForm, PasswordForm
13 from .models import Order, Payment
14 from ..core.utils import get_client_ip
15 from .utils import check_order_status
16
17 logger = logging.getLogger(__name__)
18
19
20 def details(request, token):
21 orders = Order.objects.prefetch_related('groups__items')
22 order = get_object_or_404(orders, token=token)
23 groups = order.groups.all()
24 return TemplateResponse(request, 'order/details.html',
25 {'order': order, 'groups': groups})
26
27
28 def payment(request, token):
29 orders = Order.objects.prefetch_related('groups__items')
30 order = get_object_or_404(orders, token=token)
31 groups = order.groups.all()
32 payments = order.payments.all()
33 form_data = request.POST or None
34 try:
35 waiting_payment = order.payments.get(status='waiting')
36 except Payment.DoesNotExist:
37 waiting_payment = None
38 waiting_payment_form = None
39 else:
40 form_data = None
41 waiting_payment_form = PaymentDeleteForm(
42 None, order=order, initial={'payment_id': waiting_payment.id})
43 if order.is_fully_paid():
44 form_data = None
45 payment_form = None
46 if not order.is_pre_authorized():
47 payment_form = PaymentMethodsForm(form_data)
48 # FIXME: redirect if there is only one payment method
49 if payment_form.is_valid():
50 payment_method = payment_form.cleaned_data['method']
51 return redirect('order:payment', token=order.token,
52 variant=payment_method)
53 return TemplateResponse(request, 'order/payment.html',
54 {'order': order, 'groups': groups,
55 'payment_form': payment_form,
56 'waiting_payment': waiting_payment,
57 'waiting_payment_form': waiting_payment_form,
58 'payments': payments})
59
60
61 @check_order_status
62 def start_payment(request, order, variant):
63 waiting_payments = order.payments.filter(status='waiting').exists()
64 if waiting_payments:
65 return redirect('order:payment', token=order.token)
66 billing = order.billing_address
67 total = order.get_total()
68 defaults = {'total': total.gross,
69 'tax': total.tax, 'currency': total.currency,
70 'delivery': order.get_delivery_total().gross,
71 'billing_first_name': billing.first_name,
72 'billing_last_name': billing.last_name,
73 'billing_address_1': billing.street_address_1,
74 'billing_address_2': billing.street_address_2,
75 'billing_city': billing.city,
76 'billing_postcode': billing.postal_code,
77 'billing_country_code': billing.country,
78 'billing_email': order.user_email,
79 'description': _('Order %(order_number)s') % {
80 'order_number': order},
81 'billing_country_area': billing.country_area,
82 'customer_ip_address': get_client_ip(request)}
83 variant_choices = settings.CHECKOUT_PAYMENT_CHOICES
84 if variant not in [code for code, dummy_name in variant_choices]:
85 raise Http404('%r is not a valid payment variant' % (variant,))
86 with transaction.atomic():
87 order.change_status('payment-pending')
88 payment, dummy_created = Payment.objects.get_or_create(
89 variant=variant, status='waiting', order=order, defaults=defaults)
90 try:
91 form = payment.get_form(data=request.POST or None)
92 except RedirectNeeded as redirect_to:
93 return redirect(str(redirect_to))
94 except Exception:
95 logger.exception('Error communicating with the payment gateway')
96 messages.error(
97 request,
98 _('Oops, it looks like we were unable to contact the selected'
99 ' payment service'))
100 payment.change_status('error')
101 return redirect('order:payment', token=order.token)
102 template = 'order/payment/%s.html' % variant
103 return TemplateResponse(request, [template, 'order/payment/default.html'],
104 {'form': form, 'payment': payment})
105
106
107 @check_order_status
108 def cancel_payment(request, order):
109 form = PaymentDeleteForm(request.POST or None, order=order)
110 if form.is_valid():
111 with transaction.atomic():
112 form.save()
113 return redirect('order:payment', token=order.token)
114 return HttpResponseForbidden()
115
116
117 def create_password(request, token):
118 if request.user.is_authenticated():
119 return redirect('order:details', token=token)
120 order = get_object_or_404(Order, token=token)
121 email = order.user_email
122 form_data = request.POST.copy()
123 if form_data:
124 form_data.update({'email': email})
125 form = PasswordForm(form_data or None)
126
127 if form.is_valid():
128 user = form.save(request)
129 order.user = user
130 order.save(update_fields=['user'])
131 password = form_data.get('password1')
132 auth_user = auth.authenticate(email=email, password=password)
133 if auth_user is not None:
134 auth.login(request, auth_user)
135 return redirect('order:details', token=token)
136 ctx = {'form': form, 'email': email}
137 return TemplateResponse(request, 'order/create_password.html', ctx)
138
[end of saleor/order/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/saleor/order/views.py b/saleor/order/views.py
--- a/saleor/order/views.py
+++ b/saleor/order/views.py
@@ -133,5 +133,5 @@
if auth_user is not None:
auth.login(request, auth_user)
return redirect('order:details', token=token)
- ctx = {'form': form, 'email': email}
+ ctx = {'form': form, 'email': email, 'order': order}
return TemplateResponse(request, 'order/create_password.html', ctx)
|
{"golden_diff": "diff --git a/saleor/order/views.py b/saleor/order/views.py\n--- a/saleor/order/views.py\n+++ b/saleor/order/views.py\n@@ -133,5 +133,5 @@\n if auth_user is not None:\n auth.login(request, auth_user)\n return redirect('order:details', token=token)\n- ctx = {'form': form, 'email': email}\n+ ctx = {'form': form, 'email': email, 'order': order}\n return TemplateResponse(request, 'order/create_password.html', ctx)\n", "issue": "Add checkout steps navigation\nNow there's no explicit navigation. Using browser back button can be dangerous in some cases and it's not a common behavior in ecommerce\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib import messages, auth\nfrom django.db import transaction\nfrom django.http import Http404, HttpResponseForbidden\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.translation import ugettext as _\nfrom django.template.response import TemplateResponse\nfrom payments import RedirectNeeded\n\nfrom .forms import PaymentDeleteForm, PaymentMethodsForm, PasswordForm\nfrom .models import Order, Payment\nfrom ..core.utils import get_client_ip\nfrom .utils import check_order_status\n\nlogger = logging.getLogger(__name__)\n\n\ndef details(request, token):\n orders = Order.objects.prefetch_related('groups__items')\n order = get_object_or_404(orders, token=token)\n groups = order.groups.all()\n return TemplateResponse(request, 'order/details.html',\n {'order': order, 'groups': groups})\n\n\ndef payment(request, token):\n orders = Order.objects.prefetch_related('groups__items')\n order = get_object_or_404(orders, token=token)\n groups = order.groups.all()\n payments = order.payments.all()\n form_data = request.POST or None\n try:\n waiting_payment = order.payments.get(status='waiting')\n except Payment.DoesNotExist:\n waiting_payment = None\n waiting_payment_form = None\n else:\n form_data = None\n waiting_payment_form = PaymentDeleteForm(\n None, order=order, initial={'payment_id': waiting_payment.id})\n if order.is_fully_paid():\n form_data = None\n payment_form = None\n if not order.is_pre_authorized():\n payment_form = PaymentMethodsForm(form_data)\n # FIXME: redirect if there is only one payment method\n if payment_form.is_valid():\n payment_method = payment_form.cleaned_data['method']\n return redirect('order:payment', token=order.token,\n variant=payment_method)\n return TemplateResponse(request, 'order/payment.html',\n {'order': order, 'groups': groups,\n 'payment_form': payment_form,\n 'waiting_payment': waiting_payment,\n 'waiting_payment_form': waiting_payment_form,\n 'payments': payments})\n\n\n@check_order_status\ndef start_payment(request, order, variant):\n waiting_payments = order.payments.filter(status='waiting').exists()\n if waiting_payments:\n return redirect('order:payment', token=order.token)\n billing = order.billing_address\n total = order.get_total()\n defaults = {'total': total.gross,\n 'tax': total.tax, 'currency': total.currency,\n 'delivery': order.get_delivery_total().gross,\n 'billing_first_name': billing.first_name,\n 'billing_last_name': billing.last_name,\n 'billing_address_1': billing.street_address_1,\n 'billing_address_2': billing.street_address_2,\n 'billing_city': billing.city,\n 'billing_postcode': billing.postal_code,\n 'billing_country_code': billing.country,\n 'billing_email': order.user_email,\n 'description': _('Order %(order_number)s') % {\n 'order_number': order},\n 'billing_country_area': billing.country_area,\n 
'customer_ip_address': get_client_ip(request)}\n variant_choices = settings.CHECKOUT_PAYMENT_CHOICES\n if variant not in [code for code, dummy_name in variant_choices]:\n raise Http404('%r is not a valid payment variant' % (variant,))\n with transaction.atomic():\n order.change_status('payment-pending')\n payment, dummy_created = Payment.objects.get_or_create(\n variant=variant, status='waiting', order=order, defaults=defaults)\n try:\n form = payment.get_form(data=request.POST or None)\n except RedirectNeeded as redirect_to:\n return redirect(str(redirect_to))\n except Exception:\n logger.exception('Error communicating with the payment gateway')\n messages.error(\n request,\n _('Oops, it looks like we were unable to contact the selected'\n ' payment service'))\n payment.change_status('error')\n return redirect('order:payment', token=order.token)\n template = 'order/payment/%s.html' % variant\n return TemplateResponse(request, [template, 'order/payment/default.html'],\n {'form': form, 'payment': payment})\n\n\n@check_order_status\ndef cancel_payment(request, order):\n form = PaymentDeleteForm(request.POST or None, order=order)\n if form.is_valid():\n with transaction.atomic():\n form.save()\n return redirect('order:payment', token=order.token)\n return HttpResponseForbidden()\n\n\ndef create_password(request, token):\n if request.user.is_authenticated():\n return redirect('order:details', token=token)\n order = get_object_or_404(Order, token=token)\n email = order.user_email\n form_data = request.POST.copy()\n if form_data:\n form_data.update({'email': email})\n form = PasswordForm(form_data or None)\n\n if form.is_valid():\n user = form.save(request)\n order.user = user\n order.save(update_fields=['user'])\n password = form_data.get('password1')\n auth_user = auth.authenticate(email=email, password=password)\n if auth_user is not None:\n auth.login(request, auth_user)\n return redirect('order:details', token=token)\n ctx = {'form': form, 'email': email}\n return TemplateResponse(request, 'order/create_password.html', ctx)\n", "path": "saleor/order/views.py"}]}
| 2,026 | 125 |
gh_patches_debug_5719 | rasdani/github-patches | git_diff | NVIDIA-Merlin__NVTabular-1587 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] configure_tensorflow raises TypeError: expected string or bytes-like object in Horovod
**Describe the bug**
While trying to integrate Horovod's KerasEstimator w/ NVTabular, I'm seeing this error:
```
[1,0]<stderr>:/nvtabular/nvtabular/loader/tf_utils.py:53: UserWarning: TensorFlow runtime already initialized, may not be enough memory for cudf
[1,0]<stderr>: warnings.warn(
[1,0]<stderr>:Traceback (most recent call last):
[1,0]<stderr>: File "/nvtabular/nvtabular/loader/tf_utils.py", line 57, in configure_tensorflow
[1,0]<stderr>: tf.config.experimental.set_virtual_device_configuration(
[1,0]<stderr>: File "/opt/conda/envs/rapids/lib/python3.8/site-packages/tensorflow/python/framework/config.py", line 871, in set_logical_device_configuration
[1,0]<stderr>: context.context().set_logical_device_configuration(device, logical_devices)
[1,0]<stderr>: File "/opt/conda/envs/rapids/lib/python3.8/site-packages/tensorflow/python/eager/context.py", line 1629, in set_logical_device_configuration
[1,0]<stderr>: raise RuntimeError(
[1,0]<stderr>:RuntimeError: Virtual devices cannot be modified after being initialized[1,0]<stderr>:
[1,0]<stderr>:
[1,0]<stderr>:During handling of the above exception, another exception occurred:
[1,0]<stderr>:
[1,0]<stderr>:Traceback (most recent call last):
[1,0]<stderr>: File "/opt/conda/envs/rapids/lib/python3.8/runpy.py", line 194, in _run_module_as_main
[1,0]<stderr>: return _run_code(code, main_globals, None,
[1,0]<stderr>: File "/opt/conda/envs/rapids/lib/python3.8/runpy.py", line 87, in _run_code
[1,0]<stderr>: exec(code, run_globals)
[1,0]<stderr>: File "/horovod/horovod/spark/task/mpirun_exec_fn.py", line 52, in <module>
[1,0]<stderr>: main(codec.loads_base64(sys.argv[1]), codec.loads_base64(sys.argv[2]))
[1,0]<stderr>: File "/horovod/horovod/spark/task/mpirun_exec_fn.py", line 45, in main
[1,0]<stderr>: task_exec(driver_addresses, settings, 'OMPI_COMM_WORLD_RANK', 'OMPI_COMM_WORLD_LOCAL_RANK')
[1,0]<stderr>: File "/horovod/horovod/spark/task/__init__.py", line 61, in task_exec
[1,0]<stderr>: result = fn(*args, **kwargs)
[1,0]<stderr>: File "/horovod/horovod/spark/keras/remote.py", line 261, in train
[1,0]<stderr>: history = fit(model, dm, steps_per_epoch,
[1,0]<stderr>: File "/horovod/horovod/spark/keras/util.py", line 41, in fn
[1,0]<stderr>: train_data = data_module.train_data(train_reader)
[1,0]<stderr>: File "/horovod/horovod/spark/keras/datamodule.py", line 110, in train_data
[1,0]<stderr>: from nvtabular.loader.tensorflow import KerasSequenceLoader
[1,0]<stderr>: File "/nvtabular/nvtabular/loader/tensorflow.py", line 28, in <module>
[1,0]<stderr>: from_dlpack = configure_tensorflow()
[1,0]<stderr>: File "/nvtabular/nvtabular/loader/tf_utils.py", line 63, in configure_tensorflow
[1,0]<stderr>: warnings.warn(e)
[1,0]<stderr>:TypeError: expected string or bytes-like object
```
This is resulting in a fatal error instead of a non-fatal warning. My current workaround is to just use `warnings.warn(str(e))` here, although I'm not sure why the `RuntimeError` isn't automatically cast to string (when it normally would cast to string).
**Steps/Code to reproduce bug**
Unfortunately, since this involves integration with Horovod, the setup to reproduce is non-trivial.
**Expected behavior**
A non-fatal warning, and continued execution.
**Environment details (please complete the following information):**
- Environment location: Docker on Linux with single GPU.
- Method of NVTabular install: custom docker image based on `nvcr.io/nvidia/merlin/merlin-tensorflow-training:22.05`
</issue>
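As an aside on the failure quoted above, the snippet below is a minimal sketch of the `str(e)` workaround the reporter describes. The premise that an installed warning filter with a message pattern is what makes `warnings.warn(e)` fail (the filter regex-matches the message, which must be a string) is an inference from the traceback, not something verified against Horovod here:

```python
import warnings

# A message-pattern filter such as another library might install.
# With one of these active, warnings.warn(<exception object>) fails
# inside the filter's regex match.
warnings.filterwarnings("ignore", message="some unrelated pattern")

try:
    raise RuntimeError("Virtual devices cannot be modified after being initialized")
except RuntimeError as e:
    # warnings.warn(e) would raise "TypeError: expected string or bytes-like object";
    # converting the exception to a string first keeps it a non-fatal warning.
    warnings.warn(str(e))
```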
<code>
[start of nvtabular/loader/tf_utils.py]
1 #
2 # Copyright (c) 2021, NVIDIA CORPORATION.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16
17 import os
18 import warnings
19
20 import tensorflow as tf
21 from packaging import version
22 from tensorflow.python.feature_column import feature_column_v2 as fc
23
24 from merlin.core.dispatch import HAS_GPU
25 from merlin.core.utils import device_mem_size
26
27
28 def configure_tensorflow(memory_allocation=None, device=None):
29 total_gpu_mem_mb = device_mem_size(kind="total", cpu=(not HAS_GPU)) / (1024 ** 2)
30
31 if memory_allocation is None:
32 memory_allocation = os.environ.get("TF_MEMORY_ALLOCATION", 0.5)
33
34 if float(memory_allocation) < 1:
35 memory_allocation = total_gpu_mem_mb * float(memory_allocation)
36 memory_allocation = int(memory_allocation)
37 assert memory_allocation < total_gpu_mem_mb
38
39 # TODO: what will this look like in any sort
40 # of distributed set up?
41 if device is None:
42 device = int(os.environ.get("TF_VISIBLE_DEVICE", 0))
43 tf_devices = tf.config.list_physical_devices("GPU")
44 if HAS_GPU and len(tf_devices) == 0:
45 raise ImportError("TensorFlow is not configured for GPU")
46 if HAS_GPU:
47 try:
48 tf.config.set_logical_device_configuration(
49 tf_devices[device],
50 [tf.config.LogicalDeviceConfiguration(memory_limit=memory_allocation)],
51 )
52 except RuntimeError:
53 warnings.warn(
54 "TensorFlow runtime already initialized, may not be enough memory for cudf"
55 )
56 try:
57 tf.config.experimental.set_virtual_device_configuration(
58 tf_devices[device],
59 [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=memory_allocation)],
60 )
61 except RuntimeError as e:
62 # Virtual devices must be set before GPUs have been initialized
63 warnings.warn(e)
64
65 # versions using TF earlier than 2.3.0 need to use extension
66 # library for dlpack support to avoid memory leak issue
67 __TF_DLPACK_STABLE_VERSION = "2.3.0"
68 if version.parse(tf.__version__) < version.parse(__TF_DLPACK_STABLE_VERSION):
69 try:
70 from tfdlpack import from_dlpack
71 except ModuleNotFoundError as e:
72 message = "If using TensorFlow < 2.3.0, you must install tfdlpack-gpu extension library"
73 raise ModuleNotFoundError(message) from e
74
75 else:
76 from tensorflow.experimental.dlpack import from_dlpack
77
78 return from_dlpack
79
80
81 def _get_parents(column):
82 """
83 recursive function for finding the feature columns
84 that supply inputs for a given `column`. If there are
85 none, returns the column. Uses sets so is not
86 deterministic.
87 """
88 if isinstance(column.parents[0], str):
89 return set([column])
90 parents = set()
91 for parent in column.parents:
92 parents |= _get_parents(parent)
93 return parents
94
95
96 def get_dataset_schema_from_feature_columns(feature_columns):
97 """
98 maps from a list of TensorFlow `feature_column`s to
99 lists giving the categorical and continuous feature
100 names for a dataset. Useful for constructing NVTabular
101 Workflows from feature columns
102 """
103 base_columns = set()
104 for column in feature_columns:
105 base_columns |= _get_parents(column)
106
107 cat_names, cont_names = [], []
108 for column in base_columns:
109 if isinstance(column, fc.CategoricalColumn):
110 cat_names.append(column.name)
111 else:
112 cont_names.append(column.name)
113
114 cat_names = sorted(cat_names)
115 cont_names = sorted(cont_names)
116 return cat_names, cont_names
117
[end of nvtabular/loader/tf_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nvtabular/loader/tf_utils.py b/nvtabular/loader/tf_utils.py
--- a/nvtabular/loader/tf_utils.py
+++ b/nvtabular/loader/tf_utils.py
@@ -60,7 +60,7 @@
)
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
- warnings.warn(e)
+ warnings.warn(str(e))
# versions using TF earlier than 2.3.0 need to use extension
# library for dlpack support to avoid memory leak issue
|
{"golden_diff": "diff --git a/nvtabular/loader/tf_utils.py b/nvtabular/loader/tf_utils.py\n--- a/nvtabular/loader/tf_utils.py\n+++ b/nvtabular/loader/tf_utils.py\n@@ -60,7 +60,7 @@\n )\n except RuntimeError as e:\n # Virtual devices must be set before GPUs have been initialized\n- warnings.warn(e)\n+ warnings.warn(str(e))\n \n # versions using TF earlier than 2.3.0 need to use extension\n # library for dlpack support to avoid memory leak issue\n", "issue": "[BUG] configure_tensorflow raises TypeError: expected string or bytes-like object in Horovod\n**Describe the bug**\r\nWhile trying to integrate Horovod's KerasEstimator w/ NVTabular, I'm seeing this error:\r\n```\r\n[1,0]<stderr>:/nvtabular/nvtabular/loader/tf_utils.py:53: UserWarning: TensorFlow runtime already initialized, may not be enough memory for cudf\r\n[1,0]<stderr>: warnings.warn(\r\n[1,0]<stderr>:Traceback (most recent call last):\r\n[1,0]<stderr>: File \"/nvtabular/nvtabular/loader/tf_utils.py\", line 57, in configure_tensorflow\r\n[1,0]<stderr>: tf.config.experimental.set_virtual_device_configuration(\r\n[1,0]<stderr>: File \"/opt/conda/envs/rapids/lib/python3.8/site-packages/tensorflow/python/framework/config.py\", line 871, in set_logical_device_configuration\r\n[1,0]<stderr>: context.context().set_logical_device_configuration(device, logical_devices)\r\n[1,0]<stderr>: File \"/opt/conda/envs/rapids/lib/python3.8/site-packages/tensorflow/python/eager/context.py\", line 1629, in set_logical_device_configuration\r\n[1,0]<stderr>: raise RuntimeError(\r\n[1,0]<stderr>:RuntimeError: Virtual devices cannot be modified after being initialized[1,0]<stderr>:\r\n[1,0]<stderr>:\r\n[1,0]<stderr>:During handling of the above exception, another exception occurred:\r\n[1,0]<stderr>:\r\n[1,0]<stderr>:Traceback (most recent call last):\r\n[1,0]<stderr>: File \"/opt/conda/envs/rapids/lib/python3.8/runpy.py\", line 194, in _run_module_as_main\r\n[1,0]<stderr>: return _run_code(code, main_globals, None,\r\n[1,0]<stderr>: File \"/opt/conda/envs/rapids/lib/python3.8/runpy.py\", line 87, in _run_code\r\n[1,0]<stderr>: exec(code, run_globals)\r\n[1,0]<stderr>: File \"/horovod/horovod/spark/task/mpirun_exec_fn.py\", line 52, in <module>\r\n[1,0]<stderr>: main(codec.loads_base64(sys.argv[1]), codec.loads_base64(sys.argv[2]))\r\n[1,0]<stderr>: File \"/horovod/horovod/spark/task/mpirun_exec_fn.py\", line 45, in main\r\n[1,0]<stderr>: task_exec(driver_addresses, settings, 'OMPI_COMM_WORLD_RANK', 'OMPI_COMM_WORLD_LOCAL_RANK')\r\n[1,0]<stderr>: File \"/horovod/horovod/spark/task/__init__.py\", line 61, in task_exec\r\n[1,0]<stderr>: result = fn(*args, **kwargs)\r\n[1,0]<stderr>: File \"/horovod/horovod/spark/keras/remote.py\", line 261, in train\r\n[1,0]<stderr>: history = fit(model, dm, steps_per_epoch,\r\n[1,0]<stderr>: File \"/horovod/horovod/spark/keras/util.py\", line 41, in fn\r\n[1,0]<stderr>: train_data = data_module.train_data(train_reader)\r\n[1,0]<stderr>: File \"/horovod/horovod/spark/keras/datamodule.py\", line 110, in train_data\r\n[1,0]<stderr>: from nvtabular.loader.tensorflow import KerasSequenceLoader\r\n[1,0]<stderr>: File \"/nvtabular/nvtabular/loader/tensorflow.py\", line 28, in <module>\r\n[1,0]<stderr>: from_dlpack = configure_tensorflow()\r\n[1,0]<stderr>: File \"/nvtabular/nvtabular/loader/tf_utils.py\", line 63, in configure_tensorflow\r\n[1,0]<stderr>: warnings.warn(e)\r\n[1,0]<stderr>:TypeError: expected string or bytes-like object\r\n```\r\n\r\nThis is resulting in a fatal error instead of a non-fatal warning. 
My current workaround is to just use `warnings.warn(str(e))` here, although I'm not sure why the `RuntimeError` isn't automatically cast to string (when it normally would cast to string).\r\n\r\n**Steps/Code to reproduce bug**\r\nUnfortunately, since this involves integration with horovod, so the setup to reproduce is non-trivial.\r\n\r\n**Expected behavior**\r\nA non-fatal warning, and continued execution.\r\n\r\n**Environment details (please complete the following information):**\r\n - Environment location: Docker on Linux with single GPU.\r\n - Method of NVTabular install: custom docker image based on `nvcr.io/nvidia/merlin/merlin-tensorflow-training:22.05` \n", "before_files": [{"content": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport warnings\n\nimport tensorflow as tf\nfrom packaging import version\nfrom tensorflow.python.feature_column import feature_column_v2 as fc\n\nfrom merlin.core.dispatch import HAS_GPU\nfrom merlin.core.utils import device_mem_size\n\n\ndef configure_tensorflow(memory_allocation=None, device=None):\n total_gpu_mem_mb = device_mem_size(kind=\"total\", cpu=(not HAS_GPU)) / (1024 ** 2)\n\n if memory_allocation is None:\n memory_allocation = os.environ.get(\"TF_MEMORY_ALLOCATION\", 0.5)\n\n if float(memory_allocation) < 1:\n memory_allocation = total_gpu_mem_mb * float(memory_allocation)\n memory_allocation = int(memory_allocation)\n assert memory_allocation < total_gpu_mem_mb\n\n # TODO: what will this look like in any sort\n # of distributed set up?\n if device is None:\n device = int(os.environ.get(\"TF_VISIBLE_DEVICE\", 0))\n tf_devices = tf.config.list_physical_devices(\"GPU\")\n if HAS_GPU and len(tf_devices) == 0:\n raise ImportError(\"TensorFlow is not configured for GPU\")\n if HAS_GPU:\n try:\n tf.config.set_logical_device_configuration(\n tf_devices[device],\n [tf.config.LogicalDeviceConfiguration(memory_limit=memory_allocation)],\n )\n except RuntimeError:\n warnings.warn(\n \"TensorFlow runtime already initialized, may not be enough memory for cudf\"\n )\n try:\n tf.config.experimental.set_virtual_device_configuration(\n tf_devices[device],\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=memory_allocation)],\n )\n except RuntimeError as e:\n # Virtual devices must be set before GPUs have been initialized\n warnings.warn(e)\n\n # versions using TF earlier than 2.3.0 need to use extension\n # library for dlpack support to avoid memory leak issue\n __TF_DLPACK_STABLE_VERSION = \"2.3.0\"\n if version.parse(tf.__version__) < version.parse(__TF_DLPACK_STABLE_VERSION):\n try:\n from tfdlpack import from_dlpack\n except ModuleNotFoundError as e:\n message = \"If using TensorFlow < 2.3.0, you must install tfdlpack-gpu extension library\"\n raise ModuleNotFoundError(message) from e\n\n else:\n from tensorflow.experimental.dlpack import from_dlpack\n\n return from_dlpack\n\n\ndef _get_parents(column):\n \"\"\"\n recursive function for finding the feature columns\n that 
supply inputs for a given `column`. If there are\n none, returns the column. Uses sets so is not\n deterministic.\n \"\"\"\n if isinstance(column.parents[0], str):\n return set([column])\n parents = set()\n for parent in column.parents:\n parents |= _get_parents(parent)\n return parents\n\n\ndef get_dataset_schema_from_feature_columns(feature_columns):\n \"\"\"\n maps from a list of TensorFlow `feature_column`s to\n lists giving the categorical and continuous feature\n names for a dataset. Useful for constructing NVTabular\n Workflows from feature columns\n \"\"\"\n base_columns = set()\n for column in feature_columns:\n base_columns |= _get_parents(column)\n\n cat_names, cont_names = [], []\n for column in base_columns:\n if isinstance(column, fc.CategoricalColumn):\n cat_names.append(column.name)\n else:\n cont_names.append(column.name)\n\n cat_names = sorted(cat_names)\n cont_names = sorted(cont_names)\n return cat_names, cont_names\n", "path": "nvtabular/loader/tf_utils.py"}]}
| 2,759 | 126 |
gh_patches_debug_2865 | rasdani/github-patches | git_diff | rootpy__rootpy-511 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'TCanvas' object has no attribute 'name'
Hi,
I am seeing weird issues with the interactive module. It looks like the TCanvas is not 'decorated' when loading rootpy.interactive.
```
>>> from ROOT import *
>>> t = TCanvas()
>>> from rootpy.interactive import wait
/usr/local/lib/python2.7/site-packages/IPython/frontend.py:30: UserWarning: The top-level `frontend` package has been deprecated. All its subpackages have been moved to the top `IPython` level.
warn("The top-level `frontend` package has been deprecated. "
w>>> wait()
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/usr/local/Cellar/python/2.7.5/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/rootpy-dev-py2.7.egg/rootpy/interactive/rootwait.py", line 206, in wait_for_zero_canvases
log.debug("waiting for canvas {0} to close".format(canvas.name))
AttributeError: 'TCanvas' object has no attribute 'name'
```
Albert
</issue>
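For reference, the distinction the traceback points at can be sketched as follows. This assumes that a bare `ROOT.TCanvas` only exposes PyROOT accessors such as `GetName()`, while a canvas created through `rootpy.plotting.Canvas` is decorated with snake_case properties like `.name`, which is what the `AttributeError` above suggests:

```python
import ROOT
from rootpy.plotting import Canvas

raw_canvas = ROOT.TCanvas()  # plain PyROOT canvas, not decorated by rootpy
rp_canvas = Canvas()         # rootpy-decorated canvas

print(raw_canvas.GetName())  # works for both kinds of canvas
print(rp_canvas.name)        # snake_case property added by rootpy
# raw_canvas.name              -> AttributeError, as in the traceback above
```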
<code>
[start of rootpy/interactive/rootwait.py]
1 # Copyright 2012 the rootpy developers
2 # distributed under the terms of the GNU General Public License
3 """
4 The functions in this module provide a way of pausing code execution until
5 canvases are closed. This can be useful when testing code and you don't want to
6 keep the objects alive outside of your function.
7
8 The wait function can be called repeatedly to pause multiple times.
9
10 wait_for_zero_canvases()
11 Keeps root alive until CTRL-c is pressed or all canvases are closed
12
13 wait_for_zero_canvases(middle_mouse_close=True)
14 allows canvases to be closed with the middle mouse button (see below)
15
16 wait is shorthand for wait_for_zero_canvases
17
18 Examples
19 --------
20
21 from rootpy.plotting import Canvas
22 from rootpy.interactive import wait
23
24 c = Canvas()
25 c.Update()
26 wait()
27
28 c2 = Canvas()
29 c2.Update()
30 wait(True)
31 # This canvas can be killed by middle clicking on it or hitting
32 # escape whilst it has focus
33
34 """
35 from __future__ import absolute_import
36
37 import threading
38 from atexit import register
39
40 import ROOT
41
42 from . import log; log = log[__name__]
43 from ..defaults import extra_initialization
44 from ..memory.keepalive import keepalive
45 from .canvas_events import attach_event_handler
46
47 __all__ = [
48 'wait_for_zero_canvases',
49 'wait_for_browser_close',
50 'wait',
51 ]
52
53 _processRootEvents = None
54 _finishSchedule = None
55 __ACTIVE = False
56
57
58 @extra_initialization
59 def fetch_vars():
60 global _processRootEvents, _finishSchedule, __ACTIVE
61 PyGUIThread = getattr(ROOT, 'PyGUIThread', None)
62 if PyGUIThread is not None:
63 _processRootEvents = getattr(PyGUIThread, "_Thread__target", None)
64 _finishSchedule = getattr(PyGUIThread, "finishSchedule", None)
65 if _processRootEvents is None:
66 log.warning(
67 "unable to access ROOT's GUI thread either because "
68 "PyROOT's finalSetup() was called while in batch mode "
69 "or because PyROOT is using the new PyOS_InputHook "
70 "based mechanism that is not yet supported in rootpy "
71 "(PyConfig.StartGuiThread == 'inputhook' or "
72 "gSystem.InheritsFrom('TMacOSXSystem')). wait() etc. will "
73 "instead call raw_input() and wait for [Enter]")
74 else:
75 __ACTIVE = True
76
77
78 def wait_failover(caller):
79 if not ROOT.gROOT.IsBatch():
80 log.warning(
81 "{0} is failing over to raw_input()".format(caller.__name__))
82 raw_input("press [Enter] to continue")
83
84
85 def start_new_gui_thread():
86 """
87 Attempt to start a new GUI thread, if possible.
88
89 It is only possible to start one if there was one running on module import.
90 """
91 PyGUIThread = getattr(ROOT, 'PyGUIThread', None)
92
93 if PyGUIThread is not None:
94 assert not PyGUIThread.isAlive(), "GUI thread already running!"
95
96 assert _processRootEvents, (
97 "GUI thread wasn't started when rootwait was imported, "
98 "so it can't be restarted")
99
100 ROOT.keeppolling = 1
101 ROOT.PyGUIThread = threading.Thread(
102 None, _processRootEvents, None, (ROOT,))
103
104 ROOT.PyGUIThread.finishSchedule = _finishSchedule
105 ROOT.PyGUIThread.setDaemon(1)
106 ROOT.PyGUIThread.start()
107 log.debug("successfully started a new GUI thread")
108
109
110 def stop_gui_thread():
111 """
112 Try to stop the GUI thread. If it was running returns True,
113 otherwise False.
114 """
115 PyGUIThread = getattr(ROOT, 'PyGUIThread', None)
116
117 if PyGUIThread is None or not PyGUIThread.isAlive():
118 log.debug("no existing GUI thread is runnng")
119 return False
120
121 ROOT.keeppolling = 0
122 try:
123 PyGUIThread.finishSchedule()
124 except AttributeError:
125 log.debug("unable to call finishSchedule() on PyGUIThread")
126 pass
127 PyGUIThread.join()
128 log.debug("successfully stopped the existing GUI thread")
129 return True
130
131
132 def get_visible_canvases():
133 """
134 Return a list of active GUI canvases
135 (as opposed to invisible Batch canvases)
136 """
137 try:
138 return [c for c in ROOT.gROOT.GetListOfCanvases() if not c.IsBatch()]
139 except AttributeError:
140 # We might be exiting and ROOT.gROOT will raise an AttributeError
141 return []
142
143
144 def run_application_until_done():
145
146 had_gui_thread = stop_gui_thread()
147
148 ROOT.gApplication._threaded = True
149 ROOT.gApplication.Run(True)
150
151 if had_gui_thread:
152 start_new_gui_thread()
153
154
155 def dispatcher(f):
156 disp = ROOT.TPyDispatcher(f)
157 keepalive(disp, f)
158 return disp
159
160
161 def wait_for_zero_canvases(middle_mouse_close=False):
162 """
163 Wait for all canvases to be closed, or CTRL-c.
164
165 If `middle_mouse_close`, middle click will shut the canvas.
166
167 incpy.ignore
168 """
169 if not __ACTIVE:
170 wait_failover(wait_for_zero_canvases)
171 return
172
173 @dispatcher
174 def count_canvases():
175 """
176 Count the number of active canvases and finish gApplication.Run()
177 if there are none remaining.
178
179 incpy.ignore
180 """
181 if not get_visible_canvases():
182 try:
183 ROOT.gSystem.ExitLoop()
184 except AttributeError:
185 # We might be exiting and ROOT.gROOT will raise an AttributeError
186 pass
187
188 @dispatcher
189 def exit_application_loop():
190 """
191 Signal handler for CTRL-c to cause gApplication.Run() to finish.
192
193 incpy.ignore
194 """
195 ROOT.gSystem.ExitLoop()
196
197 # Handle CTRL-c
198 sh = ROOT.TSignalHandler(ROOT.kSigInterrupt, True)
199 sh.Add()
200 sh.Connect("Notified()", "TPyDispatcher",
201 exit_application_loop, "Dispatch()")
202
203 visible_canvases = get_visible_canvases()
204
205 for canvas in visible_canvases:
206 log.debug("waiting for canvas {0} to close".format(canvas.name))
207 canvas.Update()
208
209 if middle_mouse_close:
210 attach_event_handler(canvas)
211
212 if not getattr(canvas, "_py_close_dispatcher_attached", False):
213 # Attach a handler only once to each canvas
214 canvas._py_close_dispatcher_attached = True
215 canvas.Connect("Closed()", "TPyDispatcher",
216 count_canvases, "Dispatch()")
217 keepalive(canvas, count_canvases)
218
219 if visible_canvases and not ROOT.gROOT.IsBatch():
220 run_application_until_done()
221
222 # Disconnect from canvases
223 for canvas in visible_canvases:
224 if getattr(canvas, "_py_close_dispatcher_attached", False):
225 canvas._py_close_dispatcher_attached = False
226 canvas.Disconnect("Closed()", count_canvases, "Dispatch()")
227
228 wait = wait_for_zero_canvases
229
230
231 def wait_for_frame(frame):
232 """
233 wait until a TGMainFrame is closed or ctrl-c
234 """
235 if not frame:
236 # It's already closed or maybe we're in batch mode
237 return
238
239 @dispatcher
240 def close():
241 ROOT.gSystem.ExitLoop()
242
243 if not getattr(frame, "_py_close_dispatcher_attached", False):
244 frame._py_close_dispatcher_attached = True
245 frame.Connect("CloseWindow()", "TPyDispatcher", close, "Dispatch()")
246
247 @dispatcher
248 def exit_application_loop():
249 """
250 Signal handler for CTRL-c to cause gApplication.Run() to finish.
251
252 incpy.ignore
253 """
254 ROOT.gSystem.ExitLoop()
255
256 # Handle CTRL-c
257 sh = ROOT.TSignalHandler(ROOT.kSigInterrupt, True)
258 sh.Add()
259 sh.Connect("Notified()", "TPyDispatcher",
260 exit_application_loop, "Dispatch()")
261
262 if not ROOT.gROOT.IsBatch():
263 run_application_until_done()
264 # Need to disconnect to prevent close handler from running when python
265 # teardown has already commenced.
266 frame.Disconnect("CloseWindow()", close, "Dispatch()")
267
268
269 def wait_for_browser_close(b):
270 """
271 Can be used to wait until a TBrowser is closed
272 """
273 if b:
274 if not __ACTIVE:
275 wait_failover(wait_for_browser_close)
276 return
277 wait_for_frame(b.GetBrowserImp().GetMainFrame())
278
279
280 def prevent_close_with_canvases():
281 """
282 Register a handler which prevents python from exiting until
283 all canvases are closed
284 """
285 register(wait_for_zero_canvases)
286
[end of rootpy/interactive/rootwait.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/rootpy/interactive/rootwait.py b/rootpy/interactive/rootwait.py
--- a/rootpy/interactive/rootwait.py
+++ b/rootpy/interactive/rootwait.py
@@ -203,7 +203,7 @@
visible_canvases = get_visible_canvases()
for canvas in visible_canvases:
- log.debug("waiting for canvas {0} to close".format(canvas.name))
+ log.debug("waiting for canvas {0} to close".format(canvas.GetName()))
canvas.Update()
if middle_mouse_close:
|
{"golden_diff": "diff --git a/rootpy/interactive/rootwait.py b/rootpy/interactive/rootwait.py\n--- a/rootpy/interactive/rootwait.py\n+++ b/rootpy/interactive/rootwait.py\n@@ -203,7 +203,7 @@\n visible_canvases = get_visible_canvases()\n \n for canvas in visible_canvases:\n- log.debug(\"waiting for canvas {0} to close\".format(canvas.name))\n+ log.debug(\"waiting for canvas {0} to close\".format(canvas.GetName()))\n canvas.Update()\n \n if middle_mouse_close:\n", "issue": "'TCanvas' object has no attribute 'name'\nHi,\n\nI am seeing weird issues with the interactive module. It looks like the TCanvas is not 'decorated' when loading rootpy.interactive.\n\n```\n>>> from ROOT import *\n>>> t = TCanvas()\n>>> from rootpy.interactive import wait\n/usr/local/lib/python2.7/site-packages/IPython/frontend.py:30: UserWarning: The top-level `frontend` package has been deprecated. All its subpackages have been moved to the top `IPython` level.\n warn(\"The top-level `frontend` package has been deprecated. \"\nw>>> wait()\nTraceback (most recent call last):\n File \"<console>\", line 1, in <module>\n File \"/usr/local/Cellar/python/2.7.5/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/rootpy-dev-py2.7.egg/rootpy/interactive/rootwait.py\", line 206, in wait_for_zero_canvases\n log.debug(\"waiting for canvas {0} to close\".format(canvas.name))\nAttributeError: 'TCanvas' object has no attribute 'name'\n```\n\nAlbert\n\n", "before_files": [{"content": "# Copyright 2012 the rootpy developers\n# distributed under the terms of the GNU General Public License\n\"\"\"\nThe functions in this module provide a way of pausing code execution until\ncanvases are closed. This can be useful when testing code and you don't want to\nkeep the objects alive outside of your function.\n\nThe wait function can be called repeatedly to pause multiple times.\n\nwait_for_zero_canvases()\n Keeps root alive until CTRL-c is pressed or all canvases are closed\n\nwait_for_zero_canvases(middle_mouse_close=True)\n allows canvases to be closed with the middle mouse button (see below)\n\nwait is shorthand for wait_for_zero_canvases\n\nExamples\n--------\n\n from rootpy.plotting import Canvas\n from rootpy.interactive import wait\n\n c = Canvas()\n c.Update()\n wait()\n\n c2 = Canvas()\n c2.Update()\n wait(True)\n # This canvas can be killed by middle clicking on it or hitting\n # escape whilst it has focus\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport threading\nfrom atexit import register\n\nimport ROOT\n\nfrom . 
import log; log = log[__name__]\nfrom ..defaults import extra_initialization\nfrom ..memory.keepalive import keepalive\nfrom .canvas_events import attach_event_handler\n\n__all__ = [\n 'wait_for_zero_canvases',\n 'wait_for_browser_close',\n 'wait',\n]\n\n_processRootEvents = None\n_finishSchedule = None\n__ACTIVE = False\n\n\n@extra_initialization\ndef fetch_vars():\n global _processRootEvents, _finishSchedule, __ACTIVE\n PyGUIThread = getattr(ROOT, 'PyGUIThread', None)\n if PyGUIThread is not None:\n _processRootEvents = getattr(PyGUIThread, \"_Thread__target\", None)\n _finishSchedule = getattr(PyGUIThread, \"finishSchedule\", None)\n if _processRootEvents is None:\n log.warning(\n \"unable to access ROOT's GUI thread either because \"\n \"PyROOT's finalSetup() was called while in batch mode \"\n \"or because PyROOT is using the new PyOS_InputHook \"\n \"based mechanism that is not yet supported in rootpy \"\n \"(PyConfig.StartGuiThread == 'inputhook' or \"\n \"gSystem.InheritsFrom('TMacOSXSystem')). wait() etc. will \"\n \"instead call raw_input() and wait for [Enter]\")\n else:\n __ACTIVE = True\n\n\ndef wait_failover(caller):\n if not ROOT.gROOT.IsBatch():\n log.warning(\n \"{0} is failing over to raw_input()\".format(caller.__name__))\n raw_input(\"press [Enter] to continue\")\n\n\ndef start_new_gui_thread():\n \"\"\"\n Attempt to start a new GUI thread, if possible.\n\n It is only possible to start one if there was one running on module import.\n \"\"\"\n PyGUIThread = getattr(ROOT, 'PyGUIThread', None)\n\n if PyGUIThread is not None:\n assert not PyGUIThread.isAlive(), \"GUI thread already running!\"\n\n assert _processRootEvents, (\n \"GUI thread wasn't started when rootwait was imported, \"\n \"so it can't be restarted\")\n\n ROOT.keeppolling = 1\n ROOT.PyGUIThread = threading.Thread(\n None, _processRootEvents, None, (ROOT,))\n\n ROOT.PyGUIThread.finishSchedule = _finishSchedule\n ROOT.PyGUIThread.setDaemon(1)\n ROOT.PyGUIThread.start()\n log.debug(\"successfully started a new GUI thread\")\n\n\ndef stop_gui_thread():\n \"\"\"\n Try to stop the GUI thread. 
If it was running returns True,\n otherwise False.\n \"\"\"\n PyGUIThread = getattr(ROOT, 'PyGUIThread', None)\n\n if PyGUIThread is None or not PyGUIThread.isAlive():\n log.debug(\"no existing GUI thread is runnng\")\n return False\n\n ROOT.keeppolling = 0\n try:\n PyGUIThread.finishSchedule()\n except AttributeError:\n log.debug(\"unable to call finishSchedule() on PyGUIThread\")\n pass\n PyGUIThread.join()\n log.debug(\"successfully stopped the existing GUI thread\")\n return True\n\n\ndef get_visible_canvases():\n \"\"\"\n Return a list of active GUI canvases\n (as opposed to invisible Batch canvases)\n \"\"\"\n try:\n return [c for c in ROOT.gROOT.GetListOfCanvases() if not c.IsBatch()]\n except AttributeError:\n # We might be exiting and ROOT.gROOT will raise an AttributeError\n return []\n\n\ndef run_application_until_done():\n\n had_gui_thread = stop_gui_thread()\n\n ROOT.gApplication._threaded = True\n ROOT.gApplication.Run(True)\n\n if had_gui_thread:\n start_new_gui_thread()\n\n\ndef dispatcher(f):\n disp = ROOT.TPyDispatcher(f)\n keepalive(disp, f)\n return disp\n\n\ndef wait_for_zero_canvases(middle_mouse_close=False):\n \"\"\"\n Wait for all canvases to be closed, or CTRL-c.\n\n If `middle_mouse_close`, middle click will shut the canvas.\n\n incpy.ignore\n \"\"\"\n if not __ACTIVE:\n wait_failover(wait_for_zero_canvases)\n return\n\n @dispatcher\n def count_canvases():\n \"\"\"\n Count the number of active canvases and finish gApplication.Run()\n if there are none remaining.\n\n incpy.ignore\n \"\"\"\n if not get_visible_canvases():\n try:\n ROOT.gSystem.ExitLoop()\n except AttributeError:\n # We might be exiting and ROOT.gROOT will raise an AttributeError\n pass\n\n @dispatcher\n def exit_application_loop():\n \"\"\"\n Signal handler for CTRL-c to cause gApplication.Run() to finish.\n\n incpy.ignore\n \"\"\"\n ROOT.gSystem.ExitLoop()\n\n # Handle CTRL-c\n sh = ROOT.TSignalHandler(ROOT.kSigInterrupt, True)\n sh.Add()\n sh.Connect(\"Notified()\", \"TPyDispatcher\",\n exit_application_loop, \"Dispatch()\")\n\n visible_canvases = get_visible_canvases()\n\n for canvas in visible_canvases:\n log.debug(\"waiting for canvas {0} to close\".format(canvas.name))\n canvas.Update()\n\n if middle_mouse_close:\n attach_event_handler(canvas)\n\n if not getattr(canvas, \"_py_close_dispatcher_attached\", False):\n # Attach a handler only once to each canvas\n canvas._py_close_dispatcher_attached = True\n canvas.Connect(\"Closed()\", \"TPyDispatcher\",\n count_canvases, \"Dispatch()\")\n keepalive(canvas, count_canvases)\n\n if visible_canvases and not ROOT.gROOT.IsBatch():\n run_application_until_done()\n\n # Disconnect from canvases\n for canvas in visible_canvases:\n if getattr(canvas, \"_py_close_dispatcher_attached\", False):\n canvas._py_close_dispatcher_attached = False\n canvas.Disconnect(\"Closed()\", count_canvases, \"Dispatch()\")\n\nwait = wait_for_zero_canvases\n\n\ndef wait_for_frame(frame):\n \"\"\"\n wait until a TGMainFrame is closed or ctrl-c\n \"\"\"\n if not frame:\n # It's already closed or maybe we're in batch mode\n return\n\n @dispatcher\n def close():\n ROOT.gSystem.ExitLoop()\n\n if not getattr(frame, \"_py_close_dispatcher_attached\", False):\n frame._py_close_dispatcher_attached = True\n frame.Connect(\"CloseWindow()\", \"TPyDispatcher\", close, \"Dispatch()\")\n\n @dispatcher\n def exit_application_loop():\n \"\"\"\n Signal handler for CTRL-c to cause gApplication.Run() to finish.\n\n incpy.ignore\n \"\"\"\n ROOT.gSystem.ExitLoop()\n\n # Handle CTRL-c\n sh = 
ROOT.TSignalHandler(ROOT.kSigInterrupt, True)\n sh.Add()\n sh.Connect(\"Notified()\", \"TPyDispatcher\",\n exit_application_loop, \"Dispatch()\")\n\n if not ROOT.gROOT.IsBatch():\n run_application_until_done()\n # Need to disconnect to prevent close handler from running when python\n # teardown has already commenced.\n frame.Disconnect(\"CloseWindow()\", close, \"Dispatch()\")\n\n\ndef wait_for_browser_close(b):\n \"\"\"\n Can be used to wait until a TBrowser is closed\n \"\"\"\n if b:\n if not __ACTIVE:\n wait_failover(wait_for_browser_close)\n return\n wait_for_frame(b.GetBrowserImp().GetMainFrame())\n\n\ndef prevent_close_with_canvases():\n \"\"\"\n Register a handler which prevents python from exiting until\n all canvases are closed\n \"\"\"\n register(wait_for_zero_canvases)\n", "path": "rootpy/interactive/rootwait.py"}]}
| 3,437 | 123 |
gh_patches_debug_15822 | rasdani/github-patches | git_diff | goauthentik__authentik-7315 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Login fails at GET /api/v3/flows/executor/default-authentication-flow/
**Describe the bug**
Authentication fails. I've tried my own auth flow, which includes a passwordless option. This log is from switching back to the default auth flow.
**To Reproduce**
Steps to reproduce the behavior:
1. Enter Username
2. Enter Password
3. Press Enter
4. See error
**Expected behavior**
This stage would normally be to select the authenticator to use. (Key or TOTP, in my case)
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Logs**
<details>
<summary>Stacktrace from authentik</summary>
```
Traceback (most recent call last):
File "/authentik/flows/views/executor.py", line 287, in get
stage_response = self.current_stage_view.dispatch(request)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/django/views/generic/base.py", line 143, in dispatch
return handler(request, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/stages/authenticator_validate/stage.py", line 222, in get
challenges = self.get_device_challenges()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/stages/authenticator_validate/stage.py", line 157, in get_device_challenges
user_devices = list(devices_for_user(self.get_pending_user()))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/stages/authenticator/__init__.py", line 93, in devices_for_user
yield from device_set
File "/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py", line 398, in __iter__
self._fetch_all()
File "/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py", line 1881, in _fetch_all
self._result_cache = list(self._iterable_class(self))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py", line 91, in __iter__
results = compiler.execute_sql(
^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/django/db/models/sql/compiler.py", line 1562, in execute_sql
cursor.execute(sql, params)
File "/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py", line 67, in execute
return self._execute_with_wrappers(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py", line 80, in _execute_with_wrappers
return executor(sql, params, many, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py", line 84, in _execute
with self.db.wrap_database_errors:
File "/ak-root/venv/lib/python3.11/site-packages/django/db/utils.py", line 91, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py", line 89, in _execute
return self.cursor.execute(sql, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/django_prometheus/db/common.py", line 69, in execute
return super().execute(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/psycopg/cursor.py", line 737, in execute
raise ex.with_traceback(None)
django.db.utils.ProgrammingError: relation "authentik_stages_authenticator_static_staticdevice" does not exist
LINE 1: ...tic_staticdevice"."throttling_failure_count" FROM "authentik...
^
```
</details>
**Version and Deployment (please complete the following information):**
- authentik version: gh-next as of 10/21/2023
- Deployment: docker-compose
**Additional context**
Add any other context about the problem here.
</issue>
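Before touching the migration itself, it can help to confirm which rename state the schema is actually in; a diagnostic sketch (assuming direct psycopg access to the authentik database — the DSN below is a placeholder):

```python
import psycopg

# Placeholder DSN -- substitute the real authentik database credentials.
with psycopg.connect("dbname=authentik user=authentik") as conn:
    with conn.cursor() as cur:
        cur.execute(
            "SELECT table_name FROM information_schema.tables "
            "WHERE table_name IN ('otp_static_staticdevice', "
            "'authentik_stages_authenticator_static_staticdevice')"
        )
        # Exactly one of the two names should be present; neither means the
        # OTP tables were never created in this database.
        print(cur.fetchall())
```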
<code>
[start of lifecycle/system_migrations/otp_merge.py]
1 # flake8: noqa
2 from lifecycle.migrate import BaseMigration
3
4 SQL_STATEMENT = """
5 DELETE FROM django_migrations WHERE app = 'otp_static';
6 DELETE FROM django_migrations WHERE app = 'otp_totp';
7 -- Rename tables (static)
8 ALTER TABLE otp_static_staticdevice RENAME TO authentik_stages_authenticator_static_staticdevice;
9 ALTER TABLE otp_static_statictoken RENAME TO authentik_stages_authenticator_static_statictoken;
10 ALTER SEQUENCE otp_static_statictoken_id_seq RENAME TO authentik_stages_authenticator_static_statictoken_id_seq;
11 ALTER SEQUENCE otp_static_staticdevice_id_seq RENAME TO authentik_stages_authenticator_static_staticdevice_id_seq;
12 -- Rename tables (totp)
13 ALTER TABLE otp_totp_totpdevice RENAME TO authentik_stages_authenticator_totp_totpdevice;
14 ALTER SEQUENCE otp_totp_totpdevice_id_seq RENAME TO authentik_stages_authenticator_totp_totpdevice_id_seq;
15 """
16
17
18 class Migration(BaseMigration):
19 def needs_migration(self) -> bool:
20 self.cur.execute(
21 "select * from information_schema.tables WHERE table_name='otp_static_staticdevice'"
22 )
23 return bool(self.cur.rowcount)
24
25 def run(self):
26 self.cur.execute(SQL_STATEMENT)
27 self.fake_migration(
28 (
29 "authentik_stages_authenticator_static",
30 "0008_initial",
31 ),
32 (
33 "authentik_stages_authenticator_static",
34 "0009_throttling",
35 ),
36 (
37 "authentik_stages_authenticator_totp",
38 "0008_initial",
39 ),
40 (
41 "authentik_stages_authenticator_totp",
42 "0009_auto_20190420_0723",
43 ),
44 )
45
[end of lifecycle/system_migrations/otp_merge.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lifecycle/system_migrations/otp_merge.py b/lifecycle/system_migrations/otp_merge.py
--- a/lifecycle/system_migrations/otp_merge.py
+++ b/lifecycle/system_migrations/otp_merge.py
@@ -2,6 +2,7 @@
from lifecycle.migrate import BaseMigration
SQL_STATEMENT = """
+BEGIN TRANSACTION;
DELETE FROM django_migrations WHERE app = 'otp_static';
DELETE FROM django_migrations WHERE app = 'otp_totp';
-- Rename tables (static)
@@ -12,6 +13,7 @@
-- Rename tables (totp)
ALTER TABLE otp_totp_totpdevice RENAME TO authentik_stages_authenticator_totp_totpdevice;
ALTER SEQUENCE otp_totp_totpdevice_id_seq RENAME TO authentik_stages_authenticator_totp_totpdevice_id_seq;
+COMMIT;
"""
|
{"golden_diff": "diff --git a/lifecycle/system_migrations/otp_merge.py b/lifecycle/system_migrations/otp_merge.py\n--- a/lifecycle/system_migrations/otp_merge.py\n+++ b/lifecycle/system_migrations/otp_merge.py\n@@ -2,6 +2,7 @@\n from lifecycle.migrate import BaseMigration\n \n SQL_STATEMENT = \"\"\"\n+BEGIN TRANSACTION;\n DELETE FROM django_migrations WHERE app = 'otp_static';\n DELETE FROM django_migrations WHERE app = 'otp_totp';\n -- Rename tables (static)\n@@ -12,6 +13,7 @@\n -- Rename tables (totp)\n ALTER TABLE otp_totp_totpdevice RENAME TO authentik_stages_authenticator_totp_totpdevice;\n ALTER SEQUENCE otp_totp_totpdevice_id_seq RENAME TO authentik_stages_authenticator_totp_totpdevice_id_seq;\n+COMMIT;\n \"\"\"\n", "issue": "Login fails at GET /api/v3/flows/executor/default-authentication-flow/ \n**Describe the bug**\r\nAuthentication fails. I've tried my own auth flow, which includes a passwordless option. This log is from switching back to the default auth flow.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Enter Username\r\n2. Enter Password\r\n3. Press Enter\r\n4. See error\r\n\r\n**Expected behavior**\r\nThis stage would normally be to select the authenticator to use. (Key or TOTP, in my case)\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Logs**\r\n<details>\r\n <summary>Stacktrace from authentik</summary>\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/authentik/flows/views/executor.py\", line 287, in get\r\n stage_response = self.current_stage_view.dispatch(request)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/views/generic/base.py\", line 143, in dispatch\r\n return handler(request, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/stages/authenticator_validate/stage.py\", line 222, in get\r\n challenges = self.get_device_challenges()\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/stages/authenticator_validate/stage.py\", line 157, in get_device_challenges\r\n user_devices = list(devices_for_user(self.get_pending_user()))\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/stages/authenticator/__init__.py\", line 93, in devices_for_user\r\n yield from device_set\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py\", line 398, in __iter__\r\n self._fetch_all()\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py\", line 1881, in _fetch_all\r\n self._result_cache = list(self._iterable_class(self))\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py\", line 91, in __iter__\r\n results = compiler.execute_sql(\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/models/sql/compiler.py\", line 1562, in execute_sql\r\n cursor.execute(sql, params)\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py\", line 67, in execute\r\n return self._execute_with_wrappers(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py\", line 80, in _execute_with_wrappers\r\n return executor(sql, params, many, context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py\", line 84, in _execute\r\n with self.db.wrap_database_errors:\r\n File 
\"/ak-root/venv/lib/python3.11/site-packages/django/db/utils.py\", line 91, in __exit__\r\n raise dj_exc_value.with_traceback(traceback) from exc_value\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py\", line 89, in _execute\r\n return self.cursor.execute(sql, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django_prometheus/db/common.py\", line 69, in execute\r\n return super().execute(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/psycopg/cursor.py\", line 737, in execute\r\n raise ex.with_traceback(None)\r\ndjango.db.utils.ProgrammingError: relation \"authentik_stages_authenticator_static_staticdevice\" does not exist\r\nLINE 1: ...tic_staticdevice\".\"throttling_failure_count\" FROM \"authentik...\r\n ^\r\n```\r\n</details>\r\n\r\n\r\n**Version and Deployment (please complete the following information):**\r\n- authentik version: gh-next as of 10/21/2023\r\n- Deployment: docker-compose\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n \n", "before_files": [{"content": "# flake8: noqa\nfrom lifecycle.migrate import BaseMigration\n\nSQL_STATEMENT = \"\"\"\nDELETE FROM django_migrations WHERE app = 'otp_static';\nDELETE FROM django_migrations WHERE app = 'otp_totp';\n-- Rename tables (static)\nALTER TABLE otp_static_staticdevice RENAME TO authentik_stages_authenticator_static_staticdevice;\nALTER TABLE otp_static_statictoken RENAME TO authentik_stages_authenticator_static_statictoken;\nALTER SEQUENCE otp_static_statictoken_id_seq RENAME TO authentik_stages_authenticator_static_statictoken_id_seq;\nALTER SEQUENCE otp_static_staticdevice_id_seq RENAME TO authentik_stages_authenticator_static_staticdevice_id_seq;\n-- Rename tables (totp)\nALTER TABLE otp_totp_totpdevice RENAME TO authentik_stages_authenticator_totp_totpdevice;\nALTER SEQUENCE otp_totp_totpdevice_id_seq RENAME TO authentik_stages_authenticator_totp_totpdevice_id_seq;\n\"\"\"\n\n\nclass Migration(BaseMigration):\n def needs_migration(self) -> bool:\n self.cur.execute(\n \"select * from information_schema.tables WHERE table_name='otp_static_staticdevice'\"\n )\n return bool(self.cur.rowcount)\n\n def run(self):\n self.cur.execute(SQL_STATEMENT)\n self.fake_migration(\n (\n \"authentik_stages_authenticator_static\",\n \"0008_initial\",\n ),\n (\n \"authentik_stages_authenticator_static\",\n \"0009_throttling\",\n ),\n (\n \"authentik_stages_authenticator_totp\",\n \"0008_initial\",\n ),\n (\n \"authentik_stages_authenticator_totp\",\n \"0009_auto_20190420_0723\",\n ),\n )\n", "path": "lifecycle/system_migrations/otp_merge.py"}]}
| 2,038 | 183 |
gh_patches_debug_18048 | rasdani/github-patches | git_diff | Textualize__textual-2747 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
docs: `Message` not exported from `textual.messages`
Just something minor I spotted while playing with the [Messages up](https://textual.textualize.io/guide/widgets/#messages-up) example in the docs. The code still works but my editor complains that:
> "Message" is not exported from module "textual.messages"
> Import from "textual.message" instead [reportPrivateImportUsage]
Happy to submit a quick PR if deemed worth fixing!
</issue>
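The fix is exactly what the editor hint suggests — import from the singular `textual.message` module. A minimal sketch of the corrected pattern from the docs example (assuming a current Textual release):

```python
from textual.message import Message  # not textual.messages


class BitChanged(Message):
    """Custom message posted by a compound widget, as in the docs example."""

    def __init__(self, bit: int, value: bool) -> None:
        super().__init__()
        self.bit = bit
        self.value = value
```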
<code>
[start of docs/examples/guide/compound/byte02.py]
1 from __future__ import annotations
2
3 from textual.app import App, ComposeResult
4 from textual.containers import Container
5 from textual.messages import Message
6 from textual.reactive import reactive
7 from textual.widget import Widget
8 from textual.widgets import Input, Label, Switch
9
10
11 class BitSwitch(Widget):
12 """A Switch with a numeric label above it."""
13
14 DEFAULT_CSS = """
15 BitSwitch {
16 layout: vertical;
17 width: auto;
18 height: auto;
19 }
20 BitSwitch > Label {
21 text-align: center;
22 width: 100%;
23 }
24 """
25
26 class BitChanged(Message):
27 """Sent when the 'bit' changes."""
28
29 def __init__(self, bit: int, value: bool) -> None:
30 super().__init__()
31 self.bit = bit
32 self.value = value
33
34 value = reactive(0) # (1)!
35
36 def __init__(self, bit: int) -> None:
37 self.bit = bit
38 super().__init__()
39
40 def compose(self) -> ComposeResult:
41 yield Label(str(self.bit))
42 yield Switch()
43
44 def on_switch_changed(self, event: Switch.Changed) -> None: # (2)!
45 """When the switch changes, notify the parent via a message."""
46 event.stop() # (3)!
47 self.value = event.value # (4)!
48 self.post_message(self.BitChanged(self.bit, event.value))
49
50
51 class ByteInput(Widget):
52 """A compound widget with 8 switches."""
53
54 DEFAULT_CSS = """
55 ByteInput {
56 width: auto;
57 height: auto;
58 border: blank;
59 layout: horizontal;
60 }
61 ByteInput:focus-within {
62 border: heavy $secondary;
63 }
64 """
65
66 def compose(self) -> ComposeResult:
67 for bit in reversed(range(8)):
68 yield BitSwitch(bit)
69
70
71 class ByteEditor(Widget):
72 DEFAULT_CSS = """
73 ByteEditor > Container {
74 height: 1fr;
75 align: center middle;
76 }
77 ByteEditor > Container.top {
78 background: $boost;
79 }
80 ByteEditor Input {
81 width: 16;
82 }
83 """
84
85 def compose(self) -> ComposeResult:
86 with Container(classes="top"):
87 yield Input(placeholder="byte")
88 with Container():
89 yield ByteInput()
90
91 def on_bit_switch_bit_changed(self, event: BitSwitch.BitChanged) -> None:
92 """When a switch changes, update the value."""
93 value = 0
94 for switch in self.query(BitSwitch):
95 value |= switch.value << switch.bit
96 self.query_one(Input).value = str(value)
97
98
99 class ByteInputApp(App):
100 def compose(self) -> ComposeResult:
101 yield ByteEditor()
102
103
104 if __name__ == "__main__":
105 app = ByteInputApp()
106 app.run()
107
[end of docs/examples/guide/compound/byte02.py]
[start of docs/examples/guide/compound/byte03.py]
1 from __future__ import annotations
2
3 from textual.app import App, ComposeResult
4 from textual.containers import Container
5 from textual.geometry import clamp
6 from textual.messages import Message
7 from textual.reactive import reactive
8 from textual.widget import Widget
9 from textual.widgets import Input, Label, Switch
10
11
12 class BitSwitch(Widget):
13 """A Switch with a numeric label above it."""
14
15 DEFAULT_CSS = """
16 BitSwitch {
17 layout: vertical;
18 width: auto;
19 height: auto;
20 }
21 BitSwitch > Label {
22 text-align: center;
23 width: 100%;
24 }
25 """
26
27 class BitChanged(Message):
28 """Sent when the 'bit' changes."""
29
30 def __init__(self, bit: int, value: bool) -> None:
31 super().__init__()
32 self.bit = bit
33 self.value = value
34
35 value = reactive(0)
36
37 def __init__(self, bit: int) -> None:
38 self.bit = bit
39 super().__init__()
40
41 def compose(self) -> ComposeResult:
42 yield Label(str(self.bit))
43 yield Switch()
44
45 def watch_value(self, value: bool) -> None: # (1)!
46 """When the value changes we want to set the switch accordingly."""
47 self.query_one(Switch).value = value
48
49 def on_switch_changed(self, event: Switch.Changed) -> None:
50 """When the switch changes, notify the parent via a message."""
51 event.stop()
52 self.value = event.value
53 self.post_message(self.BitChanged(self.bit, event.value))
54
55
56 class ByteInput(Widget):
57 """A compound widget with 8 switches."""
58
59 DEFAULT_CSS = """
60 ByteInput {
61 width: auto;
62 height: auto;
63 border: blank;
64 layout: horizontal;
65 }
66 ByteInput:focus-within {
67 border: heavy $secondary;
68 }
69 """
70
71 def compose(self) -> ComposeResult:
72 for bit in reversed(range(8)):
73 yield BitSwitch(bit)
74
75
76 class ByteEditor(Widget):
77 DEFAULT_CSS = """
78 ByteEditor > Container {
79 height: 1fr;
80 align: center middle;
81 }
82 ByteEditor > Container.top {
83 background: $boost;
84 }
85 ByteEditor Input {
86 width: 16;
87 }
88 """
89
90 value = reactive(0)
91
92 def validate_value(self, value: int) -> int: # (2)!
93 """Ensure value is between 0 and 255."""
94 return clamp(value, 0, 255)
95
96 def compose(self) -> ComposeResult:
97 with Container(classes="top"):
98 yield Input(placeholder="byte")
99 with Container():
100 yield ByteInput()
101
102 def on_bit_switch_bit_changed(self, event: BitSwitch.BitChanged) -> None:
103 """When a switch changes, update the value."""
104 value = 0
105 for switch in self.query(BitSwitch):
106 value |= switch.value << switch.bit
107 self.query_one(Input).value = str(value)
108
109 def on_input_changed(self, event: Input.Changed) -> None: # (3)!
110 """When the text changes, set the value of the byte."""
111 try:
112 self.value = int(event.value or "0")
113 except ValueError:
114 pass
115
116 def watch_value(self, value: int) -> None: # (4)!
117 """When self.value changes, update switches."""
118 for switch in self.query(BitSwitch):
119 with switch.prevent(BitSwitch.BitChanged): # (5)!
120 switch.value = bool(value & (1 << switch.bit)) # (6)!
121
122
123 class ByteInputApp(App):
124 def compose(self) -> ComposeResult:
125 yield ByteEditor()
126
127
128 if __name__ == "__main__":
129 app = ByteInputApp()
130 app.run()
131
[end of docs/examples/guide/compound/byte03.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/examples/guide/compound/byte02.py b/docs/examples/guide/compound/byte02.py
--- a/docs/examples/guide/compound/byte02.py
+++ b/docs/examples/guide/compound/byte02.py
@@ -2,7 +2,7 @@
from textual.app import App, ComposeResult
from textual.containers import Container
-from textual.messages import Message
+from textual.message import Message
from textual.reactive import reactive
from textual.widget import Widget
from textual.widgets import Input, Label, Switch
diff --git a/docs/examples/guide/compound/byte03.py b/docs/examples/guide/compound/byte03.py
--- a/docs/examples/guide/compound/byte03.py
+++ b/docs/examples/guide/compound/byte03.py
@@ -3,7 +3,7 @@
from textual.app import App, ComposeResult
from textual.containers import Container
from textual.geometry import clamp
-from textual.messages import Message
+from textual.message import Message
from textual.reactive import reactive
from textual.widget import Widget
from textual.widgets import Input, Label, Switch
|
{"golden_diff": "diff --git a/docs/examples/guide/compound/byte02.py b/docs/examples/guide/compound/byte02.py\n--- a/docs/examples/guide/compound/byte02.py\n+++ b/docs/examples/guide/compound/byte02.py\n@@ -2,7 +2,7 @@\n \n from textual.app import App, ComposeResult\n from textual.containers import Container\n-from textual.messages import Message\n+from textual.message import Message\n from textual.reactive import reactive\n from textual.widget import Widget\n from textual.widgets import Input, Label, Switch\ndiff --git a/docs/examples/guide/compound/byte03.py b/docs/examples/guide/compound/byte03.py\n--- a/docs/examples/guide/compound/byte03.py\n+++ b/docs/examples/guide/compound/byte03.py\n@@ -3,7 +3,7 @@\n from textual.app import App, ComposeResult\n from textual.containers import Container\n from textual.geometry import clamp\n-from textual.messages import Message\n+from textual.message import Message\n from textual.reactive import reactive\n from textual.widget import Widget\n from textual.widgets import Input, Label, Switch\n", "issue": "docs: `Message` not exported from `textual.messages`\nJust something minor I spotted while playing with the [Messages up](https://textual.textualize.io/guide/widgets/#messages-up) example in the docs. The code still works but my editor complains that:\r\n\r\n> \"Message\" is not exported from module \"textual.messages\"\r\n> Import from \"textual.message\" instead [reportPrivateImportUsage]\r\n\r\nHappy to submit a quick PR if deemed worth fixing!\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom textual.app import App, ComposeResult\nfrom textual.containers import Container\nfrom textual.messages import Message\nfrom textual.reactive import reactive\nfrom textual.widget import Widget\nfrom textual.widgets import Input, Label, Switch\n\n\nclass BitSwitch(Widget):\n \"\"\"A Switch with a numeric label above it.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n BitSwitch {\n layout: vertical;\n width: auto;\n height: auto;\n }\n BitSwitch > Label {\n text-align: center;\n width: 100%;\n }\n \"\"\"\n\n class BitChanged(Message):\n \"\"\"Sent when the 'bit' changes.\"\"\"\n\n def __init__(self, bit: int, value: bool) -> None:\n super().__init__()\n self.bit = bit\n self.value = value\n\n value = reactive(0) # (1)!\n\n def __init__(self, bit: int) -> None:\n self.bit = bit\n super().__init__()\n\n def compose(self) -> ComposeResult:\n yield Label(str(self.bit))\n yield Switch()\n\n def on_switch_changed(self, event: Switch.Changed) -> None: # (2)!\n \"\"\"When the switch changes, notify the parent via a message.\"\"\"\n event.stop() # (3)!\n self.value = event.value # (4)!\n self.post_message(self.BitChanged(self.bit, event.value))\n\n\nclass ByteInput(Widget):\n \"\"\"A compound widget with 8 switches.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n ByteInput {\n width: auto;\n height: auto;\n border: blank;\n layout: horizontal;\n }\n ByteInput:focus-within {\n border: heavy $secondary;\n }\n \"\"\"\n\n def compose(self) -> ComposeResult:\n for bit in reversed(range(8)):\n yield BitSwitch(bit)\n\n\nclass ByteEditor(Widget):\n DEFAULT_CSS = \"\"\"\n ByteEditor > Container {\n height: 1fr;\n align: center middle;\n }\n ByteEditor > Container.top {\n background: $boost;\n }\n ByteEditor Input {\n width: 16;\n }\n \"\"\"\n\n def compose(self) -> ComposeResult:\n with Container(classes=\"top\"):\n yield Input(placeholder=\"byte\")\n with Container():\n yield ByteInput()\n\n def on_bit_switch_bit_changed(self, event: BitSwitch.BitChanged) -> None:\n \"\"\"When a 
switch changes, update the value.\"\"\"\n value = 0\n for switch in self.query(BitSwitch):\n value |= switch.value << switch.bit\n self.query_one(Input).value = str(value)\n\n\nclass ByteInputApp(App):\n def compose(self) -> ComposeResult:\n yield ByteEditor()\n\n\nif __name__ == \"__main__\":\n app = ByteInputApp()\n app.run()\n", "path": "docs/examples/guide/compound/byte02.py"}, {"content": "from __future__ import annotations\n\nfrom textual.app import App, ComposeResult\nfrom textual.containers import Container\nfrom textual.geometry import clamp\nfrom textual.messages import Message\nfrom textual.reactive import reactive\nfrom textual.widget import Widget\nfrom textual.widgets import Input, Label, Switch\n\n\nclass BitSwitch(Widget):\n \"\"\"A Switch with a numeric label above it.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n BitSwitch {\n layout: vertical;\n width: auto;\n height: auto;\n }\n BitSwitch > Label {\n text-align: center;\n width: 100%;\n }\n \"\"\"\n\n class BitChanged(Message):\n \"\"\"Sent when the 'bit' changes.\"\"\"\n\n def __init__(self, bit: int, value: bool) -> None:\n super().__init__()\n self.bit = bit\n self.value = value\n\n value = reactive(0)\n\n def __init__(self, bit: int) -> None:\n self.bit = bit\n super().__init__()\n\n def compose(self) -> ComposeResult:\n yield Label(str(self.bit))\n yield Switch()\n\n def watch_value(self, value: bool) -> None: # (1)!\n \"\"\"When the value changes we want to set the switch accordingly.\"\"\"\n self.query_one(Switch).value = value\n\n def on_switch_changed(self, event: Switch.Changed) -> None:\n \"\"\"When the switch changes, notify the parent via a message.\"\"\"\n event.stop()\n self.value = event.value\n self.post_message(self.BitChanged(self.bit, event.value))\n\n\nclass ByteInput(Widget):\n \"\"\"A compound widget with 8 switches.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n ByteInput {\n width: auto;\n height: auto;\n border: blank;\n layout: horizontal;\n }\n ByteInput:focus-within {\n border: heavy $secondary;\n }\n \"\"\"\n\n def compose(self) -> ComposeResult:\n for bit in reversed(range(8)):\n yield BitSwitch(bit)\n\n\nclass ByteEditor(Widget):\n DEFAULT_CSS = \"\"\"\n ByteEditor > Container {\n height: 1fr;\n align: center middle;\n }\n ByteEditor > Container.top {\n background: $boost;\n }\n ByteEditor Input {\n width: 16;\n }\n \"\"\"\n\n value = reactive(0)\n\n def validate_value(self, value: int) -> int: # (2)!\n \"\"\"Ensure value is between 0 and 255.\"\"\"\n return clamp(value, 0, 255)\n\n def compose(self) -> ComposeResult:\n with Container(classes=\"top\"):\n yield Input(placeholder=\"byte\")\n with Container():\n yield ByteInput()\n\n def on_bit_switch_bit_changed(self, event: BitSwitch.BitChanged) -> None:\n \"\"\"When a switch changes, update the value.\"\"\"\n value = 0\n for switch in self.query(BitSwitch):\n value |= switch.value << switch.bit\n self.query_one(Input).value = str(value)\n\n def on_input_changed(self, event: Input.Changed) -> None: # (3)!\n \"\"\"When the text changes, set the value of the byte.\"\"\"\n try:\n self.value = int(event.value or \"0\")\n except ValueError:\n pass\n\n def watch_value(self, value: int) -> None: # (4)!\n \"\"\"When self.value changes, update switches.\"\"\"\n for switch in self.query(BitSwitch):\n with switch.prevent(BitSwitch.BitChanged): # (5)!\n switch.value = bool(value & (1 << switch.bit)) # (6)!\n\n\nclass ByteInputApp(App):\n def compose(self) -> ComposeResult:\n yield ByteEditor()\n\n\nif __name__ == \"__main__\":\n app = ByteInputApp()\n app.run()\n", "path": 
"docs/examples/guide/compound/byte03.py"}]}
| 2,604 | 245 |
gh_patches_debug_35030 | rasdani/github-patches | git_diff | pfnet__pytorch-pfn-extras-582 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Align terminology (`option` v.s. `config`)
</issue>
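The resolution recorded in the golden diff further down renames the runtime keyword argument from `config` to `options`, keeping `config` as a deprecated alias. A hypothetical call-site sketch of the post-rename API (assuming the function is exposed at the top level as `ppe.to`):

```python
import torch
import pytorch_pfn_extras as ppe

module = torch.nn.Linear(4, 4)

# Preferred spelling after the rename: runtime settings travel via `options=`.
ppe.to(module, "cpu", options={})

# The old keyword is kept only as a deprecated alias for backwards compatibility.
ppe.to(module, "cpu", config={})
```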
<code>
[start of pytorch_pfn_extras/runtime/_to.py]
1 from typing import Any, Dict, Optional, Type, TypeVar
2
3 import torch
4
5 import pytorch_pfn_extras as ppe
6 from pytorch_pfn_extras.runtime._runtime import DeviceLike, BaseRuntime
7
8
9 ModuleOrTensor = TypeVar('ModuleOrTensor', torch.nn.Module, torch.Tensor)
10
11
12 def to(
13 module_or_tensor: ModuleOrTensor,
14 device: DeviceLike,
15 *,
16 config: Optional[Dict[str, Any]] = None,
17 runtime_class: Optional[Type[BaseRuntime]] = None,
18 ) -> ModuleOrTensor:
19 """A function to transfer the given object to the given device.
20
21 If PyTorch's device type is given as the ``device`` argument,
22 the behavior of this function is equivalent to
23 ``module_or_tensor.to(module_or_tensor, device)``.
24
25 Otherwise, this function uses the **Runtime** mechanism.
26 This function looks for the Runtime for the device from the RuntimeRegistry
27 and delegates the actual transfer operation to it.
28
29 See also the documentation of ``ppe.runtime.BaseRuntime`` for details.
30
31 Args:
32 module_or_tensor (torch.nn.Module or torch.Tensor):
33 An object to be transferred.
34 device (torch.device or str):
35 The device that the input object is transferred to.
36 config (dict, optional):
37 A config of dictionary type that is passed to
38 ``runtime_class.__init__`` as an argument.
39 runtime_class:
40 A runtime class inherited from `BaseRuntime` class.
41 If ``None``, a runtime class is automatically selected
42 based on the ``device`` argument from the runtime registry.
43
44 Returns:
45 A `torch.Tensor` with the specified device.
46 """
47 if config is None:
48 config = {}
49 if runtime_class is None:
50 registry = ppe.runtime.runtime_registry
51 runtime_class = registry.get_runtime_class_for_device_spec(device)
52 runtime = runtime_class(device, config)
53 obj = module_or_tensor
54 if isinstance(obj, torch.nn.Module):
55 ppe.runtime._runtime._set_module_runtime_tag(obj, runtime)
56 return runtime.move_module(obj)
57 elif isinstance(obj, torch.Tensor):
58 return runtime.move_tensor(obj)
59 else:
60 raise ValueError('Unsupported type for module_or_tensor')
61
[end of pytorch_pfn_extras/runtime/_to.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pytorch_pfn_extras/runtime/_to.py b/pytorch_pfn_extras/runtime/_to.py
--- a/pytorch_pfn_extras/runtime/_to.py
+++ b/pytorch_pfn_extras/runtime/_to.py
@@ -13,8 +13,9 @@
module_or_tensor: ModuleOrTensor,
device: DeviceLike,
*,
- config: Optional[Dict[str, Any]] = None,
+ options: Optional[Dict[str, Any]] = None,
runtime_class: Optional[Type[BaseRuntime]] = None,
+ config: Optional[Dict[str, Any]] = None,
) -> ModuleOrTensor:
"""A function to transfer the given object to the given device.
@@ -33,23 +34,30 @@
An object to be transferred.
device (torch.device or str):
The device that the input object is transferred to.
- config (dict, optional):
- A config of dictionary type that is passed to
+ options (dict, optional):
+ An options of dictionary type that is passed to
``runtime_class.__init__`` as an argument.
runtime_class:
A runtime class inherited from `BaseRuntime` class.
If ``None``, a runtime class is automatically selected
based on the ``device`` argument from the runtime registry.
+ config (dict, optional):
+ DEPRECATED. Use `options`.
Returns:
A `torch.Tensor` with the specified device.
"""
- if config is None:
- config = {}
+ if options is None:
+ options = {}
+ if config is not None:
+ options = config
+ elif config is not None:
+ raise ValueError('options and config cannot be specified together')
+
if runtime_class is None:
registry = ppe.runtime.runtime_registry
runtime_class = registry.get_runtime_class_for_device_spec(device)
- runtime = runtime_class(device, config)
+ runtime = runtime_class(device, options)
obj = module_or_tensor
if isinstance(obj, torch.nn.Module):
ppe.runtime._runtime._set_module_runtime_tag(obj, runtime)
|
{"golden_diff": "diff --git a/pytorch_pfn_extras/runtime/_to.py b/pytorch_pfn_extras/runtime/_to.py\n--- a/pytorch_pfn_extras/runtime/_to.py\n+++ b/pytorch_pfn_extras/runtime/_to.py\n@@ -13,8 +13,9 @@\n module_or_tensor: ModuleOrTensor,\n device: DeviceLike,\n *,\n- config: Optional[Dict[str, Any]] = None,\n+ options: Optional[Dict[str, Any]] = None,\n runtime_class: Optional[Type[BaseRuntime]] = None,\n+ config: Optional[Dict[str, Any]] = None,\n ) -> ModuleOrTensor:\n \"\"\"A function to transfer the given object to the given device.\n \n@@ -33,23 +34,30 @@\n An object to be transferred.\n device (torch.device or str):\n The device that the input object is transferred to.\n- config (dict, optional):\n- A config of dictionary type that is passed to\n+ options (dict, optional):\n+ An options of dictionary type that is passed to\n ``runtime_class.__init__`` as an argument.\n runtime_class:\n A runtime class inherited from `BaseRuntime` class.\n If ``None``, a runtime class is automatically selected\n based on the ``device`` argument from the runtime registry.\n+ config (dict, optional):\n+ DEPRECATED. Use `options`.\n \n Returns:\n A `torch.Tensor` with the specified device.\n \"\"\"\n- if config is None:\n- config = {}\n+ if options is None:\n+ options = {}\n+ if config is not None:\n+ options = config\n+ elif config is not None:\n+ raise ValueError('options and config cannot be specified together')\n+\n if runtime_class is None:\n registry = ppe.runtime.runtime_registry\n runtime_class = registry.get_runtime_class_for_device_spec(device)\n- runtime = runtime_class(device, config)\n+ runtime = runtime_class(device, options)\n obj = module_or_tensor\n if isinstance(obj, torch.nn.Module):\n ppe.runtime._runtime._set_module_runtime_tag(obj, runtime)\n", "issue": "Align terminology (`option` v.s. 
`config`)\n\n", "before_files": [{"content": "from typing import Any, Dict, Optional, Type, TypeVar\n\nimport torch\n\nimport pytorch_pfn_extras as ppe\nfrom pytorch_pfn_extras.runtime._runtime import DeviceLike, BaseRuntime\n\n\nModuleOrTensor = TypeVar('ModuleOrTensor', torch.nn.Module, torch.Tensor)\n\n\ndef to(\n module_or_tensor: ModuleOrTensor,\n device: DeviceLike,\n *,\n config: Optional[Dict[str, Any]] = None,\n runtime_class: Optional[Type[BaseRuntime]] = None,\n) -> ModuleOrTensor:\n \"\"\"A function to transfer the given object to the given device.\n\n If PyTorch's device type is given as the ``device`` argument,\n the behavior of this function is equivalent to\n ``module_or_tensor.to(module_or_tensor, device)``.\n\n Otherwise, this function uses the **Runtime** mechanism.\n This function looks for the Runtime for the device from the RuntimeRegistry\n and delegates the actual transfer operation to it.\n\n See also the documentation of ``ppe.runtime.BaseRuntime`` for details.\n\n Args:\n module_or_tensor (torch.nn.Module or torch.Tensor):\n An object to be transferred.\n device (torch.device or str):\n The device that the input object is transferred to.\n config (dict, optional):\n A config of dictionary type that is passed to\n ``runtime_class.__init__`` as an argument.\n runtime_class:\n A runtime class inherited from `BaseRuntime` class.\n If ``None``, a runtime class is automatically selected\n based on the ``device`` argument from the runtime registry.\n\n Returns:\n A `torch.Tensor` with the specified device.\n \"\"\"\n if config is None:\n config = {}\n if runtime_class is None:\n registry = ppe.runtime.runtime_registry\n runtime_class = registry.get_runtime_class_for_device_spec(device)\n runtime = runtime_class(device, config)\n obj = module_or_tensor\n if isinstance(obj, torch.nn.Module):\n ppe.runtime._runtime._set_module_runtime_tag(obj, runtime)\n return runtime.move_module(obj)\n elif isinstance(obj, torch.Tensor):\n return runtime.move_tensor(obj)\n else:\n raise ValueError('Unsupported type for module_or_tensor')\n", "path": "pytorch_pfn_extras/runtime/_to.py"}]}
| 1,148 | 465 |
gh_patches_debug_37046 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-3053 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecation: `imghdr` module is being deprecated in Python 3.11
The `imghdr` module is being deprecated in `3.11` and will be removed in `3.13`. See [PEP 594](https://peps.python.org/pep-0594/#imghdr) for details.
We currently use it only in one area: https://github.com/python-telegram-bot/python-telegram-bot/blob/be8f4f7aad9c3ded333950d00ecde57dbdda59c2/telegram/_files/inputfile.py#L108
Alternatives would be:
1. Just use the `mimetypes` module? We were using only the `mimetypes` module until [@b83a659](https://github.com/python-telegram-bot/python-telegram-bot/commit/b83a659) where we used a 'hack' to detect the type from the byte stream. This was then [replaced](https://github.com/python-telegram-bot/python-telegram-bot/commit/5dc1e4cac19cdd34d7cad4688afa0277a0c8a436) by the `imghdr` module which did the same thing better.
2. Use a library called [filetype](https://github.com/h2non/filetype.py) to detect images. This was [recommended](https://peps.python.org/pep-0594/#deprecated-modules) by the PEP.
3. Just copy the python [source](https://github.com/python/cpython/blob/3.11/Lib/imghdr.py) of the `imghdr` module into ours?
</issue>
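For a sense of what options 1/3 amount to in practice, here is a minimal magic-number sniffing sketch — an illustration only, not necessarily the approach the project settled on:

```python
from typing import Optional


def sniff_image_mimetype(stream: bytes) -> Optional[str]:
    """Tiny stand-in for imghdr.what(): identify a few common image formats
    from their leading magic bytes and return a mime type, else None."""
    if stream.startswith(b"\xff\xd8\xff"):
        return "image/jpeg"
    if stream.startswith(b"\x89PNG\r\n\x1a\n"):
        return "image/png"
    if stream.startswith((b"GIF87a", b"GIF89a")):
        return "image/gif"
    if stream.startswith(b"RIFF") and stream[8:12] == b"WEBP":
        return "image/webp"
    return None
```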
<code>
[start of telegram/_files/inputfile.py]
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2022
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains an object that represents a Telegram InputFile."""
20
21 import imghdr
22 import logging
23 import mimetypes
24 from pathlib import Path
25 from typing import IO, Optional, Union
26 from uuid import uuid4
27
28 from telegram._utils.types import FieldTuple
29
30 _DEFAULT_MIME_TYPE = "application/octet-stream"
31 logger = logging.getLogger(__name__)
32
33
34 class InputFile:
35 """This object represents a Telegram InputFile.
36
37 .. versionchanged:: 20.0
38 The former attribute ``attach`` was renamed to :attr:`attach_name`.
39
40 Args:
41 obj (:term:`file object` | :obj:`bytes` | :obj:`str`): An open file descriptor or the files
42 content as bytes or string.
43
44 Note:
45 If :paramref:`obj` is a string, it will be encoded as bytes via
46 :external:obj:`obj.encode('utf-8') <str.encode>`.
47
48 .. versionchanged:: 20.0
49 Accept string input.
50 filename (:obj:`str`, optional): Filename for this InputFile.
51 attach (:obj:`bool`, optional): Pass :obj:`True` if the parameter this file belongs to in
52 the request to Telegram should point to the multipart data via an ``attach://`` URI.
53 Defaults to `False`.
54
55 Attributes:
56 input_file_content (:obj:`bytes`): The binary content of the file to send.
57 attach_name (:obj:`str`): Optional. If present, the parameter this file belongs to in
58 the request to Telegram should point to the multipart data via a an URI of the form
59 ``attach://<attach_name>`` URI.
60 filename (:obj:`str`): Filename for the file to be sent.
61 mimetype (:obj:`str`): The mimetype inferred from the file to be sent.
62
63 """
64
65 __slots__ = ("filename", "attach_name", "input_file_content", "mimetype")
66
67 def __init__(
68 self, obj: Union[IO[bytes], bytes, str], filename: str = None, attach: bool = False
69 ):
70 if isinstance(obj, bytes):
71 self.input_file_content = obj
72 elif isinstance(obj, str):
73 self.input_file_content = obj.encode("utf-8")
74 else:
75 self.input_file_content = obj.read()
76 self.attach_name: Optional[str] = "attached" + uuid4().hex if attach else None
77
78 if (
79 not filename
80 and hasattr(obj, "name")
81 and not isinstance(obj.name, int) # type: ignore[union-attr]
82 ):
83 filename = Path(obj.name).name # type: ignore[union-attr]
84
85 image_mime_type = self.is_image(self.input_file_content)
86 if image_mime_type:
87 self.mimetype = image_mime_type
88 elif filename:
89 self.mimetype = mimetypes.guess_type(filename)[0] or _DEFAULT_MIME_TYPE
90 else:
91 self.mimetype = _DEFAULT_MIME_TYPE
92
93 self.filename = filename or self.mimetype.replace("/", ".")
94
95 @staticmethod
96 def is_image(stream: bytes) -> Optional[str]:
97 """Check if the content file is an image by analyzing its headers.
98
99 Args:
100 stream (:obj:`bytes`): A byte stream representing the content of a file.
101
102 Returns:
103 :obj:`str` | :obj:`None`: The mime-type of an image, if the input is an image, or
104 :obj:`None` else.
105
106 """
107 try:
108 image = imghdr.what(None, stream)
109 if image:
110 return f"image/{image}"
111 return None
112 except Exception:
113 logger.debug(
114 "Could not parse file content. Assuming that file is not an image.", exc_info=True
115 )
116 return None
117
118 @property
119 def field_tuple(self) -> FieldTuple:
120 """Field tuple representing the contents of the file for upload to the Telegram servers.
121
122 Returns:
123 Tuple[:obj:`str`, :obj:`bytes`, :obj:`str`]:
124 """
125 return self.filename, self.input_file_content, self.mimetype
126
127 @property
128 def attach_uri(self) -> Optional[str]:
129 """URI to insert into the JSON data for uploading the file. Returns :obj:`None`, if
130 :attr:`attach_name` is :obj:`None`.
131 """
132 return f"attach://{self.attach_name}" if self.attach_name else None
133
[end of telegram/_files/inputfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/telegram/_files/inputfile.py b/telegram/_files/inputfile.py
--- a/telegram/_files/inputfile.py
+++ b/telegram/_files/inputfile.py
@@ -18,7 +18,6 @@
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram InputFile."""
-import imghdr
import logging
import mimetypes
from pathlib import Path
@@ -35,7 +34,11 @@
"""This object represents a Telegram InputFile.
.. versionchanged:: 20.0
- The former attribute ``attach`` was renamed to :attr:`attach_name`.
+
+ * The former attribute ``attach`` was renamed to :attr:`attach_name`.
+ * Method ``is_image`` was removed. If you pass :obj:`bytes` to :paramref:`obj` and would
+ like to have the mime type automatically guessed, please pass :paramref:`filename`
+ in addition.
Args:
obj (:term:`file object` | :obj:`bytes` | :obj:`str`): An open file descriptor or the files
@@ -82,39 +85,13 @@
):
filename = Path(obj.name).name # type: ignore[union-attr]
- image_mime_type = self.is_image(self.input_file_content)
- if image_mime_type:
- self.mimetype = image_mime_type
- elif filename:
- self.mimetype = mimetypes.guess_type(filename)[0] or _DEFAULT_MIME_TYPE
+ if filename:
+ self.mimetype = mimetypes.guess_type(filename, strict=False)[0] or _DEFAULT_MIME_TYPE
else:
self.mimetype = _DEFAULT_MIME_TYPE
self.filename = filename or self.mimetype.replace("/", ".")
- @staticmethod
- def is_image(stream: bytes) -> Optional[str]:
- """Check if the content file is an image by analyzing its headers.
-
- Args:
- stream (:obj:`bytes`): A byte stream representing the content of a file.
-
- Returns:
- :obj:`str` | :obj:`None`: The mime-type of an image, if the input is an image, or
- :obj:`None` else.
-
- """
- try:
- image = imghdr.what(None, stream)
- if image:
- return f"image/{image}"
- return None
- except Exception:
- logger.debug(
- "Could not parse file content. Assuming that file is not an image.", exc_info=True
- )
- return None
-
@property
def field_tuple(self) -> FieldTuple:
"""Field tuple representing the contents of the file for upload to the Telegram servers.
|
{"golden_diff": "diff --git a/telegram/_files/inputfile.py b/telegram/_files/inputfile.py\n--- a/telegram/_files/inputfile.py\n+++ b/telegram/_files/inputfile.py\n@@ -18,7 +18,6 @@\n # along with this program. If not, see [http://www.gnu.org/licenses/].\n \"\"\"This module contains an object that represents a Telegram InputFile.\"\"\"\n \n-import imghdr\n import logging\n import mimetypes\n from pathlib import Path\n@@ -35,7 +34,11 @@\n \"\"\"This object represents a Telegram InputFile.\n \n .. versionchanged:: 20.0\n- The former attribute ``attach`` was renamed to :attr:`attach_name`.\n+\n+ * The former attribute ``attach`` was renamed to :attr:`attach_name`.\n+ * Method ``is_image`` was removed. If you pass :obj:`bytes` to :paramref:`obj` and would\n+ like to have the mime type automatically guessed, please pass :paramref:`filename`\n+ in addition.\n \n Args:\n obj (:term:`file object` | :obj:`bytes` | :obj:`str`): An open file descriptor or the files\n@@ -82,39 +85,13 @@\n ):\n filename = Path(obj.name).name # type: ignore[union-attr]\n \n- image_mime_type = self.is_image(self.input_file_content)\n- if image_mime_type:\n- self.mimetype = image_mime_type\n- elif filename:\n- self.mimetype = mimetypes.guess_type(filename)[0] or _DEFAULT_MIME_TYPE\n+ if filename:\n+ self.mimetype = mimetypes.guess_type(filename, strict=False)[0] or _DEFAULT_MIME_TYPE\n else:\n self.mimetype = _DEFAULT_MIME_TYPE\n \n self.filename = filename or self.mimetype.replace(\"/\", \".\")\n \n- @staticmethod\n- def is_image(stream: bytes) -> Optional[str]:\n- \"\"\"Check if the content file is an image by analyzing its headers.\n-\n- Args:\n- stream (:obj:`bytes`): A byte stream representing the content of a file.\n-\n- Returns:\n- :obj:`str` | :obj:`None`: The mime-type of an image, if the input is an image, or\n- :obj:`None` else.\n-\n- \"\"\"\n- try:\n- image = imghdr.what(None, stream)\n- if image:\n- return f\"image/{image}\"\n- return None\n- except Exception:\n- logger.debug(\n- \"Could not parse file content. Assuming that file is not an image.\", exc_info=True\n- )\n- return None\n-\n @property\n def field_tuple(self) -> FieldTuple:\n \"\"\"Field tuple representing the contents of the file for upload to the Telegram servers.\n", "issue": "Deprecation: `imghdr` module is being deprecated in Python 3.11\nThe `imghdr` module is being deprected in `3.11` and will be removed in `3.13`. See [PEP 594](https://peps.python.org/pep-0594/#imghdr) for details. \r\n\r\nWe currently use it only in one area: https://github.com/python-telegram-bot/python-telegram-bot/blob/be8f4f7aad9c3ded333950d00ecde57dbdda59c2/telegram/_files/inputfile.py#L108\r\n\r\nAlternatives would be:\r\n1. Just use the `mimetype` module? We were using only the `mimetype` module until [@b83a659](https://github.com/python-telegram-bot/python-telegram-bot/commit/b83a659) where we used a 'hack' to detect the type from the byte stream. This was then [replaced](https://github.com/python-telegram-bot/python-telegram-bot/commit/5dc1e4cac19cdd34d7cad4688afa0277a0c8a436) by the `imghdr` module which did the same thing better.\r\n2. Use a library called [filetype](https://github.com/h2non/filetype.py) to detect images. This was [recommended](https://peps.python.org/pep-0594/#deprecated-modules) by the PEP.\r\n3. 
Just copy the python [source](https://github.com/python/cpython/blob/3.11/Lib/imghdr.py) of the `imghdr` module into ours?\r\n\r\n \n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2022\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram InputFile.\"\"\"\n\nimport imghdr\nimport logging\nimport mimetypes\nfrom pathlib import Path\nfrom typing import IO, Optional, Union\nfrom uuid import uuid4\n\nfrom telegram._utils.types import FieldTuple\n\n_DEFAULT_MIME_TYPE = \"application/octet-stream\"\nlogger = logging.getLogger(__name__)\n\n\nclass InputFile:\n \"\"\"This object represents a Telegram InputFile.\n\n .. versionchanged:: 20.0\n The former attribute ``attach`` was renamed to :attr:`attach_name`.\n\n Args:\n obj (:term:`file object` | :obj:`bytes` | :obj:`str`): An open file descriptor or the files\n content as bytes or string.\n\n Note:\n If :paramref:`obj` is a string, it will be encoded as bytes via\n :external:obj:`obj.encode('utf-8') <str.encode>`.\n\n .. versionchanged:: 20.0\n Accept string input.\n filename (:obj:`str`, optional): Filename for this InputFile.\n attach (:obj:`bool`, optional): Pass :obj:`True` if the parameter this file belongs to in\n the request to Telegram should point to the multipart data via an ``attach://`` URI.\n Defaults to `False`.\n\n Attributes:\n input_file_content (:obj:`bytes`): The binary content of the file to send.\n attach_name (:obj:`str`): Optional. 
If present, the parameter this file belongs to in\n the request to Telegram should point to the multipart data via a an URI of the form\n ``attach://<attach_name>`` URI.\n filename (:obj:`str`): Filename for the file to be sent.\n mimetype (:obj:`str`): The mimetype inferred from the file to be sent.\n\n \"\"\"\n\n __slots__ = (\"filename\", \"attach_name\", \"input_file_content\", \"mimetype\")\n\n def __init__(\n self, obj: Union[IO[bytes], bytes, str], filename: str = None, attach: bool = False\n ):\n if isinstance(obj, bytes):\n self.input_file_content = obj\n elif isinstance(obj, str):\n self.input_file_content = obj.encode(\"utf-8\")\n else:\n self.input_file_content = obj.read()\n self.attach_name: Optional[str] = \"attached\" + uuid4().hex if attach else None\n\n if (\n not filename\n and hasattr(obj, \"name\")\n and not isinstance(obj.name, int) # type: ignore[union-attr]\n ):\n filename = Path(obj.name).name # type: ignore[union-attr]\n\n image_mime_type = self.is_image(self.input_file_content)\n if image_mime_type:\n self.mimetype = image_mime_type\n elif filename:\n self.mimetype = mimetypes.guess_type(filename)[0] or _DEFAULT_MIME_TYPE\n else:\n self.mimetype = _DEFAULT_MIME_TYPE\n\n self.filename = filename or self.mimetype.replace(\"/\", \".\")\n\n @staticmethod\n def is_image(stream: bytes) -> Optional[str]:\n \"\"\"Check if the content file is an image by analyzing its headers.\n\n Args:\n stream (:obj:`bytes`): A byte stream representing the content of a file.\n\n Returns:\n :obj:`str` | :obj:`None`: The mime-type of an image, if the input is an image, or\n :obj:`None` else.\n\n \"\"\"\n try:\n image = imghdr.what(None, stream)\n if image:\n return f\"image/{image}\"\n return None\n except Exception:\n logger.debug(\n \"Could not parse file content. Assuming that file is not an image.\", exc_info=True\n )\n return None\n\n @property\n def field_tuple(self) -> FieldTuple:\n \"\"\"Field tuple representing the contents of the file for upload to the Telegram servers.\n\n Returns:\n Tuple[:obj:`str`, :obj:`bytes`, :obj:`str`]:\n \"\"\"\n return self.filename, self.input_file_content, self.mimetype\n\n @property\n def attach_uri(self) -> Optional[str]:\n \"\"\"URI to insert into the JSON data for uploading the file. Returns :obj:`None`, if\n :attr:`attach_name` is :obj:`None`.\n \"\"\"\n return f\"attach://{self.attach_name}\" if self.attach_name else None\n", "path": "telegram/_files/inputfile.py"}]}
| 2,369 | 620 |
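The golden diff above drops `imghdr` entirely and keeps only the filename-based lookup via `mimetypes.guess_type(..., strict=False)`. A small self-contained sketch of that guessing step (the helper name `guess_mime_type` is illustrative, not part of the library):

```python
import mimetypes
from typing import Optional

_DEFAULT_MIME_TYPE = "application/octet-stream"

def guess_mime_type(filename: Optional[str]) -> str:
    # Filename-based lookup only, as in the patched InputFile.__init__;
    # no byte sniffing, so callers passing raw bytes should also pass a name.
    if filename:
        return mimetypes.guess_type(filename, strict=False)[0] or _DEFAULT_MIME_TYPE
    return _DEFAULT_MIME_TYPE

print(guess_mime_type("photo.png"))  # image/png
print(guess_mime_type(None))         # application/octet-stream
```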
gh_patches_debug_15176
|
rasdani/github-patches
|
git_diff
|
Cog-Creators__Red-DiscordBot-1185
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeError: Only messages, members or roles may be passed
https://sentry.io/will-tekulve/bot-development/issues/419244737/
```
TypeError: Only messages, members or roles may be passed
File "redbot/core/utils/mod.py", line 132, in is_mod_or_superior
raise TypeError('Only messages, members or roles may be passed')
Exception in on_message
```
</issue>
<code>
[start of redbot/core/utils/mod.py]
1 import asyncio
2 from datetime import timedelta
3 from typing import List, Iterable, Union
4
5 import discord
6
7 from redbot.core import Config
8 from redbot.core.bot import Red
9
10
11 async def mass_purge(messages: List[discord.Message],
12 channel: discord.TextChannel):
13 """Bulk delete messages from a channel.
14
15 If more than 100 messages are supplied, the bot will delete 100 messages at
16 a time, sleeping between each action.
17
18 Note
19 ----
20 Messages must not be older than 14 days, and the bot must not be a user
21 account.
22
23 Parameters
24 ----------
25 messages : `list` of `discord.Message`
26 The messages to bulk delete.
27 channel : discord.TextChannel
28 The channel to delete messages from.
29
30 Raises
31 ------
32 discord.Forbidden
33 You do not have proper permissions to delete the messages or you’re not
34 using a bot account.
35 discord.HTTPException
36 Deleting the messages failed.
37
38 """
39 while messages:
40 if len(messages) > 1:
41 await channel.delete_messages(messages[:100])
42 messages = messages[100:]
43 else:
44 await messages[0].delete()
45 messages = []
46 await asyncio.sleep(1.5)
47
48
49 async def slow_deletion(messages: Iterable[discord.Message]):
50 """Delete a list of messages one at a time.
51
52 Any exceptions raised when trying to delete the message will be silenced.
53
54 Parameters
55 ----------
56 messages : `iterable` of `discord.Message`
57 The messages to delete.
58
59 """
60 for message in messages:
61 try:
62 await message.delete()
63 except discord.HTTPException:
64 pass
65
66
67 def get_audit_reason(author: discord.Member, reason: str = None):
68 """Construct a reason to appear in the audit log.
69
70 Parameters
71 ----------
72 author : discord.Member
73 The author behind the audit log action.
74 reason : str
75 The reason behidn the audit log action.
76
77 Returns
78 -------
79 str
80 The formatted audit log reason.
81
82 """
83 return \
84 "Action requested by {} (ID {}). Reason: {}".format(author, author.id, reason) if reason else \
85 "Action requested by {} (ID {}).".format(author, author.id)
86
87
88 async def is_allowed_by_hierarchy(bot: Red,
89 settings: Config,
90 guild: discord.Guild,
91 mod: discord.Member,
92 user: discord.Member):
93 if not await settings.guild(guild).respect_hierarchy():
94 return True
95 is_special = mod == guild.owner or await bot.is_owner(mod)
96 return mod.top_role.position > user.top_role.position or is_special
97
98
99 async def is_mod_or_superior(
100 bot: Red, obj: Union[discord.Message, discord.Member, discord.Role]):
101 """Check if an object has mod or superior permissions.
102
103 If a message is passed, its author's permissions are checked. If a role is
104 passed, it simply checks if it is one of either the admin or mod roles.
105
106 Parameters
107 ----------
108 bot : redbot.core.bot.Red
109 The bot object.
110 obj : `discord.Message` or `discord.Member` or `discord.Role`
111 The object to check permissions for.
112
113 Returns
114 -------
115 bool
116 :code:`True` if the object has mod permissions.
117
118 Raises
119 ------
120 TypeError
121 If the wrong type of ``obj`` was passed.
122
123 """
124 user = None
125 if isinstance(obj, discord.Message):
126 user = obj.author
127 elif isinstance(obj, discord.Member):
128 user = obj
129 elif isinstance(obj, discord.Role):
130 pass
131 else:
132 raise TypeError('Only messages, members or roles may be passed')
133
134 server = obj.guild
135 admin_role_id = await bot.db.guild(server).admin_role()
136 mod_role_id = await bot.db.guild(server).mod_role()
137
138 if isinstance(obj, discord.Role):
139 return obj.id in [admin_role_id, mod_role_id]
140 mod_roles = [r for r in server.roles if r.id == mod_role_id]
141 mod_role = mod_roles[0] if len(mod_roles) > 0 else None
142 admin_roles = [r for r in server.roles if r.id == admin_role_id]
143 admin_role = admin_roles[0] if len(admin_roles) > 0 else None
144
145 if user and user == await bot.is_owner(user):
146 return True
147 elif admin_role and discord.utils.get(user.roles, name=admin_role):
148 return True
149 elif mod_role and discord.utils.get(user.roles, name=mod_role):
150 return True
151 else:
152 return False
153
154
155 def strfdelta(delta: timedelta):
156 """Format a timedelta object to a message with time units.
157
158 Parameters
159 ----------
160 delta : datetime.timedelta
161 The duration to parse.
162
163 Returns
164 -------
165 str
166 A message representing the timedelta with units.
167
168 """
169 s = []
170 if delta.days:
171 ds = '%i day' % delta.days
172 if delta.days > 1:
173 ds += 's'
174 s.append(ds)
175 hrs, rem = divmod(delta.seconds, 60*60)
176 if hrs:
177 hs = '%i hr' % hrs
178 if hrs > 1:
179 hs += 's'
180 s.append(hs)
181 mins, secs = divmod(rem, 60)
182 if mins:
183 s.append('%i min' % mins)
184 if secs:
185 s.append('%i sec' % secs)
186 return ' '.join(s)
187
188
189 async def is_admin_or_superior(
190 bot: Red, obj: Union[discord.Message, discord.Member, discord.Role]):
191 """Same as `is_mod_or_superior` except for admin permissions.
192
193 If a message is passed, its author's permissions are checked. If a role is
194 passed, it simply checks if it is the admin role.
195
196 Parameters
197 ----------
198 bot : redbot.core.bot.Red
199 The bot object.
200 obj : `discord.Message` or `discord.Member` or `discord.Role`
201 The object to check permissions for.
202
203 Returns
204 -------
205 bool
206 :code:`True` if the object has admin permissions.
207
208 Raises
209 ------
210 TypeError
211 If the wrong type of ``obj`` was passed.
212
213 """
214 user = None
215 if isinstance(obj, discord.Message):
216 user = obj.author
217 elif isinstance(obj, discord.Member):
218 user = obj
219 elif isinstance(obj, discord.Role):
220 pass
221 else:
222 raise TypeError('Only messages, members or roles may be passed')
223
224 server = obj.guild
225 admin_role_id = await bot.db.guild(server).admin_role()
226
227 if isinstance(obj, discord.Role):
228 return obj.id == admin_role_id
229 admin_roles = [r for r in server.roles if r.id == admin_role_id]
230 admin_role = admin_roles[0] if len(admin_roles) > 0 else None
231
232 if user and await bot.is_owner(user):
233 return True
234 elif admin_roles and discord.utils.get(user.roles, name=admin_role):
235 return True
236 else:
237 return False
238
[end of redbot/core/utils/mod.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/redbot/core/utils/mod.py b/redbot/core/utils/mod.py
--- a/redbot/core/utils/mod.py
+++ b/redbot/core/utils/mod.py
@@ -124,7 +124,7 @@
user = None
if isinstance(obj, discord.Message):
user = obj.author
- elif isinstance(obj, discord.Member):
+ elif isinstance(obj, discord.Member) or isinstance(obj, discord.User):
user = obj
elif isinstance(obj, discord.Role):
pass
@@ -214,7 +214,7 @@
user = None
if isinstance(obj, discord.Message):
user = obj.author
- elif isinstance(obj, discord.Member):
+ elif isinstance(obj, discord.Member) or isinstance(obj, discord.User):
user = obj
elif isinstance(obj, discord.Role):
pass
|
{"golden_diff": "diff --git a/redbot/core/utils/mod.py b/redbot/core/utils/mod.py\n--- a/redbot/core/utils/mod.py\n+++ b/redbot/core/utils/mod.py\n@@ -124,7 +124,7 @@\n user = None\n if isinstance(obj, discord.Message):\n user = obj.author\n- elif isinstance(obj, discord.Member):\n+ elif isinstance(obj, discord.Member) or isinstance(obj, discord.User):\n user = obj\n elif isinstance(obj, discord.Role):\n pass\n@@ -214,7 +214,7 @@\n user = None\n if isinstance(obj, discord.Message):\n user = obj.author\n- elif isinstance(obj, discord.Member):\n+ elif isinstance(obj, discord.Member) or isinstance(obj, discord.User):\n user = obj\n elif isinstance(obj, discord.Role):\n pass\n", "issue": "TypeError: Only messages, members or roles may be passed\nhttps://sentry.io/will-tekulve/bot-development/issues/419244737/\n\n```\nTypeError: Only messages, members or roles may be passed\n File \"redbot/core/utils/mod.py\", line 132, in is_mod_or_superior\n raise TypeError('Only messages, members or roles may be passed')\n\nException in on_message\n```\n", "before_files": [{"content": "import asyncio\nfrom datetime import timedelta\nfrom typing import List, Iterable, Union\n\nimport discord\n\nfrom redbot.core import Config\nfrom redbot.core.bot import Red\n\n\nasync def mass_purge(messages: List[discord.Message],\n channel: discord.TextChannel):\n \"\"\"Bulk delete messages from a channel.\n\n If more than 100 messages are supplied, the bot will delete 100 messages at\n a time, sleeping between each action.\n\n Note\n ----\n Messages must not be older than 14 days, and the bot must not be a user\n account.\n\n Parameters\n ----------\n messages : `list` of `discord.Message`\n The messages to bulk delete.\n channel : discord.TextChannel\n The channel to delete messages from.\n\n Raises\n ------\n discord.Forbidden\n You do not have proper permissions to delete the messages or you\u2019re not\n using a bot account.\n discord.HTTPException\n Deleting the messages failed.\n\n \"\"\"\n while messages:\n if len(messages) > 1:\n await channel.delete_messages(messages[:100])\n messages = messages[100:]\n else:\n await messages[0].delete()\n messages = []\n await asyncio.sleep(1.5)\n\n\nasync def slow_deletion(messages: Iterable[discord.Message]):\n \"\"\"Delete a list of messages one at a time.\n\n Any exceptions raised when trying to delete the message will be silenced.\n\n Parameters\n ----------\n messages : `iterable` of `discord.Message`\n The messages to delete.\n\n \"\"\"\n for message in messages:\n try:\n await message.delete()\n except discord.HTTPException:\n pass\n\n\ndef get_audit_reason(author: discord.Member, reason: str = None):\n \"\"\"Construct a reason to appear in the audit log.\n\n Parameters\n ----------\n author : discord.Member\n The author behind the audit log action.\n reason : str\n The reason behidn the audit log action.\n\n Returns\n -------\n str\n The formatted audit log reason.\n\n \"\"\"\n return \\\n \"Action requested by {} (ID {}). 
Reason: {}\".format(author, author.id, reason) if reason else \\\n \"Action requested by {} (ID {}).\".format(author, author.id)\n\n\nasync def is_allowed_by_hierarchy(bot: Red,\n settings: Config,\n guild: discord.Guild,\n mod: discord.Member,\n user: discord.Member):\n if not await settings.guild(guild).respect_hierarchy():\n return True\n is_special = mod == guild.owner or await bot.is_owner(mod)\n return mod.top_role.position > user.top_role.position or is_special\n\n\nasync def is_mod_or_superior(\n bot: Red, obj: Union[discord.Message, discord.Member, discord.Role]):\n \"\"\"Check if an object has mod or superior permissions.\n\n If a message is passed, its author's permissions are checked. If a role is\n passed, it simply checks if it is one of either the admin or mod roles.\n\n Parameters\n ----------\n bot : redbot.core.bot.Red\n The bot object.\n obj : `discord.Message` or `discord.Member` or `discord.Role`\n The object to check permissions for.\n\n Returns\n -------\n bool\n :code:`True` if the object has mod permissions.\n\n Raises\n ------\n TypeError\n If the wrong type of ``obj`` was passed.\n\n \"\"\"\n user = None\n if isinstance(obj, discord.Message):\n user = obj.author\n elif isinstance(obj, discord.Member):\n user = obj\n elif isinstance(obj, discord.Role):\n pass\n else:\n raise TypeError('Only messages, members or roles may be passed')\n\n server = obj.guild\n admin_role_id = await bot.db.guild(server).admin_role()\n mod_role_id = await bot.db.guild(server).mod_role()\n\n if isinstance(obj, discord.Role):\n return obj.id in [admin_role_id, mod_role_id]\n mod_roles = [r for r in server.roles if r.id == mod_role_id]\n mod_role = mod_roles[0] if len(mod_roles) > 0 else None\n admin_roles = [r for r in server.roles if r.id == admin_role_id]\n admin_role = admin_roles[0] if len(admin_roles) > 0 else None\n\n if user and user == await bot.is_owner(user):\n return True\n elif admin_role and discord.utils.get(user.roles, name=admin_role):\n return True\n elif mod_role and discord.utils.get(user.roles, name=mod_role):\n return True\n else:\n return False\n\n\ndef strfdelta(delta: timedelta):\n \"\"\"Format a timedelta object to a message with time units.\n\n Parameters\n ----------\n delta : datetime.timedelta\n The duration to parse.\n\n Returns\n -------\n str\n A message representing the timedelta with units.\n\n \"\"\"\n s = []\n if delta.days:\n ds = '%i day' % delta.days\n if delta.days > 1:\n ds += 's'\n s.append(ds)\n hrs, rem = divmod(delta.seconds, 60*60)\n if hrs:\n hs = '%i hr' % hrs\n if hrs > 1:\n hs += 's'\n s.append(hs)\n mins, secs = divmod(rem, 60)\n if mins:\n s.append('%i min' % mins)\n if secs:\n s.append('%i sec' % secs)\n return ' '.join(s)\n\n\nasync def is_admin_or_superior(\n bot: Red, obj: Union[discord.Message, discord.Member, discord.Role]):\n \"\"\"Same as `is_mod_or_superior` except for admin permissions.\n\n If a message is passed, its author's permissions are checked. 
If a role is\n passed, it simply checks if it is the admin role.\n\n Parameters\n ----------\n bot : redbot.core.bot.Red\n The bot object.\n obj : `discord.Message` or `discord.Member` or `discord.Role`\n The object to check permissions for.\n\n Returns\n -------\n bool\n :code:`True` if the object has admin permissions.\n\n Raises\n ------\n TypeError\n If the wrong type of ``obj`` was passed.\n\n \"\"\"\n user = None\n if isinstance(obj, discord.Message):\n user = obj.author\n elif isinstance(obj, discord.Member):\n user = obj\n elif isinstance(obj, discord.Role):\n pass\n else:\n raise TypeError('Only messages, members or roles may be passed')\n\n server = obj.guild\n admin_role_id = await bot.db.guild(server).admin_role()\n\n if isinstance(obj, discord.Role):\n return obj.id == admin_role_id\n admin_roles = [r for r in server.roles if r.id == admin_role_id]\n admin_role = admin_roles[0] if len(admin_roles) > 0 else None\n\n if user and await bot.is_owner(user):\n return True\n elif admin_roles and discord.utils.get(user.roles, name=admin_role):\n return True\n else:\n return False\n", "path": "redbot/core/utils/mod.py"}]}
| 2,797 | 184 |
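The patch above widens the `isinstance` checks so that `discord.User` objects (e.g. authors of direct messages) no longer fall through to the `TypeError`. A stripped-down sketch of just that dispatch step; the helper name is made up, and the real functions go on to resolve roles and owner status:

```python
import discord

def _resolve_user(obj):
    # Accept Users as well as Members, mirroring the golden diff; DM authors
    # are plain discord.User objects and previously hit the raise below.
    if isinstance(obj, discord.Message):
        return obj.author
    if isinstance(obj, (discord.Member, discord.User)):
        return obj
    if isinstance(obj, discord.Role):
        return None
    raise TypeError("Only messages, members or roles may be passed")
```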
gh_patches_debug_7472
|
rasdani/github-patches
|
git_diff
|
getnikola__nikola-3211
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot use "ignored_assets" properly on Windows with custom themes
<!--
Before creating an issue:
* make sure you are using an up-to-date version of Nikola
* search for existing issues that might be related
Make sure to:
* provide information about your environment (below)
* include all the output you get, and any other information related to your problem
Nikola v7.6.4, as provided by Ubuntu, is NOT SUPPORTED.
If you are using this version, you should upgrade: https://getnikola.com/getting-started.html
-->
### Environment
**Python Version:** Python 3.7.1
**Nikola Version:** Nikola v8.0.1
**Operating System:** Windows 10
### Description:
The option "ignored_assets" used in theme meta files does not work properly on Windows: the relative path is only recognized when using a backslash, i.e. css\theme.css instead of css/theme.css ([see doc](https://getnikola.com/theming.html#theme-meta-files)).
### Solution:
Normalize the path to the convention of the operating system when parsing the theme meta file.
</issue>
<code>
[start of nikola/plugins/task/copy_assets.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2019 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Copy theme assets into output."""
28
29
30 import io
31 import os
32
33 from nikola.plugin_categories import Task
34 from nikola import utils
35
36
37 class CopyAssets(Task):
38 """Copy theme assets into output."""
39
40 name = "copy_assets"
41
42 def gen_tasks(self):
43 """Create tasks to copy the assets of the whole theme chain.
44
45 If a file is present on two themes, use the version
46 from the "youngest" theme.
47 """
48 kw = {
49 "themes": self.site.THEMES,
50 "translations": self.site.translations,
51 "files_folders": self.site.config['FILES_FOLDERS'],
52 "output_folder": self.site.config['OUTPUT_FOLDER'],
53 "filters": self.site.config['FILTERS'],
54 "code_color_scheme": self.site.config['CODE_COLOR_SCHEME'],
55 "code.css_selectors": ['pre.code', '.highlight pre'],
56 "code.css_head": '/* code.css file generated by Nikola */\n',
57 "code.css_close": "\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\n",
58 }
59 tasks = {}
60 code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')
61 code_css_input = utils.get_asset_path('assets/css/code.css',
62 themes=kw['themes'],
63 files_folders=kw['files_folders'], output_dir=None)
64 yield self.group_task()
65
66 main_theme = utils.get_theme_path(kw['themes'][0])
67 theme_ini = utils.parse_theme_meta(main_theme)
68 if theme_ini:
69 ignored_assets = theme_ini.get("Nikola", "ignored_assets", fallback='').split(',')
70 ignored_assets = [asset_name.strip() for asset_name in ignored_assets]
71 else:
72 ignored_assets = []
73
74 for theme_name in kw['themes']:
75 src = os.path.join(utils.get_theme_path(theme_name), 'assets')
76 dst = os.path.join(kw['output_folder'], 'assets')
77 for task in utils.copy_tree(src, dst):
78 asset_name = os.path.relpath(task['name'], dst)
79 if task['name'] in tasks or asset_name in ignored_assets:
80 continue
81 tasks[task['name']] = task
82 task['uptodate'] = [utils.config_changed(kw, 'nikola.plugins.task.copy_assets')]
83 task['basename'] = self.name
84 if code_css_input:
85 if 'file_dep' not in task:
86 task['file_dep'] = []
87 task['file_dep'].append(code_css_input)
88 yield utils.apply_filters(task, kw['filters'])
89
90 # Check whether or not there is a code.css file around.
91 if not code_css_input and kw['code_color_scheme']:
92 def create_code_css():
93 from pygments.formatters import get_formatter_by_name
94 formatter = get_formatter_by_name('html', style=kw["code_color_scheme"])
95 utils.makedirs(os.path.dirname(code_css_path))
96 with io.open(code_css_path, 'w+', encoding='utf8') as outf:
97 outf.write(kw["code.css_head"])
98 outf.write(formatter.get_style_defs(kw["code.css_selectors"]))
99 outf.write(kw["code.css_close"])
100
101 if os.path.exists(code_css_path):
102 with io.open(code_css_path, 'r', encoding='utf-8') as fh:
103 testcontents = fh.read(len(kw["code.css_head"])) == kw["code.css_head"]
104 else:
105 testcontents = False
106
107 task = {
108 'basename': self.name,
109 'name': code_css_path,
110 'targets': [code_css_path],
111 'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.copy_assets'), testcontents],
112 'actions': [(create_code_css, [])],
113 'clean': True,
114 }
115 yield utils.apply_filters(task, kw['filters'])
116
[end of nikola/plugins/task/copy_assets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py
--- a/nikola/plugins/task/copy_assets.py
+++ b/nikola/plugins/task/copy_assets.py
@@ -67,7 +67,7 @@
theme_ini = utils.parse_theme_meta(main_theme)
if theme_ini:
ignored_assets = theme_ini.get("Nikola", "ignored_assets", fallback='').split(',')
- ignored_assets = [asset_name.strip() for asset_name in ignored_assets]
+ ignored_assets = [os.path.normpath(asset_name.strip()) for asset_name in ignored_assets]
else:
ignored_assets = []
|
{"golden_diff": "diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py\n--- a/nikola/plugins/task/copy_assets.py\n+++ b/nikola/plugins/task/copy_assets.py\n@@ -67,7 +67,7 @@\n theme_ini = utils.parse_theme_meta(main_theme)\n if theme_ini:\n ignored_assets = theme_ini.get(\"Nikola\", \"ignored_assets\", fallback='').split(',')\n- ignored_assets = [asset_name.strip() for asset_name in ignored_assets]\n+ ignored_assets = [os.path.normpath(asset_name.strip()) for asset_name in ignored_assets]\n else:\n ignored_assets = []\n", "issue": "Cannot use \"ignored_assets\" properly on Windows with custom themes\n<!--\r\nBefore creating an issue:\r\n* make sure you are using an up-to-date version of Nikola\r\n* search for existing issues that might be related\r\n\r\nMake sure to:\r\n* provide information about your environment (below)\r\n* include all the output you get, and any other information related to your problem\r\n\r\nNikola v7.6.4, as provided by Ubuntu, is NOT SUPPORTED.\r\nIf you are using this version, you should upgrade: https://getnikola.com/getting-started.html\r\n-->\r\n\r\n### Environment\r\n\r\n**Python Version:** Python 3.7.1\r\n\r\n**Nikola Version:** Nikola v8.0.1\r\n\r\n**Operating System:** Windows 10\r\n\r\n### Description:\r\n\r\nThe option \"ignored_assets\" used in theme meta files does not work properly on Windows: the relative path is only recognized when using a backslash, i.e. css\\theme.css instead of css/theme.css ([see doc](https://getnikola.com/theming.html#theme-meta-files)).\r\n\r\n### Solution:\r\n\r\nNormalize the path to the convention of the operating system when parsing the theme meta file.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2019 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Copy theme assets into output.\"\"\"\n\n\nimport io\nimport os\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass CopyAssets(Task):\n \"\"\"Copy theme assets into output.\"\"\"\n\n name = \"copy_assets\"\n\n def gen_tasks(self):\n \"\"\"Create tasks to copy the assets of the whole theme chain.\n\n If a file is present on two themes, use the version\n from the \"youngest\" theme.\n \"\"\"\n kw = {\n \"themes\": self.site.THEMES,\n \"translations\": self.site.translations,\n \"files_folders\": self.site.config['FILES_FOLDERS'],\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n \"code.css_selectors\": ['pre.code', '.highlight pre'],\n \"code.css_head\": '/* code.css file generated by Nikola */\\n',\n \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n }\n tasks = {}\n code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n code_css_input = utils.get_asset_path('assets/css/code.css',\n themes=kw['themes'],\n files_folders=kw['files_folders'], output_dir=None)\n yield self.group_task()\n\n main_theme = utils.get_theme_path(kw['themes'][0])\n theme_ini = utils.parse_theme_meta(main_theme)\n if theme_ini:\n ignored_assets = theme_ini.get(\"Nikola\", \"ignored_assets\", fallback='').split(',')\n ignored_assets = [asset_name.strip() for asset_name in ignored_assets]\n else:\n ignored_assets = []\n\n for theme_name in kw['themes']:\n src = os.path.join(utils.get_theme_path(theme_name), 'assets')\n dst = os.path.join(kw['output_folder'], 'assets')\n for task in utils.copy_tree(src, dst):\n asset_name = os.path.relpath(task['name'], dst)\n if task['name'] in tasks or asset_name in ignored_assets:\n continue\n tasks[task['name']] = task\n task['uptodate'] = [utils.config_changed(kw, 'nikola.plugins.task.copy_assets')]\n task['basename'] = self.name\n if code_css_input:\n if 'file_dep' not in task:\n task['file_dep'] = []\n task['file_dep'].append(code_css_input)\n yield utils.apply_filters(task, kw['filters'])\n\n # Check whether or not there is a code.css file around.\n if not code_css_input and kw['code_color_scheme']:\n def create_code_css():\n from pygments.formatters import get_formatter_by_name\n formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n utils.makedirs(os.path.dirname(code_css_path))\n with io.open(code_css_path, 'w+', encoding='utf8') as outf:\n outf.write(kw[\"code.css_head\"])\n outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n outf.write(kw[\"code.css_close\"])\n\n if os.path.exists(code_css_path):\n with io.open(code_css_path, 'r', encoding='utf-8') as fh:\n testcontents = fh.read(len(kw[\"code.css_head\"])) == kw[\"code.css_head\"]\n else:\n testcontents = False\n\n task = {\n 'basename': self.name,\n 'name': code_css_path,\n 'targets': [code_css_path],\n 'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.copy_assets'), testcontents],\n 'actions': [(create_code_css, [])],\n 'clean': True,\n }\n yield utils.apply_filters(task, kw['filters'])\n", "path": "nikola/plugins/task/copy_assets.py"}]}
| 2,116 | 143 |
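The one-line fix above normalises each `ignored_assets` entry so that forward-slash paths from the theme meta file compare equal to the native-separator output of `os.path.relpath` on Windows. A quick stdlib-only illustration of why `os.path.normpath` is sufficient (the paths are invented for the example):

```python
import os

ignored = "css/theme.css, js/extra.js"
# Same normalisation as the patch: strip whitespace, then normpath each entry.
ignored_assets = [os.path.normpath(name.strip()) for name in ignored.split(",")]

# copy_tree/relpath produce native separators, e.g. "css\\theme.css" on Windows.
asset_name = os.path.join("css", "theme.css")
print(asset_name in ignored_assets)  # True on both Windows and POSIX
```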
gh_patches_debug_353
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-1044
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[announce] Send confirmation to caller after all channels announced
When Sopel is in many channels, announces are likely to be rate-limited. This makes it hard to know, for example, when it's safe to shut down the bot if announce is being used to broadcast an upgrade notice.
It's an easy fix, and I'll open a PR for it tomorrow if there are no objections.
I am as-yet undecided whether it's best to use `bot.reply()` or `bot.notice()` for this (or even `bot.msg()` via PM to the caller), but I'll think about it between now and when I open the PR, and it can always be changed before merging.
</issue>
<code>
[start of sopel/modules/announce.py]
1 # coding=utf-8
2 """
3 announce.py - Send a message to all channels
4 Copyright © 2013, Elad Alfassa, <[email protected]>
5 Licensed under the Eiffel Forum License 2.
6
7 """
8 from __future__ import unicode_literals, absolute_import, print_function, division
9
10 from sopel.module import commands, example
11
12
13 @commands('announce')
14 @example('.announce Some important message here')
15 def announce(bot, trigger):
16 """
17 Send an announcement to all channels the bot is in
18 """
19 if not trigger.admin:
20 bot.reply('Sorry, I can\'t let you do that')
21 return
22 for channel in bot.channels:
23 bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2))
24
[end of sopel/modules/announce.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sopel/modules/announce.py b/sopel/modules/announce.py
--- a/sopel/modules/announce.py
+++ b/sopel/modules/announce.py
@@ -21,3 +21,4 @@
return
for channel in bot.channels:
bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2))
+ bot.reply('Announce complete.')
|
{"golden_diff": "diff --git a/sopel/modules/announce.py b/sopel/modules/announce.py\n--- a/sopel/modules/announce.py\n+++ b/sopel/modules/announce.py\n@@ -21,3 +21,4 @@\n return\n for channel in bot.channels:\n bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2))\n+ bot.reply('Announce complete.')\n", "issue": "[announce] Send confirmation to caller after all channels announced\nWhen Sopel is in many channels, announces are likely to be rate-limited. This makes it hard to know, for example, when it's safe to shut down the bot if announce is being used to broadcast an upgrade notice.\n\nIt's an easy fix, and I'll open a PR for it tomorrow if there are no objections.\n\nI am as-yet undecided whether it's best to use `bot.reply()` or `bot.notice()` for this (or even `bot.msg()` via PM to the caller), but I'll think about it between now and when I open the PR, and it can always be changed before merging.\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nannounce.py - Send a message to all channels\nCopyright \u00a9 2013, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel.module import commands, example\n\n\n@commands('announce')\n@example('.announce Some important message here')\ndef announce(bot, trigger):\n \"\"\"\n Send an announcement to all channels the bot is in\n \"\"\"\n if not trigger.admin:\n bot.reply('Sorry, I can\\'t let you do that')\n return\n for channel in bot.channels:\n bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2))\n", "path": "sopel/modules/announce.py"}]}
| 884 | 90 |
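The patched command from the record above is short enough to show whole; the only change is the final `bot.reply` after the loop, so the caller gets a confirmation once the (possibly rate-limited) broadcast has reached every channel:

```python
from sopel.module import commands, example

@commands('announce')
@example('.announce Some important message here')
def announce(bot, trigger):
    """Send an announcement to all channels the bot is in."""
    if not trigger.admin:
        bot.reply("Sorry, I can't let you do that")
        return
    for channel in bot.channels:
        bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2))
    # New line from the golden diff: confirm completion to the caller.
    bot.reply('Announce complete.')
```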
gh_patches_debug_14515
|
rasdani/github-patches
|
git_diff
|
scipy__scipy-11229
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OverflowError in resample_poly (upfirdn)
scipy.signal.resample_poly fails if the output vector length would be greater than 2^31-1.
```
Traceback (most recent call last):
File "<ipython-input-1-ac5d2b0a1632>", line 11, in <module>
yy = resample_poly(y, 128, 1)
File "F:\Programs\Miniconda3\lib\site-packages\scipy\signal\signaltools.py", line 2424, in resample_poly
y = upfirdn(h, x, up, down, axis=axis)
File "F:\Programs\Miniconda3\lib\site-packages\scipy\signal\_upfirdn.py", line 183, in upfirdn
return ufd.apply_filter(x, axis)
File "F:\Programs\Miniconda3\lib\site-packages\scipy\signal\_upfirdn.py", line 82, in apply_filter
output_shape[axis] = output_len
OverflowError: Python int too large to convert to C long
```
output_shape is created on the previous line (81):
`output_shape = np.asarray(x.shape)`
With an unspecified dtype it appears to get np.int32 by default, which is inadequate for specifying large array shapes.
This could be fixed by explicitly specifying the dtype:
`output_shape = np.asarray(x.shape, dtype=np.int64)`
</issue>
<code>
[start of scipy/signal/_upfirdn.py]
1 # Code adapted from "upfirdn" python library with permission:
2 #
3 # Copyright (c) 2009, Motorola, Inc
4 #
5 # All Rights Reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met:
10 #
11 # * Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
13 #
14 # * Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
17 #
18 # * Neither the name of Motorola nor the names of its contributors may be
19 # used to endorse or promote products derived from this software without
20 # specific prior written permission.
21 #
22 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
23 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
26 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
27 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
29 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
30 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33
34 import numpy as np
35
36 from ._upfirdn_apply import _output_len, _apply, mode_enum
37
38 __all__ = ['upfirdn', '_output_len']
39
40 _upfirdn_modes = [
41 'constant', 'wrap', 'edge', 'smooth', 'symmetric', 'reflect',
42 'antisymmetric', 'antireflect', 'line',
43 ]
44
45
46 def _pad_h(h, up):
47 """Store coefficients in a transposed, flipped arrangement.
48
49 For example, suppose upRate is 3, and the
50 input number of coefficients is 10, represented as h[0], ..., h[9].
51
52 Then the internal buffer will look like this::
53
54 h[9], h[6], h[3], h[0], // flipped phase 0 coefs
55 0, h[7], h[4], h[1], // flipped phase 1 coefs (zero-padded)
56 0, h[8], h[5], h[2], // flipped phase 2 coefs (zero-padded)
57
58 """
59 h_padlen = len(h) + (-len(h) % up)
60 h_full = np.zeros(h_padlen, h.dtype)
61 h_full[:len(h)] = h
62 h_full = h_full.reshape(-1, up).T[:, ::-1].ravel()
63 return h_full
64
65
66 def _check_mode(mode):
67 mode = mode.lower()
68 enum = mode_enum(mode)
69 return enum
70
71
72 class _UpFIRDn(object):
73 def __init__(self, h, x_dtype, up, down):
74 """Helper for resampling"""
75 h = np.asarray(h)
76 if h.ndim != 1 or h.size == 0:
77 raise ValueError('h must be 1-D with non-zero length')
78 self._output_type = np.result_type(h.dtype, x_dtype, np.float32)
79 h = np.asarray(h, self._output_type)
80 self._up = int(up)
81 self._down = int(down)
82 if self._up < 1 or self._down < 1:
83 raise ValueError('Both up and down must be >= 1')
84 # This both transposes, and "flips" each phase for filtering
85 self._h_trans_flip = _pad_h(h, self._up)
86 self._h_trans_flip = np.ascontiguousarray(self._h_trans_flip)
87
88 def apply_filter(self, x, axis=-1, mode='constant', cval=0):
89 """Apply the prepared filter to the specified axis of a N-D signal x"""
90 output_len = _output_len(len(self._h_trans_flip), x.shape[axis],
91 self._up, self._down)
92 output_shape = np.asarray(x.shape)
93 output_shape[axis] = output_len
94 out = np.zeros(output_shape, dtype=self._output_type, order='C')
95 axis = axis % x.ndim
96 mode = _check_mode(mode)
97 _apply(np.asarray(x, self._output_type),
98 self._h_trans_flip, out,
99 self._up, self._down, axis, mode, cval)
100 return out
101
102
103 def upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0):
104 """Upsample, FIR filter, and downsample
105
106 Parameters
107 ----------
108 h : array_like
109 1-D FIR (finite-impulse response) filter coefficients.
110 x : array_like
111 Input signal array.
112 up : int, optional
113 Upsampling rate. Default is 1.
114 down : int, optional
115 Downsampling rate. Default is 1.
116 axis : int, optional
117 The axis of the input data array along which to apply the
118 linear filter. The filter is applied to each subarray along
119 this axis. Default is -1.
120 mode : str, optional
121 The signal extension mode to use. The set
122 ``{"constant", "symmetric", "reflect", "edge", "wrap"}`` correspond to
123 modes provided by `numpy.pad`. ``"smooth"`` implements a smooth
124 extension by extending based on the slope of the last 2 points at each
125 end of the array. ``"antireflect"`` and ``"antisymmetric"`` are
126 anti-symmetric versions of ``"reflect"`` and ``"symmetric"``. The mode
127 `"line"` extends the signal based on a linear trend defined by the
128 first and last points along the ``axis``.
129
130 .. versionadded:: 1.4.0
131 cval : float, optional
132 The constant value to use when ``mode == "constant"``.
133
134 .. versionadded:: 1.4.0
135
136 Returns
137 -------
138 y : ndarray
139 The output signal array. Dimensions will be the same as `x` except
140 for along `axis`, which will change size according to the `h`,
141 `up`, and `down` parameters.
142
143 Notes
144 -----
145 The algorithm is an implementation of the block diagram shown on page 129
146 of the Vaidyanathan text [1]_ (Figure 4.3-8d).
147
148 .. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks,
149 Prentice Hall, 1993.
150
151 The direct approach of upsampling by factor of P with zero insertion,
152 FIR filtering of length ``N``, and downsampling by factor of Q is
153 O(N*Q) per output sample. The polyphase implementation used here is
154 O(N/P).
155
156 .. versionadded:: 0.18
157
158 Examples
159 --------
160 Simple operations:
161
162 >>> from scipy.signal import upfirdn
163 >>> upfirdn([1, 1, 1], [1, 1, 1]) # FIR filter
164 array([ 1., 2., 3., 2., 1.])
165 >>> upfirdn([1], [1, 2, 3], 3) # upsampling with zeros insertion
166 array([ 1., 0., 0., 2., 0., 0., 3., 0., 0.])
167 >>> upfirdn([1, 1, 1], [1, 2, 3], 3) # upsampling with sample-and-hold
168 array([ 1., 1., 1., 2., 2., 2., 3., 3., 3.])
169 >>> upfirdn([.5, 1, .5], [1, 1, 1], 2) # linear interpolation
170 array([ 0.5, 1. , 1. , 1. , 1. , 1. , 0.5, 0. ])
171 >>> upfirdn([1], np.arange(10), 1, 3) # decimation by 3
172 array([ 0., 3., 6., 9.])
173 >>> upfirdn([.5, 1, .5], np.arange(10), 2, 3) # linear interp, rate 2/3
174 array([ 0. , 1. , 2.5, 4. , 5.5, 7. , 8.5, 0. ])
175
176 Apply a single filter to multiple signals:
177
178 >>> x = np.reshape(np.arange(8), (4, 2))
179 >>> x
180 array([[0, 1],
181 [2, 3],
182 [4, 5],
183 [6, 7]])
184
185 Apply along the last dimension of ``x``:
186
187 >>> h = [1, 1]
188 >>> upfirdn(h, x, 2)
189 array([[ 0., 0., 1., 1.],
190 [ 2., 2., 3., 3.],
191 [ 4., 4., 5., 5.],
192 [ 6., 6., 7., 7.]])
193
194 Apply along the 0th dimension of ``x``:
195
196 >>> upfirdn(h, x, 2, axis=0)
197 array([[ 0., 1.],
198 [ 0., 1.],
199 [ 2., 3.],
200 [ 2., 3.],
201 [ 4., 5.],
202 [ 4., 5.],
203 [ 6., 7.],
204 [ 6., 7.]])
205
206 """
207 x = np.asarray(x)
208 ufd = _UpFIRDn(h, x.dtype, up, down)
209 # This is equivalent to (but faster than) using np.apply_along_axis
210 return ufd.apply_filter(x, axis, mode, cval)
211
[end of scipy/signal/_upfirdn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scipy/signal/_upfirdn.py b/scipy/signal/_upfirdn.py
--- a/scipy/signal/_upfirdn.py
+++ b/scipy/signal/_upfirdn.py
@@ -89,7 +89,9 @@
"""Apply the prepared filter to the specified axis of a N-D signal x"""
output_len = _output_len(len(self._h_trans_flip), x.shape[axis],
self._up, self._down)
- output_shape = np.asarray(x.shape)
+ # Explicit use of np.int64 for output_shape dtype avoids OverflowError
+ # when allocating large array on platforms where np.int_ is 32 bits
+ output_shape = np.asarray(x.shape, dtype=np.int64)
output_shape[axis] = output_len
out = np.zeros(output_shape, dtype=self._output_type, order='C')
axis = axis % x.ndim
|
{"golden_diff": "diff --git a/scipy/signal/_upfirdn.py b/scipy/signal/_upfirdn.py\n--- a/scipy/signal/_upfirdn.py\n+++ b/scipy/signal/_upfirdn.py\n@@ -89,7 +89,9 @@\n \"\"\"Apply the prepared filter to the specified axis of a N-D signal x\"\"\"\n output_len = _output_len(len(self._h_trans_flip), x.shape[axis],\n self._up, self._down)\n- output_shape = np.asarray(x.shape)\n+ # Explicit use of np.int64 for output_shape dtype avoids OverflowError\n+ # when allocating large array on platforms where np.int_ is 32 bits\n+ output_shape = np.asarray(x.shape, dtype=np.int64)\n output_shape[axis] = output_len\n out = np.zeros(output_shape, dtype=self._output_type, order='C')\n axis = axis % x.ndim\n", "issue": "OverflowError in resample_poly (upfirdn)\nscipy.signal.resample_poly fails if the output vector length would be greater than 2^31-1.\r\n\r\n```\r\nTraceback (most recent call last):\r\n\r\n File \"<ipython-input-1-ac5d2b0a1632>\", line 11, in <module>\r\n yy = resample_poly(y, 128, 1)\r\n\r\n File \"F:\\Programs\\Miniconda3\\lib\\site-packages\\scipy\\signal\\signaltools.py\", line 2424, in resample_poly\r\n y = upfirdn(h, x, up, down, axis=axis)\r\n\r\n File \"F:\\Programs\\Miniconda3\\lib\\site-packages\\scipy\\signal\\_upfirdn.py\", line 183, in upfirdn\r\n return ufd.apply_filter(x, axis)\r\n\r\n File \"F:\\Programs\\Miniconda3\\lib\\site-packages\\scipy\\signal\\_upfirdn.py\", line 82, in apply_filter\r\n output_shape[axis] = output_len\r\n\r\nOverflowError: Python int too large to convert to C long\r\n```\r\n\r\noutput_shape is created on the previous line (81):\r\n`output_shape = np.asarray(x.shape)`\r\n\r\nWith an unspecified dtype it appears to get np.int32 by default, which is inadequate for specifying large array shapes.\r\n\r\nThis could be fixed by explicitly specifying the dtype:\r\n`output_shape = np.asarray(x.shape, dtype=np.int64)`\n", "before_files": [{"content": "# Code adapted from \"upfirdn\" python library with permission:\n#\n# Copyright (c) 2009, Motorola, Inc\n#\n# All Rights Reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# * Neither the name of Motorola nor the names of its contributors may be\n# used to endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport numpy as np\n\nfrom ._upfirdn_apply import _output_len, _apply, mode_enum\n\n__all__ = ['upfirdn', '_output_len']\n\n_upfirdn_modes = [\n 'constant', 'wrap', 'edge', 'smooth', 'symmetric', 'reflect',\n 'antisymmetric', 'antireflect', 'line',\n]\n\n\ndef _pad_h(h, up):\n \"\"\"Store coefficients in a transposed, flipped arrangement.\n\n For example, suppose upRate is 3, and the\n input number of coefficients is 10, represented as h[0], ..., h[9].\n\n Then the internal buffer will look like this::\n\n h[9], h[6], h[3], h[0], // flipped phase 0 coefs\n 0, h[7], h[4], h[1], // flipped phase 1 coefs (zero-padded)\n 0, h[8], h[5], h[2], // flipped phase 2 coefs (zero-padded)\n\n \"\"\"\n h_padlen = len(h) + (-len(h) % up)\n h_full = np.zeros(h_padlen, h.dtype)\n h_full[:len(h)] = h\n h_full = h_full.reshape(-1, up).T[:, ::-1].ravel()\n return h_full\n\n\ndef _check_mode(mode):\n mode = mode.lower()\n enum = mode_enum(mode)\n return enum\n\n\nclass _UpFIRDn(object):\n def __init__(self, h, x_dtype, up, down):\n \"\"\"Helper for resampling\"\"\"\n h = np.asarray(h)\n if h.ndim != 1 or h.size == 0:\n raise ValueError('h must be 1-D with non-zero length')\n self._output_type = np.result_type(h.dtype, x_dtype, np.float32)\n h = np.asarray(h, self._output_type)\n self._up = int(up)\n self._down = int(down)\n if self._up < 1 or self._down < 1:\n raise ValueError('Both up and down must be >= 1')\n # This both transposes, and \"flips\" each phase for filtering\n self._h_trans_flip = _pad_h(h, self._up)\n self._h_trans_flip = np.ascontiguousarray(self._h_trans_flip)\n\n def apply_filter(self, x, axis=-1, mode='constant', cval=0):\n \"\"\"Apply the prepared filter to the specified axis of a N-D signal x\"\"\"\n output_len = _output_len(len(self._h_trans_flip), x.shape[axis],\n self._up, self._down)\n output_shape = np.asarray(x.shape)\n output_shape[axis] = output_len\n out = np.zeros(output_shape, dtype=self._output_type, order='C')\n axis = axis % x.ndim\n mode = _check_mode(mode)\n _apply(np.asarray(x, self._output_type),\n self._h_trans_flip, out,\n self._up, self._down, axis, mode, cval)\n return out\n\n\ndef upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0):\n \"\"\"Upsample, FIR filter, and downsample\n\n Parameters\n ----------\n h : array_like\n 1-D FIR (finite-impulse response) filter coefficients.\n x : array_like\n Input signal array.\n up : int, optional\n Upsampling rate. Default is 1.\n down : int, optional\n Downsampling rate. Default is 1.\n axis : int, optional\n The axis of the input data array along which to apply the\n linear filter. The filter is applied to each subarray along\n this axis. Default is -1.\n mode : str, optional\n The signal extension mode to use. The set\n ``{\"constant\", \"symmetric\", \"reflect\", \"edge\", \"wrap\"}`` correspond to\n modes provided by `numpy.pad`. ``\"smooth\"`` implements a smooth\n extension by extending based on the slope of the last 2 points at each\n end of the array. 
``\"antireflect\"`` and ``\"antisymmetric\"`` are\n anti-symmetric versions of ``\"reflect\"`` and ``\"symmetric\"``. The mode\n `\"line\"` extends the signal based on a linear trend defined by the\n first and last points along the ``axis``.\n\n .. versionadded:: 1.4.0\n cval : float, optional\n The constant value to use when ``mode == \"constant\"``.\n\n .. versionadded:: 1.4.0\n\n Returns\n -------\n y : ndarray\n The output signal array. Dimensions will be the same as `x` except\n for along `axis`, which will change size according to the `h`,\n `up`, and `down` parameters.\n\n Notes\n -----\n The algorithm is an implementation of the block diagram shown on page 129\n of the Vaidyanathan text [1]_ (Figure 4.3-8d).\n\n .. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks,\n Prentice Hall, 1993.\n\n The direct approach of upsampling by factor of P with zero insertion,\n FIR filtering of length ``N``, and downsampling by factor of Q is\n O(N*Q) per output sample. The polyphase implementation used here is\n O(N/P).\n\n .. versionadded:: 0.18\n\n Examples\n --------\n Simple operations:\n\n >>> from scipy.signal import upfirdn\n >>> upfirdn([1, 1, 1], [1, 1, 1]) # FIR filter\n array([ 1., 2., 3., 2., 1.])\n >>> upfirdn([1], [1, 2, 3], 3) # upsampling with zeros insertion\n array([ 1., 0., 0., 2., 0., 0., 3., 0., 0.])\n >>> upfirdn([1, 1, 1], [1, 2, 3], 3) # upsampling with sample-and-hold\n array([ 1., 1., 1., 2., 2., 2., 3., 3., 3.])\n >>> upfirdn([.5, 1, .5], [1, 1, 1], 2) # linear interpolation\n array([ 0.5, 1. , 1. , 1. , 1. , 1. , 0.5, 0. ])\n >>> upfirdn([1], np.arange(10), 1, 3) # decimation by 3\n array([ 0., 3., 6., 9.])\n >>> upfirdn([.5, 1, .5], np.arange(10), 2, 3) # linear interp, rate 2/3\n array([ 0. , 1. , 2.5, 4. , 5.5, 7. , 8.5, 0. ])\n\n Apply a single filter to multiple signals:\n\n >>> x = np.reshape(np.arange(8), (4, 2))\n >>> x\n array([[0, 1],\n [2, 3],\n [4, 5],\n [6, 7]])\n\n Apply along the last dimension of ``x``:\n\n >>> h = [1, 1]\n >>> upfirdn(h, x, 2)\n array([[ 0., 0., 1., 1.],\n [ 2., 2., 3., 3.],\n [ 4., 4., 5., 5.],\n [ 6., 6., 7., 7.]])\n\n Apply along the 0th dimension of ``x``:\n\n >>> upfirdn(h, x, 2, axis=0)\n array([[ 0., 1.],\n [ 0., 1.],\n [ 2., 3.],\n [ 2., 3.],\n [ 4., 5.],\n [ 4., 5.],\n [ 6., 7.],\n [ 6., 7.]])\n\n \"\"\"\n x = np.asarray(x)\n ufd = _UpFIRDn(h, x.dtype, up, down)\n # This is equivalent to (but faster than) using np.apply_along_axis\n return ufd.apply_filter(x, axis, mode, cval)\n", "path": "scipy/signal/_upfirdn.py"}]}
| 3,748 | 206 |
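A quick way to see why the scipy fix in the record above pins the shape array to int64: on platforms where NumPy's default integer is 32-bit (notably Windows), `np.asarray(x.shape)` produces an int32 array, and an output length above 2**31 - 1 cannot be stored in it. The sketch below is illustrative only, assumes nothing beyond NumPy, and does not allocate any large array; depending on NumPy version and platform the int32 assignment either raises OverflowError or wraps.

```python
import numpy as np

big_len = 2 ** 31  # one past the int32 maximum of 2**31 - 1

# A 32-bit shape buffer, as np.asarray(x.shape) can yield on Windows builds.
shape32 = np.asarray((10,), dtype=np.int32)
try:
    shape32[0] = big_len
    # Older NumPy releases may wrap silently instead of raising.
    print("int32 shape wrapped to", shape32[0])
except OverflowError as exc:
    print("int32 shape rejects the length:", exc)

# Forcing int64, as the patch does, holds the length without trouble.
shape64 = np.asarray((10,), dtype=np.int64)
shape64[0] = big_len
print("int64 shape holds", shape64[0])
```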
gh_patches_debug_60583 | rasdani/github-patches | git_diff | fonttools__fonttools-1715 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ascender and ascent
The [opentype spec ](https://docs.microsoft.com/en-gb/typography/opentype/spec/hhea) calls the first two substantive entries in the `hhea` table "`ascender`" and "`descender`". fonttools calls them "`ascent`" and "`descent`".
This was surprising! Maybe it's too late to change then but can we at least have an alias?
</issue>
<code>
[start of Lib/fontTools/ttLib/tables/_h_h_e_a.py]
1 from fontTools.misc.py23 import *
2 from fontTools.misc import sstruct
3 from fontTools.misc.textTools import safeEval
4 from fontTools.misc.fixedTools import (
5 ensureVersionIsLong as fi2ve, versionToFixed as ve2fi)
6 from . import DefaultTable
7 import math
8
9
10 hheaFormat = """
11 > # big endian
12 tableVersion: L
13 ascent: h
14 descent: h
15 lineGap: h
16 advanceWidthMax: H
17 minLeftSideBearing: h
18 minRightSideBearing: h
19 xMaxExtent: h
20 caretSlopeRise: h
21 caretSlopeRun: h
22 caretOffset: h
23 reserved0: h
24 reserved1: h
25 reserved2: h
26 reserved3: h
27 metricDataFormat: h
28 numberOfHMetrics: H
29 """
30
31
32 class table__h_h_e_a(DefaultTable.DefaultTable):
33
34 # Note: Keep in sync with table__v_h_e_a
35
36 dependencies = ['hmtx', 'glyf', 'CFF ']
37
38 def decompile(self, data, ttFont):
39 sstruct.unpack(hheaFormat, data, self)
40
41 def compile(self, ttFont):
42 if ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ')):
43 self.recalc(ttFont)
44 self.tableVersion = fi2ve(self.tableVersion)
45 return sstruct.pack(hheaFormat, self)
46
47 def recalc(self, ttFont):
48 if 'hmtx' in ttFont:
49 hmtxTable = ttFont['hmtx']
50 self.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values())
51
52 boundsWidthDict = {}
53 if 'glyf' in ttFont:
54 glyfTable = ttFont['glyf']
55 for name in ttFont.getGlyphOrder():
56 g = glyfTable[name]
57 if g.numberOfContours == 0:
58 continue
59 if g.numberOfContours < 0 and not hasattr(g, "xMax"):
60 # Composite glyph without extents set.
61 # Calculate those.
62 g.recalcBounds(glyfTable)
63 boundsWidthDict[name] = g.xMax - g.xMin
64 elif 'CFF ' in ttFont:
65 topDict = ttFont['CFF '].cff.topDictIndex[0]
66 charStrings = topDict.CharStrings
67 for name in ttFont.getGlyphOrder():
68 cs = charStrings[name]
69 bounds = cs.calcBounds(charStrings)
70 if bounds is not None:
71 boundsWidthDict[name] = int(
72 math.ceil(bounds[2]) - math.floor(bounds[0]))
73
74 if boundsWidthDict:
75 minLeftSideBearing = float('inf')
76 minRightSideBearing = float('inf')
77 xMaxExtent = -float('inf')
78 for name, boundsWidth in boundsWidthDict.items():
79 advanceWidth, lsb = hmtxTable[name]
80 rsb = advanceWidth - lsb - boundsWidth
81 extent = lsb + boundsWidth
82 minLeftSideBearing = min(minLeftSideBearing, lsb)
83 minRightSideBearing = min(minRightSideBearing, rsb)
84 xMaxExtent = max(xMaxExtent, extent)
85 self.minLeftSideBearing = minLeftSideBearing
86 self.minRightSideBearing = minRightSideBearing
87 self.xMaxExtent = xMaxExtent
88
89 else: # No glyph has outlines.
90 self.minLeftSideBearing = 0
91 self.minRightSideBearing = 0
92 self.xMaxExtent = 0
93
94 def toXML(self, writer, ttFont):
95 formatstring, names, fixes = sstruct.getformat(hheaFormat)
96 for name in names:
97 value = getattr(self, name)
98 if name == "tableVersion":
99 value = fi2ve(value)
100 value = "0x%08x" % value
101 writer.simpletag(name, value=value)
102 writer.newline()
103
104 def fromXML(self, name, attrs, content, ttFont):
105 if name == "tableVersion":
106 setattr(self, name, ve2fi(attrs["value"]))
107 return
108 setattr(self, name, safeEval(attrs["value"]))
109
[end of Lib/fontTools/ttLib/tables/_h_h_e_a.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/Lib/fontTools/ttLib/tables/_h_h_e_a.py b/Lib/fontTools/ttLib/tables/_h_h_e_a.py
--- a/Lib/fontTools/ttLib/tables/_h_h_e_a.py
+++ b/Lib/fontTools/ttLib/tables/_h_h_e_a.py
@@ -35,6 +35,19 @@
dependencies = ['hmtx', 'glyf', 'CFF ']
+ # OpenType spec renamed these, add aliases for compatibility
+ @property
+ def ascender(self): return self.ascent
+
+ @ascender.setter
+ def ascender(self,value): self.ascent = value
+
+ @property
+ def descender(self): return self.descent
+
+ @descender.setter
+ def descender(self,value): self.descent = value
+
def decompile(self, data, ttFont):
sstruct.unpack(hheaFormat, data, self)
|
{"golden_diff": "diff --git a/Lib/fontTools/ttLib/tables/_h_h_e_a.py b/Lib/fontTools/ttLib/tables/_h_h_e_a.py\n--- a/Lib/fontTools/ttLib/tables/_h_h_e_a.py\n+++ b/Lib/fontTools/ttLib/tables/_h_h_e_a.py\n@@ -35,6 +35,19 @@\n \n \tdependencies = ['hmtx', 'glyf', 'CFF ']\n \n+\t# OpenType spec renamed these, add aliases for compatibility\n+\t@property\n+\tdef ascender(self): return self.ascent\n+\n+\[email protected]\n+\tdef ascender(self,value): self.ascent = value\n+\n+\t@property\n+\tdef descender(self): return self.descent\n+\n+\[email protected]\n+\tdef descender(self,value): self.descent = value\n+\n \tdef decompile(self, data, ttFont):\n \t\tsstruct.unpack(hheaFormat, data, self)\n", "issue": "ascender and ascent\nThe [opentype spec ](https://docs.microsoft.com/en-gb/typography/opentype/spec/hhea) calls the first two substantive entries in the `hhea` table \"`ascender`\" and \"`descender`\". fonttools calls them \"`ascent`\" and \"`descent`\".\r\n\r\nThis was surprising! Maybe it's too late to change then but can we at least have an alias?\n", "before_files": [{"content": "from fontTools.misc.py23 import *\nfrom fontTools.misc import sstruct\nfrom fontTools.misc.textTools import safeEval\nfrom fontTools.misc.fixedTools import (\n\tensureVersionIsLong as fi2ve, versionToFixed as ve2fi)\nfrom . import DefaultTable\nimport math\n\n\nhheaFormat = \"\"\"\n\t\t> # big endian\n\t\ttableVersion: L\n\t\tascent: h\n\t\tdescent: h\n\t\tlineGap: h\n\t\tadvanceWidthMax: H\n\t\tminLeftSideBearing: h\n\t\tminRightSideBearing: h\n\t\txMaxExtent: h\n\t\tcaretSlopeRise: h\n\t\tcaretSlopeRun: h\n\t\tcaretOffset: h\n\t\treserved0: h\n\t\treserved1: h\n\t\treserved2: h\n\t\treserved3: h\n\t\tmetricDataFormat: h\n\t\tnumberOfHMetrics: H\n\"\"\"\n\n\nclass table__h_h_e_a(DefaultTable.DefaultTable):\n\n\t# Note: Keep in sync with table__v_h_e_a\n\n\tdependencies = ['hmtx', 'glyf', 'CFF ']\n\n\tdef decompile(self, data, ttFont):\n\t\tsstruct.unpack(hheaFormat, data, self)\n\n\tdef compile(self, ttFont):\n\t\tif ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ')):\n\t\t\tself.recalc(ttFont)\n\t\tself.tableVersion = fi2ve(self.tableVersion)\n\t\treturn sstruct.pack(hheaFormat, self)\n\n\tdef recalc(self, ttFont):\n\t\tif 'hmtx' in ttFont:\n\t\t\thmtxTable = ttFont['hmtx']\n\t\t\tself.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values())\n\n\t\tboundsWidthDict = {}\n\t\tif 'glyf' in ttFont:\n\t\t\tglyfTable = ttFont['glyf']\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\tg = glyfTable[name]\n\t\t\t\tif g.numberOfContours == 0:\n\t\t\t\t\tcontinue\n\t\t\t\tif g.numberOfContours < 0 and not hasattr(g, \"xMax\"):\n\t\t\t\t\t# Composite glyph without extents set.\n\t\t\t\t\t# Calculate those.\n\t\t\t\t\tg.recalcBounds(glyfTable)\n\t\t\t\tboundsWidthDict[name] = g.xMax - g.xMin\n\t\telif 'CFF ' in ttFont:\n\t\t\ttopDict = ttFont['CFF '].cff.topDictIndex[0]\n\t\t\tcharStrings = topDict.CharStrings\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\tcs = charStrings[name]\n\t\t\t\tbounds = cs.calcBounds(charStrings)\n\t\t\t\tif bounds is not None:\n\t\t\t\t\tboundsWidthDict[name] = int(\n\t\t\t\t\t\tmath.ceil(bounds[2]) - math.floor(bounds[0]))\n\n\t\tif boundsWidthDict:\n\t\t\tminLeftSideBearing = float('inf')\n\t\t\tminRightSideBearing = float('inf')\n\t\t\txMaxExtent = -float('inf')\n\t\t\tfor name, boundsWidth in boundsWidthDict.items():\n\t\t\t\tadvanceWidth, lsb = hmtxTable[name]\n\t\t\t\trsb = advanceWidth - lsb - boundsWidth\n\t\t\t\textent = lsb + 
boundsWidth\n\t\t\t\tminLeftSideBearing = min(minLeftSideBearing, lsb)\n\t\t\t\tminRightSideBearing = min(minRightSideBearing, rsb)\n\t\t\t\txMaxExtent = max(xMaxExtent, extent)\n\t\t\tself.minLeftSideBearing = minLeftSideBearing\n\t\t\tself.minRightSideBearing = minRightSideBearing\n\t\t\tself.xMaxExtent = xMaxExtent\n\n\t\telse: # No glyph has outlines.\n\t\t\tself.minLeftSideBearing = 0\n\t\t\tself.minRightSideBearing = 0\n\t\t\tself.xMaxExtent = 0\n\n\tdef toXML(self, writer, ttFont):\n\t\tformatstring, names, fixes = sstruct.getformat(hheaFormat)\n\t\tfor name in names:\n\t\t\tvalue = getattr(self, name)\n\t\t\tif name == \"tableVersion\":\n\t\t\t\tvalue = fi2ve(value)\n\t\t\t\tvalue = \"0x%08x\" % value\n\t\t\twriter.simpletag(name, value=value)\n\t\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tif name == \"tableVersion\":\n\t\t\tsetattr(self, name, ve2fi(attrs[\"value\"]))\n\t\t\treturn\n\t\tsetattr(self, name, safeEval(attrs[\"value\"]))\n", "path": "Lib/fontTools/ttLib/tables/_h_h_e_a.py"}]}
| 1,845 | 215 |
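The fontTools change above is an instance of a general pattern: exposing a second, spec-compliant attribute name as a read/write property that delegates to the existing field, so both spellings stay in sync. A minimal sketch of that pattern follows, using an illustrative `HheaLike` class rather than fontTools' real table object.

```python
class HheaLike:
    """Toy stand-in for a table object whose legacy fields are ascent/descent."""

    def __init__(self, ascent=800, descent=-200):
        self.ascent = ascent
        self.descent = descent

    @property
    def ascender(self):          # spec-style alias, reads the legacy field
        return self.ascent

    @ascender.setter
    def ascender(self, value):   # writes go to the same storage
        self.ascent = value

    @property
    def descender(self):
        return self.descent

    @descender.setter
    def descender(self, value):
        self.descent = value


table = HheaLike()
table.ascender = 950
assert table.ascent == 950        # both names observe the same value
assert table.descender == table.descent
```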
gh_patches_debug_16206 | rasdani/github-patches | git_diff | frappe__frappe-15552 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
S3 automatic backup fails when backup folder is empty
<!--
Welcome to the Frappe Framework issue tracker! Before creating an issue, please heed the following:
1. This tracker should only be used to report bugs and request features / enhancements to Frappe
- For questions and general support, use https://stackoverflow.com/questions/tagged/frappe
- For documentation issues, refer to https://frappeframework.com/docs/user/en or the developer cheetsheet https://github.com/frappe/frappe/wiki/Developer-Cheatsheet
2. Use the search function before creating a new issue. Duplicates will be closed and directed to
the original discussion.
3. When making a bug report, make sure you provide all required information. The easier it is for
maintainers to reproduce, the faster it'll be fixed.
4. If you think you know what the reason for the bug is, share it with us. Maybe put in a PR 😉
-->
## Description of the issue
`frappe.integrations.doctype.s3_backup_settings.s3_backup_settings.take_backups_s3` fails when the backup folder (site_name/private/backups/) is empty when the function is called.
This occurs because `frappe.integrations.offsite_backup_utils.get_file_size` does not anticipate a situation where its `file_path` argument is invalid i.e `None`.
This bug affects v13 and v12.
## Context information (for bug reports)
**Output of `bench version`**
```
frappe 12.24.0
```
## Steps to reproduce the issue
1. Delete _all_ files in the backup folder
2. Call `take_backups_s3`
3.
### Observed result
When the scheduled backup task runs, you will receive an email with this kind of stack trace:
```
*Backup Upload Failed!*
Oops, your automated backup to Amazon S3 failed.
Error message: Traceback (most recent call last): File "/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/doctype/s3_backup_settings/s3_backup_settings.py", line 83, in take_backups_s3 validate_file_size() File "/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/offsite_backup_utils.py", line 105, in validate_file_size file_size = get_file_size(latest_file, unit="GB") File "/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/offsite_backup_utils.py", line 75, in get_file_size file_size = os.path.getsize(file_path) File "/usr/lib/python3.7/genericpath.py", line 50, in getsize return os.stat(filename).st_size TypeError: stat: path should be string, bytes, os.PathLike or integer, not NoneType
Please contact your system manager for more information.
```
If you call it in your console, it will fail silently but you will observe that no backup file is created nor uploaded to S3.
### Expected result
### Stacktrace / full error message
```
Traceback (most recent call last): File "/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/doctype/s3_backup_settings/s3_backup_settings.py", line 83, in take_backups_s3 validate_file_size() File "/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/offsite_backup_utils.py", line 105, in validate_file_size file_size = get_file_size(latest_file, unit="GB") File "/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/offsite_backup_utils.py", line 75, in get_file_size file_size = os.path.getsize(file_path) File "/usr/lib/python3.7/genericpath.py", line 50, in getsize return os.stat(filename).st_size TypeError: stat: path should be string, bytes, os.PathLike or integer, not NoneType
```
## Additional information
OS version / distribution, `Frappe` install method, etc.
</issue>
<code>
[start of frappe/integrations/offsite_backup_utils.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2019, Frappe Technologies and contributors
3 # License: MIT. See LICENSE
4
5 import frappe
6 import glob
7 import os
8 from frappe.utils import split_emails, cint
9
10 def send_email(success, service_name, doctype, email_field, error_status=None):
11 recipients = get_recipients(doctype, email_field)
12 if not recipients:
13 frappe.log_error(
14 "No Email Recipient found for {0}".format(service_name),
15 "{0}: Failed to send backup status email".format(service_name),
16 )
17 return
18
19 if success:
20 if not frappe.db.get_single_value(doctype, "send_email_for_successful_backup"):
21 return
22
23 subject = "Backup Upload Successful"
24 message = """
25 <h3>Backup Uploaded Successfully!</h3>
26 <p>Hi there, this is just to inform you that your backup was successfully uploaded to your {0} bucket. So relax!</p>""".format(
27 service_name
28 )
29 else:
30 subject = "[Warning] Backup Upload Failed"
31 message = """
32 <h3>Backup Upload Failed!</h3>
33 <p>Oops, your automated backup to {0} failed.</p>
34 <p>Error message: {1}</p>
35 <p>Please contact your system manager for more information.</p>""".format(
36 service_name, error_status
37 )
38
39 frappe.sendmail(recipients=recipients, subject=subject, message=message)
40
41
42 def get_recipients(doctype, email_field):
43 if not frappe.db:
44 frappe.connect()
45
46 return split_emails(frappe.db.get_value(doctype, None, email_field))
47
48
49 def get_latest_backup_file(with_files=False):
50 from frappe.utils.backups import BackupGenerator
51
52 odb = BackupGenerator(
53 frappe.conf.db_name,
54 frappe.conf.db_name,
55 frappe.conf.db_password,
56 db_host=frappe.db.host,
57 db_type=frappe.conf.db_type,
58 db_port=frappe.conf.db_port,
59 )
60 database, public, private, config = odb.get_recent_backup(older_than=24 * 30)
61
62 if with_files:
63 return database, config, public, private
64
65 return database, config
66
67
68 def get_file_size(file_path, unit):
69 if not unit:
70 unit = "MB"
71
72 file_size = os.path.getsize(file_path)
73
74 memory_size_unit_mapper = {"KB": 1, "MB": 2, "GB": 3, "TB": 4}
75 i = 0
76 while i < memory_size_unit_mapper[unit]:
77 file_size = file_size / 1000.0
78 i += 1
79
80 return file_size
81
82 def get_chunk_site(file_size):
83 ''' this function will return chunk size in megabytes based on file size '''
84
85 file_size_in_gb = cint(file_size/1024/1024)
86
87 MB = 1024 * 1024
88 if file_size_in_gb > 5000:
89 return 200 * MB
90 elif file_size_in_gb >= 3000:
91 return 150 * MB
92 elif file_size_in_gb >= 1000:
93 return 100 * MB
94 elif file_size_in_gb >= 500:
95 return 50 * MB
96 else:
97 return 15 * MB
98
99 def validate_file_size():
100 frappe.flags.create_new_backup = True
101 latest_file, site_config = get_latest_backup_file()
102 file_size = get_file_size(latest_file, unit="GB")
103
104 if file_size > 1:
105 frappe.flags.create_new_backup = False
106
107 def generate_files_backup():
108 from frappe.utils.backups import BackupGenerator
109
110 backup = BackupGenerator(frappe.conf.db_name, frappe.conf.db_name,
111 frappe.conf.db_password, db_host = frappe.db.host,
112 db_type=frappe.conf.db_type, db_port=frappe.conf.db_port)
113
114 backup.set_backup_file_name()
115 backup.zip_files()
116
[end of frappe/integrations/offsite_backup_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/frappe/integrations/offsite_backup_utils.py b/frappe/integrations/offsite_backup_utils.py
--- a/frappe/integrations/offsite_backup_utils.py
+++ b/frappe/integrations/offsite_backup_utils.py
@@ -65,10 +65,7 @@
return database, config
-def get_file_size(file_path, unit):
- if not unit:
- unit = "MB"
-
+def get_file_size(file_path, unit='MB'):
file_size = os.path.getsize(file_path)
memory_size_unit_mapper = {"KB": 1, "MB": 2, "GB": 3, "TB": 4}
@@ -99,7 +96,7 @@
def validate_file_size():
frappe.flags.create_new_backup = True
latest_file, site_config = get_latest_backup_file()
- file_size = get_file_size(latest_file, unit="GB")
+ file_size = get_file_size(latest_file, unit="GB") if latest_file else 0
if file_size > 1:
frappe.flags.create_new_backup = False
|
{"golden_diff": "diff --git a/frappe/integrations/offsite_backup_utils.py b/frappe/integrations/offsite_backup_utils.py\n--- a/frappe/integrations/offsite_backup_utils.py\n+++ b/frappe/integrations/offsite_backup_utils.py\n@@ -65,10 +65,7 @@\n \treturn database, config\n \n \n-def get_file_size(file_path, unit):\n-\tif not unit:\n-\t\tunit = \"MB\"\n-\n+def get_file_size(file_path, unit='MB'):\n \tfile_size = os.path.getsize(file_path)\n \n \tmemory_size_unit_mapper = {\"KB\": 1, \"MB\": 2, \"GB\": 3, \"TB\": 4}\n@@ -99,7 +96,7 @@\n def validate_file_size():\n \tfrappe.flags.create_new_backup = True\n \tlatest_file, site_config = get_latest_backup_file()\n-\tfile_size = get_file_size(latest_file, unit=\"GB\")\n+\tfile_size = get_file_size(latest_file, unit=\"GB\") if latest_file else 0\n \n \tif file_size > 1:\n \t\tfrappe.flags.create_new_backup = False\n", "issue": "S3 automatic backup fails when backup folder is empty\n<!--\r\nWelcome to the Frappe Framework issue tracker! Before creating an issue, please heed the following:\r\n\r\n1. This tracker should only be used to report bugs and request features / enhancements to Frappe\r\n - For questions and general support, use https://stackoverflow.com/questions/tagged/frappe\r\n - For documentation issues, refer to https://frappeframework.com/docs/user/en or the developer cheetsheet https://github.com/frappe/frappe/wiki/Developer-Cheatsheet\r\n2. Use the search function before creating a new issue. Duplicates will be closed and directed to\r\n the original discussion.\r\n3. When making a bug report, make sure you provide all required information. The easier it is for\r\n maintainers to reproduce, the faster it'll be fixed.\r\n4. If you think you know what the reason for the bug is, share it with us. Maybe put in a PR \ud83d\ude09\r\n-->\r\n\r\n## Description of the issue\r\n`frappe.integrations.doctype.s3_backup_settings.s3_backup_settings.take_backups_s3` fails when the backup folder (site_name/private/backups/) is empty when the function is called. \r\n\r\nThis occurs because `frappe.integrations.offsite_backup_utils.get_file_size` does not anticipate a situation where its `file_path` argument is invalid i.e `None`.\r\n\r\nThis bug affects v13 and v12.\r\n\r\n## Context information (for bug reports)\r\n\r\n**Output of `bench version`**\r\n```\r\nfrappe 12.24.0\r\n```\r\n\r\n## Steps to reproduce the issue\r\n\r\n1. Delete _all_ files in the backup folder\r\n2. 
Call `take_backups_s3`\r\n3.\r\n\r\n### Observed result\r\nWhen the scheduled backup task runs, you will receive an email with this kind of stack trace:\r\n```\r\n*Backup Upload Failed!*\r\n\r\nOops, your automated backup to Amazon S3 failed.\r\n\r\nError message: Traceback (most recent call last): File \"/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/doctype/s3_backup_settings/s3_backup_settings.py\", line 83, in take_backups_s3 validate_file_size() File \"/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/offsite_backup_utils.py\", line 105, in validate_file_size file_size = get_file_size(latest_file, unit=\"GB\") File \"/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/offsite_backup_utils.py\", line 75, in get_file_size file_size = os.path.getsize(file_path) File \"/usr/lib/python3.7/genericpath.py\", line 50, in getsize return os.stat(filename).st_size TypeError: stat: path should be string, bytes, os.PathLike or integer, not NoneType\r\n\r\nPlease contact your system manager for more information.\r\n```\r\nIf you call it in your console, it will fail silently but you will observe that no backup file is created nor uploaded to S3.\r\n\r\n### Expected result\r\n\r\n\r\n### Stacktrace / full error message\r\n\r\n```\r\nTraceback (most recent call last): File \"/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/doctype/s3_backup_settings/s3_backup_settings.py\", line 83, in take_backups_s3 validate_file_size() File \"/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/offsite_backup_utils.py\", line 105, in validate_file_size file_size = get_file_size(latest_file, unit=\"GB\") File \"/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/offsite_backup_utils.py\", line 75, in get_file_size file_size = os.path.getsize(file_path) File \"/usr/lib/python3.7/genericpath.py\", line 50, in getsize return os.stat(filename).st_size TypeError: stat: path should be string, bytes, os.PathLike or integer, not NoneType\r\n```\r\n\r\n## Additional information\r\n\r\nOS version / distribution, `Frappe` install method, etc.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2019, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\nimport frappe\nimport glob\nimport os\nfrom frappe.utils import split_emails, cint\n\ndef send_email(success, service_name, doctype, email_field, error_status=None):\n\trecipients = get_recipients(doctype, email_field)\n\tif not recipients:\n\t\tfrappe.log_error(\n\t\t\t\"No Email Recipient found for {0}\".format(service_name),\n\t\t\t\"{0}: Failed to send backup status email\".format(service_name),\n\t\t)\n\t\treturn\n\n\tif success:\n\t\tif not frappe.db.get_single_value(doctype, \"send_email_for_successful_backup\"):\n\t\t\treturn\n\n\t\tsubject = \"Backup Upload Successful\"\n\t\tmessage = \"\"\"\n<h3>Backup Uploaded Successfully!</h3>\n<p>Hi there, this is just to inform you that your backup was successfully uploaded to your {0} bucket. 
So relax!</p>\"\"\".format(\n\t\t\tservice_name\n\t\t)\n\telse:\n\t\tsubject = \"[Warning] Backup Upload Failed\"\n\t\tmessage = \"\"\"\n<h3>Backup Upload Failed!</h3>\n<p>Oops, your automated backup to {0} failed.</p>\n<p>Error message: {1}</p>\n<p>Please contact your system manager for more information.</p>\"\"\".format(\n\t\t\tservice_name, error_status\n\t\t)\n\n\tfrappe.sendmail(recipients=recipients, subject=subject, message=message)\n\n\ndef get_recipients(doctype, email_field):\n\tif not frappe.db:\n\t\tfrappe.connect()\n\n\treturn split_emails(frappe.db.get_value(doctype, None, email_field))\n\n\ndef get_latest_backup_file(with_files=False):\n\tfrom frappe.utils.backups import BackupGenerator\n\n\todb = BackupGenerator(\n\t\tfrappe.conf.db_name,\n\t\tfrappe.conf.db_name,\n\t\tfrappe.conf.db_password,\n\t\tdb_host=frappe.db.host,\n\t\tdb_type=frappe.conf.db_type,\n\t\tdb_port=frappe.conf.db_port,\n\t)\n\tdatabase, public, private, config = odb.get_recent_backup(older_than=24 * 30)\n\n\tif with_files:\n\t\treturn database, config, public, private\n\n\treturn database, config\n\n\ndef get_file_size(file_path, unit):\n\tif not unit:\n\t\tunit = \"MB\"\n\n\tfile_size = os.path.getsize(file_path)\n\n\tmemory_size_unit_mapper = {\"KB\": 1, \"MB\": 2, \"GB\": 3, \"TB\": 4}\n\ti = 0\n\twhile i < memory_size_unit_mapper[unit]:\n\t\tfile_size = file_size / 1000.0\n\t\ti += 1\n\n\treturn file_size\n\ndef get_chunk_site(file_size):\n\t''' this function will return chunk size in megabytes based on file size '''\n\n\tfile_size_in_gb = cint(file_size/1024/1024)\n\n\tMB = 1024 * 1024\n\tif file_size_in_gb > 5000:\n\t\treturn 200 * MB\n\telif file_size_in_gb >= 3000:\n\t\treturn 150 * MB\n\telif file_size_in_gb >= 1000:\n\t\treturn 100 * MB\n\telif file_size_in_gb >= 500:\n\t\treturn 50 * MB\n\telse:\n\t\treturn 15 * MB\n\ndef validate_file_size():\n\tfrappe.flags.create_new_backup = True\n\tlatest_file, site_config = get_latest_backup_file()\n\tfile_size = get_file_size(latest_file, unit=\"GB\")\n\n\tif file_size > 1:\n\t\tfrappe.flags.create_new_backup = False\n\ndef generate_files_backup():\n\tfrom frappe.utils.backups import BackupGenerator\n\n\tbackup = BackupGenerator(frappe.conf.db_name, frappe.conf.db_name,\n\t\tfrappe.conf.db_password, db_host = frappe.db.host,\n\t\tdb_type=frappe.conf.db_type, db_port=frappe.conf.db_port)\n\n\tbackup.set_backup_file_name()\n\tbackup.zip_files()\n", "path": "frappe/integrations/offsite_backup_utils.py"}]}
| 2,570 | 246 |
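The frappe patch above amounts to refusing to call `os.path.getsize` on a missing path. Below is a hedged, standalone sketch of that guard; the helper name, unit table and temp-file demo are illustrative, not Frappe's actual API, though the divide-by-1000 unit handling mirrors the record above.

```python
import os
import tempfile
from typing import Optional

_UNIT_STEPS = {"KB": 1, "MB": 2, "GB": 3, "TB": 4}

def safe_file_size(file_path: Optional[str], unit: str = "MB") -> float:
    """Return the file size in `unit`, or 0.0 when there is no file to measure."""
    if not file_path:                      # None or empty: no backup exists yet
        return 0.0
    size = float(os.path.getsize(file_path))
    for _ in range(_UNIT_STEPS[unit]):
        size /= 1000.0
    return size

print(safe_file_size(None, unit="GB"))      # 0.0 instead of a TypeError

with tempfile.NamedTemporaryFile(delete=False) as fh:
    fh.write(b"x" * 2048)
    tmp_path = fh.name
print(safe_file_size(tmp_path, unit="KB"))  # roughly 2.05
os.remove(tmp_path)
```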
gh_patches_debug_8925 | rasdani/github-patches | git_diff | freqtrade__freqtrade-3200 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docker image making logfiles in user_data
docker image places error logfiles in user_data by default. (apparently it should not be doing that)
Maybe cud have it put them in a log dir?
docker-compose.yml
command: >
trade
--logfile /freqtrade/user_data/freqtrade.log
can maybe be changed to
--logfile /freqtrade/user_data/log/freqtrade.log
</issue>
<code>
[start of freqtrade/configuration/directory_operations.py]
1 import logging
2 import shutil
3 from pathlib import Path
4 from typing import Any, Dict, Optional
5
6 from freqtrade.exceptions import OperationalException
7 from freqtrade.constants import USER_DATA_FILES
8
9 logger = logging.getLogger(__name__)
10
11
12 def create_datadir(config: Dict[str, Any], datadir: Optional[str] = None) -> Path:
13
14 folder = Path(datadir) if datadir else Path(f"{config['user_data_dir']}/data")
15 if not datadir:
16 # set datadir
17 exchange_name = config.get('exchange', {}).get('name').lower()
18 folder = folder.joinpath(exchange_name)
19
20 if not folder.is_dir():
21 folder.mkdir(parents=True)
22 logger.info(f'Created data directory: {datadir}')
23 return folder
24
25
26 def create_userdata_dir(directory: str, create_dir: bool = False) -> Path:
27 """
28 Create userdata directory structure.
29 if create_dir is True, then the parent-directory will be created if it does not exist.
30 Sub-directories will always be created if the parent directory exists.
31 Raises OperationalException if given a non-existing directory.
32 :param directory: Directory to check
33 :param create_dir: Create directory if it does not exist.
34 :return: Path object containing the directory
35 """
36 sub_dirs = ["backtest_results", "data", "hyperopts", "hyperopt_results", "notebooks",
37 "plot", "strategies", ]
38 folder = Path(directory)
39 if not folder.is_dir():
40 if create_dir:
41 folder.mkdir(parents=True)
42 logger.info(f'Created user-data directory: {folder}')
43 else:
44 raise OperationalException(
45 f"Directory `{folder}` does not exist. "
46 "Please use `freqtrade create-userdir` to create a user directory")
47
48 # Create required subdirectories
49 for f in sub_dirs:
50 subfolder = folder / f
51 if not subfolder.is_dir():
52 subfolder.mkdir(parents=False)
53 return folder
54
55
56 def copy_sample_files(directory: Path, overwrite: bool = False) -> None:
57 """
58 Copy files from templates to User data directory.
59 :param directory: Directory to copy data to
60 :param overwrite: Overwrite existing sample files
61 """
62 if not directory.is_dir():
63 raise OperationalException(f"Directory `{directory}` does not exist.")
64 sourcedir = Path(__file__).parents[1] / "templates"
65 for source, target in USER_DATA_FILES.items():
66 targetdir = directory / target
67 if not targetdir.is_dir():
68 raise OperationalException(f"Directory `{targetdir}` does not exist.")
69 targetfile = targetdir / source
70 if targetfile.exists():
71 if not overwrite:
72 logger.warning(f"File `{targetfile}` exists already, not deploying sample file.")
73 continue
74 else:
75 logger.warning(f"File `{targetfile}` exists already, overwriting.")
76 shutil.copy(str(sourcedir / source), str(targetfile))
77
[end of freqtrade/configuration/directory_operations.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/freqtrade/configuration/directory_operations.py b/freqtrade/configuration/directory_operations.py
--- a/freqtrade/configuration/directory_operations.py
+++ b/freqtrade/configuration/directory_operations.py
@@ -33,8 +33,8 @@
:param create_dir: Create directory if it does not exist.
:return: Path object containing the directory
"""
- sub_dirs = ["backtest_results", "data", "hyperopts", "hyperopt_results", "notebooks",
- "plot", "strategies", ]
+ sub_dirs = ["backtest_results", "data", "hyperopts", "hyperopt_results", "logs",
+ "notebooks", "plot", "strategies", ]
folder = Path(directory)
if not folder.is_dir():
if create_dir:
|
{"golden_diff": "diff --git a/freqtrade/configuration/directory_operations.py b/freqtrade/configuration/directory_operations.py\n--- a/freqtrade/configuration/directory_operations.py\n+++ b/freqtrade/configuration/directory_operations.py\n@@ -33,8 +33,8 @@\n :param create_dir: Create directory if it does not exist.\n :return: Path object containing the directory\n \"\"\"\n- sub_dirs = [\"backtest_results\", \"data\", \"hyperopts\", \"hyperopt_results\", \"notebooks\",\n- \"plot\", \"strategies\", ]\n+ sub_dirs = [\"backtest_results\", \"data\", \"hyperopts\", \"hyperopt_results\", \"logs\",\n+ \"notebooks\", \"plot\", \"strategies\", ]\n folder = Path(directory)\n if not folder.is_dir():\n if create_dir:\n", "issue": " Docker image making logfiles in user_data\ndocker image places error logfiles in user_data by default. (apparently it should not be doing that)\r\nMaybe cud have it put them in a log dir?\r\n\r\n\r\ndocker-compose.yml\r\n\r\ncommand: >\r\n trade\r\n --logfile /freqtrade/user_data/freqtrade.log\r\n\r\ncan maybe be changed to \r\n --logfile /freqtrade/user_data/log/freqtrade.log\r\n\n", "before_files": [{"content": "import logging\nimport shutil\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional\n\nfrom freqtrade.exceptions import OperationalException\nfrom freqtrade.constants import USER_DATA_FILES\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_datadir(config: Dict[str, Any], datadir: Optional[str] = None) -> Path:\n\n folder = Path(datadir) if datadir else Path(f\"{config['user_data_dir']}/data\")\n if not datadir:\n # set datadir\n exchange_name = config.get('exchange', {}).get('name').lower()\n folder = folder.joinpath(exchange_name)\n\n if not folder.is_dir():\n folder.mkdir(parents=True)\n logger.info(f'Created data directory: {datadir}')\n return folder\n\n\ndef create_userdata_dir(directory: str, create_dir: bool = False) -> Path:\n \"\"\"\n Create userdata directory structure.\n if create_dir is True, then the parent-directory will be created if it does not exist.\n Sub-directories will always be created if the parent directory exists.\n Raises OperationalException if given a non-existing directory.\n :param directory: Directory to check\n :param create_dir: Create directory if it does not exist.\n :return: Path object containing the directory\n \"\"\"\n sub_dirs = [\"backtest_results\", \"data\", \"hyperopts\", \"hyperopt_results\", \"notebooks\",\n \"plot\", \"strategies\", ]\n folder = Path(directory)\n if not folder.is_dir():\n if create_dir:\n folder.mkdir(parents=True)\n logger.info(f'Created user-data directory: {folder}')\n else:\n raise OperationalException(\n f\"Directory `{folder}` does not exist. 
\"\n \"Please use `freqtrade create-userdir` to create a user directory\")\n\n # Create required subdirectories\n for f in sub_dirs:\n subfolder = folder / f\n if not subfolder.is_dir():\n subfolder.mkdir(parents=False)\n return folder\n\n\ndef copy_sample_files(directory: Path, overwrite: bool = False) -> None:\n \"\"\"\n Copy files from templates to User data directory.\n :param directory: Directory to copy data to\n :param overwrite: Overwrite existing sample files\n \"\"\"\n if not directory.is_dir():\n raise OperationalException(f\"Directory `{directory}` does not exist.\")\n sourcedir = Path(__file__).parents[1] / \"templates\"\n for source, target in USER_DATA_FILES.items():\n targetdir = directory / target\n if not targetdir.is_dir():\n raise OperationalException(f\"Directory `{targetdir}` does not exist.\")\n targetfile = targetdir / source\n if targetfile.exists():\n if not overwrite:\n logger.warning(f\"File `{targetfile}` exists already, not deploying sample file.\")\n continue\n else:\n logger.warning(f\"File `{targetfile}` exists already, overwriting.\")\n shutil.copy(str(sourcedir / source), str(targetfile))\n", "path": "freqtrade/configuration/directory_operations.py"}]}
| 1,404 | 175 |
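The freqtrade record above boils down to adding a `logs` entry to the list of user-data sub-directories that get scaffolded, so log files land in a dedicated folder. A small sketch of that idempotent scaffolding with `pathlib` follows; the directory names mirror the record, while the temp-directory root and function name are illustrative.

```python
import tempfile
from pathlib import Path

SUB_DIRS = ["backtest_results", "data", "hyperopts", "hyperopt_results",
            "logs", "notebooks", "plot", "strategies"]

def create_userdata_dirs(root: Path) -> Path:
    """Create the user-data folder and its fixed sub-directories; safe to rerun."""
    root.mkdir(parents=True, exist_ok=True)
    for name in SUB_DIRS:
        (root / name).mkdir(exist_ok=True)
    return root

demo_root = Path(tempfile.mkdtemp()) / "user_data"
create_userdata_dirs(demo_root)
print(sorted(p.name for p in demo_root.iterdir()))   # includes 'logs'
```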