| problem_id (string, lengths 18-22) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, lengths 13-58) | prompt (string, lengths 1.1k-25.4k) | golden_diff (string, lengths 145-5.13k) | verification_info (string, lengths 582-39.1k) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
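The rows below are easier to handle programmatically than in this flattened dump. As a minimal sketch, one record can be loaded and unpacked as follows; the Hub dataset id and the `train` split name are assumptions inferred from the `source` column, so adjust them if they differ.

```python
# Hedged sketch: load one record of the dataset whose schema is shown above.
# The dataset id and split name are assumptions taken from the `source` column.
import json
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

print(row["problem_id"], row["in_source_id"], row["num_tokens"], row["num_tokens_diff"])
print(row["golden_diff"][:200])        # unified diff that resolves the issue

# verification_info is stored as a JSON string holding the issue text and file contents
info = json.loads(row["verification_info"])
print(sorted(info.keys()))             # e.g. before_files, after_files, golden_diff, issue
```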
gh_patches_debug_17890 | rasdani/github-patches | git_diff | apluslms__a-plus-1218 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exercise API field html_url behaves inconsistently
If an exercise has a parent (i.e., is embedded in a chapter or an exercise), the html_url in the API differ between what the ExerciseBriefSerializer and ExerciseSerializer produce.
The brief serializer provides the wanted format of the url (link to the parent with anchor to exercise):

However, the exercise detail page includes the previous format (to the exercise page):

This is illogical, inconsistent, and causes problems for MOOC-Jutut.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/api/serializers.py`
Content:
```
1 from functools import partial
2 from urllib.parse import urlencode
3
4 from django.db.models import Manager
5 from rest_framework import serializers
6 from rest_framework.serializers import HyperlinkedModelSerializer
7 from rest_framework.utils.field_mapping import get_nested_relation_kwargs
8 from rest_framework.fields import get_attribute
9
10 from .fields import NestedHyperlinkedIdentityField, NestedHyperlinkedRelatedField
11
12
13 class NestedHyperlinkedModelSerializer(HyperlinkedModelSerializer):
14 """
15 Extension of `HyperlinkedModelSerializer` that adds support for
16 nested resources.
17 """
18 serializer_related_field = NestedHyperlinkedRelatedField
19 serializer_url_field = NestedHyperlinkedIdentityField
20
21 def get_default_field_names(self, declared_fields, model_info):
22 """
23 Return the default list of field names that will be used if the
24 `Meta.fields` option is not specified.
25 """
26 return (
27 [self.url_field_name] +
28 list(declared_fields.keys()) +
29 list(model_info.fields.keys()) +
30 list(model_info.forward_relations.keys())
31 )
32
33 def build_nested_field(self, field_name, relation_info, nested_depth):
34 """
35 Create nested fields for forward and reverse relationships.
36 """
37 class NestedSerializer(NestedHyperlinkedModelSerializer):
38 class Meta:
39 model = relation_info.related_model
40 depth = nested_depth - 1
41
42 field_class = NestedSerializer
43 field_kwargs = get_nested_relation_kwargs(relation_info)
44
45 return field_class, field_kwargs
46
47
48 class AlwaysListSerializer:
49 def __new__(cls, *args, **kwargs):
50 if kwargs.pop('_many', True):
51 kwargs['many'] = True
52 return super(AlwaysListSerializer, cls).__new__(cls, *args, _many=False, **kwargs)
53
54 def __init__(self, *args, _many=False, **kwargs):
55 super().__init__(*args, **kwargs)
56
57
58 class HtmlViewField(serializers.ReadOnlyField):
59 def __init__(self, *args, **kwargs):
60 kwargs['source'] = '*'
61 super().__init__(*args, **kwargs)
62
63 def to_representation(self, obj): # pylint: disable=arguments-renamed
64 request = self.context['request']
65 url = obj.get_absolute_url()
66 return request.build_absolute_uri(url)
67
68
69 class NestedHyperlinkedIdentityFieldWithQuery(NestedHyperlinkedIdentityField):
70 def __init__(self, *args, query_params=None, **kwargs):
71 self.__query_params = query_params
72 super().__init__(*args, **kwargs)
73
74 def get_url(self, obj, view_name, request, format): # pylint: disable=redefined-builtin
75 url = super().get_url(obj, view_name, request, format)
76
77 if url and self.__query_params:
78 # pylint: disable-next=unnecessary-lambda-assignment
79 get = lambda x: x(obj) if callable(x) else get_attribute(obj, x.split('.'))
80 params = [(key, get(value)) for key, value in self.__query_params.items()]
81 url = url + '?' + urlencode(params)
82
83 return url
84
85
86 class AttributeProxy:
87 def __init__(self, obj, **kwargs):
88 self._obj = obj
89 self._kwargs = kwargs
90
91 def __getattr__(self, key):
92 try:
93 return self._kwargs[key]
94 except KeyError:
95 return getattr(self._obj, key)
96
97
98 def zip_instance_extra_with_iterable(instance, iterable, extra):
99 extra_attrs = dict(
100 (key, get_attribute(instance, attrs.split('.')))
101 for key, attrs in extra.items()
102 )
103 return (AttributeProxy(item, **extra_attrs) for item in iterable)
104
105
106 class CompositeListSerializer(serializers.ListSerializer):
107 @classmethod
108 def with_extra(cls, extra):
109 return partial(cls, extra=extra)
110
111 def __init__(self, instance=None, data=serializers.empty, extra=None, **kwargs):
112 self.__extra = extra
113 source = kwargs.get('source', None)
114 if instance and source:
115 iterable = instance[source]
116 instance = zip_instance_extra_with_iterable(instance, iterable, extra)
117 super().__init__(instance=instance, data=data, **kwargs)
118
119 def get_attribute(self, instance):
120 data = super().get_attribute(instance)
121 iterable = data.all() if isinstance(data, Manager) else data
122 return zip_instance_extra_with_iterable(instance, iterable, self.__extra)
123
124
125 class AplusSerializerMetaMetaclass(type):
126 def __new__(cls, name, bases, dict_):
127 new_cls = type.__new__(cls, name, bases, dict_)
128 for k, v in dict_.items():
129 if k[0] != '_' and not callable(v):
130 if isinstance(v, dict):
131 parent = getattr(super(new_cls, new_cls), k, {})
132 setattr(new_cls, k, dict(parent, **v))
133 elif isinstance(v, (tuple, list)):
134 parent = getattr(super(new_cls, new_cls), k, ())
135 seen = set()
136 seen_add = seen.add
137 res = [x for x in parent if not (x in seen or seen_add(x))]
138 res += (x for x in v if not (x in seen or seen_add(x)))
139 setattr(new_cls, k, type(v)(res))
140 return new_cls
141
142
143 class AplusSerializerMeta(metaclass=AplusSerializerMetaMetaclass):
144 pass
145
146
147 class AplusModelSerializerBase(NestedHyperlinkedModelSerializer):
148 url_field_name = 'url'
149 html_url_field_name = 'html_url'
150
151 def get_field_names(self, declared_fields, info):
152 fields = list(super().get_field_names(declared_fields, info))
153 extra_kwargs = getattr(self.Meta, 'extra_kwargs', {})
154 if self.url_field_name not in fields and self.url_field_name in extra_kwargs:
155 fields.insert(0, self.url_field_name)
156 return fields
157
158 def build_unknown_field(self, field_name, model_class):
159 if field_name == self.html_url_field_name:
160 return (HtmlViewField, {})
161 if field_name == self.url_field_name:
162 extra_kwargs = getattr(self.Meta, 'extra_kwargs', {})
163 kwargs = {'context': self.context}
164 kwargs.update(extra_kwargs[self.url_field_name])
165 return (NestedHyperlinkedIdentityField, kwargs)
166 return super().build_unknown_field(field_name, model_class)
167
168
169 class AplusModelSerializer(AplusModelSerializerBase):
170 id = serializers.IntegerField(source='pk', read_only=True)
171
172 class Meta(AplusSerializerMeta):
173 fields = (
174 'id',
175 'url',
176 )
177
178
179 class StatisticsSerializer(serializers.Serializer):
180 starttime = serializers.DateTimeField(allow_null=True)
181 endtime = serializers.DateTimeField(allow_null=True)
182 submission_count = serializers.IntegerField(read_only=True)
183 submitters = serializers.IntegerField(read_only=True)
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/api/serializers.py b/lib/api/serializers.py
--- a/lib/api/serializers.py
+++ b/lib/api/serializers.py
@@ -8,6 +8,7 @@
from rest_framework.fields import get_attribute
from .fields import NestedHyperlinkedIdentityField, NestedHyperlinkedRelatedField
+from lib.helpers import build_aplus_url
class NestedHyperlinkedModelSerializer(HyperlinkedModelSerializer):
@@ -61,9 +62,8 @@
super().__init__(*args, **kwargs)
def to_representation(self, obj): # pylint: disable=arguments-renamed
- request = self.context['request']
- url = obj.get_absolute_url()
- return request.build_absolute_uri(url)
+ url = obj.get_display_url()
+ return build_aplus_url(url, True)
class NestedHyperlinkedIdentityFieldWithQuery(NestedHyperlinkedIdentityField):
| {"golden_diff": "diff --git a/lib/api/serializers.py b/lib/api/serializers.py\n--- a/lib/api/serializers.py\n+++ b/lib/api/serializers.py\n@@ -8,6 +8,7 @@\n from rest_framework.fields import get_attribute\n \n from .fields import NestedHyperlinkedIdentityField, NestedHyperlinkedRelatedField\n+from lib.helpers import build_aplus_url\n \n \n class NestedHyperlinkedModelSerializer(HyperlinkedModelSerializer):\n@@ -61,9 +62,8 @@\n super().__init__(*args, **kwargs)\n \n def to_representation(self, obj): # pylint: disable=arguments-renamed\n- request = self.context['request']\n- url = obj.get_absolute_url()\n- return request.build_absolute_uri(url)\n+ url = obj.get_display_url()\n+ return build_aplus_url(url, True)\n \n \n class NestedHyperlinkedIdentityFieldWithQuery(NestedHyperlinkedIdentityField):\n", "issue": "Exercise API field html_url behaves inconsistently\nIf an exercise has a parent (i.e., is embedded in a chapter or an exercise), the html_url in the API differ between what the ExerciseBriefSerializer and ExerciseSerializer produce.\r\n\r\nThe brief serializer provides the wanted format of the url (link to the parent with anchor to exercise):\r\n\r\n\r\nHowever, the exercise detail page includes the previous format (to the exercise page): \r\n\r\n\r\nThis is illogical, inconsistent, and causes problems for MOOC-Jutut.\n", "before_files": [{"content": "from functools import partial\nfrom urllib.parse import urlencode\n\nfrom django.db.models import Manager\nfrom rest_framework import serializers\nfrom rest_framework.serializers import HyperlinkedModelSerializer\nfrom rest_framework.utils.field_mapping import get_nested_relation_kwargs\nfrom rest_framework.fields import get_attribute\n\nfrom .fields import NestedHyperlinkedIdentityField, NestedHyperlinkedRelatedField\n\n\nclass NestedHyperlinkedModelSerializer(HyperlinkedModelSerializer):\n \"\"\"\n Extension of `HyperlinkedModelSerializer` that adds support for\n nested resources.\n \"\"\"\n serializer_related_field = NestedHyperlinkedRelatedField\n serializer_url_field = NestedHyperlinkedIdentityField\n\n def get_default_field_names(self, declared_fields, model_info):\n \"\"\"\n Return the default list of field names that will be used if the\n `Meta.fields` option is not specified.\n \"\"\"\n return (\n [self.url_field_name] +\n list(declared_fields.keys()) +\n list(model_info.fields.keys()) +\n list(model_info.forward_relations.keys())\n )\n\n def build_nested_field(self, field_name, relation_info, nested_depth):\n \"\"\"\n Create nested fields for forward and reverse relationships.\n \"\"\"\n class NestedSerializer(NestedHyperlinkedModelSerializer):\n class Meta:\n model = relation_info.related_model\n depth = nested_depth - 1\n\n field_class = NestedSerializer\n field_kwargs = get_nested_relation_kwargs(relation_info)\n\n return field_class, field_kwargs\n\n\nclass AlwaysListSerializer:\n def __new__(cls, *args, **kwargs):\n if kwargs.pop('_many', True):\n kwargs['many'] = True\n return super(AlwaysListSerializer, cls).__new__(cls, *args, _many=False, **kwargs)\n\n def __init__(self, *args, _many=False, **kwargs):\n super().__init__(*args, **kwargs)\n\n\nclass HtmlViewField(serializers.ReadOnlyField):\n def __init__(self, *args, **kwargs):\n kwargs['source'] = '*'\n super().__init__(*args, **kwargs)\n\n def to_representation(self, obj): # pylint: disable=arguments-renamed\n request = self.context['request']\n url = obj.get_absolute_url()\n return request.build_absolute_uri(url)\n\n\nclass 
NestedHyperlinkedIdentityFieldWithQuery(NestedHyperlinkedIdentityField):\n def __init__(self, *args, query_params=None, **kwargs):\n self.__query_params = query_params\n super().__init__(*args, **kwargs)\n\n def get_url(self, obj, view_name, request, format): # pylint: disable=redefined-builtin\n url = super().get_url(obj, view_name, request, format)\n\n if url and self.__query_params:\n # pylint: disable-next=unnecessary-lambda-assignment\n get = lambda x: x(obj) if callable(x) else get_attribute(obj, x.split('.'))\n params = [(key, get(value)) for key, value in self.__query_params.items()]\n url = url + '?' + urlencode(params)\n\n return url\n\n\nclass AttributeProxy:\n def __init__(self, obj, **kwargs):\n self._obj = obj\n self._kwargs = kwargs\n\n def __getattr__(self, key):\n try:\n return self._kwargs[key]\n except KeyError:\n return getattr(self._obj, key)\n\n\ndef zip_instance_extra_with_iterable(instance, iterable, extra):\n extra_attrs = dict(\n (key, get_attribute(instance, attrs.split('.')))\n for key, attrs in extra.items()\n )\n return (AttributeProxy(item, **extra_attrs) for item in iterable)\n\n\nclass CompositeListSerializer(serializers.ListSerializer):\n @classmethod\n def with_extra(cls, extra):\n return partial(cls, extra=extra)\n\n def __init__(self, instance=None, data=serializers.empty, extra=None, **kwargs):\n self.__extra = extra\n source = kwargs.get('source', None)\n if instance and source:\n iterable = instance[source]\n instance = zip_instance_extra_with_iterable(instance, iterable, extra)\n super().__init__(instance=instance, data=data, **kwargs)\n\n def get_attribute(self, instance):\n data = super().get_attribute(instance)\n iterable = data.all() if isinstance(data, Manager) else data\n return zip_instance_extra_with_iterable(instance, iterable, self.__extra)\n\n\nclass AplusSerializerMetaMetaclass(type):\n def __new__(cls, name, bases, dict_):\n new_cls = type.__new__(cls, name, bases, dict_)\n for k, v in dict_.items():\n if k[0] != '_' and not callable(v):\n if isinstance(v, dict):\n parent = getattr(super(new_cls, new_cls), k, {})\n setattr(new_cls, k, dict(parent, **v))\n elif isinstance(v, (tuple, list)):\n parent = getattr(super(new_cls, new_cls), k, ())\n seen = set()\n seen_add = seen.add\n res = [x for x in parent if not (x in seen or seen_add(x))]\n res += (x for x in v if not (x in seen or seen_add(x)))\n setattr(new_cls, k, type(v)(res))\n return new_cls\n\n\nclass AplusSerializerMeta(metaclass=AplusSerializerMetaMetaclass):\n pass\n\n\nclass AplusModelSerializerBase(NestedHyperlinkedModelSerializer):\n url_field_name = 'url'\n html_url_field_name = 'html_url'\n\n def get_field_names(self, declared_fields, info):\n fields = list(super().get_field_names(declared_fields, info))\n extra_kwargs = getattr(self.Meta, 'extra_kwargs', {})\n if self.url_field_name not in fields and self.url_field_name in extra_kwargs:\n fields.insert(0, self.url_field_name)\n return fields\n\n def build_unknown_field(self, field_name, model_class):\n if field_name == self.html_url_field_name:\n return (HtmlViewField, {})\n if field_name == self.url_field_name:\n extra_kwargs = getattr(self.Meta, 'extra_kwargs', {})\n kwargs = {'context': self.context}\n kwargs.update(extra_kwargs[self.url_field_name])\n return (NestedHyperlinkedIdentityField, kwargs)\n return super().build_unknown_field(field_name, model_class)\n\n\nclass AplusModelSerializer(AplusModelSerializerBase):\n id = serializers.IntegerField(source='pk', read_only=True)\n\n class Meta(AplusSerializerMeta):\n 
fields = (\n 'id',\n 'url',\n )\n\n\nclass StatisticsSerializer(serializers.Serializer):\n starttime = serializers.DateTimeField(allow_null=True)\n endtime = serializers.DateTimeField(allow_null=True)\n submission_count = serializers.IntegerField(read_only=True)\n submitters = serializers.IntegerField(read_only=True)\n", "path": "lib/api/serializers.py"}], "after_files": [{"content": "from functools import partial\nfrom urllib.parse import urlencode\n\nfrom django.db.models import Manager\nfrom rest_framework import serializers\nfrom rest_framework.serializers import HyperlinkedModelSerializer\nfrom rest_framework.utils.field_mapping import get_nested_relation_kwargs\nfrom rest_framework.fields import get_attribute\n\nfrom .fields import NestedHyperlinkedIdentityField, NestedHyperlinkedRelatedField\nfrom lib.helpers import build_aplus_url\n\n\nclass NestedHyperlinkedModelSerializer(HyperlinkedModelSerializer):\n \"\"\"\n Extension of `HyperlinkedModelSerializer` that adds support for\n nested resources.\n \"\"\"\n serializer_related_field = NestedHyperlinkedRelatedField\n serializer_url_field = NestedHyperlinkedIdentityField\n\n def get_default_field_names(self, declared_fields, model_info):\n \"\"\"\n Return the default list of field names that will be used if the\n `Meta.fields` option is not specified.\n \"\"\"\n return (\n [self.url_field_name] +\n list(declared_fields.keys()) +\n list(model_info.fields.keys()) +\n list(model_info.forward_relations.keys())\n )\n\n def build_nested_field(self, field_name, relation_info, nested_depth):\n \"\"\"\n Create nested fields for forward and reverse relationships.\n \"\"\"\n class NestedSerializer(NestedHyperlinkedModelSerializer):\n class Meta:\n model = relation_info.related_model\n depth = nested_depth - 1\n\n field_class = NestedSerializer\n field_kwargs = get_nested_relation_kwargs(relation_info)\n\n return field_class, field_kwargs\n\n\nclass AlwaysListSerializer:\n def __new__(cls, *args, **kwargs):\n if kwargs.pop('_many', True):\n kwargs['many'] = True\n return super(AlwaysListSerializer, cls).__new__(cls, *args, _many=False, **kwargs)\n\n def __init__(self, *args, _many=False, **kwargs):\n super().__init__(*args, **kwargs)\n\n\nclass HtmlViewField(serializers.ReadOnlyField):\n def __init__(self, *args, **kwargs):\n kwargs['source'] = '*'\n super().__init__(*args, **kwargs)\n\n def to_representation(self, obj): # pylint: disable=arguments-renamed\n url = obj.get_display_url()\n return build_aplus_url(url, True)\n\n\nclass NestedHyperlinkedIdentityFieldWithQuery(NestedHyperlinkedIdentityField):\n def __init__(self, *args, query_params=None, **kwargs):\n self.__query_params = query_params\n super().__init__(*args, **kwargs)\n\n def get_url(self, obj, view_name, request, format): # pylint: disable=redefined-builtin\n url = super().get_url(obj, view_name, request, format)\n\n if url and self.__query_params:\n # pylint: disable-next=unnecessary-lambda-assignment\n get = lambda x: x(obj) if callable(x) else get_attribute(obj, x.split('.'))\n params = [(key, get(value)) for key, value in self.__query_params.items()]\n url = url + '?' 
+ urlencode(params)\n\n return url\n\n\nclass AttributeProxy:\n def __init__(self, obj, **kwargs):\n self._obj = obj\n self._kwargs = kwargs\n\n def __getattr__(self, key):\n try:\n return self._kwargs[key]\n except KeyError:\n return getattr(self._obj, key)\n\n\ndef zip_instance_extra_with_iterable(instance, iterable, extra):\n extra_attrs = dict(\n (key, get_attribute(instance, attrs.split('.')))\n for key, attrs in extra.items()\n )\n return (AttributeProxy(item, **extra_attrs) for item in iterable)\n\n\nclass CompositeListSerializer(serializers.ListSerializer):\n @classmethod\n def with_extra(cls, extra):\n return partial(cls, extra=extra)\n\n def __init__(self, instance=None, data=serializers.empty, extra=None, **kwargs):\n self.__extra = extra\n source = kwargs.get('source', None)\n if instance and source:\n iterable = instance[source]\n instance = zip_instance_extra_with_iterable(instance, iterable, extra)\n super().__init__(instance=instance, data=data, **kwargs)\n\n def get_attribute(self, instance):\n data = super().get_attribute(instance)\n iterable = data.all() if isinstance(data, Manager) else data\n return zip_instance_extra_with_iterable(instance, iterable, self.__extra)\n\n\nclass AplusSerializerMetaMetaclass(type):\n def __new__(cls, name, bases, dict_):\n new_cls = type.__new__(cls, name, bases, dict_)\n for k, v in dict_.items():\n if k[0] != '_' and not callable(v):\n if isinstance(v, dict):\n parent = getattr(super(new_cls, new_cls), k, {})\n setattr(new_cls, k, dict(parent, **v))\n elif isinstance(v, (tuple, list)):\n parent = getattr(super(new_cls, new_cls), k, ())\n seen = set()\n seen_add = seen.add\n res = [x for x in parent if not (x in seen or seen_add(x))]\n res += (x for x in v if not (x in seen or seen_add(x)))\n setattr(new_cls, k, type(v)(res))\n return new_cls\n\n\nclass AplusSerializerMeta(metaclass=AplusSerializerMetaMetaclass):\n pass\n\n\nclass AplusModelSerializerBase(NestedHyperlinkedModelSerializer):\n url_field_name = 'url'\n html_url_field_name = 'html_url'\n\n def get_field_names(self, declared_fields, info):\n fields = list(super().get_field_names(declared_fields, info))\n extra_kwargs = getattr(self.Meta, 'extra_kwargs', {})\n if self.url_field_name not in fields and self.url_field_name in extra_kwargs:\n fields.insert(0, self.url_field_name)\n return fields\n\n def build_unknown_field(self, field_name, model_class):\n if field_name == self.html_url_field_name:\n return (HtmlViewField, {})\n if field_name == self.url_field_name:\n extra_kwargs = getattr(self.Meta, 'extra_kwargs', {})\n kwargs = {'context': self.context}\n kwargs.update(extra_kwargs[self.url_field_name])\n return (NestedHyperlinkedIdentityField, kwargs)\n return super().build_unknown_field(field_name, model_class)\n\n\nclass AplusModelSerializer(AplusModelSerializerBase):\n id = serializers.IntegerField(source='pk', read_only=True)\n\n class Meta(AplusSerializerMeta):\n fields = (\n 'id',\n 'url',\n )\n\n\nclass StatisticsSerializer(serializers.Serializer):\n starttime = serializers.DateTimeField(allow_null=True)\n endtime = serializers.DateTimeField(allow_null=True)\n submission_count = serializers.IntegerField(read_only=True)\n submitters = serializers.IntegerField(read_only=True)\n", "path": "lib/api/serializers.py"}]} | 2,372 | 196 |
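To make the fix captured in the record above (gh_patches_debug_17890) concrete, here is a small, self-contained illustration of the URL mismatch it describes. The class and URL values are hypothetical stand-ins, not the real A+ models; they only mirror the `get_absolute_url` versus `get_display_url` distinction used in the golden diff.

```python
# Hypothetical stand-in for an exercise embedded in a chapter (not the real A+ model).
class Exercise:
    def __init__(self, parent_url=None, own_url="/course/module/exercise1/"):
        self.parent_url = parent_url
        self.own_url = own_url

    def get_absolute_url(self):
        # Old detail-serializer behaviour: always the exercise's own page.
        return self.own_url

    def get_display_url(self):
        # Brief-serializer behaviour kept by the golden diff: parent page plus anchor.
        if self.parent_url:
            return f"{self.parent_url}#exercise1"
        return self.own_url


embedded = Exercise(parent_url="/course/module/chapter2/")
print(embedded.get_absolute_url())  # /course/module/exercise1/            (inconsistent html_url)
print(embedded.get_display_url())   # /course/module/chapter2/#exercise1   (wanted format)
```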
gh_patches_debug_3154 | rasdani/github-patches | git_diff | ultrabug__py3status-1893 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mpd_status: self.current_status dosen't return (text, state)
self.current_status command returns a tuple of (text, dict_of_mpd_state) but the mpd_status method expects the second value of the tuple to be `play`, `pause` or `stop`. this results in the module not coloring the output
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py3status/modules/mpd_status.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Display song currently playing in mpd.
4
5 Configuration parameters:
6 cache_timeout: how often we refresh this module in seconds (default 2)
7 format: template string (see below)
8 (default '{state} [[[{artist}] - {title}]|[{file}]]')
9 hide_on_error: hide the status if an error has occurred (default False)
10 hide_when_paused: hide the status if state is paused (default False)
11 hide_when_stopped: hide the status if state is stopped (default True)
12 host: mpd host (default 'localhost')
13 idle_subsystems: a space seperated string of subsystems to idle on.
14 player: changes in song information, play state
15 mixer: changes in volume
16 options: e.g. repeat mode
17 See the MPD protocol documentation for additional events.
18 (default 'player mixer options')
19 idle_timeout: force idle to reset every n seconds (default 3600)
20 max_width: maximum status length (default 120)
21 password: mpd password (default None)
22 port: mpd port (default '6600')
23 state_pause: label to display for "paused" state (default '[pause]')
24 state_play: label to display for "playing" state (default '[play]')
25 state_stop: label to display for "stopped" state (default '[stop]')
26 use_idle: whether to use idling instead of polling. None to autodetect
27 (default None)
28
29 Format placeholders:
30 {state} state (paused, playing. stopped) can be defined via `state_..`
31 configuration parameters
32 Refer to the mpc(1) manual page for the list of available placeholders to
33 be used in the format. Placeholders should use braces `{}` rather than
34 percent `%%` eg `{artist}`.
35 Every placeholder can also be prefixed with
36 `next_` to retrieve the data for the song following the one currently
37 playing.
38
39 Color options:
40 color_pause: Paused, default color_degraded
41 color_play: Playing, default color_good
42 color_stop: Stopped, default color_bad
43
44 Requires:
45 python-mpd2: (NOT python2-mpd2)
46
47 Examples:
48 ```
49 # Show state and (artist -) title, if no title fallback to file:
50 {state} [[[{artist} - ]{title}]|[{file}]]
51
52 # Show state, [duration], title (or file) and next song title (or file):
53 {state} \[{time}\] [{title}|{file}] → [{next_title}|{next_file}]
54 ```
55
56 @author shadowprince, zopieux
57 @license Eclipse Public License
58
59 SAMPLE OUTPUT
60 {'color': '#00ff00', 'full_text': '[play] Music For Programming - Idol Eyes'}
61
62 paused
63 {'color': '#ffff00', 'full_text': '[pause] Music For Programming - Idol Eyes'}
64
65 stopped
66 {'color': '#ff0000', 'full_text': '[stop] Music For Programming - Idol Eyes'}
67 """
68
69 import datetime
70 import re
71 import socket
72 from py3status.composite import Composite
73 from mpd import MPDClient, CommandError, ConnectionError
74 from threading import Thread
75 from time import sleep
76
77
78 def song_attr(song, attr):
79 def parse_mtime(date_str):
80 return datetime.datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%SZ")
81
82 if attr == "time":
83 try:
84 duration = int(song["time"])
85 if duration > 0:
86 minutes, seconds = divmod(duration, 60)
87 return "{:d}:{:02d}".format(minutes, seconds)
88 raise ValueError
89 except (KeyError, ValueError):
90 return ""
91 elif attr == "position":
92 try:
93 return "{}".format(int(song["pos"]) + 1)
94 except (KeyError, ValueError):
95 return ""
96 elif attr == "mtime":
97 return parse_mtime(song["last-modified"]).strftime("%c")
98 elif attr == "mdate":
99 return parse_mtime(song["last-modified"]).strftime("%x")
100
101 return song.get(attr, "")
102
103
104 class Py3status:
105 """
106 """
107
108 # available configuration parameters
109 cache_timeout = 2
110 format = "{state} [[[{artist}] - {title}]|[{file}]]"
111 hide_on_error = False
112 hide_when_paused = False
113 hide_when_stopped = True
114 host = "localhost"
115 idle_subsystems = "player mixer options"
116 idle_timeout = 3600
117 max_width = 120
118 password = None
119 port = "6600"
120 state_pause = "[pause]"
121 state_play = "[play]"
122 state_stop = "[stop]"
123 use_idle = None
124
125 def post_config_hook(self):
126 # class variables:
127 self.current_status = None
128 self.idle_thread = Thread()
129 # Convert from %placeholder% to {placeholder}
130 # This is not perfect but should be good enough
131 if not self.py3.get_placeholders_list(self.format) and "%" in self.format:
132 self.format = re.sub("%([a-z]+)%", r"{\1}", self.format)
133 self.py3.log("Old % style format DEPRECATED use { style format")
134 # class variables:
135 self.client = None
136
137 def _get_mpd(self, disconnect=False):
138 if disconnect:
139 try:
140 self.client.disconnect()
141 finally:
142 self.client = None
143 return
144
145 try:
146 if self.client is None:
147 self.client = MPDClient()
148 self.client.connect(host=self.host, port=self.port)
149 if self.password:
150 self.client.password(self.password)
151 if self.use_idle is None:
152 self.use_idle = "idle" in self.client.commands()
153 if self.use_idle and self.idle_timeout:
154 self.client.idletimeout = self.idle_timeout
155 return self.client
156 except (socket.error, ConnectionError, CommandError) as e:
157 self.client = None
158 raise e
159
160 def _state_character(self, state):
161 if state == "play":
162 return self.state_play
163 elif state == "pause":
164 return self.state_pause
165 elif state == "stop":
166 return self.state_stop
167 return "?"
168
169 def mpd_status(self):
170 # I - get current mpd status (or wait until it changes)
171 # this writes into self.current_status
172 if self.use_idle is not False:
173 if not self.idle_thread.is_alive():
174 sleep(self.cache_timeout) # rate limit thread restarting
175 self.idle_thread = Thread(target=self._get_status)
176 self.idle_thread.daemon = True
177 self.idle_thread.start()
178 else:
179 self._get_status()
180
181 # II - format response
182 (text, state) = ("", "")
183 if self.current_status is not None:
184 (text, state) = self.current_status
185
186 if len(text) > self.max_width:
187 text = u"{}...".format(text[: self.max_width - 3])
188
189 response = {
190 "cached_until": self.py3.time_in(self.cache_timeout),
191 "full_text": text if state or not self.hide_on_error else "",
192 }
193
194 if state:
195 if state == "play":
196 response["color"] = self.py3.COLOR_PLAY or self.py3.COLOR_GOOD
197 elif state == "pause":
198 response["color"] = self.py3.COLOR_PAUSE or self.py3.COLOR_DEGRADED
199 elif state == "stop":
200 response["color"] = self.py3.COLOR_STOP or self.py3.COLOR_BAD
201
202 return response
203
204 def _get_status(self):
205 while True:
206 try:
207 status = self._get_mpd().status()
208 song = int(status.get("song", 0))
209 next_song = int(status.get("nextsong", 0))
210
211 state = status.get("state")
212
213 if (state == "pause" and self.hide_when_paused) or (
214 state == "stop" and self.hide_when_stopped
215 ):
216 text = ""
217
218 else:
219 playlist_info = self._get_mpd().playlistinfo()
220 try:
221 song = playlist_info[song]
222 except IndexError:
223 song = {}
224 try:
225 next_song = playlist_info[next_song]
226 except IndexError:
227 next_song = {}
228
229 song["state"] = next_song["state"] = self._state_character(state)
230
231 def attr_getter(attr):
232 if attr.startswith("next_"):
233 return song_attr(next_song, attr[5:])
234 return song_attr(song, attr)
235
236 text = self.py3.safe_format(self.format, attr_getter=attr_getter)
237 if isinstance(text, Composite):
238 text = text.text()
239
240 self.current_status = (text, status)
241
242 if self.use_idle:
243 self.py3.update()
244 # Note: mpd2 does not support more than 1 idle subsystem. so if
245 # the user wants to listen on more than one, we listen on all
246 # and loop until one we're interested in changed.
247 # https://github.com/Mic92/python-mpd2/issues/107
248 changed = self._get_mpd().idle()
249 while not any([c in self.idle_subsystems for c in changed]):
250 changed = self._get_mpd().idle()
251 else:
252 return
253
254 except (ValueError, socket.error, ConnectionError, CommandError) as e:
255 # ValueError can happen when status.get(...) returns None; e.g.
256 # during reversal of playlist
257 if isinstance(e, ValueError):
258 text = "No song information!"
259 if isinstance(e, socket.error):
260 text = "Failed to connect to mpd!"
261 if isinstance(e, ConnectionError):
262 text = "Error while connecting to mpd!"
263 self._get_mpd(disconnect=True)
264 if isinstance(e, CommandError):
265 text = "Failed to authenticate to mpd!"
266 self._get_mpd(disconnect=True)
267
268 state = None
269 self.current_status = (text, status)
270 return
271 finally:
272 self.py3.update() # to propagate error message
273
274 def kill(self):
275 self._get_mpd(disconnect=True)
276
277
278 if __name__ == "__main__":
279 """
280 Run module in test mode.
281 """
282 from py3status.module_test import module_test
283
284 module_test(Py3status)
285
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/py3status/modules/mpd_status.py b/py3status/modules/mpd_status.py
--- a/py3status/modules/mpd_status.py
+++ b/py3status/modules/mpd_status.py
@@ -237,7 +237,7 @@
if isinstance(text, Composite):
text = text.text()
- self.current_status = (text, status)
+ self.current_status = (text, state)
if self.use_idle:
self.py3.update()
| {"golden_diff": "diff --git a/py3status/modules/mpd_status.py b/py3status/modules/mpd_status.py\n--- a/py3status/modules/mpd_status.py\n+++ b/py3status/modules/mpd_status.py\n@@ -237,7 +237,7 @@\n if isinstance(text, Composite):\n text = text.text()\n \n- self.current_status = (text, status)\n+ self.current_status = (text, state)\n \n if self.use_idle:\n self.py3.update()\n", "issue": "mpd_status: self.current_status dosen't return (text, state)\nself.current_status command returns a tuple of (text, dict_of_mpd_state) but the mpd_status method expects the second value of the tuple to be `play`, `pause` or `stop`. this results in the module not coloring the output\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay song currently playing in mpd.\n\nConfiguration parameters:\n cache_timeout: how often we refresh this module in seconds (default 2)\n format: template string (see below)\n (default '{state} [[[{artist}] - {title}]|[{file}]]')\n hide_on_error: hide the status if an error has occurred (default False)\n hide_when_paused: hide the status if state is paused (default False)\n hide_when_stopped: hide the status if state is stopped (default True)\n host: mpd host (default 'localhost')\n idle_subsystems: a space seperated string of subsystems to idle on.\n player: changes in song information, play state\n mixer: changes in volume\n options: e.g. repeat mode\n See the MPD protocol documentation for additional events.\n (default 'player mixer options')\n idle_timeout: force idle to reset every n seconds (default 3600)\n max_width: maximum status length (default 120)\n password: mpd password (default None)\n port: mpd port (default '6600')\n state_pause: label to display for \"paused\" state (default '[pause]')\n state_play: label to display for \"playing\" state (default '[play]')\n state_stop: label to display for \"stopped\" state (default '[stop]')\n use_idle: whether to use idling instead of polling. None to autodetect\n (default None)\n\nFormat placeholders:\n {state} state (paused, playing. stopped) can be defined via `state_..`\n configuration parameters\n Refer to the mpc(1) manual page for the list of available placeholders to\n be used in the format. 
Placeholders should use braces `{}` rather than\n percent `%%` eg `{artist}`.\n Every placeholder can also be prefixed with\n `next_` to retrieve the data for the song following the one currently\n playing.\n\nColor options:\n color_pause: Paused, default color_degraded\n color_play: Playing, default color_good\n color_stop: Stopped, default color_bad\n\nRequires:\n python-mpd2: (NOT python2-mpd2)\n\nExamples:\n```\n# Show state and (artist -) title, if no title fallback to file:\n{state} [[[{artist} - ]{title}]|[{file}]]\n\n# Show state, [duration], title (or file) and next song title (or file):\n{state} \\[{time}\\] [{title}|{file}] \u2192 [{next_title}|{next_file}]\n```\n\n@author shadowprince, zopieux\n@license Eclipse Public License\n\nSAMPLE OUTPUT\n{'color': '#00ff00', 'full_text': '[play] Music For Programming - Idol Eyes'}\n\npaused\n{'color': '#ffff00', 'full_text': '[pause] Music For Programming - Idol Eyes'}\n\nstopped\n{'color': '#ff0000', 'full_text': '[stop] Music For Programming - Idol Eyes'}\n\"\"\"\n\nimport datetime\nimport re\nimport socket\nfrom py3status.composite import Composite\nfrom mpd import MPDClient, CommandError, ConnectionError\nfrom threading import Thread\nfrom time import sleep\n\n\ndef song_attr(song, attr):\n def parse_mtime(date_str):\n return datetime.datetime.strptime(date_str, \"%Y-%m-%dT%H:%M:%SZ\")\n\n if attr == \"time\":\n try:\n duration = int(song[\"time\"])\n if duration > 0:\n minutes, seconds = divmod(duration, 60)\n return \"{:d}:{:02d}\".format(minutes, seconds)\n raise ValueError\n except (KeyError, ValueError):\n return \"\"\n elif attr == \"position\":\n try:\n return \"{}\".format(int(song[\"pos\"]) + 1)\n except (KeyError, ValueError):\n return \"\"\n elif attr == \"mtime\":\n return parse_mtime(song[\"last-modified\"]).strftime(\"%c\")\n elif attr == \"mdate\":\n return parse_mtime(song[\"last-modified\"]).strftime(\"%x\")\n\n return song.get(attr, \"\")\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n\n # available configuration parameters\n cache_timeout = 2\n format = \"{state} [[[{artist}] - {title}]|[{file}]]\"\n hide_on_error = False\n hide_when_paused = False\n hide_when_stopped = True\n host = \"localhost\"\n idle_subsystems = \"player mixer options\"\n idle_timeout = 3600\n max_width = 120\n password = None\n port = \"6600\"\n state_pause = \"[pause]\"\n state_play = \"[play]\"\n state_stop = \"[stop]\"\n use_idle = None\n\n def post_config_hook(self):\n # class variables:\n self.current_status = None\n self.idle_thread = Thread()\n # Convert from %placeholder% to {placeholder}\n # This is not perfect but should be good enough\n if not self.py3.get_placeholders_list(self.format) and \"%\" in self.format:\n self.format = re.sub(\"%([a-z]+)%\", r\"{\\1}\", self.format)\n self.py3.log(\"Old % style format DEPRECATED use { style format\")\n # class variables:\n self.client = None\n\n def _get_mpd(self, disconnect=False):\n if disconnect:\n try:\n self.client.disconnect()\n finally:\n self.client = None\n return\n\n try:\n if self.client is None:\n self.client = MPDClient()\n self.client.connect(host=self.host, port=self.port)\n if self.password:\n self.client.password(self.password)\n if self.use_idle is None:\n self.use_idle = \"idle\" in self.client.commands()\n if self.use_idle and self.idle_timeout:\n self.client.idletimeout = self.idle_timeout\n return self.client\n except (socket.error, ConnectionError, CommandError) as e:\n self.client = None\n raise e\n\n def _state_character(self, state):\n if state == \"play\":\n return 
self.state_play\n elif state == \"pause\":\n return self.state_pause\n elif state == \"stop\":\n return self.state_stop\n return \"?\"\n\n def mpd_status(self):\n # I - get current mpd status (or wait until it changes)\n # this writes into self.current_status\n if self.use_idle is not False:\n if not self.idle_thread.is_alive():\n sleep(self.cache_timeout) # rate limit thread restarting\n self.idle_thread = Thread(target=self._get_status)\n self.idle_thread.daemon = True\n self.idle_thread.start()\n else:\n self._get_status()\n\n # II - format response\n (text, state) = (\"\", \"\")\n if self.current_status is not None:\n (text, state) = self.current_status\n\n if len(text) > self.max_width:\n text = u\"{}...\".format(text[: self.max_width - 3])\n\n response = {\n \"cached_until\": self.py3.time_in(self.cache_timeout),\n \"full_text\": text if state or not self.hide_on_error else \"\",\n }\n\n if state:\n if state == \"play\":\n response[\"color\"] = self.py3.COLOR_PLAY or self.py3.COLOR_GOOD\n elif state == \"pause\":\n response[\"color\"] = self.py3.COLOR_PAUSE or self.py3.COLOR_DEGRADED\n elif state == \"stop\":\n response[\"color\"] = self.py3.COLOR_STOP or self.py3.COLOR_BAD\n\n return response\n\n def _get_status(self):\n while True:\n try:\n status = self._get_mpd().status()\n song = int(status.get(\"song\", 0))\n next_song = int(status.get(\"nextsong\", 0))\n\n state = status.get(\"state\")\n\n if (state == \"pause\" and self.hide_when_paused) or (\n state == \"stop\" and self.hide_when_stopped\n ):\n text = \"\"\n\n else:\n playlist_info = self._get_mpd().playlistinfo()\n try:\n song = playlist_info[song]\n except IndexError:\n song = {}\n try:\n next_song = playlist_info[next_song]\n except IndexError:\n next_song = {}\n\n song[\"state\"] = next_song[\"state\"] = self._state_character(state)\n\n def attr_getter(attr):\n if attr.startswith(\"next_\"):\n return song_attr(next_song, attr[5:])\n return song_attr(song, attr)\n\n text = self.py3.safe_format(self.format, attr_getter=attr_getter)\n if isinstance(text, Composite):\n text = text.text()\n\n self.current_status = (text, status)\n\n if self.use_idle:\n self.py3.update()\n # Note: mpd2 does not support more than 1 idle subsystem. so if\n # the user wants to listen on more than one, we listen on all\n # and loop until one we're interested in changed.\n # https://github.com/Mic92/python-mpd2/issues/107\n changed = self._get_mpd().idle()\n while not any([c in self.idle_subsystems for c in changed]):\n changed = self._get_mpd().idle()\n else:\n return\n\n except (ValueError, socket.error, ConnectionError, CommandError) as e:\n # ValueError can happen when status.get(...) 
returns None; e.g.\n # during reversal of playlist\n if isinstance(e, ValueError):\n text = \"No song information!\"\n if isinstance(e, socket.error):\n text = \"Failed to connect to mpd!\"\n if isinstance(e, ConnectionError):\n text = \"Error while connecting to mpd!\"\n self._get_mpd(disconnect=True)\n if isinstance(e, CommandError):\n text = \"Failed to authenticate to mpd!\"\n self._get_mpd(disconnect=True)\n\n state = None\n self.current_status = (text, status)\n return\n finally:\n self.py3.update() # to propagate error message\n\n def kill(self):\n self._get_mpd(disconnect=True)\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n\n module_test(Py3status)\n", "path": "py3status/modules/mpd_status.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay song currently playing in mpd.\n\nConfiguration parameters:\n cache_timeout: how often we refresh this module in seconds (default 2)\n format: template string (see below)\n (default '{state} [[[{artist}] - {title}]|[{file}]]')\n hide_on_error: hide the status if an error has occurred (default False)\n hide_when_paused: hide the status if state is paused (default False)\n hide_when_stopped: hide the status if state is stopped (default True)\n host: mpd host (default 'localhost')\n idle_subsystems: a space seperated string of subsystems to idle on.\n player: changes in song information, play state\n mixer: changes in volume\n options: e.g. repeat mode\n See the MPD protocol documentation for additional events.\n (default 'player mixer options')\n idle_timeout: force idle to reset every n seconds (default 3600)\n max_width: maximum status length (default 120)\n password: mpd password (default None)\n port: mpd port (default '6600')\n state_pause: label to display for \"paused\" state (default '[pause]')\n state_play: label to display for \"playing\" state (default '[play]')\n state_stop: label to display for \"stopped\" state (default '[stop]')\n use_idle: whether to use idling instead of polling. None to autodetect\n (default None)\n\nFormat placeholders:\n {state} state (paused, playing. stopped) can be defined via `state_..`\n configuration parameters\n Refer to the mpc(1) manual page for the list of available placeholders to\n be used in the format. 
Placeholders should use braces `{}` rather than\n percent `%%` eg `{artist}`.\n Every placeholder can also be prefixed with\n `next_` to retrieve the data for the song following the one currently\n playing.\n\nColor options:\n color_pause: Paused, default color_degraded\n color_play: Playing, default color_good\n color_stop: Stopped, default color_bad\n\nRequires:\n python-mpd2: (NOT python2-mpd2)\n\nExamples:\n```\n# Show state and (artist -) title, if no title fallback to file:\n{state} [[[{artist} - ]{title}]|[{file}]]\n\n# Show state, [duration], title (or file) and next song title (or file):\n{state} \\[{time}\\] [{title}|{file}] \u2192 [{next_title}|{next_file}]\n```\n\n@author shadowprince, zopieux\n@license Eclipse Public License\n\nSAMPLE OUTPUT\n{'color': '#00ff00', 'full_text': '[play] Music For Programming - Idol Eyes'}\n\npaused\n{'color': '#ffff00', 'full_text': '[pause] Music For Programming - Idol Eyes'}\n\nstopped\n{'color': '#ff0000', 'full_text': '[stop] Music For Programming - Idol Eyes'}\n\"\"\"\n\nimport datetime\nimport re\nimport socket\nfrom py3status.composite import Composite\nfrom mpd import MPDClient, CommandError, ConnectionError\nfrom threading import Thread\nfrom time import sleep\n\n\ndef song_attr(song, attr):\n def parse_mtime(date_str):\n return datetime.datetime.strptime(date_str, \"%Y-%m-%dT%H:%M:%SZ\")\n\n if attr == \"time\":\n try:\n duration = int(song[\"time\"])\n if duration > 0:\n minutes, seconds = divmod(duration, 60)\n return \"{:d}:{:02d}\".format(minutes, seconds)\n raise ValueError\n except (KeyError, ValueError):\n return \"\"\n elif attr == \"position\":\n try:\n return \"{}\".format(int(song[\"pos\"]) + 1)\n except (KeyError, ValueError):\n return \"\"\n elif attr == \"mtime\":\n return parse_mtime(song[\"last-modified\"]).strftime(\"%c\")\n elif attr == \"mdate\":\n return parse_mtime(song[\"last-modified\"]).strftime(\"%x\")\n\n return song.get(attr, \"\")\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n\n # available configuration parameters\n cache_timeout = 2\n format = \"{state} [[[{artist}] - {title}]|[{file}]]\"\n hide_on_error = False\n hide_when_paused = False\n hide_when_stopped = True\n host = \"localhost\"\n idle_subsystems = \"player mixer options\"\n idle_timeout = 3600\n max_width = 120\n password = None\n port = \"6600\"\n state_pause = \"[pause]\"\n state_play = \"[play]\"\n state_stop = \"[stop]\"\n use_idle = None\n\n def post_config_hook(self):\n # class variables:\n self.current_status = None\n self.idle_thread = Thread()\n # Convert from %placeholder% to {placeholder}\n # This is not perfect but should be good enough\n if not self.py3.get_placeholders_list(self.format) and \"%\" in self.format:\n self.format = re.sub(\"%([a-z]+)%\", r\"{\\1}\", self.format)\n self.py3.log(\"Old % style format DEPRECATED use { style format\")\n # class variables:\n self.client = None\n\n def _get_mpd(self, disconnect=False):\n if disconnect:\n try:\n self.client.disconnect()\n finally:\n self.client = None\n return\n\n try:\n if self.client is None:\n self.client = MPDClient()\n self.client.connect(host=self.host, port=self.port)\n if self.password:\n self.client.password(self.password)\n if self.use_idle is None:\n self.use_idle = \"idle\" in self.client.commands()\n if self.use_idle and self.idle_timeout:\n self.client.idletimeout = self.idle_timeout\n return self.client\n except (socket.error, ConnectionError, CommandError) as e:\n self.client = None\n raise e\n\n def _state_character(self, state):\n if state == \"play\":\n return 
self.state_play\n elif state == \"pause\":\n return self.state_pause\n elif state == \"stop\":\n return self.state_stop\n return \"?\"\n\n def mpd_status(self):\n # I - get current mpd status (or wait until it changes)\n # this writes into self.current_status\n if self.use_idle is not False:\n if not self.idle_thread.is_alive():\n sleep(self.cache_timeout) # rate limit thread restarting\n self.idle_thread = Thread(target=self._get_status)\n self.idle_thread.daemon = True\n self.idle_thread.start()\n else:\n self._get_status()\n\n # II - format response\n (text, state) = (\"\", \"\")\n if self.current_status is not None:\n (text, state) = self.current_status\n\n if len(text) > self.max_width:\n text = u\"{}...\".format(text[: self.max_width - 3])\n\n response = {\n \"cached_until\": self.py3.time_in(self.cache_timeout),\n \"full_text\": text if state or not self.hide_on_error else \"\",\n }\n\n if state:\n if state == \"play\":\n response[\"color\"] = self.py3.COLOR_PLAY or self.py3.COLOR_GOOD\n elif state == \"pause\":\n response[\"color\"] = self.py3.COLOR_PAUSE or self.py3.COLOR_DEGRADED\n elif state == \"stop\":\n response[\"color\"] = self.py3.COLOR_STOP or self.py3.COLOR_BAD\n\n return response\n\n def _get_status(self):\n while True:\n try:\n status = self._get_mpd().status()\n song = int(status.get(\"song\", 0))\n next_song = int(status.get(\"nextsong\", 0))\n\n state = status.get(\"state\")\n\n if (state == \"pause\" and self.hide_when_paused) or (\n state == \"stop\" and self.hide_when_stopped\n ):\n text = \"\"\n\n else:\n playlist_info = self._get_mpd().playlistinfo()\n try:\n song = playlist_info[song]\n except IndexError:\n song = {}\n try:\n next_song = playlist_info[next_song]\n except IndexError:\n next_song = {}\n\n song[\"state\"] = next_song[\"state\"] = self._state_character(state)\n\n def attr_getter(attr):\n if attr.startswith(\"next_\"):\n return song_attr(next_song, attr[5:])\n return song_attr(song, attr)\n\n text = self.py3.safe_format(self.format, attr_getter=attr_getter)\n if isinstance(text, Composite):\n text = text.text()\n\n self.current_status = (text, state)\n\n if self.use_idle:\n self.py3.update()\n # Note: mpd2 does not support more than 1 idle subsystem. so if\n # the user wants to listen on more than one, we listen on all\n # and loop until one we're interested in changed.\n # https://github.com/Mic92/python-mpd2/issues/107\n changed = self._get_mpd().idle()\n while not any([c in self.idle_subsystems for c in changed]):\n changed = self._get_mpd().idle()\n else:\n return\n\n except (ValueError, socket.error, ConnectionError, CommandError) as e:\n # ValueError can happen when status.get(...) returns None; e.g.\n # during reversal of playlist\n if isinstance(e, ValueError):\n text = \"No song information!\"\n if isinstance(e, socket.error):\n text = \"Failed to connect to mpd!\"\n if isinstance(e, ConnectionError):\n text = \"Error while connecting to mpd!\"\n self._get_mpd(disconnect=True)\n if isinstance(e, CommandError):\n text = \"Failed to authenticate to mpd!\"\n self._get_mpd(disconnect=True)\n\n state = None\n self.current_status = (text, status)\n return\n finally:\n self.py3.update() # to propagate error message\n\n def kill(self):\n self._get_mpd(disconnect=True)\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n\n module_test(Py3status)\n", "path": "py3status/modules/mpd_status.py"}]} | 3,348 | 107 |
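A tiny, self-contained repro of the logic error in the record above (gh_patches_debug_3154): because the buggy code stored the whole mpd status dict as the second element of `current_status`, the string comparisons in `mpd_status()` never match and no color is applied. The values below are made up for illustration.

```python
# The buggy assignment stored the status dict, not the state string.
status = {"state": "play", "volume": "100"}        # shape of MPDClient.status() output (illustrative)
text, state = ("[play] Some Song", status)         # current_status as written before the fix

color = None
if state == "play":                                # dict == str is always False
    color = "COLOR_PLAY"
print(color)                                       # None -> output stays uncolored

text, state = ("[play] Some Song", status["state"])  # the one-line fix: keep only the state string
if state == "play":
    color = "COLOR_PLAY"
print(color)                                       # COLOR_PLAY
```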
gh_patches_debug_21905 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-850 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error numpy.ndarray size changed
## 🐛 Bug
Hello,
I installed a clean Ubuntu 20.04 and NVIDIA driver 440 for an RTX 3080, then followed the instructions to train a model with NVIDIA NeMo Tacotron2.
I got the error below. It seems that the latest release of torchmetrics is not compatible with the latest release of numpy.
numpy fails with the following error:
ValueError: numpy.ndarray size changed, may indicate binary incompatibility. Expected 96 from C header, got 88 from PyObject
Here is the full stack trace:
```
pc@pc:~/Downloads$ sudo python3.8 tacotron2.py sample_rate=25050 train_dataset=/home/pc/Downloads/newcontent.json validation_datasets=/home/pc/Downloads/newcontent.json trainer.max_epochs=1 trainer.accelerator=null trainer.check_val_every_n_epoch=1
[sudo] password for pc:
Traceback (most recent call last):
  File "tacotron2.py", line 24, in <module>
    import pytorch_lightning as pl
  File "/usr/local/lib/python3.8/site-packages/pytorch_lightning/__init__.py", line 20, in <module>
    from pytorch_lightning.callbacks import Callback # noqa: E402
  File "/usr/local/lib/python3.8/site-packages/pytorch_lightning/callbacks/__init__.py", line 14, in <module>
    from pytorch_lightning.callbacks.base import Callback
  File "/usr/local/lib/python3.8/site-packages/pytorch_lightning/callbacks/base.py", line 26, in <module>
    from pytorch_lightning.utilities.types import STEP_OUTPUT
  File "/usr/local/lib/python3.8/site-packages/pytorch_lightning/utilities/types.py", line 25, in <module>
    from torchmetrics import Metric
  File "/usr/local/lib/python3.8/site-packages/torchmetrics/__init__.py", line 14, in <module>
    from torchmetrics import functional # noqa: E402
  File "/usr/local/lib/python3.8/site-packages/torchmetrics/functional/__init__.py", line 14, in <module>
    from torchmetrics.functional.audio.pit import permutation_invariant_training, pit, pit_permutate
  File "/usr/local/lib/python3.8/site-packages/torchmetrics/functional/audio/__init__.py", line 26, in <module>
    from torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality # noqa: F401
  File "/usr/local/lib/python3.8/site-packages/torchmetrics/functional/audio/pesq.py", line 20, in <module>
    import pesq as pesq_backend
  File "/usr/local/lib/python3.8/site-packages/pesq/__init__.py", line 6, in <module>
    from .cypesq import cypesq, cypesq_retvals, cypesq_error_message as pesq_error_message
  File "pesq/cypesq.pyx", line 1, in init cypesq
ValueError: numpy.ndarray size changed, may indicate binary incompatibility. Expected 96 from C header, got 88 from PyObject
```
### Environment
- PyTorch Version: 1.10.1
- OS: Ubuntu 20.10
- PyTorch installed with pip
- Python version: python3.8
- CUDA/cuDNN version: 11.3
- GPU models and configuration: RTX 3080
- Any other relevant information: MSI Creator P100X 637
Could you help me to solve this?
Thank you!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchmetrics/functional/audio/pesq.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import numpy as np
15
16 from torchmetrics.utilities.imports import _PESQ_AVAILABLE
17
18 if _PESQ_AVAILABLE:
19 import pesq as pesq_backend
20 else:
21 pesq_backend = None
22 import torch
23 from torch import Tensor
24
25 from torchmetrics.utilities.checks import _check_same_shape
26
27 __doctest_requires__ = {("perceptual_evaluation_speech_quality",): ["pesq"]}
28
29
30 def perceptual_evaluation_speech_quality(
31 preds: Tensor, target: Tensor, fs: int, mode: str, keep_same_device: bool = False
32 ) -> Tensor:
33 r"""PESQ (Perceptual Evaluation of Speech Quality)
34
35 This is a wrapper for the ``pesq`` package [1]. Note that input will be moved to `cpu`
36 to perform the metric calculation.
37
38 .. note:: using this metrics requires you to have ``pesq`` install. Either install as ``pip install
39 torchmetrics[audio]`` or ``pip install pesq``
40
41 Args:
42 preds:
43 shape ``[...,time]``
44 target:
45 shape ``[...,time]``
46 fs:
47 sampling frequency, should be 16000 or 8000 (Hz)
48 mode:
49 'wb' (wide-band) or 'nb' (narrow-band)
50 keep_same_device:
51 whether to move the pesq value to the device of preds
52
53 Returns:
54 pesq value of shape [...]
55
56 Raises:
57 ModuleNotFoundError:
58 If ``peqs`` package is not installed
59 ValueError:
60 If ``fs`` is not either ``8000`` or ``16000``
61 ValueError:
62 If ``mode`` is not either ``"wb"`` or ``"nb"``
63
64 Example:
65 >>> from torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality
66 >>> import torch
67 >>> g = torch.manual_seed(1)
68 >>> preds = torch.randn(8000)
69 >>> target = torch.randn(8000)
70 >>> perceptual_evaluation_speech_quality(preds, target, 8000, 'nb')
71 tensor(2.2076)
72 >>> perceptual_evaluation_speech_quality(preds, target, 16000, 'wb')
73 tensor(1.7359)
74
75 References:
76 [1] https://github.com/ludlows/python-pesq
77 """
78 if not _PESQ_AVAILABLE:
79 raise ModuleNotFoundError(
80 "PESQ metric requires that pesq is installed."
81 " Either install as `pip install torchmetrics[audio]` or `pip install pesq`."
82 )
83 if fs not in (8000, 16000):
84 raise ValueError(f"Expected argument `fs` to either be 8000 or 16000 but got {fs}")
85 if mode not in ("wb", "nb"):
86 raise ValueError(f"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}")
87 _check_same_shape(preds, target)
88
89 if preds.ndim == 1:
90 pesq_val_np = pesq_backend.pesq(fs, target.detach().cpu().numpy(), preds.detach().cpu().numpy(), mode)
91 pesq_val = torch.tensor(pesq_val_np)
92 else:
93 preds_np = preds.reshape(-1, preds.shape[-1]).detach().cpu().numpy()
94 target_np = target.reshape(-1, preds.shape[-1]).detach().cpu().numpy()
95 pesq_val_np = np.empty(shape=(preds_np.shape[0]))
96 for b in range(preds_np.shape[0]):
97 pesq_val_np[b] = pesq_backend.pesq(fs, target_np[b, :], preds_np[b, :], mode)
98 pesq_val = torch.from_numpy(pesq_val_np)
99 pesq_val = pesq_val.reshape(preds.shape[:-1])
100
101 if keep_same_device:
102 pesq_val = pesq_val.to(preds.device)
103
104 return pesq_val
105
```
Path: `torchmetrics/audio/pesq.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, Dict, Optional
15
16 from torch import Tensor, tensor
17
18 from torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality
19 from torchmetrics.metric import Metric
20 from torchmetrics.utilities.imports import _PESQ_AVAILABLE
21
22 __doctest_requires__ = {("PerceptualEvaluationSpeechQuality"): ["pesq"]}
23
24
25 class PerceptualEvaluationSpeechQuality(Metric):
26 """Perceptual Evaluation of Speech Quality (PESQ)
27
28 This is a wrapper for the pesq package [1]. . Note that input will be moved to `cpu`
29 to perform the metric calculation.
30
31 .. note:: using this metrics requires you to have ``pesq`` install. Either install as ``pip install
32 torchmetrics[audio]`` or ``pip install pesq``
33
34 Forward accepts
35
36 - ``preds``: ``shape [...,time]``
37 - ``target``: ``shape [...,time]``
38
39 Args:
40 fs:
41 sampling frequency, should be 16000 or 8000 (Hz)
42 mode:
43 'wb' (wide-band) or 'nb' (narrow-band)
44 keep_same_device:
45 whether to move the pesq value to the device of preds
46 compute_on_step:
47 Forward only calls ``update()`` and returns None if this is set to False.
48
49 .. deprecated:: v0.8
50 Argument has no use anymore and will be removed v0.9.
51
52 kwargs:
53 Additional keyword arguments, see :ref:`Metric kwargs` for more info.
54
55 Raises:
56 ModuleNotFoundError:
57 If ``peqs`` package is not installed
58 ValueError:
59 If ``fs`` is not either ``8000`` or ``16000``
60 ValueError:
61 If ``mode`` is not either ``"wb"`` or ``"nb"``
62
63 Example:
64 >>> from torchmetrics.audio.pesq import PerceptualEvaluationSpeechQuality
65 >>> import torch
66 >>> g = torch.manual_seed(1)
67 >>> preds = torch.randn(8000)
68 >>> target = torch.randn(8000)
69 >>> nb_pesq = PerceptualEvaluationSpeechQuality(8000, 'nb')
70 >>> nb_pesq(preds, target)
71 tensor(2.2076)
72 >>> wb_pesq = PerceptualEvaluationSpeechQuality(16000, 'wb')
73 >>> wb_pesq(preds, target)
74 tensor(1.7359)
75
76 References:
77 [1] https://github.com/ludlows/python-pesq
78 """
79
80 sum_pesq: Tensor
81 total: Tensor
82 is_differentiable = False
83 higher_is_better = True
84
85 def __init__(
86 self,
87 fs: int,
88 mode: str,
89 compute_on_step: Optional[bool] = None,
90 **kwargs: Dict[str, Any],
91 ) -> None:
92 super().__init__(compute_on_step=compute_on_step, **kwargs)
93 if not _PESQ_AVAILABLE:
94 raise ModuleNotFoundError(
95 "PerceptualEvaluationSpeechQuality metric requires that `pesq` is installed."
96 " Either install as `pip install torchmetrics[audio]` or `pip install pesq`."
97 )
98 if fs not in (8000, 16000):
99 raise ValueError(f"Expected argument `fs` to either be 8000 or 16000 but got {fs}")
100 self.fs = fs
101 if mode not in ("wb", "nb"):
102 raise ValueError(f"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}")
103 self.mode = mode
104
105 self.add_state("sum_pesq", default=tensor(0.0), dist_reduce_fx="sum")
106 self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
107
108 def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
109 """Update state with predictions and targets.
110
111 Args:
112 preds: Predictions from model
113 target: Ground truth values
114 """
115 pesq_batch = perceptual_evaluation_speech_quality(preds, target, self.fs, self.mode, False).to(
116 self.sum_pesq.device
117 )
118
119 self.sum_pesq += pesq_batch.sum()
120 self.total += pesq_batch.numel()
121
122 def compute(self) -> Tensor:
123 """Computes average PESQ."""
124 return self.sum_pesq / self.total
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchmetrics/audio/pesq.py b/torchmetrics/audio/pesq.py
--- a/torchmetrics/audio/pesq.py
+++ b/torchmetrics/audio/pesq.py
@@ -29,7 +29,9 @@
to perform the metric calculation.
.. note:: using this metrics requires you to have ``pesq`` install. Either install as ``pip install
- torchmetrics[audio]`` or ``pip install pesq``
+ torchmetrics[audio]`` or ``pip install pesq``. Note that ``pesq`` will compile with your currently
+ installed version of numpy, meaning that if you upgrade numpy at some point in the future you will
+ most likely have to reinstall ``pesq``.
Forward accepts
diff --git a/torchmetrics/functional/audio/pesq.py b/torchmetrics/functional/audio/pesq.py
--- a/torchmetrics/functional/audio/pesq.py
+++ b/torchmetrics/functional/audio/pesq.py
@@ -36,7 +36,9 @@
to perform the metric calculation.
.. note:: using this metrics requires you to have ``pesq`` install. Either install as ``pip install
- torchmetrics[audio]`` or ``pip install pesq``
+ torchmetrics[audio]`` or ``pip install pesq``. Note that ``pesq`` will compile with your currently
+ installed version of numpy, meaning that if you upgrade numpy at some point in the future you will
+ most likely have to reinstall ``pesq``.
Args:
preds:
| {"golden_diff": "diff --git a/torchmetrics/audio/pesq.py b/torchmetrics/audio/pesq.py\n--- a/torchmetrics/audio/pesq.py\n+++ b/torchmetrics/audio/pesq.py\n@@ -29,7 +29,9 @@\n to perform the metric calculation.\n \n .. note:: using this metrics requires you to have ``pesq`` install. Either install as ``pip install\n- torchmetrics[audio]`` or ``pip install pesq``\n+ torchmetrics[audio]`` or ``pip install pesq``. Note that ``pesq`` will compile with your currently\n+ installed version of numpy, meaning that if you upgrade numpy at some point in the future you will\n+ most likely have to reinstall ``pesq``.\n \n Forward accepts\n \ndiff --git a/torchmetrics/functional/audio/pesq.py b/torchmetrics/functional/audio/pesq.py\n--- a/torchmetrics/functional/audio/pesq.py\n+++ b/torchmetrics/functional/audio/pesq.py\n@@ -36,7 +36,9 @@\n to perform the metric calculation.\n \n .. note:: using this metrics requires you to have ``pesq`` install. Either install as ``pip install\n- torchmetrics[audio]`` or ``pip install pesq``\n+ torchmetrics[audio]`` or ``pip install pesq``. Note that ``pesq`` will compile with your currently\n+ installed version of numpy, meaning that if you upgrade numpy at some point in the future you will\n+ most likely have to reinstall ``pesq``.\n \n Args:\n preds:\n", "issue": "Error numpy.ndarray size changed\n## \ud83d\udc1b Bug\r\n\r\nHello,\r\n\r\nI Installed a clean Ubuntu 20.04, installed nvidia drivers 440 for RTX 3080. Then I followed the instructions to train a model with NVIDIA NeMo Tacotron2.\r\n\r\nI got the next error. It seems that the latest release of metrics is not compatible with the latest release of numpy.\r\n\r\nnumpy fails with the next error:\r\nvalueerror: numpy.ndarray size changed, may indicate binary incompatibility. 
expected 96 from c header, got 88 from pyobject\r\n\r\nI share the full stack trace:\r\n```\r\npc@pc:~/Downloads$ sudo python3.8 tacotron2.py sample_rate=25050 train_dataset=/home/pc/Downloads/newcontent.json validation_datasets=/home/pc/Downloads/newcontent.json trainer.max_epochs=1 trainer.accelerator=null trainer.check_val_every_n_epoch=1\r\n[sudo] password for pc:\r\nTraceback (most recent call last):\r\nFile \"tacotron2.py\", line 24, in\r\nimport pytorch_lightning as pl\r\nFile \"/usr/local/lib/python3.8/site-packages/pytorch_lightning/init.py\", line 20, in\r\nfrom pytorch_lightning.callbacks import Callback # noqa: E402\r\nFile \"/usr/local/lib/python3.8/site-packages/pytorch_lightning/callbacks/init.py\", line 14, in\r\nfrom pytorch_lightning.callbacks.base import Callback\r\nFile \"/usr/local/lib/python3.8/site-packages/pytorch_lightning/callbacks/base.py\", line 26, in\r\nfrom pytorch_lightning.utilities.types import STEP_OUTPUT\r\nFile \"/usr/local/lib/python3.8/site-packages/pytorch_lightning/utilities/types.py\", line 25, in\r\nfrom torchmetrics import Metric\r\nFile \"/usr/local/lib/python3.8/site-packages/torchmetrics/init.py\", line 14, in\r\nfrom torchmetrics import functional # noqa: E402\r\nFile \"/usr/local/lib/python3.8/site-packages/torchmetrics/functional/init.py\", line 14, in\r\nfrom torchmetrics.functional.audio.pit import permutation_invariant_training, pit, pit_permutate\r\nFile \"/usr/local/lib/python3.8/site-packages/torchmetrics/functional/audio/init.py\", line 26, in\r\nfrom torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality # noqa: F401\r\nFile \"/usr/local/lib/python3.8/site-packages/torchmetrics/functional/audio/pesq.py\", line 20, in\r\nimport pesq as pesq_backend\r\nFile \"/usr/local/lib/python3.8/site-packages/pesq/init.py\", line 6, in\r\nfrom .cypesq import cypesq, cypesq_retvals, cypesq_error_message as pesq_error_message\r\nFile \"pesq/cypesq.pyx\", line 1, in init cypesq\r\nValueError: numpy.ndarray size changed, may indicate binary incompatibility. 
Expected 96 from C header, got 88 from PyObject\r\n```\r\n\r\n### Environment\r\n\r\n- PyTorch Version: 1.10.1\r\n- OS: Ubuntu 20.10\r\n- PyTorch installed with pip\r\n- Python version: python3.8\r\n- CUDA/cuDNN version: 11.3\r\n- GPU models and configuration: RTX 3080\r\n- Any other relevant information: MSI Creator P100X 637\r\n\r\nCould you help me to solve this?\r\n\r\nThank you!\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\n\nfrom torchmetrics.utilities.imports import _PESQ_AVAILABLE\n\nif _PESQ_AVAILABLE:\n import pesq as pesq_backend\nelse:\n pesq_backend = None\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n__doctest_requires__ = {(\"perceptual_evaluation_speech_quality\",): [\"pesq\"]}\n\n\ndef perceptual_evaluation_speech_quality(\n preds: Tensor, target: Tensor, fs: int, mode: str, keep_same_device: bool = False\n) -> Tensor:\n r\"\"\"PESQ (Perceptual Evaluation of Speech Quality)\n\n This is a wrapper for the ``pesq`` package [1]. Note that input will be moved to `cpu`\n to perform the metric calculation.\n\n .. note:: using this metrics requires you to have ``pesq`` install. Either install as ``pip install\n torchmetrics[audio]`` or ``pip install pesq``\n\n Args:\n preds:\n shape ``[...,time]``\n target:\n shape ``[...,time]``\n fs:\n sampling frequency, should be 16000 or 8000 (Hz)\n mode:\n 'wb' (wide-band) or 'nb' (narrow-band)\n keep_same_device:\n whether to move the pesq value to the device of preds\n\n Returns:\n pesq value of shape [...]\n\n Raises:\n ModuleNotFoundError:\n If ``peqs`` package is not installed\n ValueError:\n If ``fs`` is not either ``8000`` or ``16000``\n ValueError:\n If ``mode`` is not either ``\"wb\"`` or ``\"nb\"``\n\n Example:\n >>> from torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality\n >>> import torch\n >>> g = torch.manual_seed(1)\n >>> preds = torch.randn(8000)\n >>> target = torch.randn(8000)\n >>> perceptual_evaluation_speech_quality(preds, target, 8000, 'nb')\n tensor(2.2076)\n >>> perceptual_evaluation_speech_quality(preds, target, 16000, 'wb')\n tensor(1.7359)\n\n References:\n [1] https://github.com/ludlows/python-pesq\n \"\"\"\n if not _PESQ_AVAILABLE:\n raise ModuleNotFoundError(\n \"PESQ metric requires that pesq is installed.\"\n \" Either install as `pip install torchmetrics[audio]` or `pip install pesq`.\"\n )\n if fs not in (8000, 16000):\n raise ValueError(f\"Expected argument `fs` to either be 8000 or 16000 but got {fs}\")\n if mode not in (\"wb\", \"nb\"):\n raise ValueError(f\"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}\")\n _check_same_shape(preds, target)\n\n if preds.ndim == 1:\n pesq_val_np = pesq_backend.pesq(fs, target.detach().cpu().numpy(), preds.detach().cpu().numpy(), mode)\n pesq_val = torch.tensor(pesq_val_np)\n else:\n preds_np = preds.reshape(-1, preds.shape[-1]).detach().cpu().numpy()\n target_np = 
target.reshape(-1, preds.shape[-1]).detach().cpu().numpy()\n pesq_val_np = np.empty(shape=(preds_np.shape[0]))\n for b in range(preds_np.shape[0]):\n pesq_val_np[b] = pesq_backend.pesq(fs, target_np[b, :], preds_np[b, :], mode)\n pesq_val = torch.from_numpy(pesq_val_np)\n pesq_val = pesq_val.reshape(preds.shape[:-1])\n\n if keep_same_device:\n pesq_val = pesq_val.to(preds.device)\n\n return pesq_val\n", "path": "torchmetrics/functional/audio/pesq.py"}, {"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional\n\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities.imports import _PESQ_AVAILABLE\n\n__doctest_requires__ = {(\"PerceptualEvaluationSpeechQuality\"): [\"pesq\"]}\n\n\nclass PerceptualEvaluationSpeechQuality(Metric):\n \"\"\"Perceptual Evaluation of Speech Quality (PESQ)\n\n This is a wrapper for the pesq package [1]. . Note that input will be moved to `cpu`\n to perform the metric calculation.\n\n .. note:: using this metrics requires you to have ``pesq`` install. Either install as ``pip install\n torchmetrics[audio]`` or ``pip install pesq``\n\n Forward accepts\n\n - ``preds``: ``shape [...,time]``\n - ``target``: ``shape [...,time]``\n\n Args:\n fs:\n sampling frequency, should be 16000 or 8000 (Hz)\n mode:\n 'wb' (wide-band) or 'nb' (narrow-band)\n keep_same_device:\n whether to move the pesq value to the device of preds\n compute_on_step:\n Forward only calls ``update()`` and returns None if this is set to False.\n\n .. 
deprecated:: v0.8\n Argument has no use anymore and will be removed v0.9.\n\n kwargs:\n Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Raises:\n ModuleNotFoundError:\n If ``peqs`` package is not installed\n ValueError:\n If ``fs`` is not either ``8000`` or ``16000``\n ValueError:\n If ``mode`` is not either ``\"wb\"`` or ``\"nb\"``\n\n Example:\n >>> from torchmetrics.audio.pesq import PerceptualEvaluationSpeechQuality\n >>> import torch\n >>> g = torch.manual_seed(1)\n >>> preds = torch.randn(8000)\n >>> target = torch.randn(8000)\n >>> nb_pesq = PerceptualEvaluationSpeechQuality(8000, 'nb')\n >>> nb_pesq(preds, target)\n tensor(2.2076)\n >>> wb_pesq = PerceptualEvaluationSpeechQuality(16000, 'wb')\n >>> wb_pesq(preds, target)\n tensor(1.7359)\n\n References:\n [1] https://github.com/ludlows/python-pesq\n \"\"\"\n\n sum_pesq: Tensor\n total: Tensor\n is_differentiable = False\n higher_is_better = True\n\n def __init__(\n self,\n fs: int,\n mode: str,\n compute_on_step: Optional[bool] = None,\n **kwargs: Dict[str, Any],\n ) -> None:\n super().__init__(compute_on_step=compute_on_step, **kwargs)\n if not _PESQ_AVAILABLE:\n raise ModuleNotFoundError(\n \"PerceptualEvaluationSpeechQuality metric requires that `pesq` is installed.\"\n \" Either install as `pip install torchmetrics[audio]` or `pip install pesq`.\"\n )\n if fs not in (8000, 16000):\n raise ValueError(f\"Expected argument `fs` to either be 8000 or 16000 but got {fs}\")\n self.fs = fs\n if mode not in (\"wb\", \"nb\"):\n raise ValueError(f\"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}\")\n self.mode = mode\n\n self.add_state(\"sum_pesq\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0), dist_reduce_fx=\"sum\")\n\n def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\n \"\"\"Update state with predictions and targets.\n\n Args:\n preds: Predictions from model\n target: Ground truth values\n \"\"\"\n pesq_batch = perceptual_evaluation_speech_quality(preds, target, self.fs, self.mode, False).to(\n self.sum_pesq.device\n )\n\n self.sum_pesq += pesq_batch.sum()\n self.total += pesq_batch.numel()\n\n def compute(self) -> Tensor:\n \"\"\"Computes average PESQ.\"\"\"\n return self.sum_pesq / self.total\n", "path": "torchmetrics/audio/pesq.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\n\nfrom torchmetrics.utilities.imports import _PESQ_AVAILABLE\n\nif _PESQ_AVAILABLE:\n import pesq as pesq_backend\nelse:\n pesq_backend = None\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n__doctest_requires__ = {(\"perceptual_evaluation_speech_quality\",): [\"pesq\"]}\n\n\ndef perceptual_evaluation_speech_quality(\n preds: Tensor, target: Tensor, fs: int, mode: str, keep_same_device: bool = False\n) -> Tensor:\n r\"\"\"PESQ (Perceptual Evaluation of Speech Quality)\n\n This is a wrapper for the 
``pesq`` package [1]. Note that input will be moved to `cpu`\n to perform the metric calculation.\n\n .. note:: using this metrics requires you to have ``pesq`` install. Either install as ``pip install\n torchmetrics[audio]`` or ``pip install pesq``. Note that ``pesq`` will compile with your currently\n installed version of numpy, meaning that if you upgrade numpy at some point in the future you will\n most likely have to reinstall ``pesq``.\n\n Args:\n preds:\n shape ``[...,time]``\n target:\n shape ``[...,time]``\n fs:\n sampling frequency, should be 16000 or 8000 (Hz)\n mode:\n 'wb' (wide-band) or 'nb' (narrow-band)\n keep_same_device:\n whether to move the pesq value to the device of preds\n\n Returns:\n pesq value of shape [...]\n\n Raises:\n ModuleNotFoundError:\n If ``peqs`` package is not installed\n ValueError:\n If ``fs`` is not either ``8000`` or ``16000``\n ValueError:\n If ``mode`` is not either ``\"wb\"`` or ``\"nb\"``\n\n Example:\n >>> from torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality\n >>> import torch\n >>> g = torch.manual_seed(1)\n >>> preds = torch.randn(8000)\n >>> target = torch.randn(8000)\n >>> perceptual_evaluation_speech_quality(preds, target, 8000, 'nb')\n tensor(2.2076)\n >>> perceptual_evaluation_speech_quality(preds, target, 16000, 'wb')\n tensor(1.7359)\n\n References:\n [1] https://github.com/ludlows/python-pesq\n \"\"\"\n if not _PESQ_AVAILABLE:\n raise ModuleNotFoundError(\n \"PESQ metric requires that pesq is installed.\"\n \" Either install as `pip install torchmetrics[audio]` or `pip install pesq`.\"\n )\n if fs not in (8000, 16000):\n raise ValueError(f\"Expected argument `fs` to either be 8000 or 16000 but got {fs}\")\n if mode not in (\"wb\", \"nb\"):\n raise ValueError(f\"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}\")\n _check_same_shape(preds, target)\n\n if preds.ndim == 1:\n pesq_val_np = pesq_backend.pesq(fs, target.detach().cpu().numpy(), preds.detach().cpu().numpy(), mode)\n pesq_val = torch.tensor(pesq_val_np)\n else:\n preds_np = preds.reshape(-1, preds.shape[-1]).detach().cpu().numpy()\n target_np = target.reshape(-1, preds.shape[-1]).detach().cpu().numpy()\n pesq_val_np = np.empty(shape=(preds_np.shape[0]))\n for b in range(preds_np.shape[0]):\n pesq_val_np[b] = pesq_backend.pesq(fs, target_np[b, :], preds_np[b, :], mode)\n pesq_val = torch.from_numpy(pesq_val_np)\n pesq_val = pesq_val.reshape(preds.shape[:-1])\n\n if keep_same_device:\n pesq_val = pesq_val.to(preds.device)\n\n return pesq_val\n", "path": "torchmetrics/functional/audio/pesq.py"}, {"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional\n\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities.imports import _PESQ_AVAILABLE\n\n__doctest_requires__ = {(\"PerceptualEvaluationSpeechQuality\"): 
[\"pesq\"]}\n\n\nclass PerceptualEvaluationSpeechQuality(Metric):\n \"\"\"Perceptual Evaluation of Speech Quality (PESQ)\n\n This is a wrapper for the pesq package [1]. . Note that input will be moved to `cpu`\n to perform the metric calculation.\n\n .. note:: using this metrics requires you to have ``pesq`` install. Either install as ``pip install\n torchmetrics[audio]`` or ``pip install pesq``. Note that ``pesq`` will compile with your currently\n installed version of numpy, meaning that if you upgrade numpy at some point in the future you will\n most likely have to reinstall ``pesq``.\n\n Forward accepts\n\n - ``preds``: ``shape [...,time]``\n - ``target``: ``shape [...,time]``\n\n Args:\n fs:\n sampling frequency, should be 16000 or 8000 (Hz)\n mode:\n 'wb' (wide-band) or 'nb' (narrow-band)\n keep_same_device:\n whether to move the pesq value to the device of preds\n compute_on_step:\n Forward only calls ``update()`` and returns None if this is set to False.\n\n .. deprecated:: v0.8\n Argument has no use anymore and will be removed v0.9.\n\n kwargs:\n Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Raises:\n ModuleNotFoundError:\n If ``peqs`` package is not installed\n ValueError:\n If ``fs`` is not either ``8000`` or ``16000``\n ValueError:\n If ``mode`` is not either ``\"wb\"`` or ``\"nb\"``\n\n Example:\n >>> from torchmetrics.audio.pesq import PerceptualEvaluationSpeechQuality\n >>> import torch\n >>> g = torch.manual_seed(1)\n >>> preds = torch.randn(8000)\n >>> target = torch.randn(8000)\n >>> nb_pesq = PerceptualEvaluationSpeechQuality(8000, 'nb')\n >>> nb_pesq(preds, target)\n tensor(2.2076)\n >>> wb_pesq = PerceptualEvaluationSpeechQuality(16000, 'wb')\n >>> wb_pesq(preds, target)\n tensor(1.7359)\n\n References:\n [1] https://github.com/ludlows/python-pesq\n \"\"\"\n\n sum_pesq: Tensor\n total: Tensor\n is_differentiable = False\n higher_is_better = True\n\n def __init__(\n self,\n fs: int,\n mode: str,\n compute_on_step: Optional[bool] = None,\n **kwargs: Dict[str, Any],\n ) -> None:\n super().__init__(compute_on_step=compute_on_step, **kwargs)\n if not _PESQ_AVAILABLE:\n raise ModuleNotFoundError(\n \"PerceptualEvaluationSpeechQuality metric requires that `pesq` is installed.\"\n \" Either install as `pip install torchmetrics[audio]` or `pip install pesq`.\"\n )\n if fs not in (8000, 16000):\n raise ValueError(f\"Expected argument `fs` to either be 8000 or 16000 but got {fs}\")\n self.fs = fs\n if mode not in (\"wb\", \"nb\"):\n raise ValueError(f\"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}\")\n self.mode = mode\n\n self.add_state(\"sum_pesq\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0), dist_reduce_fx=\"sum\")\n\n def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\n \"\"\"Update state with predictions and targets.\n\n Args:\n preds: Predictions from model\n target: Ground truth values\n \"\"\"\n pesq_batch = perceptual_evaluation_speech_quality(preds, target, self.fs, self.mode, False).to(\n self.sum_pesq.device\n )\n\n self.sum_pesq += pesq_batch.sum()\n self.total += pesq_batch.numel()\n\n def compute(self) -> Tensor:\n \"\"\"Computes average PESQ.\"\"\"\n return self.sum_pesq / self.total\n", "path": "torchmetrics/audio/pesq.py"}]} | 3,645 | 350 |
gh_patches_debug_8688 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-2547 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] aws default vpc should not be required.
**Describe the bug**
nvflare commands bomb out if no default VPC is defined in the AWS account. Many AWS accounts (per AWS recommendation) have no default VPC assigned; however, they may have exactly one VPC. In that case (check `aws ec2 describe-vpcs`) there is no need to ask the user; just pick that one. If there is none, tell the user to create a default VPC if they have the permissions to do so (`aws ec2 create-default-vpc`), or have their admin create one.
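A minimal sketch of that fallback logic, assuming the standard EC2 `describe_vpcs` API; it is not from the original report, it uses boto3 rather than the shell script's aws CLI calls, and the function name is illustrative only:
```python
import boto3


def pick_vpc_id() -> str:
    """Return the default VPC if present, else the only VPC, else fail with guidance."""
    ec2 = boto3.client("ec2")
    vpcs = ec2.describe_vpcs()["Vpcs"]
    default = [v for v in vpcs if v.get("IsDefault")]
    if default:
        return default[0]["VpcId"]
    if len(vpcs) == 1:  # exactly one non-default VPC: use it without prompting
        return vpcs[0]["VpcId"]
    raise RuntimeError(
        "No default VPC and no unambiguous VPC found; run `aws ec2 create-default-vpc` "
        "or ask an account admin to create one."
    )
```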
**To Reproduce**
```
dp@grammy:~/NVFlare$ nvflare dashboard --cloud aws
Dashboard launch script for cloud is written at /home/dp/NVFlare/aws_start_dsb.sh. Now running the script.
This script requires aws (AWS CLI), sshpass, dig and jq. Now checking if they are installed.
Checking if aws exists. => found
Checking if sshpass exists. => found
Checking if dig exists. => found
Checking if jq exists. => found
One initial user will be created when starting dashboard.
Please enter the email address for this user.
[email protected]
Generating key pair for VM
An error occurred (VPCIdNotSpecified) when calling the CreateSecurityGroup operation: No default VPC for this user
Security group id:
usage: aws [options] <command> <subcommand> [<subcommand> ...] [parameters]
To see help text, you can run:
aws help
aws <command> help
aws <command> <subcommand> help
aws: error: argument --group-id: expected one argument
usage: aws [options] <command> <subcommand> [<subcommand> ...] [parameters]
To see help text, you can run:
aws help
aws <command> help
aws <command> <subcommand> help
aws: error: argument --group-id: expected one argument
```
**Desktop (please complete the following information):**
- OS: [e.g. wsl windows 11]
- Python Version [e.g. 3.9]
- NVFlare Version [e.g. 2.4]
**Other comments:**
Ask ChatGPT/Claude:
Does AWS recommend not creating a default VPC for enterprise AWS accounts? 
Yes, AWS generally recommends not creating a default VPC for enterprise AWS accounts. Instead, they recommend creating custom VPCs tailored to your specific network requirements.
Here are a few reasons why:
- **Security:** Default VPCs have predetermined settings, which might not align with your organization's security policies. Creating a custom VPC allows you to configure security groups, network ACLs, and other settings as per your needs.
- **IP Address Range:** Default VPCs come with a predetermined IP address range. This might lead to IP address conflicts if you have multiple AWS accounts or if you need to connect your AWS resources with your on-premises network. With a custom VPC, you can choose your IP address range.
- **VPC Peering:** If you need to set up VPC peering between multiple VPCs, using default VPCs can make the process more complex due to potential overlapping IP address ranges.
- **Compliance:** Depending on your industry, you might have to adhere to certain compliance requirements. Default VPCs might not be set up to meet these requirements out of the box.
- **Customization:** Custom VPCs give you complete control over your network setup, including subnets, route tables, gateways, and more. This level of customization is essential for many enterprise setups.
For these reasons, AWS recommends creating custom VPCs for enterprise accounts, as it provides more flexibility, security, and control over your network infrastructure.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nvflare/dashboard/cli.py`
Content:
```
1 # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import argparse
16 import os
17 import signal
18 import subprocess
19 import sys
20
21 import docker
22 import nvflare
23 from nvflare.apis.utils.format_check import name_check
24 from nvflare.lighter import tplt_utils, utils
25
26 supported_csp = ("azure", "aws")
27
28
29 def start(args):
30 cwd = os.getcwd()
31 if not args.folder:
32 folder = cwd
33 else:
34 folder = os.path.join(cwd, args.folder)
35 environment = dict()
36 env_vars = args.env
37 if env_vars:
38 for e in env_vars:
39 splitted = e.split("=")
40 environment[splitted[0]] = splitted[1]
41 passphrase = args.passphrase
42 if passphrase:
43 environment["NVFL_DASHBOARD_PP"] = passphrase
44 if args.cred:
45 environment.update({"NVFL_CREDENTIAL": args.cred})
46 elif not os.path.exists(os.path.join(folder, ".db_init_done")):
47 need_email = True
48 while need_email:
49 answer = input(
50 "Please provide project admin email address. This person will be the super user of the dashboard and this project.\n"
51 )
52 error, reason = name_check(answer, "email")
53 if error:
54 print(f"Expecting an email address, but got one in an invalid format. Reason: {reason}")
55 else:
56 need_email = False
57 print("generating random password")
58 pwd = utils.generate_password(8)
59 print(f"Project admin credential is {answer} and the password is {pwd}")
60 environment.update({"NVFL_CREDENTIAL": f"{answer}:{pwd}"})
61 if args.local:
62 return start_local(environment)
63 try:
64 client = docker.from_env()
65 except docker.errors.DockerException:
66 print("Unable to communicate to docker daemon/socket. Please make sure your docker is up and running.")
67 exit(0)
68 version = nvflare.__version__
69 dashboard_image = f"nvflare/nvflare:{version}"
70 if args.image:
71 if dashboard_image != args.image:
72 print(
73 f"Current dashboard container image is nvflare/nvflare:{version}, but requesting to use {args.image}. Use it at your own risk."
74 )
75 dashboard_image = args.image
76 try:
77 print(f"Pulling {dashboard_image}, may take some time to finish.")
78 _ = client.images.pull(dashboard_image)
79 except docker.errors.APIError:
80 print(f"unable to pull {dashboard_image}")
81 exit(1)
82 print(f"Launching {dashboard_image}")
83 print(f"Dashboard will listen to port {args.port}")
84 print(f"{folder} on host mounted to /var/tmp/nvflare/dashboard in container")
85 if environment:
86 print(f"environment vars set to {environment}")
87 else:
88 print("No additional environment variables set to the launched container.")
89 try:
90 container_obj = client.containers.run(
91 dashboard_image,
92 entrypoint=["/usr/local/bin/python3", "nvflare/dashboard/wsgi.py"],
93 detach=True,
94 auto_remove=True,
95 name="nvflare-dashboard",
96 ports={8443: args.port},
97 volumes={folder: {"bind": "/var/tmp/nvflare/dashboard", "model": "rw"}},
98 environment=environment,
99 )
100 except docker.errors.APIError as e:
101 print(f"Either {dashboard_image} image does not exist or another nvflare-dashboard instance is still running.")
102 print("Please either provide an existing container image or stop the running container instance.")
103 print(e)
104 exit(1)
105 if container_obj:
106 print("Dashboard container started")
107 print("Container name nvflare-dashboard")
108 print(f"id is {container_obj.id}")
109 else:
110 print("Container failed to start")
111
112
113 def start_local(env):
114 print("Local dashboard without docker is for development and test only")
115 file_dir_path = os.path.dirname(__file__)
116 wsgi_location = os.path.join(file_dir_path, "wsgi.py")
117 cmd = [sys.executable, wsgi_location]
118 env.update({"NVFL_WEB_ROOT": os.path.dirname(os.path.abspath(__file__))})
119 process_status = subprocess.run(args=cmd, env=env)
120 return process_status
121
122
123 def stop():
124 try:
125 client = docker.from_env()
126 except docker.errors.DockerException:
127 print("Unable to communicate to docker daemon/socket. Please make sure your docker is up and running.")
128 exit(0)
129 try:
130 container_obj = client.containers.get("nvflare-dashboard")
131 except docker.errors.NotFound:
132 print("No nvflare-dashboard container found")
133 exit(0)
134 container_obj.kill(signal=signal.SIGINT)
135 print("nvflare-dashboard exited")
136
137
138 def cloud(args):
139 lighter_folder = os.path.dirname(utils.__file__)
140 template = utils.load_yaml(os.path.join(lighter_folder, "impl", "master_template.yml"))
141 tplt = tplt_utils.Template(template)
142 cwd = os.getcwd()
143 csp = args.cloud
144 dest = os.path.join(cwd, f"{csp}_start_dsb.sh")
145 dsb_start = template[f"{csp}_start_dsb_sh"]
146 version = nvflare.__version__
147 replacement_dict = {"NVFLARE": f"nvflare=={version}", "START_OPT": f"-i {args.image}" if args.image else ""}
148 utils._write(
149 dest,
150 utils.sh_replace(tplt.get_cloud_script_header() + dsb_start, replacement_dict),
151 "t",
152 exe=True,
153 )
154 print(f"Dashboard launch script for cloud is written at {dest}. Now running the script.")
155 _ = subprocess.run(dest)
156 os.remove(dest)
157
158
159 def has_no_arguments() -> bool:
160 last_item = sys.argv[-1]
161 return (
162 last_item.endswith("dashboard.cli") or last_item.endswith("dashboard/cli.py") or last_item.endswith("dashboard")
163 )
164
165
166 def main():
167 parser = argparse.ArgumentParser()
168 define_dashboard_parser(parser)
169 args = parser.parse_args()
170 handle_dashboard(args)
171
172
173 def define_dashboard_parser(parser):
174 parser.add_argument(
175 "--cloud",
176 type=str,
177 default="",
178 help="launch dashboard on cloud service provider (ex: --cloud azure or --cloud aws)",
179 )
180 parser.add_argument("--start", action="store_true", help="start dashboard")
181 parser.add_argument("--stop", action="store_true", help="stop dashboard")
182 parser.add_argument("-p", "--port", type=str, default="443", help="port to listen")
183 parser.add_argument(
184 "-f", "--folder", type=str, help="folder containing necessary info (default: current working directory)"
185 )
186 parser.add_argument(
187 "--passphrase", help="Passphrase to encrypt/decrypt root CA private key. !!! Do not share it with others. !!!"
188 )
189 parser.add_argument("-e", "--env", action="append", help="additonal environment variables: var1=value1")
190 parser.add_argument("--cred", help="set credential directly in the form of USER_EMAIL:PASSWORD")
191 parser.add_argument("-i", "--image", help="set the container image name")
192 parser.add_argument("--local", action="store_true", help="start dashboard locally without docker image")
193
194
195 def handle_dashboard(args):
196 support_csp_string = ", ".join(supported_csp)
197 if args.stop:
198 stop()
199 elif args.start or args.local:
200 start(args)
201 elif args.cloud:
202 if args.cloud in supported_csp:
203 cloud(args)
204 else:
205 print(
206 f"Currently --cloud support the following options: {support_csp_string}. However, {args.cloud} is requested."
207 )
208 else:
209 print("Please use -h option to see usage")
210
211
212 if __name__ == "__main__":
213 main()
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nvflare/dashboard/cli.py b/nvflare/dashboard/cli.py
--- a/nvflare/dashboard/cli.py
+++ b/nvflare/dashboard/cli.py
@@ -138,6 +138,8 @@
def cloud(args):
lighter_folder = os.path.dirname(utils.__file__)
template = utils.load_yaml(os.path.join(lighter_folder, "impl", "master_template.yml"))
+ template.update(utils.load_yaml(os.path.join(lighter_folder, "impl", "aws_template.yml")))
+ template.update(utils.load_yaml(os.path.join(lighter_folder, "impl", "azure_template.yml")))
tplt = tplt_utils.Template(template)
cwd = os.getcwd()
csp = args.cloud
| {"golden_diff": "diff --git a/nvflare/dashboard/cli.py b/nvflare/dashboard/cli.py\n--- a/nvflare/dashboard/cli.py\n+++ b/nvflare/dashboard/cli.py\n@@ -138,6 +138,8 @@\n def cloud(args):\n lighter_folder = os.path.dirname(utils.__file__)\n template = utils.load_yaml(os.path.join(lighter_folder, \"impl\", \"master_template.yml\"))\n+ template.update(utils.load_yaml(os.path.join(lighter_folder, \"impl\", \"aws_template.yml\")))\n+ template.update(utils.load_yaml(os.path.join(lighter_folder, \"impl\", \"azure_template.yml\")))\n tplt = tplt_utils.Template(template)\n cwd = os.getcwd()\n csp = args.cloud\n", "issue": "[BUG] aws default vpc should not be required. \n**Describe the bug**\r\n\r\nnvflare commands bomb out, if no default VPC is defined in my AWS account. Many AWS accounts (per AWS recommendation) have no default VPC assigned, however they may have exactly one VPC assigned. In that case (check `aws ec2 describe-vpcs`) you do not have to ask the user and just pick that one. If there is none you can tell the user to create a default vpc if they have the permissions to do so: `aws ec2 create-default-vpc` or have their admin create one \r\n\r\n**To Reproduce**\r\n\r\n```\r\n dp@grammy:~/NVFlare$ nvflare dashboard --cloud aws\r\nDashboard launch script for cloud is written at /home/dp/NVFlare/aws_start_dsb.sh. Now running the script.\r\nThis script requires aws (AWS CLI), sshpass, dig and jq. Now checking if they are installed.\r\nChecking if aws exists. => found\r\nChecking if sshpass exists. => found\r\nChecking if dig exists. => found\r\nChecking if jq exists. => found\r\nOne initial user will be created when starting dashboard.\r\nPlease enter the email address for this user.\r\[email protected]\r\nGenerating key pair for VM\r\n\r\nAn error occurred (VPCIdNotSpecified) when calling the CreateSecurityGroup operation: No default VPC for this user\r\nSecurity group id:\r\n\r\nusage: aws [options] <command> <subcommand> [<subcommand> ...] [parameters]\r\nTo see help text, you can run:\r\n\r\n aws help\r\n aws <command> help\r\n aws <command> <subcommand> help\r\n\r\naws: error: argument --group-id: expected one argument\r\n\r\n\r\nusage: aws [options] <command> <subcommand> [<subcommand> ...] [parameters]\r\nTo see help text, you can run:\r\n\r\n aws help\r\n aws <command> help\r\n aws <command> <subcommand> help\r\n\r\naws: error: argument --group-id: expected one argument\r\n\r\n```\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [e.g. wsl windows 11]\r\n - Python Version [e.g. 3.9]\r\n - NVFlare Version [e.g. 2.4]\r\n\r\n**Other comments:**\r\n\r\nAsk ChatGPT/Claude: \r\n\r\nDoes aws recommend to not create a default vpc for enterprise aws accounts? \r\n\r\nYes, AWS generally recommends not creating a default VPC for enterprise AWS accounts. Instead, they recommend creating custom VPCs tailored to your specific network requirements.\r\nHere are a few reasons why:\r\n\r\nSecurity: Default VPCs have predetermined settings, which might not align with your organization's security policies. Creating a custom VPC allows you to configure security groups, network ACLs, and other settings as per your needs.\r\nIP Address Range: Default VPCs come with a predetermined IP address range. This might lead to IP address conflicts if you have multiple AWS accounts or if you need to connect your AWS resources with your on-premises network. 
With a custom VPC, you can choose your IP address range.\r\nVPC Peering: If you need to set up VPC peering between multiple VPCs, using default VPCs can make the process more complex due to potential overlapping IP address ranges.\r\nCompliance: Depending on your industry, you might have to adhere to certain compliance requirements. Default VPCs might not be set up to meet these requirements out of the box.\r\nCustomization: Custom VPCs give you complete control over your network setup, including subnets, route tables, gateways, and more. This level of customization is essential for many enterprise setups.\r\n\r\nFor these reasons, AWS recommends creating custom VPCs for enterprise accounts, as it provides more flexibility, security, and control over your network infrastructure. Cop\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport os\nimport signal\nimport subprocess\nimport sys\n\nimport docker\nimport nvflare\nfrom nvflare.apis.utils.format_check import name_check\nfrom nvflare.lighter import tplt_utils, utils\n\nsupported_csp = (\"azure\", \"aws\")\n\n\ndef start(args):\n cwd = os.getcwd()\n if not args.folder:\n folder = cwd\n else:\n folder = os.path.join(cwd, args.folder)\n environment = dict()\n env_vars = args.env\n if env_vars:\n for e in env_vars:\n splitted = e.split(\"=\")\n environment[splitted[0]] = splitted[1]\n passphrase = args.passphrase\n if passphrase:\n environment[\"NVFL_DASHBOARD_PP\"] = passphrase\n if args.cred:\n environment.update({\"NVFL_CREDENTIAL\": args.cred})\n elif not os.path.exists(os.path.join(folder, \".db_init_done\")):\n need_email = True\n while need_email:\n answer = input(\n \"Please provide project admin email address. This person will be the super user of the dashboard and this project.\\n\"\n )\n error, reason = name_check(answer, \"email\")\n if error:\n print(f\"Expecting an email address, but got one in an invalid format. Reason: {reason}\")\n else:\n need_email = False\n print(\"generating random password\")\n pwd = utils.generate_password(8)\n print(f\"Project admin credential is {answer} and the password is {pwd}\")\n environment.update({\"NVFL_CREDENTIAL\": f\"{answer}:{pwd}\"})\n if args.local:\n return start_local(environment)\n try:\n client = docker.from_env()\n except docker.errors.DockerException:\n print(\"Unable to communicate to docker daemon/socket. Please make sure your docker is up and running.\")\n exit(0)\n version = nvflare.__version__\n dashboard_image = f\"nvflare/nvflare:{version}\"\n if args.image:\n if dashboard_image != args.image:\n print(\n f\"Current dashboard container image is nvflare/nvflare:{version}, but requesting to use {args.image}. 
Use it at your own risk.\"\n )\n dashboard_image = args.image\n try:\n print(f\"Pulling {dashboard_image}, may take some time to finish.\")\n _ = client.images.pull(dashboard_image)\n except docker.errors.APIError:\n print(f\"unable to pull {dashboard_image}\")\n exit(1)\n print(f\"Launching {dashboard_image}\")\n print(f\"Dashboard will listen to port {args.port}\")\n print(f\"{folder} on host mounted to /var/tmp/nvflare/dashboard in container\")\n if environment:\n print(f\"environment vars set to {environment}\")\n else:\n print(\"No additional environment variables set to the launched container.\")\n try:\n container_obj = client.containers.run(\n dashboard_image,\n entrypoint=[\"/usr/local/bin/python3\", \"nvflare/dashboard/wsgi.py\"],\n detach=True,\n auto_remove=True,\n name=\"nvflare-dashboard\",\n ports={8443: args.port},\n volumes={folder: {\"bind\": \"/var/tmp/nvflare/dashboard\", \"model\": \"rw\"}},\n environment=environment,\n )\n except docker.errors.APIError as e:\n print(f\"Either {dashboard_image} image does not exist or another nvflare-dashboard instance is still running.\")\n print(\"Please either provide an existing container image or stop the running container instance.\")\n print(e)\n exit(1)\n if container_obj:\n print(\"Dashboard container started\")\n print(\"Container name nvflare-dashboard\")\n print(f\"id is {container_obj.id}\")\n else:\n print(\"Container failed to start\")\n\n\ndef start_local(env):\n print(\"Local dashboard without docker is for development and test only\")\n file_dir_path = os.path.dirname(__file__)\n wsgi_location = os.path.join(file_dir_path, \"wsgi.py\")\n cmd = [sys.executable, wsgi_location]\n env.update({\"NVFL_WEB_ROOT\": os.path.dirname(os.path.abspath(__file__))})\n process_status = subprocess.run(args=cmd, env=env)\n return process_status\n\n\ndef stop():\n try:\n client = docker.from_env()\n except docker.errors.DockerException:\n print(\"Unable to communicate to docker daemon/socket. Please make sure your docker is up and running.\")\n exit(0)\n try:\n container_obj = client.containers.get(\"nvflare-dashboard\")\n except docker.errors.NotFound:\n print(\"No nvflare-dashboard container found\")\n exit(0)\n container_obj.kill(signal=signal.SIGINT)\n print(\"nvflare-dashboard exited\")\n\n\ndef cloud(args):\n lighter_folder = os.path.dirname(utils.__file__)\n template = utils.load_yaml(os.path.join(lighter_folder, \"impl\", \"master_template.yml\"))\n tplt = tplt_utils.Template(template)\n cwd = os.getcwd()\n csp = args.cloud\n dest = os.path.join(cwd, f\"{csp}_start_dsb.sh\")\n dsb_start = template[f\"{csp}_start_dsb_sh\"]\n version = nvflare.__version__\n replacement_dict = {\"NVFLARE\": f\"nvflare=={version}\", \"START_OPT\": f\"-i {args.image}\" if args.image else \"\"}\n utils._write(\n dest,\n utils.sh_replace(tplt.get_cloud_script_header() + dsb_start, replacement_dict),\n \"t\",\n exe=True,\n )\n print(f\"Dashboard launch script for cloud is written at {dest}. 
Now running the script.\")\n _ = subprocess.run(dest)\n os.remove(dest)\n\n\ndef has_no_arguments() -> bool:\n last_item = sys.argv[-1]\n return (\n last_item.endswith(\"dashboard.cli\") or last_item.endswith(\"dashboard/cli.py\") or last_item.endswith(\"dashboard\")\n )\n\n\ndef main():\n parser = argparse.ArgumentParser()\n define_dashboard_parser(parser)\n args = parser.parse_args()\n handle_dashboard(args)\n\n\ndef define_dashboard_parser(parser):\n parser.add_argument(\n \"--cloud\",\n type=str,\n default=\"\",\n help=\"launch dashboard on cloud service provider (ex: --cloud azure or --cloud aws)\",\n )\n parser.add_argument(\"--start\", action=\"store_true\", help=\"start dashboard\")\n parser.add_argument(\"--stop\", action=\"store_true\", help=\"stop dashboard\")\n parser.add_argument(\"-p\", \"--port\", type=str, default=\"443\", help=\"port to listen\")\n parser.add_argument(\n \"-f\", \"--folder\", type=str, help=\"folder containing necessary info (default: current working directory)\"\n )\n parser.add_argument(\n \"--passphrase\", help=\"Passphrase to encrypt/decrypt root CA private key. !!! Do not share it with others. !!!\"\n )\n parser.add_argument(\"-e\", \"--env\", action=\"append\", help=\"additonal environment variables: var1=value1\")\n parser.add_argument(\"--cred\", help=\"set credential directly in the form of USER_EMAIL:PASSWORD\")\n parser.add_argument(\"-i\", \"--image\", help=\"set the container image name\")\n parser.add_argument(\"--local\", action=\"store_true\", help=\"start dashboard locally without docker image\")\n\n\ndef handle_dashboard(args):\n support_csp_string = \", \".join(supported_csp)\n if args.stop:\n stop()\n elif args.start or args.local:\n start(args)\n elif args.cloud:\n if args.cloud in supported_csp:\n cloud(args)\n else:\n print(\n f\"Currently --cloud support the following options: {support_csp_string}. However, {args.cloud} is requested.\"\n )\n else:\n print(\"Please use -h option to see usage\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "nvflare/dashboard/cli.py"}], "after_files": [{"content": "# Copyright (c) 2022, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport os\nimport signal\nimport subprocess\nimport sys\n\nimport docker\nimport nvflare\nfrom nvflare.apis.utils.format_check import name_check\nfrom nvflare.lighter import tplt_utils, utils\n\nsupported_csp = (\"azure\", \"aws\")\n\n\ndef start(args):\n cwd = os.getcwd()\n if not args.folder:\n folder = cwd\n else:\n folder = os.path.join(cwd, args.folder)\n environment = dict()\n env_vars = args.env\n if env_vars:\n for e in env_vars:\n splitted = e.split(\"=\")\n environment[splitted[0]] = splitted[1]\n passphrase = args.passphrase\n if passphrase:\n environment[\"NVFL_DASHBOARD_PP\"] = passphrase\n if args.cred:\n environment.update({\"NVFL_CREDENTIAL\": args.cred})\n elif not os.path.exists(os.path.join(folder, \".db_init_done\")):\n need_email = True\n while need_email:\n answer = input(\n \"Please provide project admin email address. This person will be the super user of the dashboard and this project.\\n\"\n )\n error, reason = name_check(answer, \"email\")\n if error:\n print(f\"Expecting an email address, but got one in an invalid format. Reason: {reason}\")\n else:\n need_email = False\n print(\"generating random password\")\n pwd = utils.generate_password(8)\n print(f\"Project admin credential is {answer} and the password is {pwd}\")\n environment.update({\"NVFL_CREDENTIAL\": f\"{answer}:{pwd}\"})\n if args.local:\n return start_local(environment)\n try:\n client = docker.from_env()\n except docker.errors.DockerException:\n print(\"Unable to communicate to docker daemon/socket. Please make sure your docker is up and running.\")\n exit(0)\n version = nvflare.__version__\n dashboard_image = f\"nvflare/nvflare:{version}\"\n if args.image:\n if dashboard_image != args.image:\n print(\n f\"Current dashboard container image is nvflare/nvflare:{version}, but requesting to use {args.image}. 
Use it at your own risk.\"\n )\n dashboard_image = args.image\n try:\n print(f\"Pulling {dashboard_image}, may take some time to finish.\")\n _ = client.images.pull(dashboard_image)\n except docker.errors.APIError:\n print(f\"unable to pull {dashboard_image}\")\n exit(1)\n print(f\"Launching {dashboard_image}\")\n print(f\"Dashboard will listen to port {args.port}\")\n print(f\"{folder} on host mounted to /var/tmp/nvflare/dashboard in container\")\n if environment:\n print(f\"environment vars set to {environment}\")\n else:\n print(\"No additional environment variables set to the launched container.\")\n try:\n container_obj = client.containers.run(\n dashboard_image,\n entrypoint=[\"/usr/local/bin/python3\", \"nvflare/dashboard/wsgi.py\"],\n detach=True,\n auto_remove=True,\n name=\"nvflare-dashboard\",\n ports={8443: args.port},\n volumes={folder: {\"bind\": \"/var/tmp/nvflare/dashboard\", \"model\": \"rw\"}},\n environment=environment,\n )\n except docker.errors.APIError as e:\n print(f\"Either {dashboard_image} image does not exist or another nvflare-dashboard instance is still running.\")\n print(\"Please either provide an existing container image or stop the running container instance.\")\n print(e)\n exit(1)\n if container_obj:\n print(\"Dashboard container started\")\n print(\"Container name nvflare-dashboard\")\n print(f\"id is {container_obj.id}\")\n else:\n print(\"Container failed to start\")\n\n\ndef start_local(env):\n print(\"Local dashboard without docker is for development and test only\")\n file_dir_path = os.path.dirname(__file__)\n wsgi_location = os.path.join(file_dir_path, \"wsgi.py\")\n cmd = [sys.executable, wsgi_location]\n env.update({\"NVFL_WEB_ROOT\": os.path.dirname(os.path.abspath(__file__))})\n process_status = subprocess.run(args=cmd, env=env)\n return process_status\n\n\ndef stop():\n try:\n client = docker.from_env()\n except docker.errors.DockerException:\n print(\"Unable to communicate to docker daemon/socket. Please make sure your docker is up and running.\")\n exit(0)\n try:\n container_obj = client.containers.get(\"nvflare-dashboard\")\n except docker.errors.NotFound:\n print(\"No nvflare-dashboard container found\")\n exit(0)\n container_obj.kill(signal=signal.SIGINT)\n print(\"nvflare-dashboard exited\")\n\n\ndef cloud(args):\n lighter_folder = os.path.dirname(utils.__file__)\n template = utils.load_yaml(os.path.join(lighter_folder, \"impl\", \"master_template.yml\"))\n template.update(utils.load_yaml(os.path.join(lighter_folder, \"impl\", \"aws_template.yml\")))\n template.update(utils.load_yaml(os.path.join(lighter_folder, \"impl\", \"azure_template.yml\")))\n tplt = tplt_utils.Template(template)\n cwd = os.getcwd()\n csp = args.cloud\n dest = os.path.join(cwd, f\"{csp}_start_dsb.sh\")\n dsb_start = template[f\"{csp}_start_dsb_sh\"]\n version = nvflare.__version__\n replacement_dict = {\"NVFLARE\": f\"nvflare=={version}\", \"START_OPT\": f\"-i {args.image}\" if args.image else \"\"}\n utils._write(\n dest,\n utils.sh_replace(tplt.get_cloud_script_header() + dsb_start, replacement_dict),\n \"t\",\n exe=True,\n )\n print(f\"Dashboard launch script for cloud is written at {dest}. 
Now running the script.\")\n _ = subprocess.run(dest)\n os.remove(dest)\n\n\ndef has_no_arguments() -> bool:\n last_item = sys.argv[-1]\n return (\n last_item.endswith(\"dashboard.cli\") or last_item.endswith(\"dashboard/cli.py\") or last_item.endswith(\"dashboard\")\n )\n\n\ndef main():\n parser = argparse.ArgumentParser()\n define_dashboard_parser(parser)\n args = parser.parse_args()\n handle_dashboard(args)\n\n\ndef define_dashboard_parser(parser):\n parser.add_argument(\n \"--cloud\",\n type=str,\n default=\"\",\n help=\"launch dashboard on cloud service provider (ex: --cloud azure or --cloud aws)\",\n )\n parser.add_argument(\"--start\", action=\"store_true\", help=\"start dashboard\")\n parser.add_argument(\"--stop\", action=\"store_true\", help=\"stop dashboard\")\n parser.add_argument(\"-p\", \"--port\", type=str, default=\"443\", help=\"port to listen\")\n parser.add_argument(\n \"-f\", \"--folder\", type=str, help=\"folder containing necessary info (default: current working directory)\"\n )\n parser.add_argument(\n \"--passphrase\", help=\"Passphrase to encrypt/decrypt root CA private key. !!! Do not share it with others. !!!\"\n )\n parser.add_argument(\"-e\", \"--env\", action=\"append\", help=\"additonal environment variables: var1=value1\")\n parser.add_argument(\"--cred\", help=\"set credential directly in the form of USER_EMAIL:PASSWORD\")\n parser.add_argument(\"-i\", \"--image\", help=\"set the container image name\")\n parser.add_argument(\"--local\", action=\"store_true\", help=\"start dashboard locally without docker image\")\n\n\ndef handle_dashboard(args):\n support_csp_string = \", \".join(supported_csp)\n if args.stop:\n stop()\n elif args.start or args.local:\n start(args)\n elif args.cloud:\n if args.cloud in supported_csp:\n cloud(args)\n else:\n print(\n f\"Currently --cloud support the following options: {support_csp_string}. However, {args.cloud} is requested.\"\n )\n else:\n print(\"Please use -h option to see usage\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "nvflare/dashboard/cli.py"}]} | 3,407 | 153 |
gh_patches_debug_25379 | rasdani/github-patches | git_diff | freedomofpress__securedrop-335 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Timestamps
The issue of having timestamps on submitted files, and the disadvantages of retaining that metadata which can be correlated with other known variables, has been brought up.
One of the solutions which has been posited is scrambling/randomizing or nullifying the timestamps, but the problem with that is journalists would lose track of the order in which submissions arrived. With document flagging implemented in the future (#124) that would be less of an issue.
I believe @Hainish proposed an idea that would employ cryptographic hashes to retain the ordering of documents, while ditching the timestamps, but I don't know much about the specifics. Would love to hear more. So far I've done a preliminary search for possible filesystem/kernel or mount option-based solutions and didn't come up with anything.
--- END ISSUE ---
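Before the code, here is a minimal sketch of the "nullify/normalize the timestamps" option discussed in the issue. It assumes nothing more than a list of on-disk file paths ordered oldest-to-newest; the function name and argument are illustrative, not SecureDrop's actual API.

```python
import os

def normalize_submission_timestamps(submission_paths):
    """Set every older submission's atime/mtime to match the newest one.

    `submission_paths` is assumed to be ordered oldest-to-newest. Only the
    on-disk timestamps change, so the files no longer carry a per-file
    arrival trail that could be correlated with other known events.
    """
    if len(submission_paths) < 2:
        return
    newest_stat = os.stat(submission_paths[-1])
    for path in submission_paths[:-1]:
        # os.utime takes (atime, mtime); copy both from the newest file.
        os.utime(path, (newest_stat.st_atime, newest_stat.st_mtime))
```

With an approach like this, the order of submissions would have to come from the application's own records rather than from filesystem metadata, which is the trade-off the issue discusses.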
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/source.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import os
3 from datetime import datetime
4 import uuid
5 from functools import wraps
6 import zipfile
7 from cStringIO import StringIO
8
9 import logging
10 # This module's logger is explicitly labeled so the correct logger is used,
11 # even when this is run from the command line (e.g. during development)
12 log = logging.getLogger('source')
13
14 from flask import (Flask, request, render_template, session, redirect, url_for,
15 flash, abort, g, send_file)
16 from flask_wtf.csrf import CsrfProtect
17
18 from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
19
20 import config
21 import version
22 import crypto_util
23 import store
24 import background
25 from db import db_session, Source, Submission
26
27 app = Flask(__name__, template_folder=config.SOURCE_TEMPLATES_DIR)
28 app.config.from_object(config.FlaskConfig)
29 CsrfProtect(app)
30
31 app.jinja_env.globals['version'] = version.__version__
32 if getattr(config, 'CUSTOM_HEADER_IMAGE', None):
33 app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE
34 app.jinja_env.globals['use_custom_header_image'] = True
35 else:
36 app.jinja_env.globals['header_image'] = 'securedrop.png'
37 app.jinja_env.globals['use_custom_header_image'] = False
38
39
40 @app.teardown_appcontext
41 def shutdown_session(exception=None):
42 """Automatically remove database sessions at the end of the request, or
43 when the application shuts down"""
44 db_session.remove()
45
46
47 def logged_in():
48 if 'logged_in' in session:
49 return True
50
51
52 def login_required(f):
53 @wraps(f)
54 def decorated_function(*args, **kwargs):
55 if not logged_in():
56 return redirect(url_for('lookup'))
57 return f(*args, **kwargs)
58 return decorated_function
59
60
61 def ignore_static(f):
62 """Only executes the wrapped function if we're not loading a static resource."""
63 @wraps(f)
64 def decorated_function(*args, **kwargs):
65 if request.path.startswith('/static'):
66 return # don't execute the decorated function
67 return f(*args, **kwargs)
68 return decorated_function
69
70
71 @app.before_request
72 @ignore_static
73 def setup_g():
74 """Store commonly used values in Flask's special g object"""
75 # ignore_static here because `crypto_util.hash_codename` is scrypt (very
76 # time consuming), and we don't need to waste time running if we're just
77 # serving a static resource that won't need to access these common values.
78 if logged_in():
79 g.codename = session['codename']
80 g.sid = crypto_util.hash_codename(g.codename)
81 try:
82 g.source = Source.query.filter(Source.filesystem_id == g.sid).one()
83 except MultipleResultsFound as e:
84 app.logger.error("Found multiple Sources when one was expected: %s" % (e,))
85 abort(500)
86 except NoResultFound as e:
87 app.logger.error("Found no Sources when one was expected: %s" % (e,))
88 abort(404)
89 g.loc = store.path(g.sid)
90
91
92 @app.before_request
93 @ignore_static
94 def check_tor2web():
95 # ignore_static here so we only flash a single message warning about Tor2Web,
96 # corresponding to the intial page load.
97 if 'X-tor2web' in request.headers:
98 flash('<strong>WARNING:</strong> You appear to be using Tor2Web. '
99 'This <strong>does not</strong> provide anonymity. '
100 '<a href="/tor2web-warning">Why is this dangerous?</a>',
101 "header-warning")
102
103
104 @app.after_request
105 def no_cache(response):
106 """Minimize potential traces of site access by telling the browser not to
107 cache anything"""
108 no_cache_headers = {
109 'Cache-Control': 'no-cache, no-store, must-revalidate',
110 'Pragma': 'no-cache',
111 'Expires': '-1',
112 }
113 for header, header_value in no_cache_headers.iteritems():
114 response.headers.add(header, header_value)
115 return response
116
117
118 @app.route('/')
119 def index():
120 return render_template('index.html')
121
122
123 @app.route('/generate', methods=('GET', 'POST'))
124 def generate():
125 number_words = 8
126 if request.method == 'POST':
127 number_words = int(request.form['number-words'])
128 if number_words not in range(7, 11):
129 abort(403)
130 session['codename'] = crypto_util.genrandomid(number_words)
131 # TODO: make sure this codename isn't a repeat
132 return render_template('generate.html', codename=session['codename'])
133
134
135 @app.route('/create', methods=['POST'])
136 def create():
137 sid = crypto_util.hash_codename(session['codename'])
138
139 source = Source(sid, crypto_util.display_id())
140 db_session.add(source)
141 db_session.commit()
142
143 if os.path.exists(store.path(sid)):
144 # if this happens, we're not using very secure crypto
145 log.warning("Got a duplicate ID '%s'" % sid)
146 else:
147 os.mkdir(store.path(sid))
148
149 session['logged_in'] = True
150 return redirect(url_for('lookup'))
151
152
153 @app.route('/lookup', methods=('GET',))
154 @login_required
155 def lookup():
156 replies = []
157 for fn in os.listdir(g.loc):
158 if fn.startswith('reply-'):
159 try:
160 msg = crypto_util.decrypt(g.sid, g.codename,
161 file(store.path(g.sid, fn)).read()).decode("utf-8")
162 except UnicodeDecodeError:
163 app.logger.error("Could not decode reply %s" % fn)
164 else:
165 date = str(datetime.fromtimestamp(
166 os.stat(store.path(g.sid, fn)).st_mtime))
167 replies.append(dict(id=fn, date=date, msg=msg))
168
169 def async_genkey(sid, codename):
170 with app.app_context():
171 background.execute(lambda: crypto_util.genkeypair(sid, codename))
172
173 # Generate a keypair to encrypt replies from the journalist
174 # Only do this if the journalist has flagged the source as one
175 # that they would like to reply to. (Issue #140.)
176 if not crypto_util.getkey(g.sid) and g.source.flagged:
177 async_genkey(g.sid, g.codename)
178
179 return render_template('lookup.html', codename=g.codename, msgs=replies,
180 flagged=g.source.flagged, haskey=crypto_util.getkey(g.sid))
181
182
183 @app.route('/submit', methods=('POST',))
184 @login_required
185 def submit():
186 msg = request.form['msg']
187 fh = request.files['fh']
188 strip_metadata = True if 'notclean' in request.form else False
189
190 fnames = []
191
192 if msg:
193 fnames.append(store.save_message_submission(g.sid, msg))
194 flash("Thanks! We received your message.", "notification")
195 if fh:
196 fnames.append(store.save_file_submission(g.sid, fh.filename,
197 fh.stream, fh.content_type, strip_metadata))
198 flash("Thanks! We received your document '%s'."
199 % fh.filename or '[unnamed]', "notification")
200
201 for fname in fnames:
202 submission = Submission(g.source, fname)
203 db_session.add(submission)
204
205 g.source.last_updated = datetime.now()
206 db_session.commit()
207
208 return redirect(url_for('lookup'))
209
210
211 @app.route('/delete', methods=('POST',))
212 @login_required
213 def delete():
214 msgid = request.form['msgid']
215 assert '/' not in msgid
216 potential_files = os.listdir(g.loc)
217 if msgid not in potential_files:
218 abort(404) # TODO are the checks necessary?
219 store.secure_unlink(store.path(g.sid, msgid))
220 flash("Reply deleted.", "notification")
221
222 return redirect(url_for('lookup'))
223
224
225 def valid_codename(codename):
226 return os.path.exists(store.path(crypto_util.hash_codename(codename)))
227
228 @app.route('/login', methods=('GET', 'POST'))
229 def login():
230 if request.method == 'POST':
231 codename = request.form['codename']
232 if valid_codename(codename):
233 session.update(codename=codename, logged_in=True)
234 return redirect(url_for('lookup'))
235 else:
236 flash("Sorry, that is not a recognized codename.", "error")
237 return render_template('login.html')
238
239
240 @app.route('/howto-disable-js')
241 def howto_disable_js():
242 return render_template("howto-disable-js.html")
243
244
245 @app.route('/tor2web-warning')
246 def tor2web_warning():
247 return render_template("tor2web-warning.html")
248
249
250 @app.route('/journalist-key')
251 def download_journalist_pubkey():
252 journalist_pubkey = crypto_util.gpg.export_keys(config.JOURNALIST_KEY)
253 return send_file(StringIO(journalist_pubkey),
254 mimetype="application/pgp-keys",
255 attachment_filename=config.JOURNALIST_KEY + ".asc",
256 as_attachment=True)
257
258
259 @app.route('/why-journalist-key')
260 def why_download_journalist_pubkey():
261 return render_template("why-journalist-key.html")
262
263
264 _REDIRECT_URL_WHITELIST = ["http://tor2web.org/",
265 "https://www.torproject.org/download.html.en",
266 "https://tails.boum.org/",
267 "http://www.wired.com/threatlevel/2013/09/freedom-hosting-fbi/",
268 "http://www.theguardian.com/world/interactive/2013/oct/04/egotistical-giraffe-nsa-tor-document",
269 "https://addons.mozilla.org/en-US/firefox/addon/noscript/",
270 "http://noscript.net"]
271
272
273 @app.route('/redirect/<path:redirect_url>')
274 def redirect_hack(redirect_url):
275 # A hack to avoid referer leakage when a user clicks on an external link.
276 # TODO: Most likely will want to share this between source.py and
277 # journalist.py in the future.
278 if redirect_url not in _REDIRECT_URL_WHITELIST:
279 return 'Redirect not allowed'
280 else:
281 return render_template("redirect.html", redirect_url=redirect_url)
282
283
284 @app.errorhandler(404)
285 def page_not_found(error):
286 return render_template('notfound.html'), 404
287
288 @app.errorhandler(500)
289 def internal_error(error):
290 return render_template('error.html'), 500
291
292 if __name__ == "__main__":
293 # TODO make sure debug is not on in production
294 app.run(debug=True, host='0.0.0.0', port=8080)
295
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -5,6 +5,7 @@
from functools import wraps
import zipfile
from cStringIO import StringIO
+import subprocess
import logging
# This module's logger is explicitly labeled so the correct logger is used,
@@ -180,6 +181,22 @@
flagged=g.source.flagged, haskey=crypto_util.getkey(g.sid))
+def normalize_timestamps(sid):
+ """
+ Update the timestamps on all of the source's submissions to match that of
+ the latest submission. This minimizes metadata that could be useful to
+ investigators. See #301.
+ """
+ sub_paths = [ store.path(sid, submission.filename)
+ for submission in g.source.submissions ]
+ if len(sub_paths) > 1:
+ args = ["touch"]
+ args.extend(sub_paths[:-1])
+ rc = subprocess.call(args)
+ if rc != 0:
+ app.logger.warning("Couldn't normalize submission timestamps (touch exited with %d)" % rc)
+
+
@app.route('/submit', methods=('POST',))
@login_required
def submit():
@@ -204,6 +221,7 @@
g.source.last_updated = datetime.now()
db_session.commit()
+ normalize_timestamps(g.sid)
return redirect(url_for('lookup'))
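The helper added in the patch shells out to `touch` on every submission path except the newest, so right after a new submission is stored all of a source's files end up sharing (approximately) the newest file's modification time. A hypothetical, Unix-only demonstration of why that is enough (it assumes the `touch` binary is available, as the patch itself does):

```python
import os
import subprocess
import tempfile

with tempfile.TemporaryDirectory() as d:
    older = os.path.join(d, "older")
    newer = os.path.join(d, "newer")
    for p in (older, newer):
        open(p, "w").close()
    os.utime(older, (0, 0))            # give the older file a stale mtime (the epoch)
    subprocess.call(["touch", older])  # normalize it, as normalize_timestamps() does
    # both files now carry modification times within a few seconds of each other
    assert abs(os.stat(older).st_mtime - os.stat(newer).st_mtime) < 5
```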
| {"golden_diff": "diff --git a/securedrop/source.py b/securedrop/source.py\n--- a/securedrop/source.py\n+++ b/securedrop/source.py\n@@ -5,6 +5,7 @@\n from functools import wraps\n import zipfile\n from cStringIO import StringIO\n+import subprocess\n \n import logging\n # This module's logger is explicitly labeled so the correct logger is used,\n@@ -180,6 +181,22 @@\n flagged=g.source.flagged, haskey=crypto_util.getkey(g.sid))\n \n \n+def normalize_timestamps(sid):\n+ \"\"\"\n+ Update the timestamps on all of the source's submissions to match that of\n+ the latest submission. This minimizes metadata that could be useful to\n+ investigators. See #301.\n+ \"\"\"\n+ sub_paths = [ store.path(sid, submission.filename)\n+ for submission in g.source.submissions ]\n+ if len(sub_paths) > 1:\n+ args = [\"touch\"]\n+ args.extend(sub_paths[:-1])\n+ rc = subprocess.call(args)\n+ if rc != 0:\n+ app.logger.warning(\"Couldn't normalize submission timestamps (touch exited with %d)\" % rc)\n+\n+\n @app.route('/submit', methods=('POST',))\n @login_required\n def submit():\n@@ -204,6 +221,7 @@\n \n g.source.last_updated = datetime.now()\n db_session.commit()\n+ normalize_timestamps(g.sid)\n \n return redirect(url_for('lookup'))\n", "issue": "Timestamps\nThe issue of having timestamps on submitted files, and the disadvantages of retaining that metadata which can be correlated with other known variables, has been brought up.\n\nOne of the solutions which has been posited is scrambling/randomizing or nullifying the timestamps, but the problem with that is journalists would lose track of the order in which submissions arrived. With document flagging implemented in the future (#124) that would be less of an issue.\n\nI believe @Hainish proposed an idea that would employ cryptographic hashes to retain the ordering of documents, while ditching the timestamps, but I don't know much about the specifics. Would love to hear more. So far I've done a preliminary search for possible filesystem/kernel or mount option-based solutions and didn't come up with anything.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nfrom datetime import datetime\nimport uuid\nfrom functools import wraps\nimport zipfile\nfrom cStringIO import StringIO\n\nimport logging\n# This module's logger is explicitly labeled so the correct logger is used,\n# even when this is run from the command line (e.g. 
during development)\nlog = logging.getLogger('source')\n\nfrom flask import (Flask, request, render_template, session, redirect, url_for,\n flash, abort, g, send_file)\nfrom flask_wtf.csrf import CsrfProtect\n\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nimport config\nimport version\nimport crypto_util\nimport store\nimport background\nfrom db import db_session, Source, Submission\n\napp = Flask(__name__, template_folder=config.SOURCE_TEMPLATES_DIR)\napp.config.from_object(config.FlaskConfig)\nCsrfProtect(app)\n\napp.jinja_env.globals['version'] = version.__version__\nif getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\nelse:\n app.jinja_env.globals['header_image'] = 'securedrop.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n\[email protected]_appcontext\ndef shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n\ndef logged_in():\n if 'logged_in' in session:\n return True\n\n\ndef login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not logged_in():\n return redirect(url_for('lookup'))\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef ignore_static(f):\n \"\"\"Only executes the wrapped function if we're not loading a static resource.\"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if request.path.startswith('/static'):\n return # don't execute the decorated function\n return f(*args, **kwargs)\n return decorated_function\n\n\[email protected]_request\n@ignore_static\ndef setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n # ignore_static here because `crypto_util.hash_codename` is scrypt (very\n # time consuming), and we don't need to waste time running if we're just\n # serving a static resource that won't need to access these common values.\n if logged_in():\n g.codename = session['codename']\n g.sid = crypto_util.hash_codename(g.codename)\n try:\n g.source = Source.query.filter(Source.filesystem_id == g.sid).one()\n except MultipleResultsFound as e:\n app.logger.error(\"Found multiple Sources when one was expected: %s\" % (e,))\n abort(500)\n except NoResultFound as e:\n app.logger.error(\"Found no Sources when one was expected: %s\" % (e,))\n abort(404)\n g.loc = store.path(g.sid)\n\n\[email protected]_request\n@ignore_static\ndef check_tor2web():\n # ignore_static here so we only flash a single message warning about Tor2Web,\n # corresponding to the intial page load.\n if 'X-tor2web' in request.headers:\n flash('<strong>WARNING:</strong> You appear to be using Tor2Web. '\n 'This <strong>does not</strong> provide anonymity. 
'\n '<a href=\"/tor2web-warning\">Why is this dangerous?</a>',\n \"header-warning\")\n\n\[email protected]_request\ndef no_cache(response):\n \"\"\"Minimize potential traces of site access by telling the browser not to\n cache anything\"\"\"\n no_cache_headers = {\n 'Cache-Control': 'no-cache, no-store, must-revalidate',\n 'Pragma': 'no-cache',\n 'Expires': '-1',\n }\n for header, header_value in no_cache_headers.iteritems():\n response.headers.add(header, header_value)\n return response\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/generate', methods=('GET', 'POST'))\ndef generate():\n number_words = 8\n if request.method == 'POST':\n number_words = int(request.form['number-words'])\n if number_words not in range(7, 11):\n abort(403)\n session['codename'] = crypto_util.genrandomid(number_words)\n # TODO: make sure this codename isn't a repeat\n return render_template('generate.html', codename=session['codename'])\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n sid = crypto_util.hash_codename(session['codename'])\n\n source = Source(sid, crypto_util.display_id())\n db_session.add(source)\n db_session.commit()\n\n if os.path.exists(store.path(sid)):\n # if this happens, we're not using very secure crypto\n log.warning(\"Got a duplicate ID '%s'\" % sid)\n else:\n os.mkdir(store.path(sid))\n\n session['logged_in'] = True\n return redirect(url_for('lookup'))\n\n\[email protected]('/lookup', methods=('GET',))\n@login_required\ndef lookup():\n replies = []\n for fn in os.listdir(g.loc):\n if fn.startswith('reply-'):\n try:\n msg = crypto_util.decrypt(g.sid, g.codename,\n file(store.path(g.sid, fn)).read()).decode(\"utf-8\")\n except UnicodeDecodeError:\n app.logger.error(\"Could not decode reply %s\" % fn)\n else:\n date = str(datetime.fromtimestamp(\n os.stat(store.path(g.sid, fn)).st_mtime))\n replies.append(dict(id=fn, date=date, msg=msg))\n\n def async_genkey(sid, codename):\n with app.app_context():\n background.execute(lambda: crypto_util.genkeypair(sid, codename))\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. (Issue #140.)\n if not crypto_util.getkey(g.sid) and g.source.flagged:\n async_genkey(g.sid, g.codename)\n\n return render_template('lookup.html', codename=g.codename, msgs=replies,\n flagged=g.source.flagged, haskey=crypto_util.getkey(g.sid))\n\n\[email protected]('/submit', methods=('POST',))\n@login_required\ndef submit():\n msg = request.form['msg']\n fh = request.files['fh']\n strip_metadata = True if 'notclean' in request.form else False\n\n fnames = []\n\n if msg:\n fnames.append(store.save_message_submission(g.sid, msg))\n flash(\"Thanks! We received your message.\", \"notification\")\n if fh:\n fnames.append(store.save_file_submission(g.sid, fh.filename,\n fh.stream, fh.content_type, strip_metadata))\n flash(\"Thanks! 
We received your document '%s'.\"\n % fh.filename or '[unnamed]', \"notification\")\n\n for fname in fnames:\n submission = Submission(g.source, fname)\n db_session.add(submission)\n\n g.source.last_updated = datetime.now()\n db_session.commit()\n\n return redirect(url_for('lookup'))\n\n\[email protected]('/delete', methods=('POST',))\n@login_required\ndef delete():\n msgid = request.form['msgid']\n assert '/' not in msgid\n potential_files = os.listdir(g.loc)\n if msgid not in potential_files:\n abort(404) # TODO are the checks necessary?\n store.secure_unlink(store.path(g.sid, msgid))\n flash(\"Reply deleted.\", \"notification\")\n\n return redirect(url_for('lookup'))\n\n\ndef valid_codename(codename):\n return os.path.exists(store.path(crypto_util.hash_codename(codename)))\n\[email protected]('/login', methods=('GET', 'POST'))\ndef login():\n if request.method == 'POST':\n codename = request.form['codename']\n if valid_codename(codename):\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('lookup'))\n else:\n flash(\"Sorry, that is not a recognized codename.\", \"error\")\n return render_template('login.html')\n\n\[email protected]('/howto-disable-js')\ndef howto_disable_js():\n return render_template(\"howto-disable-js.html\")\n\n\[email protected]('/tor2web-warning')\ndef tor2web_warning():\n return render_template(\"tor2web-warning.html\")\n\n\[email protected]('/journalist-key')\ndef download_journalist_pubkey():\n journalist_pubkey = crypto_util.gpg.export_keys(config.JOURNALIST_KEY)\n return send_file(StringIO(journalist_pubkey),\n mimetype=\"application/pgp-keys\",\n attachment_filename=config.JOURNALIST_KEY + \".asc\",\n as_attachment=True)\n\n\[email protected]('/why-journalist-key')\ndef why_download_journalist_pubkey():\n return render_template(\"why-journalist-key.html\")\n\n\n_REDIRECT_URL_WHITELIST = [\"http://tor2web.org/\",\n \"https://www.torproject.org/download.html.en\",\n \"https://tails.boum.org/\",\n \"http://www.wired.com/threatlevel/2013/09/freedom-hosting-fbi/\",\n \"http://www.theguardian.com/world/interactive/2013/oct/04/egotistical-giraffe-nsa-tor-document\",\n \"https://addons.mozilla.org/en-US/firefox/addon/noscript/\",\n \"http://noscript.net\"]\n\n\[email protected]('/redirect/<path:redirect_url>')\ndef redirect_hack(redirect_url):\n # A hack to avoid referer leakage when a user clicks on an external link.\n # TODO: Most likely will want to share this between source.py and\n # journalist.py in the future.\n if redirect_url not in _REDIRECT_URL_WHITELIST:\n return 'Redirect not allowed'\n else:\n return render_template(\"redirect.html\", redirect_url=redirect_url)\n\n\[email protected](404)\ndef page_not_found(error):\n return render_template('notfound.html'), 404\n\[email protected](500)\ndef internal_error(error):\n return render_template('error.html'), 500\n\nif __name__ == \"__main__\":\n # TODO make sure debug is not on in production\n app.run(debug=True, host='0.0.0.0', port=8080)\n", "path": "securedrop/source.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nfrom datetime import datetime\nimport uuid\nfrom functools import wraps\nimport zipfile\nfrom cStringIO import StringIO\nimport subprocess\n\nimport logging\n# This module's logger is explicitly labeled so the correct logger is used,\n# even when this is run from the command line (e.g. 
during development)\nlog = logging.getLogger('source')\n\nfrom flask import (Flask, request, render_template, session, redirect, url_for,\n flash, abort, g, send_file)\nfrom flask_wtf.csrf import CsrfProtect\n\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nimport config\nimport version\nimport crypto_util\nimport store\nimport background\nfrom db import db_session, Source, Submission\n\napp = Flask(__name__, template_folder=config.SOURCE_TEMPLATES_DIR)\napp.config.from_object(config.FlaskConfig)\nCsrfProtect(app)\n\napp.jinja_env.globals['version'] = version.__version__\nif getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\nelse:\n app.jinja_env.globals['header_image'] = 'securedrop.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n\[email protected]_appcontext\ndef shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n\ndef logged_in():\n if 'logged_in' in session:\n return True\n\n\ndef login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not logged_in():\n return redirect(url_for('lookup'))\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef ignore_static(f):\n \"\"\"Only executes the wrapped function if we're not loading a static resource.\"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if request.path.startswith('/static'):\n return # don't execute the decorated function\n return f(*args, **kwargs)\n return decorated_function\n\n\[email protected]_request\n@ignore_static\ndef setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n # ignore_static here because `crypto_util.hash_codename` is scrypt (very\n # time consuming), and we don't need to waste time running if we're just\n # serving a static resource that won't need to access these common values.\n if logged_in():\n g.codename = session['codename']\n g.sid = crypto_util.hash_codename(g.codename)\n try:\n g.source = Source.query.filter(Source.filesystem_id == g.sid).one()\n except MultipleResultsFound as e:\n app.logger.error(\"Found multiple Sources when one was expected: %s\" % (e,))\n abort(500)\n except NoResultFound as e:\n app.logger.error(\"Found no Sources when one was expected: %s\" % (e,))\n abort(404)\n g.loc = store.path(g.sid)\n\n\[email protected]_request\n@ignore_static\ndef check_tor2web():\n # ignore_static here so we only flash a single message warning about Tor2Web,\n # corresponding to the intial page load.\n if 'X-tor2web' in request.headers:\n flash('<strong>WARNING:</strong> You appear to be using Tor2Web. '\n 'This <strong>does not</strong> provide anonymity. 
'\n '<a href=\"/tor2web-warning\">Why is this dangerous?</a>',\n \"header-warning\")\n\n\[email protected]_request\ndef no_cache(response):\n \"\"\"Minimize potential traces of site access by telling the browser not to\n cache anything\"\"\"\n no_cache_headers = {\n 'Cache-Control': 'no-cache, no-store, must-revalidate',\n 'Pragma': 'no-cache',\n 'Expires': '-1',\n }\n for header, header_value in no_cache_headers.iteritems():\n response.headers.add(header, header_value)\n return response\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/generate', methods=('GET', 'POST'))\ndef generate():\n number_words = 8\n if request.method == 'POST':\n number_words = int(request.form['number-words'])\n if number_words not in range(7, 11):\n abort(403)\n session['codename'] = crypto_util.genrandomid(number_words)\n # TODO: make sure this codename isn't a repeat\n return render_template('generate.html', codename=session['codename'])\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n sid = crypto_util.hash_codename(session['codename'])\n\n source = Source(sid, crypto_util.display_id())\n db_session.add(source)\n db_session.commit()\n\n if os.path.exists(store.path(sid)):\n # if this happens, we're not using very secure crypto\n log.warning(\"Got a duplicate ID '%s'\" % sid)\n else:\n os.mkdir(store.path(sid))\n\n session['logged_in'] = True\n return redirect(url_for('lookup'))\n\n\[email protected]('/lookup', methods=('GET',))\n@login_required\ndef lookup():\n replies = []\n for fn in os.listdir(g.loc):\n if fn.startswith('reply-'):\n try:\n msg = crypto_util.decrypt(g.sid, g.codename,\n file(store.path(g.sid, fn)).read()).decode(\"utf-8\")\n except UnicodeDecodeError:\n app.logger.error(\"Could not decode reply %s\" % fn)\n else:\n date = str(datetime.fromtimestamp(\n os.stat(store.path(g.sid, fn)).st_mtime))\n replies.append(dict(id=fn, date=date, msg=msg))\n\n def async_genkey(sid, codename):\n with app.app_context():\n background.execute(lambda: crypto_util.genkeypair(sid, codename))\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. (Issue #140.)\n if not crypto_util.getkey(g.sid) and g.source.flagged:\n async_genkey(g.sid, g.codename)\n\n return render_template('lookup.html', codename=g.codename, msgs=replies,\n flagged=g.source.flagged, haskey=crypto_util.getkey(g.sid))\n\n\ndef normalize_timestamps(sid):\n \"\"\"\n Update the timestamps on all of the source's submissions to match that of\n the latest submission. This minimizes metadata that could be useful to\n investigators. See #301.\n \"\"\"\n sub_paths = [ store.path(sid, submission.filename)\n for submission in g.source.submissions ]\n if len(sub_paths) > 1:\n args = [\"touch\"]\n args.extend(sub_paths[:-1])\n rc = subprocess.call(args)\n if rc != 0:\n app.logger.warning(\"Couldn't normalize submission timestamps (touch exited with %d)\" % rc)\n\n\[email protected]('/submit', methods=('POST',))\n@login_required\ndef submit():\n msg = request.form['msg']\n fh = request.files['fh']\n strip_metadata = True if 'notclean' in request.form else False\n\n fnames = []\n\n if msg:\n fnames.append(store.save_message_submission(g.sid, msg))\n flash(\"Thanks! We received your message.\", \"notification\")\n if fh:\n fnames.append(store.save_file_submission(g.sid, fh.filename,\n fh.stream, fh.content_type, strip_metadata))\n flash(\"Thanks! 
We received your document '%s'.\"\n % fh.filename or '[unnamed]', \"notification\")\n\n for fname in fnames:\n submission = Submission(g.source, fname)\n db_session.add(submission)\n\n g.source.last_updated = datetime.now()\n db_session.commit()\n normalize_timestamps(g.sid)\n\n return redirect(url_for('lookup'))\n\n\[email protected]('/delete', methods=('POST',))\n@login_required\ndef delete():\n msgid = request.form['msgid']\n assert '/' not in msgid\n potential_files = os.listdir(g.loc)\n if msgid not in potential_files:\n abort(404) # TODO are the checks necessary?\n store.secure_unlink(store.path(g.sid, msgid))\n flash(\"Reply deleted.\", \"notification\")\n\n return redirect(url_for('lookup'))\n\n\ndef valid_codename(codename):\n return os.path.exists(store.path(crypto_util.hash_codename(codename)))\n\[email protected]('/login', methods=('GET', 'POST'))\ndef login():\n if request.method == 'POST':\n codename = request.form['codename']\n if valid_codename(codename):\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('lookup'))\n else:\n flash(\"Sorry, that is not a recognized codename.\", \"error\")\n return render_template('login.html')\n\n\[email protected]('/howto-disable-js')\ndef howto_disable_js():\n return render_template(\"howto-disable-js.html\")\n\n\[email protected]('/tor2web-warning')\ndef tor2web_warning():\n return render_template(\"tor2web-warning.html\")\n\n\[email protected]('/journalist-key')\ndef download_journalist_pubkey():\n journalist_pubkey = crypto_util.gpg.export_keys(config.JOURNALIST_KEY)\n return send_file(StringIO(journalist_pubkey),\n mimetype=\"application/pgp-keys\",\n attachment_filename=config.JOURNALIST_KEY + \".asc\",\n as_attachment=True)\n\n\[email protected]('/why-journalist-key')\ndef why_download_journalist_pubkey():\n return render_template(\"why-journalist-key.html\")\n\n\n_REDIRECT_URL_WHITELIST = [\"http://tor2web.org/\",\n \"https://www.torproject.org/download.html.en\",\n \"https://tails.boum.org/\",\n \"http://www.wired.com/threatlevel/2013/09/freedom-hosting-fbi/\",\n \"http://www.theguardian.com/world/interactive/2013/oct/04/egotistical-giraffe-nsa-tor-document\",\n \"https://addons.mozilla.org/en-US/firefox/addon/noscript/\",\n \"http://noscript.net\"]\n\n\[email protected]('/redirect/<path:redirect_url>')\ndef redirect_hack(redirect_url):\n # A hack to avoid referer leakage when a user clicks on an external link.\n # TODO: Most likely will want to share this between source.py and\n # journalist.py in the future.\n if redirect_url not in _REDIRECT_URL_WHITELIST:\n return 'Redirect not allowed'\n else:\n return render_template(\"redirect.html\", redirect_url=redirect_url)\n\n\[email protected](404)\ndef page_not_found(error):\n return render_template('notfound.html'), 404\n\[email protected](500)\ndef internal_error(error):\n return render_template('error.html'), 500\n\nif __name__ == \"__main__\":\n # TODO make sure debug is not on in production\n app.run(debug=True, host='0.0.0.0', port=8080)\n", "path": "securedrop/source.py"}]} | 3,506 | 321 |
gh_patches_debug_8286 | rasdani/github-patches | git_diff | liberapay__liberapay.com-1724 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Automatic renewal failure due to currency mismatch
Stripe error message: `The currency specified (gbp) does not match the currency on the source (eur). A payment can only be created for the same currency as the corresponding source. (request ID: req_PSmPQ85CKvW5kz)`.
SEPA Direct Debit failures
A European donor who tried to send money to a Canadian got the following error message from Stripe: `For 'sepa_debit' payments, we currently require your account to have a bank account in one of the following currencies: eur, usd, gbp, sek, nok, dkk, chf`.
--- END ISSUE ---
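To make the failure mode concrete before reading the code: both error messages come down to a scheduled payin being charged through a SEPA Direct Debit source whose settlement currency (EUR) does not match the payin's currency. A tiny, hypothetical predicate for that compatibility rule might look like the sketch below; the network names mirror the ones used in the module, but the helper itself is only an illustration, not Liberapay's actual API.

```python
def route_can_charge(route_network: str, payin_currency: str) -> bool:
    # A SEPA Direct Debit route ('stripe-sdd') should only be used for payins
    # denominated in EUR; card routes are treated as currency-agnostic here.
    if route_network == 'stripe-sdd':
        return payin_currency == 'EUR'
    return True

assert route_can_charge('stripe-sdd', 'EUR')
assert not route_can_charge('stripe-sdd', 'GBP')   # the failing combination from the issue
assert route_can_charge('stripe-card', 'GBP')
```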
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `liberapay/payin/cron.py`
Content:
```
1 from collections import defaultdict
2 from datetime import date
3
4 from pando import json
5
6 from ..cron import logger
7 from ..i18n.currencies import Money
8 from ..website import website
9 from ..utils import utcnow
10 from .common import prepare_donation, prepare_payin
11 from .stripe import charge
12
13
14 def send_donation_reminder_notifications():
15 """This function reminds donors to renew their donations.
16
17 The notifications are sent two weeks before the due date.
18 """
19 db = website.db
20 counts = defaultdict(int)
21 rows = db.all("""
22 SELECT (SELECT p FROM participants p WHERE p.id = sp.payer) AS payer
23 , json_agg((SELECT a FROM (
24 SELECT sp.id, sp.execution_date, sp.amount, sp.transfers
25 ) a ORDER BY a.execution_date)) AS payins
26 FROM scheduled_payins sp
27 WHERE sp.execution_date <= (current_date + interval '14 days')
28 AND sp.automatic IS NOT true
29 AND sp.payin IS NULL
30 AND sp.ctime < (current_timestamp - interval '6 hours')
31 GROUP BY sp.payer
32 HAVING count(*) FILTER (
33 WHERE sp.notifs_count = 0
34 OR sp.notifs_count = 1 AND sp.last_notif_ts <= (current_date - interval '4 weeks')
35 OR sp.notifs_count = 2 AND sp.last_notif_ts <= (current_date - interval '26 weeks')
36 ) > 0
37 """)
38 for payer, payins in rows:
39 if payer.is_suspended or payer.status != 'active':
40 continue
41 _check_scheduled_payins(db, payer, payins, automatic=False)
42 if not payins:
43 continue
44 donations = []
45 for sp in payins:
46 for tr in sp['transfers']:
47 donations.append({
48 'periodic_amount': tr['tip'].periodic_amount,
49 'tippee_username': tr['tippee_username'],
50 })
51 payer.notify('donate_reminder', donations=donations, email_unverified_address=True)
52 counts['donate_reminder'] += 1
53 db.run("""
54 UPDATE scheduled_payins
55 SET notifs_count = notifs_count + 1
56 , last_notif_ts = now()
57 WHERE payer = %s
58 AND id IN %s
59 """, (payer.id, tuple(sp['id'] for sp in payins)))
60 for k, n in sorted(counts.items()):
61 logger.info("Sent %i %s notifications." % (n, k))
62
63
64 def send_upcoming_debit_notifications():
65 """This daily cron job notifies donors who are about to be debited.
66
67 The notifications are sent at most once a month, 14 days before the first
68 payment of the "month" (31 days, not the calendar month).
69 """
70 db = website.db
71 counts = defaultdict(int)
72 rows = db.all("""
73 SELECT (SELECT p FROM participants p WHERE p.id = sp.payer) AS payer
74 , json_agg((SELECT a FROM (
75 SELECT sp.id, sp.execution_date, sp.amount, sp.transfers
76 ) a ORDER BY a.execution_date)) AS payins
77 FROM scheduled_payins sp
78 WHERE sp.execution_date <= (current_date + interval '45 days')
79 AND sp.automatic
80 AND sp.notifs_count = 0
81 AND sp.payin IS NULL
82 AND sp.ctime < (current_timestamp - interval '6 hours')
83 GROUP BY sp.payer, (sp.amount).currency
84 HAVING min(sp.execution_date) <= (current_date + interval '14 days')
85 """)
86 for payer, payins in rows:
87 if payer.is_suspended or payer.status != 'active':
88 continue
89 _check_scheduled_payins(db, payer, payins, automatic=True)
90 if not payins:
91 continue
92 context = {
93 'payins': payins,
94 'total_amount': sum(sp['amount'] for sp in payins),
95 }
96 for sp in context['payins']:
97 for tr in sp['transfers']:
98 del tr['tip'], tr['beneficiary']
99 if len(payins) > 1:
100 context['ndays'] = (payins[-1]['execution_date'] - utcnow().date()).days
101 while True:
102 route = db.one("""
103 SELECT r
104 FROM exchange_routes r
105 WHERE r.participant = %s
106 AND r.status = 'chargeable'
107 AND r.network::text LIKE 'stripe-%%'
108 ORDER BY r.is_default NULLS LAST
109 , r.network = 'stripe-sdd' DESC
110 , r.ctime DESC
111 LIMIT 1
112 """, (payer.id,))
113 if route is None:
114 break
115 route.sync_status()
116 if route.status == 'chargeable':
117 break
118 if route:
119 event = 'upcoming_debit'
120 context['instrument_brand'] = route.get_brand()
121 context['instrument_partial_number'] = route.get_partial_number()
122 if route.network == 'stripe-sdd':
123 source = route.stripe_source
124 context.update({
125 'creditor_identifier': website.app_conf.sepa_creditor_identifier,
126 'mandate_creation_date': route.ctime.date(),
127 'mandate_id': source.sepa_debit.mandate_reference,
128 'mandate_url': source.sepa_debit.mandate_url,
129 })
130 else:
131 event = 'missing_route'
132 payer.notify(event, email_unverified_address=True, **context)
133 counts[event] += 1
134 db.run("""
135 UPDATE scheduled_payins
136 SET notifs_count = notifs_count + 1
137 , last_notif_ts = now()
138 WHERE payer = %s
139 AND id IN %s
140 """, (payer.id, tuple(sp['id'] for sp in payins)))
141 for k, n in sorted(counts.items()):
142 logger.info("Sent %i %s notifications." % (n, k))
143
144
145 def execute_scheduled_payins():
146 """This daily cron job initiates scheduled payments.
147 """
148 db = website.db
149 counts = defaultdict(int)
150 retry = False
151 rows = db.all("""
152 SELECT sp.id, sp.execution_date, sp.transfers
153 , p AS payer, r.*::exchange_routes AS route
154 FROM scheduled_payins sp
155 JOIN participants p ON p.id = sp.payer
156 JOIN LATERAL (
157 SELECT r.*
158 FROM exchange_routes r
159 WHERE r.participant = sp.payer
160 AND r.status = 'chargeable'
161 AND r.network::text LIKE 'stripe-%%'
162 ORDER BY r.is_default NULLS LAST
163 , r.network = 'stripe-sdd' DESC
164 , r.ctime DESC
165 LIMIT 1
166 ) r ON true
167 WHERE ( r.network = 'stripe-sdd' AND sp.execution_date <= (current_date + interval '5 days') OR
168 r.network = 'stripe-card' AND sp.execution_date <= current_date )
169 AND sp.last_notif_ts < (current_date - interval '2 days')
170 AND sp.automatic
171 AND sp.payin IS NULL
172 AND p.is_suspended IS NOT TRUE
173 """)
174 for sp_id, execution_date, transfers, payer, route in rows:
175 route.__dict__['participant'] = payer
176 route.sync_status()
177 if route.status != 'chargeable':
178 retry = True
179 continue
180 transfers, canceled, impossible = _filter_transfers(payer, transfers, automatic=True)
181 if impossible:
182 for tr in impossible:
183 tr['execution_date'] = execution_date
184 del tr['beneficiary'], tr['tip']
185 payer.notify('renewal_aborted', transfers=impossible)
186 counts['renewal_aborted'] += 1
187 if transfers:
188 payin_amount = sum(tr['amount'] for tr in transfers)
189 payin = prepare_payin(db, payer, payin_amount, route, off_session=True)
190 for tr in transfers:
191 prepare_donation(
192 db, payin, tr['tip'], tr['beneficiary'], 'stripe',
193 payer, route.country, tr['amount']
194 )
195 payin = charge(db, payin, payer)
196 if payin.status in ('failed', 'succeeded'):
197 payer.notify('payin_' + payin.status, payin=payin._asdict(), provider='Stripe')
198 counts['payin_' + payin.status] += 1
199 db.run("""
200 UPDATE scheduled_payins
201 SET payin = %s
202 , mtime = current_timestamp
203 WHERE id = %s
204 """, (payin.id, sp_id))
205 else:
206 db.run("DELETE FROM scheduled_payins WHERE id = %s", (sp_id,))
207 for k, n in sorted(counts.items()):
208 logger.info("Sent %i %s notifications." % (n, k))
209 if retry:
210 execute_scheduled_payins()
211
212
213 def _check_scheduled_payins(db, payer, payins, automatic):
214 """Check scheduled payins before they're acted upon.
215
216 A scheduled payin can be out of sync with the state of the donor's tips or
217 the status of the recipient's account if the `Participant.schedule_renewals()`
218 method wasn't successfully called.
219 """
220 for sp in list(payins):
221 if isinstance(sp['amount'], dict):
222 sp['amount'] = Money(**sp['amount'])
223 sp['execution_date'] = date(*map(int, sp['execution_date'].split('-')))
224 canceled, impossible = _filter_transfers(payer, sp['transfers'], automatic)[1:]
225 if canceled:
226 if len(canceled) == len(sp['transfers']):
227 payins.remove(sp)
228 db.run("DELETE FROM scheduled_payins WHERE id = %(id)s", sp)
229 else:
230 old_tippee_ids = set(tr['tippee_id'] for tr in canceled)
231 sp['transfers'] = [
232 tr for tr in sp['transfers'] if tr['tippee_id'] not in old_tippee_ids
233 ]
234 sp['amount'] = sum(tr['amount'] for tr in sp['transfers'])
235 db.run("""
236 UPDATE scheduled_payins
237 SET amount = %(amount)s
238 , transfers = %(transfers)s
239 , mtime = current_timestamp
240 WHERE id = %(id)s
241 """, dict(sp, transfers=json.dumps([
242 {k: v for k, v in tr.items() if k not in ('tip', 'beneficiary')}
243 for tr in sp['transfers']
244 ])))
245 for tr in impossible:
246 tr['impossible'] = True
247
248
249 def _filter_transfers(payer, transfers, automatic):
250 """Splits scheduled transfers into 3 lists: "okay", "canceled" and "impossible".
251 """
252 canceled_transfers = []
253 impossible_transfers = []
254 okay_transfers = []
255 for tr in transfers:
256 if isinstance(tr['amount'], dict):
257 tr['amount'] = Money(**tr['amount'])
258 beneficiary = tr['beneficiary'] = website.db.Participant.from_id(tr['tippee_id'])
259 tip = tr['tip'] = payer.get_tip_to(beneficiary)
260 if tip.renewal_mode < 1 or (tip.renewal_mode == 2) != automatic:
261 canceled_transfers.append(tr)
262 elif beneficiary.status != 'active' or beneficiary.is_suspended or \
263 beneficiary.payment_providers & 1 == 0:
264 impossible_transfers.append(tr)
265 else:
266 okay_transfers.append(tr)
267 return okay_transfers, canceled_transfers, impossible_transfers
268
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/liberapay/payin/cron.py b/liberapay/payin/cron.py
--- a/liberapay/payin/cron.py
+++ b/liberapay/payin/cron.py
@@ -159,6 +159,7 @@
WHERE r.participant = sp.payer
AND r.status = 'chargeable'
AND r.network::text LIKE 'stripe-%%'
+ AND ( sp.amount::currency = 'EUR' OR r.network <> 'stripe-sdd' )
ORDER BY r.is_default NULLS LAST
, r.network = 'stripe-sdd' DESC
, r.ctime DESC
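Note on the design choice: the new predicate `( sp.amount::currency = 'EUR' OR r.network <> 'stripe-sdd' )` lives in the LATERAL route-selection subquery rather than at charge time, so a non-EUR scheduled payin is only ever paired with a non-SDD route; if no such route exists, the payin simply isn't picked up by this query. A one-line Python mirror of the condition, purely for reading the boolean the same way the SQL does (hypothetical, not part of the codebase):

```python
selectable = lambda payin_currency, network: payin_currency == 'EUR' or network != 'stripe-sdd'
assert selectable('EUR', 'stripe-sdd') and selectable('GBP', 'stripe-card') and not selectable('GBP', 'stripe-sdd')
```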
| {"golden_diff": "diff --git a/liberapay/payin/cron.py b/liberapay/payin/cron.py\n--- a/liberapay/payin/cron.py\n+++ b/liberapay/payin/cron.py\n@@ -159,6 +159,7 @@\n WHERE r.participant = sp.payer\n AND r.status = 'chargeable'\n AND r.network::text LIKE 'stripe-%%'\n+ AND ( sp.amount::currency = 'EUR' OR r.network <> 'stripe-sdd' )\n ORDER BY r.is_default NULLS LAST\n , r.network = 'stripe-sdd' DESC\n , r.ctime DESC\n", "issue": "Automatic renewal failure due to currency mismatch\nStripe error message: `The currency specified (gbp) does not match the currency on the source (eur). A payment can only be created for the same currency as the corresponding source. (request ID: req_PSmPQ85CKvW5kz)`.\nSEPA Direct Debit failures\nA European donor who tried to send money to a Canadian got the following error message from Stripe: `For 'sepa_debit' payments, we currently require your account to have a bank account in one of the following currencies: eur, usd, gbp, sek, nok, dkk, chf`.\nSEPA Direct Debit failures\nA European donor who tried to send money to a Canadian got the following error message from Stripe: `For 'sepa_debit' payments, we currently require your account to have a bank account in one of the following currencies: eur, usd, gbp, sek, nok, dkk, chf`.\n", "before_files": [{"content": "from collections import defaultdict\nfrom datetime import date\n\nfrom pando import json\n\nfrom ..cron import logger\nfrom ..i18n.currencies import Money\nfrom ..website import website\nfrom ..utils import utcnow\nfrom .common import prepare_donation, prepare_payin\nfrom .stripe import charge\n\n\ndef send_donation_reminder_notifications():\n \"\"\"This function reminds donors to renew their donations.\n\n The notifications are sent two weeks before the due date.\n \"\"\"\n db = website.db\n counts = defaultdict(int)\n rows = db.all(\"\"\"\n SELECT (SELECT p FROM participants p WHERE p.id = sp.payer) AS payer\n , json_agg((SELECT a FROM (\n SELECT sp.id, sp.execution_date, sp.amount, sp.transfers\n ) a ORDER BY a.execution_date)) AS payins\n FROM scheduled_payins sp\n WHERE sp.execution_date <= (current_date + interval '14 days')\n AND sp.automatic IS NOT true\n AND sp.payin IS NULL\n AND sp.ctime < (current_timestamp - interval '6 hours')\n GROUP BY sp.payer\n HAVING count(*) FILTER (\n WHERE sp.notifs_count = 0\n OR sp.notifs_count = 1 AND sp.last_notif_ts <= (current_date - interval '4 weeks')\n OR sp.notifs_count = 2 AND sp.last_notif_ts <= (current_date - interval '26 weeks')\n ) > 0\n \"\"\")\n for payer, payins in rows:\n if payer.is_suspended or payer.status != 'active':\n continue\n _check_scheduled_payins(db, payer, payins, automatic=False)\n if not payins:\n continue\n donations = []\n for sp in payins:\n for tr in sp['transfers']:\n donations.append({\n 'periodic_amount': tr['tip'].periodic_amount,\n 'tippee_username': tr['tippee_username'],\n })\n payer.notify('donate_reminder', donations=donations, email_unverified_address=True)\n counts['donate_reminder'] += 1\n db.run(\"\"\"\n UPDATE scheduled_payins\n SET notifs_count = notifs_count + 1\n , last_notif_ts = now()\n WHERE payer = %s\n AND id IN %s\n \"\"\", (payer.id, tuple(sp['id'] for sp in payins)))\n for k, n in sorted(counts.items()):\n logger.info(\"Sent %i %s notifications.\" % (n, k))\n\n\ndef send_upcoming_debit_notifications():\n \"\"\"This daily cron job notifies donors who are about to be debited.\n\n The notifications are sent at most once a month, 14 days before the first\n payment of the \"month\" (31 days, not the calendar 
month).\n \"\"\"\n db = website.db\n counts = defaultdict(int)\n rows = db.all(\"\"\"\n SELECT (SELECT p FROM participants p WHERE p.id = sp.payer) AS payer\n , json_agg((SELECT a FROM (\n SELECT sp.id, sp.execution_date, sp.amount, sp.transfers\n ) a ORDER BY a.execution_date)) AS payins\n FROM scheduled_payins sp\n WHERE sp.execution_date <= (current_date + interval '45 days')\n AND sp.automatic\n AND sp.notifs_count = 0\n AND sp.payin IS NULL\n AND sp.ctime < (current_timestamp - interval '6 hours')\n GROUP BY sp.payer, (sp.amount).currency\n HAVING min(sp.execution_date) <= (current_date + interval '14 days')\n \"\"\")\n for payer, payins in rows:\n if payer.is_suspended or payer.status != 'active':\n continue\n _check_scheduled_payins(db, payer, payins, automatic=True)\n if not payins:\n continue\n context = {\n 'payins': payins,\n 'total_amount': sum(sp['amount'] for sp in payins),\n }\n for sp in context['payins']:\n for tr in sp['transfers']:\n del tr['tip'], tr['beneficiary']\n if len(payins) > 1:\n context['ndays'] = (payins[-1]['execution_date'] - utcnow().date()).days\n while True:\n route = db.one(\"\"\"\n SELECT r\n FROM exchange_routes r\n WHERE r.participant = %s\n AND r.status = 'chargeable'\n AND r.network::text LIKE 'stripe-%%'\n ORDER BY r.is_default NULLS LAST\n , r.network = 'stripe-sdd' DESC\n , r.ctime DESC\n LIMIT 1\n \"\"\", (payer.id,))\n if route is None:\n break\n route.sync_status()\n if route.status == 'chargeable':\n break\n if route:\n event = 'upcoming_debit'\n context['instrument_brand'] = route.get_brand()\n context['instrument_partial_number'] = route.get_partial_number()\n if route.network == 'stripe-sdd':\n source = route.stripe_source\n context.update({\n 'creditor_identifier': website.app_conf.sepa_creditor_identifier,\n 'mandate_creation_date': route.ctime.date(),\n 'mandate_id': source.sepa_debit.mandate_reference,\n 'mandate_url': source.sepa_debit.mandate_url,\n })\n else:\n event = 'missing_route'\n payer.notify(event, email_unverified_address=True, **context)\n counts[event] += 1\n db.run(\"\"\"\n UPDATE scheduled_payins\n SET notifs_count = notifs_count + 1\n , last_notif_ts = now()\n WHERE payer = %s\n AND id IN %s\n \"\"\", (payer.id, tuple(sp['id'] for sp in payins)))\n for k, n in sorted(counts.items()):\n logger.info(\"Sent %i %s notifications.\" % (n, k))\n\n\ndef execute_scheduled_payins():\n \"\"\"This daily cron job initiates scheduled payments.\n \"\"\"\n db = website.db\n counts = defaultdict(int)\n retry = False\n rows = db.all(\"\"\"\n SELECT sp.id, sp.execution_date, sp.transfers\n , p AS payer, r.*::exchange_routes AS route\n FROM scheduled_payins sp\n JOIN participants p ON p.id = sp.payer\n JOIN LATERAL (\n SELECT r.*\n FROM exchange_routes r\n WHERE r.participant = sp.payer\n AND r.status = 'chargeable'\n AND r.network::text LIKE 'stripe-%%'\n ORDER BY r.is_default NULLS LAST\n , r.network = 'stripe-sdd' DESC\n , r.ctime DESC\n LIMIT 1\n ) r ON true\n WHERE ( r.network = 'stripe-sdd' AND sp.execution_date <= (current_date + interval '5 days') OR\n r.network = 'stripe-card' AND sp.execution_date <= current_date )\n AND sp.last_notif_ts < (current_date - interval '2 days')\n AND sp.automatic\n AND sp.payin IS NULL\n AND p.is_suspended IS NOT TRUE\n \"\"\")\n for sp_id, execution_date, transfers, payer, route in rows:\n route.__dict__['participant'] = payer\n route.sync_status()\n if route.status != 'chargeable':\n retry = True\n continue\n transfers, canceled, impossible = _filter_transfers(payer, transfers, automatic=True)\n 
if impossible:\n for tr in impossible:\n tr['execution_date'] = execution_date\n del tr['beneficiary'], tr['tip']\n payer.notify('renewal_aborted', transfers=impossible)\n counts['renewal_aborted'] += 1\n if transfers:\n payin_amount = sum(tr['amount'] for tr in transfers)\n payin = prepare_payin(db, payer, payin_amount, route, off_session=True)\n for tr in transfers:\n prepare_donation(\n db, payin, tr['tip'], tr['beneficiary'], 'stripe',\n payer, route.country, tr['amount']\n )\n payin = charge(db, payin, payer)\n if payin.status in ('failed', 'succeeded'):\n payer.notify('payin_' + payin.status, payin=payin._asdict(), provider='Stripe')\n counts['payin_' + payin.status] += 1\n db.run(\"\"\"\n UPDATE scheduled_payins\n SET payin = %s\n , mtime = current_timestamp\n WHERE id = %s\n \"\"\", (payin.id, sp_id))\n else:\n db.run(\"DELETE FROM scheduled_payins WHERE id = %s\", (sp_id,))\n for k, n in sorted(counts.items()):\n logger.info(\"Sent %i %s notifications.\" % (n, k))\n if retry:\n execute_scheduled_payins()\n\n\ndef _check_scheduled_payins(db, payer, payins, automatic):\n \"\"\"Check scheduled payins before they're acted upon.\n\n A scheduled payin can be out of sync with the state of the donor's tips or\n the status of the recipient's account if the `Participant.schedule_renewals()`\n method wasn't successfully called.\n \"\"\"\n for sp in list(payins):\n if isinstance(sp['amount'], dict):\n sp['amount'] = Money(**sp['amount'])\n sp['execution_date'] = date(*map(int, sp['execution_date'].split('-')))\n canceled, impossible = _filter_transfers(payer, sp['transfers'], automatic)[1:]\n if canceled:\n if len(canceled) == len(sp['transfers']):\n payins.remove(sp)\n db.run(\"DELETE FROM scheduled_payins WHERE id = %(id)s\", sp)\n else:\n old_tippee_ids = set(tr['tippee_id'] for tr in canceled)\n sp['transfers'] = [\n tr for tr in sp['transfers'] if tr['tippee_id'] not in old_tippee_ids\n ]\n sp['amount'] = sum(tr['amount'] for tr in sp['transfers'])\n db.run(\"\"\"\n UPDATE scheduled_payins\n SET amount = %(amount)s\n , transfers = %(transfers)s\n , mtime = current_timestamp\n WHERE id = %(id)s\n \"\"\", dict(sp, transfers=json.dumps([\n {k: v for k, v in tr.items() if k not in ('tip', 'beneficiary')}\n for tr in sp['transfers']\n ])))\n for tr in impossible:\n tr['impossible'] = True\n\n\ndef _filter_transfers(payer, transfers, automatic):\n \"\"\"Splits scheduled transfers into 3 lists: \"okay\", \"canceled\" and \"impossible\".\n \"\"\"\n canceled_transfers = []\n impossible_transfers = []\n okay_transfers = []\n for tr in transfers:\n if isinstance(tr['amount'], dict):\n tr['amount'] = Money(**tr['amount'])\n beneficiary = tr['beneficiary'] = website.db.Participant.from_id(tr['tippee_id'])\n tip = tr['tip'] = payer.get_tip_to(beneficiary)\n if tip.renewal_mode < 1 or (tip.renewal_mode == 2) != automatic:\n canceled_transfers.append(tr)\n elif beneficiary.status != 'active' or beneficiary.is_suspended or \\\n beneficiary.payment_providers & 1 == 0:\n impossible_transfers.append(tr)\n else:\n okay_transfers.append(tr)\n return okay_transfers, canceled_transfers, impossible_transfers\n", "path": "liberapay/payin/cron.py"}], "after_files": [{"content": "from collections import defaultdict\nfrom datetime import date\n\nfrom pando import json\n\nfrom ..cron import logger\nfrom ..i18n.currencies import Money\nfrom ..website import website\nfrom ..utils import utcnow\nfrom .common import prepare_donation, prepare_payin\nfrom .stripe import charge\n\n\ndef 
send_donation_reminder_notifications():\n \"\"\"This function reminds donors to renew their donations.\n\n The notifications are sent two weeks before the due date.\n \"\"\"\n db = website.db\n counts = defaultdict(int)\n rows = db.all(\"\"\"\n SELECT (SELECT p FROM participants p WHERE p.id = sp.payer) AS payer\n , json_agg((SELECT a FROM (\n SELECT sp.id, sp.execution_date, sp.amount, sp.transfers\n ) a ORDER BY a.execution_date)) AS payins\n FROM scheduled_payins sp\n WHERE sp.execution_date <= (current_date + interval '14 days')\n AND sp.automatic IS NOT true\n AND sp.payin IS NULL\n AND sp.ctime < (current_timestamp - interval '6 hours')\n GROUP BY sp.payer\n HAVING count(*) FILTER (\n WHERE sp.notifs_count = 0\n OR sp.notifs_count = 1 AND sp.last_notif_ts <= (current_date - interval '4 weeks')\n OR sp.notifs_count = 2 AND sp.last_notif_ts <= (current_date - interval '26 weeks')\n ) > 0\n \"\"\")\n for payer, payins in rows:\n if payer.is_suspended or payer.status != 'active':\n continue\n _check_scheduled_payins(db, payer, payins, automatic=False)\n if not payins:\n continue\n donations = []\n for sp in payins:\n for tr in sp['transfers']:\n donations.append({\n 'periodic_amount': tr['tip'].periodic_amount,\n 'tippee_username': tr['tippee_username'],\n })\n payer.notify('donate_reminder', donations=donations, email_unverified_address=True)\n counts['donate_reminder'] += 1\n db.run(\"\"\"\n UPDATE scheduled_payins\n SET notifs_count = notifs_count + 1\n , last_notif_ts = now()\n WHERE payer = %s\n AND id IN %s\n \"\"\", (payer.id, tuple(sp['id'] for sp in payins)))\n for k, n in sorted(counts.items()):\n logger.info(\"Sent %i %s notifications.\" % (n, k))\n\n\ndef send_upcoming_debit_notifications():\n \"\"\"This daily cron job notifies donors who are about to be debited.\n\n The notifications are sent at most once a month, 14 days before the first\n payment of the \"month\" (31 days, not the calendar month).\n \"\"\"\n db = website.db\n counts = defaultdict(int)\n rows = db.all(\"\"\"\n SELECT (SELECT p FROM participants p WHERE p.id = sp.payer) AS payer\n , json_agg((SELECT a FROM (\n SELECT sp.id, sp.execution_date, sp.amount, sp.transfers\n ) a ORDER BY a.execution_date)) AS payins\n FROM scheduled_payins sp\n WHERE sp.execution_date <= (current_date + interval '45 days')\n AND sp.automatic\n AND sp.notifs_count = 0\n AND sp.payin IS NULL\n AND sp.ctime < (current_timestamp - interval '6 hours')\n GROUP BY sp.payer, (sp.amount).currency\n HAVING min(sp.execution_date) <= (current_date + interval '14 days')\n \"\"\")\n for payer, payins in rows:\n if payer.is_suspended or payer.status != 'active':\n continue\n _check_scheduled_payins(db, payer, payins, automatic=True)\n if not payins:\n continue\n context = {\n 'payins': payins,\n 'total_amount': sum(sp['amount'] for sp in payins),\n }\n for sp in context['payins']:\n for tr in sp['transfers']:\n del tr['tip'], tr['beneficiary']\n if len(payins) > 1:\n context['ndays'] = (payins[-1]['execution_date'] - utcnow().date()).days\n while True:\n route = db.one(\"\"\"\n SELECT r\n FROM exchange_routes r\n WHERE r.participant = %s\n AND r.status = 'chargeable'\n AND r.network::text LIKE 'stripe-%%'\n ORDER BY r.is_default NULLS LAST\n , r.network = 'stripe-sdd' DESC\n , r.ctime DESC\n LIMIT 1\n \"\"\", (payer.id,))\n if route is None:\n break\n route.sync_status()\n if route.status == 'chargeable':\n break\n if route:\n event = 'upcoming_debit'\n context['instrument_brand'] = route.get_brand()\n context['instrument_partial_number'] = 
route.get_partial_number()\n if route.network == 'stripe-sdd':\n source = route.stripe_source\n context.update({\n 'creditor_identifier': website.app_conf.sepa_creditor_identifier,\n 'mandate_creation_date': route.ctime.date(),\n 'mandate_id': source.sepa_debit.mandate_reference,\n 'mandate_url': source.sepa_debit.mandate_url,\n })\n else:\n event = 'missing_route'\n payer.notify(event, email_unverified_address=True, **context)\n counts[event] += 1\n db.run(\"\"\"\n UPDATE scheduled_payins\n SET notifs_count = notifs_count + 1\n , last_notif_ts = now()\n WHERE payer = %s\n AND id IN %s\n \"\"\", (payer.id, tuple(sp['id'] for sp in payins)))\n for k, n in sorted(counts.items()):\n logger.info(\"Sent %i %s notifications.\" % (n, k))\n\n\ndef execute_scheduled_payins():\n \"\"\"This daily cron job initiates scheduled payments.\n \"\"\"\n db = website.db\n counts = defaultdict(int)\n retry = False\n rows = db.all(\"\"\"\n SELECT sp.id, sp.execution_date, sp.transfers\n , p AS payer, r.*::exchange_routes AS route\n FROM scheduled_payins sp\n JOIN participants p ON p.id = sp.payer\n JOIN LATERAL (\n SELECT r.*\n FROM exchange_routes r\n WHERE r.participant = sp.payer\n AND r.status = 'chargeable'\n AND r.network::text LIKE 'stripe-%%'\n AND ( sp.amount::currency = 'EUR' OR r.network <> 'stripe-sdd' )\n ORDER BY r.is_default NULLS LAST\n , r.network = 'stripe-sdd' DESC\n , r.ctime DESC\n LIMIT 1\n ) r ON true\n WHERE ( r.network = 'stripe-sdd' AND sp.execution_date <= (current_date + interval '5 days') OR\n r.network = 'stripe-card' AND sp.execution_date <= current_date )\n AND sp.last_notif_ts < (current_date - interval '2 days')\n AND sp.automatic\n AND sp.payin IS NULL\n AND p.is_suspended IS NOT TRUE\n \"\"\")\n for sp_id, execution_date, transfers, payer, route in rows:\n route.__dict__['participant'] = payer\n route.sync_status()\n if route.status != 'chargeable':\n retry = True\n continue\n transfers, canceled, impossible = _filter_transfers(payer, transfers, automatic=True)\n if impossible:\n for tr in impossible:\n tr['execution_date'] = execution_date\n del tr['beneficiary'], tr['tip']\n payer.notify('renewal_aborted', transfers=impossible)\n counts['renewal_aborted'] += 1\n if transfers:\n payin_amount = sum(tr['amount'] for tr in transfers)\n payin = prepare_payin(db, payer, payin_amount, route, off_session=True)\n for tr in transfers:\n prepare_donation(\n db, payin, tr['tip'], tr['beneficiary'], 'stripe',\n payer, route.country, tr['amount']\n )\n payin = charge(db, payin, payer)\n if payin.status in ('failed', 'succeeded'):\n payer.notify('payin_' + payin.status, payin=payin._asdict(), provider='Stripe')\n counts['payin_' + payin.status] += 1\n db.run(\"\"\"\n UPDATE scheduled_payins\n SET payin = %s\n , mtime = current_timestamp\n WHERE id = %s\n \"\"\", (payin.id, sp_id))\n else:\n db.run(\"DELETE FROM scheduled_payins WHERE id = %s\", (sp_id,))\n for k, n in sorted(counts.items()):\n logger.info(\"Sent %i %s notifications.\" % (n, k))\n if retry:\n execute_scheduled_payins()\n\n\ndef _check_scheduled_payins(db, payer, payins, automatic):\n \"\"\"Check scheduled payins before they're acted upon.\n\n A scheduled payin can be out of sync with the state of the donor's tips or\n the status of the recipient's account if the `Participant.schedule_renewals()`\n method wasn't successfully called.\n \"\"\"\n for sp in list(payins):\n if isinstance(sp['amount'], dict):\n sp['amount'] = Money(**sp['amount'])\n sp['execution_date'] = date(*map(int, sp['execution_date'].split('-')))\n 
canceled, impossible = _filter_transfers(payer, sp['transfers'], automatic)[1:]\n if canceled:\n if len(canceled) == len(sp['transfers']):\n payins.remove(sp)\n db.run(\"DELETE FROM scheduled_payins WHERE id = %(id)s\", sp)\n else:\n old_tippee_ids = set(tr['tippee_id'] for tr in canceled)\n sp['transfers'] = [\n tr for tr in sp['transfers'] if tr['tippee_id'] not in old_tippee_ids\n ]\n sp['amount'] = sum(tr['amount'] for tr in sp['transfers'])\n db.run(\"\"\"\n UPDATE scheduled_payins\n SET amount = %(amount)s\n , transfers = %(transfers)s\n , mtime = current_timestamp\n WHERE id = %(id)s\n \"\"\", dict(sp, transfers=json.dumps([\n {k: v for k, v in tr.items() if k not in ('tip', 'beneficiary')}\n for tr in sp['transfers']\n ])))\n for tr in impossible:\n tr['impossible'] = True\n\n\ndef _filter_transfers(payer, transfers, automatic):\n \"\"\"Splits scheduled transfers into 3 lists: \"okay\", \"canceled\" and \"impossible\".\n \"\"\"\n canceled_transfers = []\n impossible_transfers = []\n okay_transfers = []\n for tr in transfers:\n if isinstance(tr['amount'], dict):\n tr['amount'] = Money(**tr['amount'])\n beneficiary = tr['beneficiary'] = website.db.Participant.from_id(tr['tippee_id'])\n tip = tr['tip'] = payer.get_tip_to(beneficiary)\n if tip.renewal_mode < 1 or (tip.renewal_mode == 2) != automatic:\n canceled_transfers.append(tr)\n elif beneficiary.status != 'active' or beneficiary.is_suspended or \\\n beneficiary.payment_providers & 1 == 0:\n impossible_transfers.append(tr)\n else:\n okay_transfers.append(tr)\n return okay_transfers, canceled_transfers, impossible_transfers\n", "path": "liberapay/payin/cron.py"}]} | 3,675 | 144 |
gh_patches_debug_2672 | rasdani/github-patches | git_diff | e-valuation__EvaP-1666 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make Typescript code Prettier
We should add automated formatting for our typescript files. I think https://prettier.io/ is pretty good, but the choice is open for discussion. The formatting should be done in `manage.py format` and be checked in CI.
--- END ISSUE ---
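As a standalone sketch (not the repository's actual code), the formatter command described above could be extended to run Prettier over the TypeScript sources via `npx`; the `evap/static/ts/src` path is assumed from the project's static-file layout:

```python
import subprocess


def run_formatters() -> None:
    # Existing Python formatters; check=False keeps one failure from aborting the rest.
    subprocess.run(["black", "evap"], check=False)
    subprocess.run(["isort", "."], check=False)
    # Assumed addition: format TypeScript sources with Prettier through npx.
    subprocess.run(["npx", "prettier", "--write", "evap/static/ts/src"], check=False)


if __name__ == "__main__":
    run_formatters()
```

In CI the same invocation could use Prettier's `--check` flag instead of `--write`, so unformatted files fail the build.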
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/evaluation/management/commands/format.py`
Content:
```
1 import subprocess # nosec
2
3 from django.core.management.base import BaseCommand
4
5
6 class Command(BaseCommand):
7 args = ""
8 help = "Runs the code formatter"
9 requires_migrations_checks = False
10
11 def handle(self, *args, **options):
12 subprocess.run(["black", "evap"], check=False) # nosec
13 subprocess.run(["isort", "."], check=False) # nosec
14
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evap/evaluation/management/commands/format.py b/evap/evaluation/management/commands/format.py
--- a/evap/evaluation/management/commands/format.py
+++ b/evap/evaluation/management/commands/format.py
@@ -11,3 +11,4 @@
def handle(self, *args, **options):
subprocess.run(["black", "evap"], check=False) # nosec
subprocess.run(["isort", "."], check=False) # nosec
+ subprocess.run(["npx", "prettier", "--write", "evap/static/ts/src"], check=False) # nosec
| {"golden_diff": "diff --git a/evap/evaluation/management/commands/format.py b/evap/evaluation/management/commands/format.py\n--- a/evap/evaluation/management/commands/format.py\n+++ b/evap/evaluation/management/commands/format.py\n@@ -11,3 +11,4 @@\n def handle(self, *args, **options):\n subprocess.run([\"black\", \"evap\"], check=False) # nosec\n subprocess.run([\"isort\", \".\"], check=False) # nosec\n+ subprocess.run([\"npx\", \"prettier\", \"--write\", \"evap/static/ts/src\"], check=False) # nosec\n", "issue": "Make Typescript code Prettier\nWe should add automated formatting for our typescript files. I think https://prettier.io/ is pretty good, but the choice is open for discussion. The formatting should be done in `manage.py format` and be checked in CI.\n", "before_files": [{"content": "import subprocess # nosec\n\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n args = \"\"\n help = \"Runs the code formatter\"\n requires_migrations_checks = False\n\n def handle(self, *args, **options):\n subprocess.run([\"black\", \"evap\"], check=False) # nosec\n subprocess.run([\"isort\", \".\"], check=False) # nosec\n", "path": "evap/evaluation/management/commands/format.py"}], "after_files": [{"content": "import subprocess # nosec\n\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n args = \"\"\n help = \"Runs the code formatter\"\n requires_migrations_checks = False\n\n def handle(self, *args, **options):\n subprocess.run([\"black\", \"evap\"], check=False) # nosec\n subprocess.run([\"isort\", \".\"], check=False) # nosec\n subprocess.run([\"npx\", \"prettier\", \"--write\", \"evap/static/ts/src\"], check=False) # nosec\n", "path": "evap/evaluation/management/commands/format.py"}]} | 432 | 145 |
gh_patches_debug_29734 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3007 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AZURE_116 fails with Terraform Azure provider >= v.2.97.0
**Describe the issue**
This issue is regarding CKV_AZURE_116.
Since v2.97.0 of the Azure Terraform provider, the Azure policies add-on is enabled by setting
```terraform
azure_policy_enabled = true
```
as a top-level property in an `azurerm_kubernetes_cluster` resource definition, instead of the following
```terraform
addon_profile {
azure_policy {
enabled = true
}
}
```
The check should be updated to reflect the change.
**Examples**
```terraform
resource "azurerm_kubernetes_cluster" "example" {
azure_policy_enabled = true
}
```
should pass the check for CKV_AZURE_116.
**Version:**
2.0.1160
**Additional context**
None
I will submit a PR to fix this.
--- END ISSUE ---
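As a rough standalone sketch of the logic the issue calls for (simplified, without checkov's base classes or result enums), a check could accept either the newer top-level `azure_policy_enabled` property or the older nested `addon_profile` block; the single-element lists mirror how checkov represents parsed HCL values:

```python
from typing import Any, Dict, List


def azure_policy_addon_enabled(conf: Dict[str, List[Any]]) -> bool:
    """Return True if an azurerm_kubernetes_cluster config enables the Azure Policy add-on."""
    # Azure provider >= v2.97.0: top-level boolean property.
    if conf.get("azure_policy_enabled", [None])[0]:
        return True
    # Azure provider <= v2.96.0: addon_profile -> azure_policy -> enabled.
    addon_profile = conf.get("addon_profile", [None])[0]
    if isinstance(addon_profile, dict):
        azure_policy = addon_profile.get("azure_policy", [None])[0]
        if isinstance(azure_policy, dict) and azure_policy.get("enabled", [None])[0]:
            return True
    return False


print(azure_policy_addon_enabled({"azure_policy_enabled": [True]}))                               # True
print(azure_policy_addon_enabled({"addon_profile": [{"azure_policy": [{"enabled": [True]}]}]}))   # True
print(azure_policy_addon_enabled({}))                                                             # False
```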
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/azure/AKSUsesAzurePoliciesAddon.py`
Content:
```
1 from checkov.common.models.enums import CheckCategories
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3
4
5 class AKSUsesAzurePoliciesAddon(BaseResourceValueCheck):
6 def __init__(self):
7 name = "Ensure that AKS uses Azure Policies Add-on"
8 id = "CKV_AZURE_116"
9 supported_resources = ['azurerm_kubernetes_cluster']
10 categories = [CheckCategories.NETWORKING]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def get_inspected_key(self):
14 return "addon_profile/[0]/azure_policy/[0]/enabled"
15
16
17 check = AKSUsesAzurePoliciesAddon()
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/azure/AKSUsesAzurePoliciesAddon.py b/checkov/terraform/checks/resource/azure/AKSUsesAzurePoliciesAddon.py
--- a/checkov/terraform/checks/resource/azure/AKSUsesAzurePoliciesAddon.py
+++ b/checkov/terraform/checks/resource/azure/AKSUsesAzurePoliciesAddon.py
@@ -1,8 +1,10 @@
-from checkov.common.models.enums import CheckCategories
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
+from typing import Dict, List, Any
+from checkov.common.models.enums import CheckCategories, CheckResult
+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
-class AKSUsesAzurePoliciesAddon(BaseResourceValueCheck):
+
+class AKSUsesAzurePoliciesAddon(BaseResourceCheck):
def __init__(self):
name = "Ensure that AKS uses Azure Policies Add-on"
id = "CKV_AZURE_116"
@@ -10,8 +12,20 @@
categories = [CheckCategories.NETWORKING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def get_inspected_key(self):
- return "addon_profile/[0]/azure_policy/[0]/enabled"
+ def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
+ # since Azure provider v2.97.0
+ azure_policy_enabled = conf.get("azure_policy_enabled", [None])[0]
+ if azure_policy_enabled:
+ self.evaluated_keys = ["azure_policy_enabled"]
+ return CheckResult.PASSED
+ # up to and including Azure provider v2.96.0
+ self.evaluated_keys = ["addon_profile/[0]/azure_policy/[0]/enabled"]
+ addon_profile = conf.get("addon_profile", [None])[0]
+ if addon_profile and isinstance(addon_profile, dict):
+ azure_policy = addon_profile.get("azure_policy", [None])[0]
+ if azure_policy and isinstance(azure_policy, dict) and azure_policy.get("enabled", [None])[0]:
+ return CheckResult.PASSED
+ return CheckResult.FAILED
check = AKSUsesAzurePoliciesAddon()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/AKSUsesAzurePoliciesAddon.py b/checkov/terraform/checks/resource/azure/AKSUsesAzurePoliciesAddon.py\n--- a/checkov/terraform/checks/resource/azure/AKSUsesAzurePoliciesAddon.py\n+++ b/checkov/terraform/checks/resource/azure/AKSUsesAzurePoliciesAddon.py\n@@ -1,8 +1,10 @@\n-from checkov.common.models.enums import CheckCategories\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n+from typing import Dict, List, Any\n \n+from checkov.common.models.enums import CheckCategories, CheckResult\n+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n-class AKSUsesAzurePoliciesAddon(BaseResourceValueCheck):\n+\n+class AKSUsesAzurePoliciesAddon(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure that AKS uses Azure Policies Add-on\"\n id = \"CKV_AZURE_116\"\n@@ -10,8 +12,20 @@\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def get_inspected_key(self):\n- return \"addon_profile/[0]/azure_policy/[0]/enabled\"\n+ def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n+ # since Azure provider v2.97.0\n+ azure_policy_enabled = conf.get(\"azure_policy_enabled\", [None])[0]\n+ if azure_policy_enabled:\n+ self.evaluated_keys = [\"azure_policy_enabled\"]\n+ return CheckResult.PASSED\n+ # up to and including Azure provider v2.96.0\n+ self.evaluated_keys = [\"addon_profile/[0]/azure_policy/[0]/enabled\"]\n+ addon_profile = conf.get(\"addon_profile\", [None])[0]\n+ if addon_profile and isinstance(addon_profile, dict):\n+ azure_policy = addon_profile.get(\"azure_policy\", [None])[0]\n+ if azure_policy and isinstance(azure_policy, dict) and azure_policy.get(\"enabled\", [None])[0]:\n+ return CheckResult.PASSED\n+ return CheckResult.FAILED\n \n \n check = AKSUsesAzurePoliciesAddon()\n", "issue": "CKV_AZURE_116 fails with Terraform Azure provider >= v.2.97.0\n**Describe the issue**\r\nThis issue is regarding CKV_AZURE_116.\r\nSince v2.97.0 of the Azure Terraform provider, the Azure policies add-on is enabled by setting\r\n\r\n```terraform\r\nazure_policy_enabled = true\r\n```\r\n\r\nas a top-level property in a `azurerm_kubernetes_cluster` resource definition, instead of the following\r\n\r\n```terraform\r\naddon_profile {\r\n azure_policy {\r\n enabled = true\r\n }\r\n}\r\n```\r\nThe check should be updated to reflect the change.\r\n\r\n**Examples**\r\n```terraform\r\nresource \"azurerm_kubernetes_cluster\" \"example\" {\r\n azure_policy_enabled = true\r\n}\r\n```\r\nshould pass the check for CKV_AZURE_116.\r\n\r\n**Version:**\r\n2.0.1160\r\n\r\n**Additional context**\r\nNone\r\n\r\nI will submit a PR to fix this.\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass AKSUsesAzurePoliciesAddon(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that AKS uses Azure Policies Add-on\"\n id = \"CKV_AZURE_116\"\n supported_resources = ['azurerm_kubernetes_cluster']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"addon_profile/[0]/azure_policy/[0]/enabled\"\n\n\ncheck = AKSUsesAzurePoliciesAddon()\n", "path": 
"checkov/terraform/checks/resource/azure/AKSUsesAzurePoliciesAddon.py"}], "after_files": [{"content": "from typing import Dict, List, Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass AKSUsesAzurePoliciesAddon(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure that AKS uses Azure Policies Add-on\"\n id = \"CKV_AZURE_116\"\n supported_resources = ['azurerm_kubernetes_cluster']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n # since Azure provider v2.97.0\n azure_policy_enabled = conf.get(\"azure_policy_enabled\", [None])[0]\n if azure_policy_enabled:\n self.evaluated_keys = [\"azure_policy_enabled\"]\n return CheckResult.PASSED\n # up to and including Azure provider v2.96.0\n self.evaluated_keys = [\"addon_profile/[0]/azure_policy/[0]/enabled\"]\n addon_profile = conf.get(\"addon_profile\", [None])[0]\n if addon_profile and isinstance(addon_profile, dict):\n azure_policy = addon_profile.get(\"azure_policy\", [None])[0]\n if azure_policy and isinstance(azure_policy, dict) and azure_policy.get(\"enabled\", [None])[0]:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = AKSUsesAzurePoliciesAddon()\n", "path": "checkov/terraform/checks/resource/azure/AKSUsesAzurePoliciesAddon.py"}]} | 660 | 509 |
gh_patches_debug_14134 | rasdani/github-patches | git_diff | pypa__cibuildwheel-88 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'wheel' missing from setup.py install_requires list?
While trying to use a vanilla cibuildwheel configuration with AppVeyor, I kept running into this error,
```
cibuildwheel: Failed to get name of the package. Command was ['c:\\python27\\python.exe', '.\\setup.py', '--name']
```
It was only after running that command (`python .\\setup.py --name`) ahead of `cibuildwheel --output-dir wheelhouse` that I finally got a clue of what was going on,
```
setup requires that the Python package 'wheel' be installed. Try the command 'pip install wheel'.
```
To resolve this problem I've added `'wheel'` to the [setup.py `install_requires` list](https://github.com/joerick/cibuildwheel/blob/master/setup.py#L12) in our fork.
I think that two changes are necessary:
1. On [line 111 of `__main__.py`](https://github.com/joerick/cibuildwheel/blob/master/cibuildwheel/__main__.py#L111), print the **`output`** of [CalledProcessError](https://docs.python.org/2/library/subprocess.html#subprocess.CalledProcessError), and exit with the value of **`returncode`**.
2. Add `'wheel'` to [setup.py `install_requires` list](https://github.com/joerick/cibuildwheel/blob/master/setup.py#L12)
Do you want a PR?
--- END ISSUE ---
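A minimal standalone sketch of the error handling proposed in point 1 above: print the captured output of the failed `setup.py --name` call and exit with its return code. The helper name here is illustrative, not cibuildwheel's actual API:

```python
from __future__ import print_function
import subprocess
import sys


def get_package_name(project_setup_py="setup.py"):
    try:
        name_output = subprocess.check_output(
            [sys.executable, project_setup_py, "--name"], universal_newlines=True
        )
    except subprocess.CalledProcessError as err:
        # Surface whatever the child wrote to its captured output and
        # propagate its exit status instead of a generic error code.
        print(err.output)
        print("Failed to get name of the package. Command was %s" % err.cmd, file=sys.stderr)
        sys.exit(err.returncode)
    # The last line of output is the package name.
    return name_output.strip().splitlines()[-1]
```

Point 2 — adding `'wheel'` to `install_requires` in `setup.py` — is a one-line packaging change and needs no sketch.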
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cibuildwheel/__main__.py`
Content:
```
1 from __future__ import print_function
2 import argparse, os, subprocess, sys, textwrap
3
4 import cibuildwheel
5 import cibuildwheel.linux, cibuildwheel.windows, cibuildwheel.macos
6 from cibuildwheel.environment import parse_environment, EnvironmentParseError
7 from cibuildwheel.util import BuildSkipper
8
9 def get_option_from_environment(option_name, platform=None, default=None):
10 '''
11 Returns an option from the environment, optionally scoped by the platform.
12
13 Example:
14 get_option_from_environment('CIBW_COLOR', platform='macos')
15
16 This will return the value of CIBW_COLOR_MACOS if it exists, otherwise the value of
17 CIBW_COLOR.
18 '''
19 if platform:
20 option = os.environ.get('%s_%s' % (option_name, platform.upper()))
21 if option is not None:
22 return option
23
24 return os.environ.get(option_name, default)
25
26
27 def main():
28 parser = argparse.ArgumentParser(
29 description='Build wheels for all the platforms.',
30 epilog=('Most options are supplied via environment variables. '
31 'See https://github.com/joerick/cibuildwheel#options for info.'))
32
33 parser.add_argument('--platform',
34 choices=['auto', 'linux', 'macos', 'windows'],
35 default=os.environ.get('CIBW_PLATFORM', 'auto'),
36 help=('Platform to build for. For "linux" you need docker running, on Mac '
37 'or Linux. For "macos", you need a Mac machine, and note that this '
38 'script is going to automatically install MacPython on your system, '
39 'so don\'t run on your development machine. For "windows", you need to '
40 'run in Windows, and it will build and test for all versions of '
41 'Python at C:\\PythonXX[-x64]. Default: auto.'))
42 parser.add_argument('--output-dir',
43 default=os.environ.get('CIBW_OUTPUT_DIR', 'wheelhouse'),
44 help='Destination folder for the wheels.')
45 parser.add_argument('project_dir',
46 default='.',
47 nargs='?',
48 help=('Path to the project that you want wheels for. Default: the current '
49 'directory.'))
50
51 args = parser.parse_args()
52
53 if args.platform != 'auto':
54 platform = args.platform
55 else:
56 if os.environ.get('TRAVIS_OS_NAME') == 'linux':
57 platform = 'linux'
58 elif os.environ.get('TRAVIS_OS_NAME') == 'osx':
59 platform = 'macos'
60 elif 'APPVEYOR' in os.environ:
61 platform = 'windows'
62 elif 'BITRISE_BUILD_NUMBER' in os.environ:
63 platform = 'macos'
64 else:
65 print('cibuildwheel: Unable to detect platform. cibuildwheel should run on your CI server, '
66 'Travis CI and Appveyor are supported. You can run on your development '
67 'machine using the --platform argument. Check --help output for more '
68 'information.',
69 file=sys.stderr)
70 exit(2)
71
72 output_dir = args.output_dir
73 test_command = get_option_from_environment('CIBW_TEST_COMMAND', platform=platform)
74 test_requires = get_option_from_environment('CIBW_TEST_REQUIRES', platform=platform, default='').split()
75 project_dir = args.project_dir
76 before_build = get_option_from_environment('CIBW_BEFORE_BUILD', platform=platform)
77 build_verbosity = get_option_from_environment('CIBW_BUILD_VERBOSITY', platform=platform, default='')
78 skip_config = os.environ.get('CIBW_SKIP', '')
79 environment_config = get_option_from_environment('CIBW_ENVIRONMENT', platform=platform, default='')
80
81 try:
82 build_verbosity = min(3, max(-3, int(build_verbosity)))
83 except ValueError:
84 build_verbosity = 0
85
86 try:
87 environment = parse_environment(environment_config)
88 except (EnvironmentParseError, ValueError) as e:
89 print('cibuildwheel: Malformed environment option "%s"' % environment_config, file=sys.stderr)
90 import traceback
91 traceback.print_exc(None, sys.stderr)
92 exit(2)
93
94 skip = BuildSkipper(skip_config)
95
96 # Add CIBUILDWHEEL environment variable
97 # This needs to be passed on to the docker container in linux.py
98 os.environ['CIBUILDWHEEL'] = '1'
99
100 try:
101 project_setup_py = os.path.join(project_dir, 'setup.py')
102 name_output = subprocess.check_output([sys.executable, project_setup_py, '--name'],
103 universal_newlines=True)
104 # the last line of output is the name
105 package_name = name_output.strip().splitlines()[-1]
106 except subprocess.CalledProcessError as err:
107 if not os.path.exists(project_setup_py):
108 print('cibuildwheel: Could not find setup.py at root of project', file=sys.stderr)
109 exit(2)
110 else:
111 print('cibuildwheel: Failed to get name of the package. Command was %s' % err.cmd,
112 file=sys.stderr)
113 exit(2)
114
115 if package_name == '' or package_name == 'UNKNOWN':
116 print('cibuildwheel: Invalid package name "%s". Check your setup.py' % package_name,
117 file=sys.stderr)
118 exit(2)
119
120 build_options = dict(
121 project_dir=project_dir,
122 package_name=package_name,
123 output_dir=output_dir,
124 test_command=test_command,
125 test_requires=test_requires,
126 before_build=before_build,
127 build_verbosity=build_verbosity,
128 skip=skip,
129 environment=environment,
130 )
131
132 if platform == 'linux':
133 manylinux1_x86_64_image = os.environ.get('CIBW_MANYLINUX1_X86_64_IMAGE', None)
134 manylinux1_i686_image = os.environ.get('CIBW_MANYLINUX1_I686_IMAGE', None)
135
136 build_options.update(
137 manylinux1_images={'x86_64': manylinux1_x86_64_image, 'i686': manylinux1_i686_image},
138 )
139 elif platform == 'macos':
140 pass
141 elif platform == 'windows':
142 pass
143
144 print_preamble(platform, build_options)
145
146 if not os.path.exists(output_dir):
147 os.makedirs(output_dir)
148
149 if platform == 'linux':
150 cibuildwheel.linux.build(**build_options)
151 elif platform == 'windows':
152 cibuildwheel.windows.build(**build_options)
153 elif platform == 'macos':
154 cibuildwheel.macos.build(**build_options)
155 else:
156 raise Exception('Unsupported platform')
157
158 def print_preamble(platform, build_options):
159 print(textwrap.dedent('''
160 _ _ _ _ _ _ _
161 ___|_| |_ _ _|_| |_| |_ _ _| |_ ___ ___| |
162 | _| | . | | | | | . | | | | | -_| -_| |
163 |___|_|___|___|_|_|___|_____|_|_|___|___|_|
164 '''))
165
166 print('cibuildwheel version %s\n' % cibuildwheel.__version__)
167
168
169 print('Build options:')
170 print(' platform: %r' % platform)
171 for option, value in build_options.items():
172 print(' %s: %r' % (option, value))
173
174 warnings = detect_warnings(platform, build_options)
175 if warnings:
176 print('\nWarnings:')
177 for warning in warnings:
178 print(' ' + warning)
179
180 print('\nHere we go!\n')
181
182 def detect_warnings(platform, build_options):
183 warnings = []
184
185 # warn about deprecated {python} and {pip}
186 for option_name in ['test_command', 'before_build']:
187 option_value = build_options.get(option_name)
188
189 if option_value:
190 if '{python}' in option_value or '{pip}' in option_value:
191 warnings.append(option_name + ": '{python}' and '{pip}' are no longer needed, and will be removed in a future release. Simply use 'python' or 'pip' instead.")
192
193 return warnings
194
195
196 if __name__ == '__main__':
197 main()
198
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cibuildwheel/__main__.py b/cibuildwheel/__main__.py
--- a/cibuildwheel/__main__.py
+++ b/cibuildwheel/__main__.py
@@ -108,9 +108,10 @@
print('cibuildwheel: Could not find setup.py at root of project', file=sys.stderr)
exit(2)
else:
+ print(err.output)
print('cibuildwheel: Failed to get name of the package. Command was %s' % err.cmd,
file=sys.stderr)
- exit(2)
+ exit(err.returncode)
if package_name == '' or package_name == 'UNKNOWN':
print('cibuildwheel: Invalid package name "%s". Check your setup.py' % package_name,
| {"golden_diff": "diff --git a/cibuildwheel/__main__.py b/cibuildwheel/__main__.py\n--- a/cibuildwheel/__main__.py\n+++ b/cibuildwheel/__main__.py\n@@ -108,9 +108,10 @@\n print('cibuildwheel: Could not find setup.py at root of project', file=sys.stderr)\n exit(2)\n else:\n+ print(err.output)\n print('cibuildwheel: Failed to get name of the package. Command was %s' % err.cmd,\n file=sys.stderr)\n- exit(2)\n+ exit(err.returncode)\n \n if package_name == '' or package_name == 'UNKNOWN':\n print('cibuildwheel: Invalid package name \"%s\". Check your setup.py' % package_name,\n", "issue": "'wheel' missing from setup.py install_requires list?\nWhile trying to use a vanilla cibuildwheel configuration with AppVeyor, I kept running into this error,\r\n```\r\ncibuildwheel: Failed to get name of the package. Command was ['c:\\\\python27\\\\python.exe', '.\\\\setup.py', '--name']\r\n```\r\n\r\nIt was only after running that command (`python .\\\\setup.py --name`) ahead of the `cibuildwheel --output-dir wheelhouse` that I finally got a clue of what was going on,\r\n```\r\nsetup requires that the Python package 'wheel' be installed. Try the command 'pip install wheel'.\r\n```\r\n\r\nTo resolve this problem I've added `'wheel'` to [setup.py `install_requires` list](https://github.com/joerick/cibuildwheel/blob/master/setup.py#L12) in our fork.\r\n\r\nI think that two changes are necessary:\r\n1. On [line 111 of `__main__.py`](https://github.com/joerick/cibuildwheel/blob/master/cibuildwheel/__main__.py#L111), print the **`output`** of [CalledProcessError](https://docs.python.org/2/library/subprocess.html#subprocess.CalledProcessError), and exit with the value of **`returncode`**.\r\n2. Add `'wheel'` to [setup.py `install_requires` list](https://github.com/joerick/cibuildwheel/blob/master/setup.py#L12)\r\n\r\nDo you want a PR?\n", "before_files": [{"content": "from __future__ import print_function\nimport argparse, os, subprocess, sys, textwrap\n\nimport cibuildwheel\nimport cibuildwheel.linux, cibuildwheel.windows, cibuildwheel.macos\nfrom cibuildwheel.environment import parse_environment, EnvironmentParseError\nfrom cibuildwheel.util import BuildSkipper\n\ndef get_option_from_environment(option_name, platform=None, default=None):\n '''\n Returns an option from the environment, optionally scoped by the platform.\n\n Example:\n get_option_from_environment('CIBW_COLOR', platform='macos')\n\n This will return the value of CIBW_COLOR_MACOS if it exists, otherwise the value of\n CIBW_COLOR.\n '''\n if platform:\n option = os.environ.get('%s_%s' % (option_name, platform.upper()))\n if option is not None:\n return option\n\n return os.environ.get(option_name, default)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Build wheels for all the platforms.',\n epilog=('Most options are supplied via environment variables. '\n 'See https://github.com/joerick/cibuildwheel#options for info.'))\n\n parser.add_argument('--platform',\n choices=['auto', 'linux', 'macos', 'windows'],\n default=os.environ.get('CIBW_PLATFORM', 'auto'),\n help=('Platform to build for. For \"linux\" you need docker running, on Mac '\n 'or Linux. For \"macos\", you need a Mac machine, and note that this '\n 'script is going to automatically install MacPython on your system, '\n 'so don\\'t run on your development machine. For \"windows\", you need to '\n 'run in Windows, and it will build and test for all versions of '\n 'Python at C:\\\\PythonXX[-x64]. 
Default: auto.'))\n parser.add_argument('--output-dir',\n default=os.environ.get('CIBW_OUTPUT_DIR', 'wheelhouse'),\n help='Destination folder for the wheels.')\n parser.add_argument('project_dir',\n default='.',\n nargs='?',\n help=('Path to the project that you want wheels for. Default: the current '\n 'directory.'))\n\n args = parser.parse_args()\n\n if args.platform != 'auto':\n platform = args.platform\n else:\n if os.environ.get('TRAVIS_OS_NAME') == 'linux':\n platform = 'linux'\n elif os.environ.get('TRAVIS_OS_NAME') == 'osx':\n platform = 'macos'\n elif 'APPVEYOR' in os.environ:\n platform = 'windows'\n elif 'BITRISE_BUILD_NUMBER' in os.environ:\n platform = 'macos'\n else:\n print('cibuildwheel: Unable to detect platform. cibuildwheel should run on your CI server, '\n 'Travis CI and Appveyor are supported. You can run on your development '\n 'machine using the --platform argument. Check --help output for more '\n 'information.',\n file=sys.stderr)\n exit(2)\n\n output_dir = args.output_dir\n test_command = get_option_from_environment('CIBW_TEST_COMMAND', platform=platform)\n test_requires = get_option_from_environment('CIBW_TEST_REQUIRES', platform=platform, default='').split()\n project_dir = args.project_dir\n before_build = get_option_from_environment('CIBW_BEFORE_BUILD', platform=platform)\n build_verbosity = get_option_from_environment('CIBW_BUILD_VERBOSITY', platform=platform, default='')\n skip_config = os.environ.get('CIBW_SKIP', '')\n environment_config = get_option_from_environment('CIBW_ENVIRONMENT', platform=platform, default='')\n\n try:\n build_verbosity = min(3, max(-3, int(build_verbosity)))\n except ValueError:\n build_verbosity = 0\n\n try:\n environment = parse_environment(environment_config)\n except (EnvironmentParseError, ValueError) as e:\n print('cibuildwheel: Malformed environment option \"%s\"' % environment_config, file=sys.stderr)\n import traceback\n traceback.print_exc(None, sys.stderr)\n exit(2)\n\n skip = BuildSkipper(skip_config)\n\n # Add CIBUILDWHEEL environment variable\n # This needs to be passed on to the docker container in linux.py\n os.environ['CIBUILDWHEEL'] = '1'\n\n try:\n project_setup_py = os.path.join(project_dir, 'setup.py')\n name_output = subprocess.check_output([sys.executable, project_setup_py, '--name'],\n universal_newlines=True)\n # the last line of output is the name\n package_name = name_output.strip().splitlines()[-1]\n except subprocess.CalledProcessError as err:\n if not os.path.exists(project_setup_py):\n print('cibuildwheel: Could not find setup.py at root of project', file=sys.stderr)\n exit(2)\n else:\n print('cibuildwheel: Failed to get name of the package. Command was %s' % err.cmd,\n file=sys.stderr)\n exit(2)\n\n if package_name == '' or package_name == 'UNKNOWN':\n print('cibuildwheel: Invalid package name \"%s\". 
Check your setup.py' % package_name,\n file=sys.stderr)\n exit(2)\n\n build_options = dict(\n project_dir=project_dir,\n package_name=package_name,\n output_dir=output_dir,\n test_command=test_command,\n test_requires=test_requires,\n before_build=before_build,\n build_verbosity=build_verbosity,\n skip=skip,\n environment=environment,\n )\n\n if platform == 'linux':\n manylinux1_x86_64_image = os.environ.get('CIBW_MANYLINUX1_X86_64_IMAGE', None)\n manylinux1_i686_image = os.environ.get('CIBW_MANYLINUX1_I686_IMAGE', None)\n\n build_options.update(\n manylinux1_images={'x86_64': manylinux1_x86_64_image, 'i686': manylinux1_i686_image},\n )\n elif platform == 'macos':\n pass\n elif platform == 'windows':\n pass\n\n print_preamble(platform, build_options)\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if platform == 'linux':\n cibuildwheel.linux.build(**build_options)\n elif platform == 'windows':\n cibuildwheel.windows.build(**build_options)\n elif platform == 'macos':\n cibuildwheel.macos.build(**build_options)\n else:\n raise Exception('Unsupported platform')\n\ndef print_preamble(platform, build_options):\n print(textwrap.dedent('''\n _ _ _ _ _ _ _\n ___|_| |_ _ _|_| |_| |_ _ _| |_ ___ ___| |\n | _| | . | | | | | . | | | | | -_| -_| |\n |___|_|___|___|_|_|___|_____|_|_|___|___|_|\n '''))\n\n print('cibuildwheel version %s\\n' % cibuildwheel.__version__)\n\n\n print('Build options:')\n print(' platform: %r' % platform)\n for option, value in build_options.items():\n print(' %s: %r' % (option, value))\n\n warnings = detect_warnings(platform, build_options)\n if warnings:\n print('\\nWarnings:')\n for warning in warnings:\n print(' ' + warning)\n\n print('\\nHere we go!\\n')\n\ndef detect_warnings(platform, build_options):\n warnings = []\n\n # warn about deprecated {python} and {pip}\n for option_name in ['test_command', 'before_build']:\n option_value = build_options.get(option_name)\n\n if option_value:\n if '{python}' in option_value or '{pip}' in option_value:\n warnings.append(option_name + \": '{python}' and '{pip}' are no longer needed, and will be removed in a future release. Simply use 'python' or 'pip' instead.\")\n\n return warnings\n\n\nif __name__ == '__main__':\n main()\n", "path": "cibuildwheel/__main__.py"}], "after_files": [{"content": "from __future__ import print_function\nimport argparse, os, subprocess, sys, textwrap\n\nimport cibuildwheel\nimport cibuildwheel.linux, cibuildwheel.windows, cibuildwheel.macos\nfrom cibuildwheel.environment import parse_environment, EnvironmentParseError\nfrom cibuildwheel.util import BuildSkipper\n\ndef get_option_from_environment(option_name, platform=None, default=None):\n '''\n Returns an option from the environment, optionally scoped by the platform.\n\n Example:\n get_option_from_environment('CIBW_COLOR', platform='macos')\n\n This will return the value of CIBW_COLOR_MACOS if it exists, otherwise the value of\n CIBW_COLOR.\n '''\n if platform:\n option = os.environ.get('%s_%s' % (option_name, platform.upper()))\n if option is not None:\n return option\n\n return os.environ.get(option_name, default)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Build wheels for all the platforms.',\n epilog=('Most options are supplied via environment variables. '\n 'See https://github.com/joerick/cibuildwheel#options for info.'))\n\n parser.add_argument('--platform',\n choices=['auto', 'linux', 'macos', 'windows'],\n default=os.environ.get('CIBW_PLATFORM', 'auto'),\n help=('Platform to build for. 
For \"linux\" you need docker running, on Mac '\n 'or Linux. For \"macos\", you need a Mac machine, and note that this '\n 'script is going to automatically install MacPython on your system, '\n 'so don\\'t run on your development machine. For \"windows\", you need to '\n 'run in Windows, and it will build and test for all versions of '\n 'Python at C:\\\\PythonXX[-x64]. Default: auto.'))\n parser.add_argument('--output-dir',\n default=os.environ.get('CIBW_OUTPUT_DIR', 'wheelhouse'),\n help='Destination folder for the wheels.')\n parser.add_argument('project_dir',\n default='.',\n nargs='?',\n help=('Path to the project that you want wheels for. Default: the current '\n 'directory.'))\n\n args = parser.parse_args()\n\n if args.platform != 'auto':\n platform = args.platform\n else:\n if os.environ.get('TRAVIS_OS_NAME') == 'linux':\n platform = 'linux'\n elif os.environ.get('TRAVIS_OS_NAME') == 'osx':\n platform = 'macos'\n elif 'APPVEYOR' in os.environ:\n platform = 'windows'\n elif 'BITRISE_BUILD_NUMBER' in os.environ:\n platform = 'macos'\n else:\n print('cibuildwheel: Unable to detect platform. cibuildwheel should run on your CI server, '\n 'Travis CI and Appveyor are supported. You can run on your development '\n 'machine using the --platform argument. Check --help output for more '\n 'information.',\n file=sys.stderr)\n exit(2)\n\n output_dir = args.output_dir\n test_command = get_option_from_environment('CIBW_TEST_COMMAND', platform=platform)\n test_requires = get_option_from_environment('CIBW_TEST_REQUIRES', platform=platform, default='').split()\n project_dir = args.project_dir\n before_build = get_option_from_environment('CIBW_BEFORE_BUILD', platform=platform)\n build_verbosity = get_option_from_environment('CIBW_BUILD_VERBOSITY', platform=platform, default='')\n skip_config = os.environ.get('CIBW_SKIP', '')\n environment_config = get_option_from_environment('CIBW_ENVIRONMENT', platform=platform, default='')\n\n try:\n build_verbosity = min(3, max(-3, int(build_verbosity)))\n except ValueError:\n build_verbosity = 0\n\n try:\n environment = parse_environment(environment_config)\n except (EnvironmentParseError, ValueError) as e:\n print('cibuildwheel: Malformed environment option \"%s\"' % environment_config, file=sys.stderr)\n import traceback\n traceback.print_exc(None, sys.stderr)\n exit(2)\n\n skip = BuildSkipper(skip_config)\n\n # Add CIBUILDWHEEL environment variable\n # This needs to be passed on to the docker container in linux.py\n os.environ['CIBUILDWHEEL'] = '1'\n\n try:\n project_setup_py = os.path.join(project_dir, 'setup.py')\n name_output = subprocess.check_output([sys.executable, project_setup_py, '--name'],\n universal_newlines=True)\n # the last line of output is the name\n package_name = name_output.strip().splitlines()[-1]\n except subprocess.CalledProcessError as err:\n if not os.path.exists(project_setup_py):\n print('cibuildwheel: Could not find setup.py at root of project', file=sys.stderr)\n exit(2)\n else:\n print(err.output)\n print('cibuildwheel: Failed to get name of the package. Command was %s' % err.cmd,\n file=sys.stderr)\n exit(err.returncode)\n\n if package_name == '' or package_name == 'UNKNOWN':\n print('cibuildwheel: Invalid package name \"%s\". 
Check your setup.py' % package_name,\n file=sys.stderr)\n exit(2)\n\n build_options = dict(\n project_dir=project_dir,\n package_name=package_name,\n output_dir=output_dir,\n test_command=test_command,\n test_requires=test_requires,\n before_build=before_build,\n build_verbosity=build_verbosity,\n skip=skip,\n environment=environment,\n )\n\n if platform == 'linux':\n manylinux1_x86_64_image = os.environ.get('CIBW_MANYLINUX1_X86_64_IMAGE', None)\n manylinux1_i686_image = os.environ.get('CIBW_MANYLINUX1_I686_IMAGE', None)\n\n build_options.update(\n manylinux1_images={'x86_64': manylinux1_x86_64_image, 'i686': manylinux1_i686_image},\n )\n elif platform == 'macos':\n pass\n elif platform == 'windows':\n pass\n\n print_preamble(platform, build_options)\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if platform == 'linux':\n cibuildwheel.linux.build(**build_options)\n elif platform == 'windows':\n cibuildwheel.windows.build(**build_options)\n elif platform == 'macos':\n cibuildwheel.macos.build(**build_options)\n else:\n raise Exception('Unsupported platform')\n\ndef print_preamble(platform, build_options):\n print(textwrap.dedent('''\n _ _ _ _ _ _ _\n ___|_| |_ _ _|_| |_| |_ _ _| |_ ___ ___| |\n | _| | . | | | | | . | | | | | -_| -_| |\n |___|_|___|___|_|_|___|_____|_|_|___|___|_|\n '''))\n\n print('cibuildwheel version %s\\n' % cibuildwheel.__version__)\n\n\n print('Build options:')\n print(' platform: %r' % platform)\n for option, value in build_options.items():\n print(' %s: %r' % (option, value))\n\n warnings = detect_warnings(platform, build_options)\n if warnings:\n print('\\nWarnings:')\n for warning in warnings:\n print(' ' + warning)\n\n print('\\nHere we go!\\n')\n\ndef detect_warnings(platform, build_options):\n warnings = []\n\n # warn about deprecated {python} and {pip}\n for option_name in ['test_command', 'before_build']:\n option_value = build_options.get(option_name)\n\n if option_value:\n if '{python}' in option_value or '{pip}' in option_value:\n warnings.append(option_name + \": '{python}' and '{pip}' are no longer needed, and will be removed in a future release. Simply use 'python' or 'pip' instead.\")\n\n return warnings\n\n\nif __name__ == '__main__':\n main()\n", "path": "cibuildwheel/__main__.py"}]} | 2,862 | 175 |
gh_patches_debug_8536 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1648 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Found an embedded parameter "${AWS::Partition}" outside of an "Fn::Sub" at Resources
*cfn-lint version: (`cfn-lint --version`)*
cfn-lint 0.29.5
*Description of issue.*
I have a CloudFormation Template embedded in an AWS Systems Manager Automation Document.
Please provide as much information as possible:
* Template linting issues:
`E1029 Found an embedded parameter "${AWS::Partition}" outside of an "Fn::Sub" at Resources/DNSForwarderSetup/Properties/Content/mainSteps/0/inputs/TemplateBody
ad-3.template:433:13`
This is actually okay; I am using !Sub correctly in an embedded CloudFormation template.
* Please provide a CloudFormation sample that generated the issue.
https://github.com/aws-quickstart/quickstart-microsoft-activedirectory/blob/b071c3def8d9a30fd055571c9603cad10a753f40/templates/ad-3.template#L433
* If present, please add links to the (official) documentation for clarification.
* Validate if the issue still exists with the latest version of `cfn-lint` and/or the latest Spec files
* Feature request:
* Please provide argumentation about the missing feature. Context is key!
This errors if I use !Sub or Fn::Sub: even when the usage is actually correct.
Cfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`)
--- END ISSUE ---
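A standalone sketch of the exclusion idea (not cfn-lint's real rule class): treat `TemplateBody` — the property name that appears in the reported path — as free-form text whose `${...}` occurrences should not trigger E1029, the same way `UserData` or `ZipFile` already are treated:

```python
import re

# Free-form text properties whose "${...}" contents do not require Fn::Sub.
EXCLUDED_PROPERTIES = {
    "UserData", "ZipFile", "Condition", "AWS::CloudFormation::Init",
    "CloudWatchAlarmDefinition", "TopicRulePayload", "BuildSpec",
    "RequestMappingTemplate", "LogFormat",
    "TemplateBody",  # embedded CloudFormation inside an SSM Automation document
}

SUB_PARAMETER = re.compile(r"\$\{[A-Za-z0-9_:.]+\}")


def needs_sub_warning(path, value):
    """Warn only when the value contains ${...} and no step of its path is excluded."""
    if not SUB_PARAMETER.search(value):
        return False
    return not any(step in EXCLUDED_PROPERTIES for step in path)


path = ["Resources", "DNSForwarderSetup", "Properties", "Content",
        "mainSteps", 0, "inputs", "TemplateBody"]
print(needs_sub_warning(path, "${AWS::Partition}"))  # False once TemplateBody is excluded
```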
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/functions/SubNeeded.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 from functools import reduce # pylint: disable=redefined-builtin
6 import re
7 import six
8 from cfnlint.rules import CloudFormationLintRule
9 from cfnlint.rules import RuleMatch
10
11
12 class SubNeeded(CloudFormationLintRule):
13 """Check if a substitution string exists without a substitution function"""
14 id = 'E1029'
15 shortdesc = 'Sub is required if a variable is used in a string'
16 description = 'If a substitution variable exists in a string but isn\'t wrapped with the Fn::Sub function the deployment will fail.'
17 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'
18 tags = ['functions', 'sub']
19
20 # Free-form text properties to exclude from this rule
21 excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',
22 'CloudWatchAlarmDefinition', 'TopicRulePayload', 'BuildSpec',
23 'RequestMappingTemplate', 'LogFormat']
24 api_excludes = ['Uri', 'Body', 'ConnectionId']
25
26
27 # IAM Policy has special variables that don't require !Sub, Check for these
28 # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html
29 # https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html
30 # https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html
31 # https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down
32 # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html
33 resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}',
34 '${aws:TokenIssueTime}', '${aws:principaltype}',
35 '${aws:SecureTransport}', '${aws:SourceIp}',
36 '${aws:UserAgent}', '${aws:userid}',
37 '${aws:username}', '${ec2:SourceInstanceARN}',
38 '${iot:Connection.Thing.ThingName}',
39 '${iot:Connection.Thing.ThingTypeName}',
40 '${iot:Connection.Thing.IsAttached}',
41 '${iot:ClientId}', '${transfer:HomeBucket}',
42 '${transfer:HomeDirectory}', '${transfer:HomeFolder}',
43 '${transfer:UserName}', '${redshift:DbUser}',
44 '${cognito-identity.amazonaws.com:aud}',
45 '${cognito-identity.amazonaws.com:sub}',
46 '${cognito-identity.amazonaws.com:amr}']
47
48 # https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html
49 condition_excludes = [
50 '${redshift:DbUser}',
51 ]
52
53 def __init__(self):
54 """Init"""
55 super(SubNeeded, self).__init__()
56 self.config_definition = {
57 'custom_excludes': {
58 'default': '',
59 'type': 'string'
60 }
61 }
62 self.configure()
63 self.subParameterRegex = re.compile(r'(\$\{[A-Za-z0-9_:\.]+\})')
64
65 def _match_values(self, cfnelem, path):
66 """Recursively search for values matching the searchRegex"""
67 values = []
68 if isinstance(cfnelem, dict):
69 for key in cfnelem:
70 pathprop = path[:]
71 pathprop.append(key)
72 values.extend(self._match_values(cfnelem[key], pathprop))
73 elif isinstance(cfnelem, list):
74 for index, item in enumerate(cfnelem):
75 pathprop = path[:]
76 pathprop.append(index)
77 values.extend(self._match_values(item, pathprop))
78 else:
79 # Leaf node
80 if isinstance(cfnelem, six.string_types): # and re.match(searchRegex, cfnelem):
81 for variable in re.findall(self.subParameterRegex, cfnelem):
82 values.append(path + [variable])
83
84 return values
85
86 def match_values(self, cfn):
87 """
88 Search for values in all parts of the templates that match the searchRegex
89 """
90 results = []
91 results.extend(self._match_values(cfn.template, []))
92 # Globals are removed during a transform. They need to be checked manually
93 results.extend(self._match_values(cfn.template.get('Globals', {}), []))
94 return results
95
96 def _api_exceptions(self, value):
97 """ Key value exceptions """
98 parameter_search = re.compile(r'^\$\{stageVariables\..*\}$')
99 return re.match(parameter_search, value)
100
101 def _variable_custom_excluded(self, value):
102 """ User-defined exceptions for variables, anywhere in the file """
103 custom_excludes = self.config['custom_excludes']
104 if custom_excludes:
105 custom_search = re.compile(custom_excludes)
106 return re.match(custom_search, value)
107 return False
108
109 def match(self, cfn):
110 matches = []
111
112 # Get a list of paths to every leaf node string containing at least one ${parameter}
113 parameter_string_paths = self.match_values(cfn)
114 # We want to search all of the paths to check if each one contains an 'Fn::Sub'
115 for parameter_string_path in parameter_string_paths:
116 if parameter_string_path[0] in ['Parameters']:
117 continue
118 # Exclude the special IAM variables
119 variable = parameter_string_path[-1]
120
121 if 'Resource' in parameter_string_path:
122 if variable in self.resource_excludes:
123 continue
124 if 'NotResource' in parameter_string_path:
125 if variable in self.resource_excludes:
126 continue
127 if 'Condition' in parameter_string_path:
128 if variable in self.condition_excludes:
129 continue
130
131 # Step Function State Machine has a Definition Substitution that allows usage of special variables outside of a !Sub
132 # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stepfunctions-statemachine-definitionsubstitutions.html
133
134 if 'DefinitionString' in parameter_string_path:
135 modified_parameter_string_path = parameter_string_path
136 index = parameter_string_path.index('DefinitionString')
137 modified_parameter_string_path[index] = 'DefinitionSubstitutions'
138 modified_parameter_string_path = modified_parameter_string_path[:index+1]
139 modified_parameter_string_path.append(variable[2:-1])
140 if reduce(lambda c, k: c.get(k, {}), modified_parameter_string_path, cfn.template):
141 continue
142
143 # Exclude variables that match custom exclude filters, if configured
144 # (for third-party tools that pre-process templates before uploading them to AWS)
145 if self._variable_custom_excluded(variable):
146 continue
147
148 # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)
149 if variable.startswith('${!'):
150 continue
151
152 found_sub = False
153 # Does the path contain an 'Fn::Sub'?
154 for step in parameter_string_path:
155 if step in self.api_excludes:
156 if self._api_exceptions(parameter_string_path[-1]):
157 found_sub = True
158 elif step == 'Fn::Sub' or step in self.excludes:
159 found_sub = True
160
161 # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly
162 if not found_sub:
163 # Remove the last item (the variable) to prevent multiple errors on 1 line errors
164 path = parameter_string_path[:-1]
165 message = 'Found an embedded parameter "{}" outside of an "Fn::Sub" at {}'.format(
166 variable, '/'.join(map(str, path)))
167 matches.append(RuleMatch(path, message))
168
169 return matches
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/functions/SubNeeded.py b/src/cfnlint/rules/functions/SubNeeded.py
--- a/src/cfnlint/rules/functions/SubNeeded.py
+++ b/src/cfnlint/rules/functions/SubNeeded.py
@@ -20,7 +20,7 @@
# Free-form text properties to exclude from this rule
excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',
'CloudWatchAlarmDefinition', 'TopicRulePayload', 'BuildSpec',
- 'RequestMappingTemplate', 'LogFormat']
+ 'RequestMappingTemplate', 'LogFormat', 'TemplateBody']
api_excludes = ['Uri', 'Body', 'ConnectionId']
| {"golden_diff": "diff --git a/src/cfnlint/rules/functions/SubNeeded.py b/src/cfnlint/rules/functions/SubNeeded.py\n--- a/src/cfnlint/rules/functions/SubNeeded.py\n+++ b/src/cfnlint/rules/functions/SubNeeded.py\n@@ -20,7 +20,7 @@\n # Free-form text properties to exclude from this rule\n excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',\n 'CloudWatchAlarmDefinition', 'TopicRulePayload', 'BuildSpec',\n- 'RequestMappingTemplate', 'LogFormat']\n+ 'RequestMappingTemplate', 'LogFormat', 'TemplateBody']\n api_excludes = ['Uri', 'Body', 'ConnectionId']\n", "issue": "Found an embedded parameter \"${AWS::Partition}\" outside of an \"Fn::Sub\" at Resources\n*cfn-lint version: (`cfn-lint --version`)*\r\ncfn-lint 0.29.5\r\n\r\n*Description of issue.*\r\nI have a CloudFormation Template embedded into a AWS Systems Manager Automation Document. \r\n\r\nPlease provide as much information as possible:\r\n* Template linting issues:\r\n\r\n`E1029 Found an embedded parameter \"${AWS::Partition}\" outside of an \"Fn::Sub\" at Resources/DNSForwarderSetup/Properties/Content/mainSteps/0/inputs/TemplateBody\r\nad-3.template:433:13`\r\n\r\nThis is actually okay, I am using !Sub correctly in an Embedded CloudFormation.... \r\n\r\n * Please provide a CloudFormation sample that generated the issue.\r\n\r\nhttps://github.com/aws-quickstart/quickstart-microsoft-activedirectory/blob/b071c3def8d9a30fd055571c9603cad10a753f40/templates/ad-3.template#L433\r\n\r\n * If present, please add links to the (official) documentation for clarification.\r\n * Validate if the issue still exists with the latest version of `cfn-lint` and/or the latest Spec files\r\n* Feature request:\r\n * Please provide argumentation about the missing feature. Context is key!\r\nThis errors if I use !Sub or Fn::Sub: when it actually correct.. \r\n\r\n\r\nCfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`)\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom functools import reduce # pylint: disable=redefined-builtin\nimport re\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass SubNeeded(CloudFormationLintRule):\n \"\"\"Check if a substitution string exists without a substitution function\"\"\"\n id = 'E1029'\n shortdesc = 'Sub is required if a variable is used in a string'\n description = 'If a substitution variable exists in a string but isn\\'t wrapped with the Fn::Sub function the deployment will fail.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n # Free-form text properties to exclude from this rule\n excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',\n 'CloudWatchAlarmDefinition', 'TopicRulePayload', 'BuildSpec',\n 'RequestMappingTemplate', 'LogFormat']\n api_excludes = ['Uri', 'Body', 'ConnectionId']\n\n\n # IAM Policy has special variables that don't require !Sub, Check for these\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html\n # https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html\n resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}',\n '${aws:TokenIssueTime}', '${aws:principaltype}',\n '${aws:SecureTransport}', '${aws:SourceIp}',\n '${aws:UserAgent}', '${aws:userid}',\n '${aws:username}', '${ec2:SourceInstanceARN}',\n '${iot:Connection.Thing.ThingName}',\n '${iot:Connection.Thing.ThingTypeName}',\n '${iot:Connection.Thing.IsAttached}',\n '${iot:ClientId}', '${transfer:HomeBucket}',\n '${transfer:HomeDirectory}', '${transfer:HomeFolder}',\n '${transfer:UserName}', '${redshift:DbUser}',\n '${cognito-identity.amazonaws.com:aud}',\n '${cognito-identity.amazonaws.com:sub}',\n '${cognito-identity.amazonaws.com:amr}']\n\n # https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html\n condition_excludes = [\n '${redshift:DbUser}',\n ]\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(SubNeeded, self).__init__()\n self.config_definition = {\n 'custom_excludes': {\n 'default': '',\n 'type': 'string'\n }\n }\n self.configure()\n self.subParameterRegex = re.compile(r'(\\$\\{[A-Za-z0-9_:\\.]+\\})')\n\n def _match_values(self, cfnelem, path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n values = []\n if isinstance(cfnelem, dict):\n for key in cfnelem:\n pathprop = path[:]\n pathprop.append(key)\n values.extend(self._match_values(cfnelem[key], pathprop))\n elif isinstance(cfnelem, list):\n for index, item in enumerate(cfnelem):\n pathprop = path[:]\n pathprop.append(index)\n values.extend(self._match_values(item, pathprop))\n else:\n # Leaf node\n if isinstance(cfnelem, six.string_types): # and re.match(searchRegex, cfnelem):\n for variable in re.findall(self.subParameterRegex, cfnelem):\n values.append(path + [variable])\n\n return values\n\n def match_values(self, cfn):\n \"\"\"\n Search for values in all parts of the templates that match the searchRegex\n \"\"\"\n results = []\n results.extend(self._match_values(cfn.template, []))\n # Globals are removed during a transform. 
They need to be checked manually\n results.extend(self._match_values(cfn.template.get('Globals', {}), []))\n return results\n\n def _api_exceptions(self, value):\n \"\"\" Key value exceptions \"\"\"\n parameter_search = re.compile(r'^\\$\\{stageVariables\\..*\\}$')\n return re.match(parameter_search, value)\n\n def _variable_custom_excluded(self, value):\n \"\"\" User-defined exceptions for variables, anywhere in the file \"\"\"\n custom_excludes = self.config['custom_excludes']\n if custom_excludes:\n custom_search = re.compile(custom_excludes)\n return re.match(custom_search, value)\n return False\n\n def match(self, cfn):\n matches = []\n\n # Get a list of paths to every leaf node string containing at least one ${parameter}\n parameter_string_paths = self.match_values(cfn)\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n for parameter_string_path in parameter_string_paths:\n if parameter_string_path[0] in ['Parameters']:\n continue\n # Exclude the special IAM variables\n variable = parameter_string_path[-1]\n\n if 'Resource' in parameter_string_path:\n if variable in self.resource_excludes:\n continue\n if 'NotResource' in parameter_string_path:\n if variable in self.resource_excludes:\n continue\n if 'Condition' in parameter_string_path:\n if variable in self.condition_excludes:\n continue\n\n # Step Function State Machine has a Definition Substitution that allows usage of special variables outside of a !Sub\n # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stepfunctions-statemachine-definitionsubstitutions.html\n\n if 'DefinitionString' in parameter_string_path:\n modified_parameter_string_path = parameter_string_path\n index = parameter_string_path.index('DefinitionString')\n modified_parameter_string_path[index] = 'DefinitionSubstitutions'\n modified_parameter_string_path = modified_parameter_string_path[:index+1]\n modified_parameter_string_path.append(variable[2:-1])\n if reduce(lambda c, k: c.get(k, {}), modified_parameter_string_path, cfn.template):\n continue\n\n # Exclude variables that match custom exclude filters, if configured\n # (for third-party tools that pre-process templates before uploading them to AWS)\n if self._variable_custom_excluded(variable):\n continue\n\n # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)\n if variable.startswith('${!'):\n continue\n\n found_sub = False\n # Does the path contain an 'Fn::Sub'?\n for step in parameter_string_path:\n if step in self.api_excludes:\n if self._api_exceptions(parameter_string_path[-1]):\n found_sub = True\n elif step == 'Fn::Sub' or step in self.excludes:\n found_sub = True\n\n # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly\n if not found_sub:\n # Remove the last item (the variable) to prevent multiple errors on 1 line errors\n path = parameter_string_path[:-1]\n message = 'Found an embedded parameter \"{}\" outside of an \"Fn::Sub\" at {}'.format(\n variable, '/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n\n return matches\n", "path": "src/cfnlint/rules/functions/SubNeeded.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom functools import reduce # pylint: disable=redefined-builtin\nimport re\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass SubNeeded(CloudFormationLintRule):\n \"\"\"Check if a substitution string exists without a substitution function\"\"\"\n id = 'E1029'\n shortdesc = 'Sub is required if a variable is used in a string'\n description = 'If a substitution variable exists in a string but isn\\'t wrapped with the Fn::Sub function the deployment will fail.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n # Free-form text properties to exclude from this rule\n excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',\n 'CloudWatchAlarmDefinition', 'TopicRulePayload', 'BuildSpec',\n 'RequestMappingTemplate', 'LogFormat', 'TemplateBody']\n api_excludes = ['Uri', 'Body', 'ConnectionId']\n\n\n # IAM Policy has special variables that don't require !Sub, Check for these\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html\n # https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html\n resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}',\n '${aws:TokenIssueTime}', '${aws:principaltype}',\n '${aws:SecureTransport}', '${aws:SourceIp}',\n '${aws:UserAgent}', '${aws:userid}',\n '${aws:username}', '${ec2:SourceInstanceARN}',\n '${iot:Connection.Thing.ThingName}',\n '${iot:Connection.Thing.ThingTypeName}',\n '${iot:Connection.Thing.IsAttached}',\n '${iot:ClientId}', '${transfer:HomeBucket}',\n '${transfer:HomeDirectory}', '${transfer:HomeFolder}',\n '${transfer:UserName}', '${redshift:DbUser}',\n '${cognito-identity.amazonaws.com:aud}',\n '${cognito-identity.amazonaws.com:sub}',\n '${cognito-identity.amazonaws.com:amr}']\n\n # https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html\n condition_excludes = [\n '${redshift:DbUser}',\n ]\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(SubNeeded, self).__init__()\n self.config_definition = {\n 'custom_excludes': {\n 'default': '',\n 'type': 'string'\n }\n }\n self.configure()\n self.subParameterRegex = re.compile(r'(\\$\\{[A-Za-z0-9_:\\.]+\\})')\n\n def _match_values(self, cfnelem, path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n values = []\n if isinstance(cfnelem, dict):\n for key in cfnelem:\n pathprop = path[:]\n pathprop.append(key)\n values.extend(self._match_values(cfnelem[key], pathprop))\n elif isinstance(cfnelem, list):\n for index, item in enumerate(cfnelem):\n pathprop = path[:]\n pathprop.append(index)\n values.extend(self._match_values(item, pathprop))\n else:\n # Leaf node\n if isinstance(cfnelem, six.string_types): # and re.match(searchRegex, cfnelem):\n for variable in re.findall(self.subParameterRegex, cfnelem):\n values.append(path + [variable])\n\n return values\n\n def match_values(self, cfn):\n \"\"\"\n Search for values in all parts of the templates that match the searchRegex\n \"\"\"\n results = []\n results.extend(self._match_values(cfn.template, []))\n # Globals are removed during a 
transform. They need to be checked manually\n results.extend(self._match_values(cfn.template.get('Globals', {}), []))\n return results\n\n def _api_exceptions(self, value):\n \"\"\" Key value exceptions \"\"\"\n parameter_search = re.compile(r'^\\$\\{stageVariables\\..*\\}$')\n return re.match(parameter_search, value)\n\n def _variable_custom_excluded(self, value):\n \"\"\" User-defined exceptions for variables, anywhere in the file \"\"\"\n custom_excludes = self.config['custom_excludes']\n if custom_excludes:\n custom_search = re.compile(custom_excludes)\n return re.match(custom_search, value)\n return False\n\n def match(self, cfn):\n matches = []\n\n # Get a list of paths to every leaf node string containing at least one ${parameter}\n parameter_string_paths = self.match_values(cfn)\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n for parameter_string_path in parameter_string_paths:\n if parameter_string_path[0] in ['Parameters']:\n continue\n # Exclude the special IAM variables\n variable = parameter_string_path[-1]\n\n if 'Resource' in parameter_string_path:\n if variable in self.resource_excludes:\n continue\n if 'NotResource' in parameter_string_path:\n if variable in self.resource_excludes:\n continue\n if 'Condition' in parameter_string_path:\n if variable in self.condition_excludes:\n continue\n\n # Step Function State Machine has a Definition Substitution that allows usage of special variables outside of a !Sub\n # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stepfunctions-statemachine-definitionsubstitutions.html\n\n if 'DefinitionString' in parameter_string_path:\n modified_parameter_string_path = parameter_string_path\n index = parameter_string_path.index('DefinitionString')\n modified_parameter_string_path[index] = 'DefinitionSubstitutions'\n modified_parameter_string_path = modified_parameter_string_path[:index+1]\n modified_parameter_string_path.append(variable[2:-1])\n if reduce(lambda c, k: c.get(k, {}), modified_parameter_string_path, cfn.template):\n continue\n\n # Exclude variables that match custom exclude filters, if configured\n # (for third-party tools that pre-process templates before uploading them to AWS)\n if self._variable_custom_excluded(variable):\n continue\n\n # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)\n if variable.startswith('${!'):\n continue\n\n found_sub = False\n # Does the path contain an 'Fn::Sub'?\n for step in parameter_string_path:\n if step in self.api_excludes:\n if self._api_exceptions(parameter_string_path[-1]):\n found_sub = True\n elif step == 'Fn::Sub' or step in self.excludes:\n found_sub = True\n\n # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly\n if not found_sub:\n # Remove the last item (the variable) to prevent multiple errors on 1 line errors\n path = parameter_string_path[:-1]\n message = 'Found an embedded parameter \"{}\" outside of an \"Fn::Sub\" at {}'.format(\n variable, '/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n\n return matches\n", "path": "src/cfnlint/rules/functions/SubNeeded.py"}]} | 2,703 | 148 |
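To make the one-line cfn-lint fix above concrete, here is a minimal, self-contained sketch — not the real rule class — of the path-based exclusion check that the golden diff leans on. Once `TemplateBody` is in the excludes list, any `${...}` variable found beneath a `TemplateBody` key (such as the template embedded in the SSM automation document from the issue) is treated like an `Fn::Sub` hit and E1029 is no longer raised. The path below mirrors the one from the issue's error message; the helper name is invented for illustration.

```python
# Minimal sketch of SubNeeded's path-based exclusion logic (not the cfn-lint class itself).
EXCLUDES = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',
            'CloudWatchAlarmDefinition', 'TopicRulePayload', 'BuildSpec',
            'RequestMappingTemplate', 'LogFormat', 'TemplateBody']  # <- added by the fix

def is_suppressed(parameter_string_path) -> bool:
    """Return True if a ${...} variable at this path should not raise E1029."""
    return any(step == 'Fn::Sub' or step in EXCLUDES for step in parameter_string_path)

# Path reported in the issue: Resources/DNSForwarderSetup/Properties/Content/mainSteps/0/inputs/TemplateBody
path = ['Resources', 'DNSForwarderSetup', 'Properties', 'Content',
        'mainSteps', 0, 'inputs', 'TemplateBody', '${AWS::Partition}']
assert is_suppressed(path)  # no false positive once TemplateBody is excluded
```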
gh_patches_debug_15245 | rasdani/github-patches | git_diff | kornia__kornia-2232 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cleanup on setup metadata
_Originally posted by @johnnv1 in https://github.com/kornia/kornia/pull/2225#discussion_r1117693700_
-----
right now I think we have some dead lines in the setup, for example:
- https://github.com/kornia/kornia/blob/master/setup.py#L16
- https://github.com/kornia/kornia/blob/master/setup.cfg#L57
- This isn't used anymore https://github.com/kornia/kornia/tree/master/packaging
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kornia/__init__.py`
Content:
```
1 # NOTE: kornia filters and geometry must go first since are the core of the library
2 # and by changing the import order you might get into a circular dependencies issue.
3 from . import filters
4 from . import geometry
5 from . import grad_estimator
6
7 # import the other modules for convenience
8 from . import augmentation, color, contrib, core, enhance, feature, io, losses, metrics, morphology, tracking, utils, x
9
10 # NOTE: we are going to expose to top level very few things
11 from kornia.constants import pi
12 from kornia.testing import xla_is_available
13 from kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image
14
15 # Version variable
16 import sys
17
18 if sys.version_info >= (3, 8): # pragma: >=3.8 cover
19 import importlib.metadata as importlib_metadata
20 else: # pragma: <3.8 cover
21 import importlib_metadata
22
23 __version__ = importlib_metadata.version('kornia')
24
```
Path: `kornia/utils/_compat.py`
Content:
```
1 from typing import TYPE_CHECKING, Callable, ContextManager, List, Optional, Tuple, TypeVar
2
3 import torch
4 from torch import Tensor
5
6 from packaging import version
7
8
9 def torch_version() -> str:
10 """Parse the `torch.__version__` variable and removes +cu*/cpu."""
11 return torch.__version__.split('+')[0]
12
13
14 def torch_version_lt(major: int, minor: int, patch: int) -> bool:
15 _version = version.parse(torch_version())
16 return _version < version.parse(f"{major}.{minor}.{patch}")
17
18
19 def torch_version_le(major: int, minor: int, patch: int) -> bool:
20 _version = version.parse(torch_version())
21 return _version <= version.parse(f"{major}.{minor}.{patch}")
22
23
24 def torch_version_ge(major: int, minor: int, patch: Optional[int] = None) -> bool:
25 _version = version.parse(torch_version())
26 if patch is None:
27 return _version >= version.parse(f"{major}.{minor}")
28 else:
29 return _version >= version.parse(f"{major}.{minor}.{patch}")
30
31
32 if TYPE_CHECKING:
33 # TODO: remove this branch when kornia relies on torch >= 1.10.0
34 def torch_meshgrid(tensors: List[Tensor], indexing: Optional[str] = None) -> Tuple[Tensor, ...]:
35 ...
36
37 else:
38 if torch_version_ge(1, 10, 0):
39
40 def torch_meshgrid(tensors: List[Tensor], indexing: str):
41 return torch.meshgrid(tensors, indexing=indexing)
42
43 else:
44 # TODO: remove this branch when kornia relies on torch >= 1.10.0
45 def torch_meshgrid(tensors: List[Tensor], indexing: str):
46 return torch.meshgrid(tensors)
47
48
49 if TYPE_CHECKING:
50 # TODO: remove this branch when kornia relies on torch >= 1.10.0
51 _T = TypeVar('_T')
52 torch_inference_mode: Callable[..., ContextManager[_T]]
53 else:
54 if torch_version_ge(1, 10, 0):
55 torch_inference_mode = torch.inference_mode
56 else:
57 # TODO: remove this branch when kornia relies on torch >= 1.10.0
58 torch_inference_mode = torch.no_grad
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kornia/__init__.py b/kornia/__init__.py
--- a/kornia/__init__.py
+++ b/kornia/__init__.py
@@ -13,11 +13,4 @@
from kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image
# Version variable
-import sys
-
-if sys.version_info >= (3, 8): # pragma: >=3.8 cover
- import importlib.metadata as importlib_metadata
-else: # pragma: <3.8 cover
- import importlib_metadata
-
-__version__ = importlib_metadata.version('kornia')
+__version__ = "0.6.11.dev"
diff --git a/kornia/utils/_compat.py b/kornia/utils/_compat.py
--- a/kornia/utils/_compat.py
+++ b/kornia/utils/_compat.py
@@ -1,9 +1,8 @@
from typing import TYPE_CHECKING, Callable, ContextManager, List, Optional, Tuple, TypeVar
import torch
-from torch import Tensor
-
from packaging import version
+from torch import Tensor
def torch_version() -> str:
| {"golden_diff": "diff --git a/kornia/__init__.py b/kornia/__init__.py\n--- a/kornia/__init__.py\n+++ b/kornia/__init__.py\n@@ -13,11 +13,4 @@\n from kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image\n \n # Version variable\n-import sys\n-\n-if sys.version_info >= (3, 8): # pragma: >=3.8 cover\n- import importlib.metadata as importlib_metadata\n-else: # pragma: <3.8 cover\n- import importlib_metadata\n-\n-__version__ = importlib_metadata.version('kornia')\n+__version__ = \"0.6.11.dev\"\ndiff --git a/kornia/utils/_compat.py b/kornia/utils/_compat.py\n--- a/kornia/utils/_compat.py\n+++ b/kornia/utils/_compat.py\n@@ -1,9 +1,8 @@\n from typing import TYPE_CHECKING, Callable, ContextManager, List, Optional, Tuple, TypeVar\n \n import torch\n-from torch import Tensor\n-\n from packaging import version\n+from torch import Tensor\n \n \n def torch_version() -> str:\n", "issue": "Cleanup on setup metadata\n_Originally posted by @johnnv1 in https://github.com/kornia/kornia/pull/2225#discussion_r1117693700_\r\n\r\n\r\n-----\r\nright now I think we have some deadlines in the setup, for example:\r\n- https://github.com/kornia/kornia/blob/master/setup.py#L16\r\n- https://github.com/kornia/kornia/blob/master/setup.cfg#L57\r\n- This isn't used anymore https://github.com/kornia/kornia/tree/master/packaging \n", "before_files": [{"content": "# NOTE: kornia filters and geometry must go first since are the core of the library\n# and by changing the import order you might get into a circular dependencies issue.\nfrom . import filters\nfrom . import geometry\nfrom . import grad_estimator\n\n# import the other modules for convenience\nfrom . import augmentation, color, contrib, core, enhance, feature, io, losses, metrics, morphology, tracking, utils, x\n\n# NOTE: we are going to expose to top level very few things\nfrom kornia.constants import pi\nfrom kornia.testing import xla_is_available\nfrom kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image\n\n# Version variable\nimport sys\n\nif sys.version_info >= (3, 8): # pragma: >=3.8 cover\n import importlib.metadata as importlib_metadata\nelse: # pragma: <3.8 cover\n import importlib_metadata\n\n__version__ = importlib_metadata.version('kornia')\n", "path": "kornia/__init__.py"}, {"content": "from typing import TYPE_CHECKING, Callable, ContextManager, List, Optional, Tuple, TypeVar\n\nimport torch\nfrom torch import Tensor\n\nfrom packaging import version\n\n\ndef torch_version() -> str:\n \"\"\"Parse the `torch.__version__` variable and removes +cu*/cpu.\"\"\"\n return torch.__version__.split('+')[0]\n\n\ndef torch_version_lt(major: int, minor: int, patch: int) -> bool:\n _version = version.parse(torch_version())\n return _version < version.parse(f\"{major}.{minor}.{patch}\")\n\n\ndef torch_version_le(major: int, minor: int, patch: int) -> bool:\n _version = version.parse(torch_version())\n return _version <= version.parse(f\"{major}.{minor}.{patch}\")\n\n\ndef torch_version_ge(major: int, minor: int, patch: Optional[int] = None) -> bool:\n _version = version.parse(torch_version())\n if patch is None:\n return _version >= version.parse(f\"{major}.{minor}\")\n else:\n return _version >= version.parse(f\"{major}.{minor}.{patch}\")\n\n\nif TYPE_CHECKING:\n # TODO: remove this branch when kornia relies on torch >= 1.10.0\n def torch_meshgrid(tensors: List[Tensor], indexing: Optional[str] = None) -> Tuple[Tensor, ...]:\n ...\n\nelse:\n if torch_version_ge(1, 10, 0):\n\n def 
torch_meshgrid(tensors: List[Tensor], indexing: str):\n return torch.meshgrid(tensors, indexing=indexing)\n\n else:\n # TODO: remove this branch when kornia relies on torch >= 1.10.0\n def torch_meshgrid(tensors: List[Tensor], indexing: str):\n return torch.meshgrid(tensors)\n\n\nif TYPE_CHECKING:\n # TODO: remove this branch when kornia relies on torch >= 1.10.0\n _T = TypeVar('_T')\n torch_inference_mode: Callable[..., ContextManager[_T]]\nelse:\n if torch_version_ge(1, 10, 0):\n torch_inference_mode = torch.inference_mode\n else:\n # TODO: remove this branch when kornia relies on torch >= 1.10.0\n torch_inference_mode = torch.no_grad\n", "path": "kornia/utils/_compat.py"}], "after_files": [{"content": "# NOTE: kornia filters and geometry must go first since are the core of the library\n# and by changing the import order you might get into a circular dependencies issue.\nfrom . import filters\nfrom . import geometry\nfrom . import grad_estimator\n\n# import the other modules for convenience\nfrom . import augmentation, color, contrib, core, enhance, feature, io, losses, metrics, morphology, tracking, utils, x\n\n# NOTE: we are going to expose to top level very few things\nfrom kornia.constants import pi\nfrom kornia.testing import xla_is_available\nfrom kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image\n\n# Version variable\n__version__ = \"0.6.11.dev\"\n", "path": "kornia/__init__.py"}, {"content": "from typing import TYPE_CHECKING, Callable, ContextManager, List, Optional, Tuple, TypeVar\n\nimport torch\nfrom packaging import version\nfrom torch import Tensor\n\n\ndef torch_version() -> str:\n \"\"\"Parse the `torch.__version__` variable and removes +cu*/cpu.\"\"\"\n return torch.__version__.split('+')[0]\n\n\ndef torch_version_lt(major: int, minor: int, patch: int) -> bool:\n _version = version.parse(torch_version())\n return _version < version.parse(f\"{major}.{minor}.{patch}\")\n\n\ndef torch_version_le(major: int, minor: int, patch: int) -> bool:\n _version = version.parse(torch_version())\n return _version <= version.parse(f\"{major}.{minor}.{patch}\")\n\n\ndef torch_version_ge(major: int, minor: int, patch: Optional[int] = None) -> bool:\n _version = version.parse(torch_version())\n if patch is None:\n return _version >= version.parse(f\"{major}.{minor}\")\n else:\n return _version >= version.parse(f\"{major}.{minor}.{patch}\")\n\n\nif TYPE_CHECKING:\n # TODO: remove this branch when kornia relies on torch >= 1.10.0\n def torch_meshgrid(tensors: List[Tensor], indexing: Optional[str] = None) -> Tuple[Tensor, ...]:\n ...\n\nelse:\n if torch_version_ge(1, 10, 0):\n\n def torch_meshgrid(tensors: List[Tensor], indexing: str):\n return torch.meshgrid(tensors, indexing=indexing)\n\n else:\n # TODO: remove this branch when kornia relies on torch >= 1.10.0\n def torch_meshgrid(tensors: List[Tensor], indexing: str):\n return torch.meshgrid(tensors)\n\n\nif TYPE_CHECKING:\n # TODO: remove this branch when kornia relies on torch >= 1.10.0\n _T = TypeVar('_T')\n torch_inference_mode: Callable[..., ContextManager[_T]]\nelse:\n if torch_version_ge(1, 10, 0):\n torch_inference_mode = torch.inference_mode\n else:\n # TODO: remove this branch when kornia relies on torch >= 1.10.0\n torch_inference_mode = torch.no_grad\n", "path": "kornia/utils/_compat.py"}]} | 1,284 | 265 |
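One note on the kornia change above: the `importlib_metadata.version('kornia')` lookup that the golden diff removes only succeeds when a `kornia` distribution is actually installed; in a bare source checkout it raises `PackageNotFoundError`, which is presumably part of the motivation for inlining the version string. A small, hypothetical fallback pattern (standard library only, Python 3.8+) illustrating that failure mode:

```python
import importlib.metadata as importlib_metadata

def resolve_version(dist_name: str, fallback: str) -> str:
    """Prefer installed distribution metadata, fall back to a hard-coded string."""
    try:
        return importlib_metadata.version(dist_name)
    except importlib_metadata.PackageNotFoundError:
        # e.g. running from a source tree that was never pip-installed
        return fallback

print(resolve_version("kornia", "0.6.11.dev"))
```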
gh_patches_debug_37891 | rasdani/github-patches | git_diff | feast-dev__feast-536 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Extend feature set and/or feature metadata
This issue tracks the addition of new fields to the current feature set specification that allow a user to add metadata to either the feature set or features. These fields are optional and are intended to provide users with the flexibility to include feature set level or feature level information.\r\n\r\nThe current proposal is to only add a single string field called `description` to `FeatureSpec`\n
The current proposal is to only add a single string field called `description` to `FeatureSpec`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/loaders/ingest.py`
Content:
```
1 import logging
2 from functools import partial
3 from multiprocessing import Pool
4 from typing import Iterable, List
5
6 import pandas as pd
7 import pyarrow.parquet as pq
8
9 from feast.constants import DATETIME_COLUMN
10 from feast.feature_set import FeatureSet
11 from feast.type_map import (
12 pa_column_to_proto_column,
13 pa_column_to_timestamp_proto_column,
14 )
15 from feast.types import Field_pb2 as FieldProto
16 from feast.types.FeatureRow_pb2 import FeatureRow
17
18 _logger = logging.getLogger(__name__)
19
20 GRPC_CONNECTION_TIMEOUT_DEFAULT = 3 # type: int
21 GRPC_CONNECTION_TIMEOUT_APPLY = 300 # type: int
22 FEAST_SERVING_URL_ENV_KEY = "FEAST_SERVING_URL" # type: str
23 FEAST_CORE_URL_ENV_KEY = "FEAST_CORE_URL" # type: str
24 BATCH_FEATURE_REQUEST_WAIT_TIME_SECONDS = 300
25 KAFKA_CHUNK_PRODUCTION_TIMEOUT = 120 # type: int
26
27
28 def _encode_pa_tables(file: str, fs: FeatureSet, row_group_idx: int) -> List[bytes]:
29 """
30 Helper function to encode a PyArrow table(s) read from parquet file(s) into
31 FeatureRows.
32
33 This function accepts a list of file directory pointing to many parquet
34 files. All parquet files must have the same schema.
35
36 Each parquet file will be read into as a table and encoded into FeatureRows
37 using a pool of max_workers workers.
38
39 Args:
40 file (str):
41 File directory of all the parquet file to encode.
42 Parquet file must have more than one row group.
43
44 fs (feast.feature_set.FeatureSet):
45 FeatureSet describing parquet files.
46
47 row_group_idx(int):
48 Row group index to read and encode into byte like FeatureRow
49 protobuf objects.
50
51 Returns:
52 List[bytes]:
53 List of byte encoded FeatureRows from the parquet file.
54 """
55 pq_file = pq.ParquetFile(file)
56 # Read parquet file as a PyArrow table
57 table = pq_file.read_row_group(row_group_idx)
58
59 # Add datetime column
60 datetime_col = pa_column_to_timestamp_proto_column(table.column(DATETIME_COLUMN))
61
62 # Preprocess the columns by converting all its values to Proto values
63 proto_columns = {
64 field_name: pa_column_to_proto_column(field.dtype, table.column(field_name))
65 for field_name, field in fs.fields.items()
66 }
67
68 feature_set = f"{fs.project}/{fs.name}:{fs.version}"
69
70 # List to store result
71 feature_rows = []
72
73 # Loop optimization declaration(s)
74 field = FieldProto.Field
75 proto_items = proto_columns.items()
76 append = feature_rows.append
77
78 # Iterate through the rows
79 for row_idx in range(table.num_rows):
80 feature_row = FeatureRow(
81 event_timestamp=datetime_col[row_idx], feature_set=feature_set
82 )
83 # Loop optimization declaration
84 ext = feature_row.fields.extend
85
86 # Insert field from each column
87 for k, v in proto_items:
88 ext([field(name=k, value=v[row_idx])])
89
90 # Append FeatureRow in byte string form
91 append(feature_row.SerializeToString())
92
93 return feature_rows
94
95
96 def get_feature_row_chunks(
97 file: str, row_groups: List[int], fs: FeatureSet, max_workers: int
98 ) -> Iterable[List[bytes]]:
99 """
100 Iterator function to encode a PyArrow table read from a parquet file to
101 FeatureRow(s).
102
103 Args:
104 file (str):
105 File directory of the parquet file. The parquet file must have more
106 than one row group.
107
108 row_groups (List[int]):
109 Specific row group indexes to be read and transformed in the parquet
110 file.
111
112 fs (feast.feature_set.FeatureSet):
113 FeatureSet describing parquet files.
114
115 max_workers (int):
116 Maximum number of workers to spawn.
117
118 Returns:
119 Iterable[List[bytes]]:
120 Iterable list of byte encoded FeatureRow(s).
121 """
122
123 pool = Pool(max_workers)
124 func = partial(_encode_pa_tables, file, fs)
125 for chunk in pool.imap(func, row_groups):
126 yield chunk
127 return
128
129
130 def validate_dataframe(dataframe: pd.DataFrame, feature_set: FeatureSet):
131 if "datetime" not in dataframe.columns:
132 raise ValueError(
133 f'Dataframe does not contain entity "datetime" in columns {dataframe.columns}'
134 )
135
136 for entity in feature_set.entities:
137 if entity.name not in dataframe.columns:
138 raise ValueError(
139 f"Dataframe does not contain entity {entity.name} in columns {dataframe.columns}"
140 )
141
142 for feature in feature_set.features:
143 if feature.name not in dataframe.columns:
144 raise ValueError(
145 f"Dataframe does not contain feature {feature.name} in columns {dataframe.columns}"
146 )
147
```
Path: `sdk/python/feast/feature.py`
Content:
```
1 # Copyright 2019 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from feast.core.FeatureSet_pb2 import FeatureSpec as FeatureProto
16 from feast.field import Field
17 from feast.types import Value_pb2 as ValueTypeProto
18 from feast.value_type import ValueType
19
20
21 class Feature(Field):
22 """Feature field type"""
23
24 def to_proto(self) -> FeatureProto:
25 """Converts Feature object to its Protocol Buffer representation"""
26 value_type = ValueTypeProto.ValueType.Enum.Value(self.dtype.name)
27 return FeatureProto(
28 name=self.name,
29 value_type=value_type,
30 presence=self.presence,
31 group_presence=self.group_presence,
32 shape=self.shape,
33 value_count=self.value_count,
34 domain=self.domain,
35 int_domain=self.int_domain,
36 float_domain=self.float_domain,
37 string_domain=self.string_domain,
38 bool_domain=self.bool_domain,
39 struct_domain=self.struct_domain,
40 natural_language_domain=self.natural_language_domain,
41 image_domain=self.image_domain,
42 mid_domain=self.mid_domain,
43 url_domain=self.url_domain,
44 time_domain=self.time_domain,
45 time_of_day_domain=self.time_of_day_domain,
46 )
47
48 @classmethod
49 def from_proto(cls, feature_proto: FeatureProto):
50 """
51
52 Args:
53 feature_proto: FeatureSpec protobuf object
54
55 Returns:
56 Feature object
57 """
58 feature = cls(
59 name=feature_proto.name, dtype=ValueType(feature_proto.value_type)
60 )
61 feature.update_presence_constraints(feature_proto)
62 feature.update_shape_type(feature_proto)
63 feature.update_domain_info(feature_proto)
64 return feature
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/feast/feature.py b/sdk/python/feast/feature.py
--- a/sdk/python/feast/feature.py
+++ b/sdk/python/feast/feature.py
@@ -56,7 +56,7 @@
Feature object
"""
feature = cls(
- name=feature_proto.name, dtype=ValueType(feature_proto.value_type)
+ name=feature_proto.name, dtype=ValueType(feature_proto.value_type),
)
feature.update_presence_constraints(feature_proto)
feature.update_shape_type(feature_proto)
diff --git a/sdk/python/feast/loaders/ingest.py b/sdk/python/feast/loaders/ingest.py
--- a/sdk/python/feast/loaders/ingest.py
+++ b/sdk/python/feast/loaders/ingest.py
@@ -25,7 +25,9 @@
KAFKA_CHUNK_PRODUCTION_TIMEOUT = 120 # type: int
-def _encode_pa_tables(file: str, fs: FeatureSet, row_group_idx: int) -> List[bytes]:
+def _encode_pa_tables(
+ file: str, feature_set: str, fields: dict, row_group_idx: int
+) -> List[bytes]:
"""
Helper function to encode a PyArrow table(s) read from parquet file(s) into
FeatureRows.
@@ -41,8 +43,11 @@
File directory of all the parquet file to encode.
Parquet file must have more than one row group.
- fs (feast.feature_set.FeatureSet):
- FeatureSet describing parquet files.
+ feature_set (str):
+ Feature set reference in the format f"{project}/{name}:{version}".
+
+ fields (dict[str, enum.Enum.ValueType]):
+ A mapping of field names to their value types.
row_group_idx(int):
Row group index to read and encode into byte like FeatureRow
@@ -61,12 +66,10 @@
# Preprocess the columns by converting all its values to Proto values
proto_columns = {
- field_name: pa_column_to_proto_column(field.dtype, table.column(field_name))
- for field_name, field in fs.fields.items()
+ field_name: pa_column_to_proto_column(dtype, table.column(field_name))
+ for field_name, dtype in fields.items()
}
- feature_set = f"{fs.project}/{fs.name}:{fs.version}"
-
# List to store result
feature_rows = []
@@ -120,8 +123,12 @@
Iterable list of byte encoded FeatureRow(s).
"""
+ feature_set = f"{fs.project}/{fs.name}:{fs.version}"
+
+ field_map = {field.name: field.dtype for field in fs.fields.values()}
+
pool = Pool(max_workers)
- func = partial(_encode_pa_tables, file, fs)
+ func = partial(_encode_pa_tables, file, feature_set, field_map)
for chunk in pool.imap(func, row_groups):
yield chunk
return
| {"golden_diff": "diff --git a/sdk/python/feast/feature.py b/sdk/python/feast/feature.py\n--- a/sdk/python/feast/feature.py\n+++ b/sdk/python/feast/feature.py\n@@ -56,7 +56,7 @@\n Feature object\n \"\"\"\n feature = cls(\n- name=feature_proto.name, dtype=ValueType(feature_proto.value_type)\n+ name=feature_proto.name, dtype=ValueType(feature_proto.value_type),\n )\n feature.update_presence_constraints(feature_proto)\n feature.update_shape_type(feature_proto)\ndiff --git a/sdk/python/feast/loaders/ingest.py b/sdk/python/feast/loaders/ingest.py\n--- a/sdk/python/feast/loaders/ingest.py\n+++ b/sdk/python/feast/loaders/ingest.py\n@@ -25,7 +25,9 @@\n KAFKA_CHUNK_PRODUCTION_TIMEOUT = 120 # type: int\n \n \n-def _encode_pa_tables(file: str, fs: FeatureSet, row_group_idx: int) -> List[bytes]:\n+def _encode_pa_tables(\n+ file: str, feature_set: str, fields: dict, row_group_idx: int\n+) -> List[bytes]:\n \"\"\"\n Helper function to encode a PyArrow table(s) read from parquet file(s) into\n FeatureRows.\n@@ -41,8 +43,11 @@\n File directory of all the parquet file to encode.\n Parquet file must have more than one row group.\n \n- fs (feast.feature_set.FeatureSet):\n- FeatureSet describing parquet files.\n+ feature_set (str):\n+ Feature set reference in the format f\"{project}/{name}:{version}\".\n+\n+ fields (dict[str, enum.Enum.ValueType]):\n+ A mapping of field names to their value types.\n \n row_group_idx(int):\n Row group index to read and encode into byte like FeatureRow\n@@ -61,12 +66,10 @@\n \n # Preprocess the columns by converting all its values to Proto values\n proto_columns = {\n- field_name: pa_column_to_proto_column(field.dtype, table.column(field_name))\n- for field_name, field in fs.fields.items()\n+ field_name: pa_column_to_proto_column(dtype, table.column(field_name))\n+ for field_name, dtype in fields.items()\n }\n \n- feature_set = f\"{fs.project}/{fs.name}:{fs.version}\"\n-\n # List to store result\n feature_rows = []\n \n@@ -120,8 +123,12 @@\n Iterable list of byte encoded FeatureRow(s).\n \"\"\"\n \n+ feature_set = f\"{fs.project}/{fs.name}:{fs.version}\"\n+\n+ field_map = {field.name: field.dtype for field in fs.fields.values()}\n+\n pool = Pool(max_workers)\n- func = partial(_encode_pa_tables, file, fs)\n+ func = partial(_encode_pa_tables, file, feature_set, field_map)\n for chunk in pool.imap(func, row_groups):\n yield chunk\n return\n", "issue": "Extend feature set and/or feature metadata\nThis issue tracks the addition of new fields to the current feature set specification that allow a user to add metadata to either the feature set or features. 
These fields are optional and are intended to provide users with the flexibility to include feature level or feature level information.\r\n\r\nThe current proposal is to only add a single string field called `description` to `FeatureSpec`\n", "before_files": [{"content": "import logging\nfrom functools import partial\nfrom multiprocessing import Pool\nfrom typing import Iterable, List\n\nimport pandas as pd\nimport pyarrow.parquet as pq\n\nfrom feast.constants import DATETIME_COLUMN\nfrom feast.feature_set import FeatureSet\nfrom feast.type_map import (\n pa_column_to_proto_column,\n pa_column_to_timestamp_proto_column,\n)\nfrom feast.types import Field_pb2 as FieldProto\nfrom feast.types.FeatureRow_pb2 import FeatureRow\n\n_logger = logging.getLogger(__name__)\n\nGRPC_CONNECTION_TIMEOUT_DEFAULT = 3 # type: int\nGRPC_CONNECTION_TIMEOUT_APPLY = 300 # type: int\nFEAST_SERVING_URL_ENV_KEY = \"FEAST_SERVING_URL\" # type: str\nFEAST_CORE_URL_ENV_KEY = \"FEAST_CORE_URL\" # type: str\nBATCH_FEATURE_REQUEST_WAIT_TIME_SECONDS = 300\nKAFKA_CHUNK_PRODUCTION_TIMEOUT = 120 # type: int\n\n\ndef _encode_pa_tables(file: str, fs: FeatureSet, row_group_idx: int) -> List[bytes]:\n \"\"\"\n Helper function to encode a PyArrow table(s) read from parquet file(s) into\n FeatureRows.\n\n This function accepts a list of file directory pointing to many parquet\n files. All parquet files must have the same schema.\n\n Each parquet file will be read into as a table and encoded into FeatureRows\n using a pool of max_workers workers.\n\n Args:\n file (str):\n File directory of all the parquet file to encode.\n Parquet file must have more than one row group.\n\n fs (feast.feature_set.FeatureSet):\n FeatureSet describing parquet files.\n\n row_group_idx(int):\n Row group index to read and encode into byte like FeatureRow\n protobuf objects.\n\n Returns:\n List[bytes]:\n List of byte encoded FeatureRows from the parquet file.\n \"\"\"\n pq_file = pq.ParquetFile(file)\n # Read parquet file as a PyArrow table\n table = pq_file.read_row_group(row_group_idx)\n\n # Add datetime column\n datetime_col = pa_column_to_timestamp_proto_column(table.column(DATETIME_COLUMN))\n\n # Preprocess the columns by converting all its values to Proto values\n proto_columns = {\n field_name: pa_column_to_proto_column(field.dtype, table.column(field_name))\n for field_name, field in fs.fields.items()\n }\n\n feature_set = f\"{fs.project}/{fs.name}:{fs.version}\"\n\n # List to store result\n feature_rows = []\n\n # Loop optimization declaration(s)\n field = FieldProto.Field\n proto_items = proto_columns.items()\n append = feature_rows.append\n\n # Iterate through the rows\n for row_idx in range(table.num_rows):\n feature_row = FeatureRow(\n event_timestamp=datetime_col[row_idx], feature_set=feature_set\n )\n # Loop optimization declaration\n ext = feature_row.fields.extend\n\n # Insert field from each column\n for k, v in proto_items:\n ext([field(name=k, value=v[row_idx])])\n\n # Append FeatureRow in byte string form\n append(feature_row.SerializeToString())\n\n return feature_rows\n\n\ndef get_feature_row_chunks(\n file: str, row_groups: List[int], fs: FeatureSet, max_workers: int\n) -> Iterable[List[bytes]]:\n \"\"\"\n Iterator function to encode a PyArrow table read from a parquet file to\n FeatureRow(s).\n\n Args:\n file (str):\n File directory of the parquet file. 
The parquet file must have more\n than one row group.\n\n row_groups (List[int]):\n Specific row group indexes to be read and transformed in the parquet\n file.\n\n fs (feast.feature_set.FeatureSet):\n FeatureSet describing parquet files.\n\n max_workers (int):\n Maximum number of workers to spawn.\n\n Returns:\n Iterable[List[bytes]]:\n Iterable list of byte encoded FeatureRow(s).\n \"\"\"\n\n pool = Pool(max_workers)\n func = partial(_encode_pa_tables, file, fs)\n for chunk in pool.imap(func, row_groups):\n yield chunk\n return\n\n\ndef validate_dataframe(dataframe: pd.DataFrame, feature_set: FeatureSet):\n if \"datetime\" not in dataframe.columns:\n raise ValueError(\n f'Dataframe does not contain entity \"datetime\" in columns {dataframe.columns}'\n )\n\n for entity in feature_set.entities:\n if entity.name not in dataframe.columns:\n raise ValueError(\n f\"Dataframe does not contain entity {entity.name} in columns {dataframe.columns}\"\n )\n\n for feature in feature_set.features:\n if feature.name not in dataframe.columns:\n raise ValueError(\n f\"Dataframe does not contain feature {feature.name} in columns {dataframe.columns}\"\n )\n", "path": "sdk/python/feast/loaders/ingest.py"}, {"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom feast.core.FeatureSet_pb2 import FeatureSpec as FeatureProto\nfrom feast.field import Field\nfrom feast.types import Value_pb2 as ValueTypeProto\nfrom feast.value_type import ValueType\n\n\nclass Feature(Field):\n \"\"\"Feature field type\"\"\"\n\n def to_proto(self) -> FeatureProto:\n \"\"\"Converts Feature object to its Protocol Buffer representation\"\"\"\n value_type = ValueTypeProto.ValueType.Enum.Value(self.dtype.name)\n return FeatureProto(\n name=self.name,\n value_type=value_type,\n presence=self.presence,\n group_presence=self.group_presence,\n shape=self.shape,\n value_count=self.value_count,\n domain=self.domain,\n int_domain=self.int_domain,\n float_domain=self.float_domain,\n string_domain=self.string_domain,\n bool_domain=self.bool_domain,\n struct_domain=self.struct_domain,\n natural_language_domain=self.natural_language_domain,\n image_domain=self.image_domain,\n mid_domain=self.mid_domain,\n url_domain=self.url_domain,\n time_domain=self.time_domain,\n time_of_day_domain=self.time_of_day_domain,\n )\n\n @classmethod\n def from_proto(cls, feature_proto: FeatureProto):\n \"\"\"\n\n Args:\n feature_proto: FeatureSpec protobuf object\n\n Returns:\n Feature object\n \"\"\"\n feature = cls(\n name=feature_proto.name, dtype=ValueType(feature_proto.value_type)\n )\n feature.update_presence_constraints(feature_proto)\n feature.update_shape_type(feature_proto)\n feature.update_domain_info(feature_proto)\n return feature\n", "path": "sdk/python/feast/feature.py"}], "after_files": [{"content": "import logging\nfrom functools import partial\nfrom multiprocessing import Pool\nfrom typing import Iterable, List\n\nimport pandas as pd\nimport pyarrow.parquet as pq\n\nfrom feast.constants import 
DATETIME_COLUMN\nfrom feast.feature_set import FeatureSet\nfrom feast.type_map import (\n pa_column_to_proto_column,\n pa_column_to_timestamp_proto_column,\n)\nfrom feast.types import Field_pb2 as FieldProto\nfrom feast.types.FeatureRow_pb2 import FeatureRow\n\n_logger = logging.getLogger(__name__)\n\nGRPC_CONNECTION_TIMEOUT_DEFAULT = 3 # type: int\nGRPC_CONNECTION_TIMEOUT_APPLY = 300 # type: int\nFEAST_SERVING_URL_ENV_KEY = \"FEAST_SERVING_URL\" # type: str\nFEAST_CORE_URL_ENV_KEY = \"FEAST_CORE_URL\" # type: str\nBATCH_FEATURE_REQUEST_WAIT_TIME_SECONDS = 300\nKAFKA_CHUNK_PRODUCTION_TIMEOUT = 120 # type: int\n\n\ndef _encode_pa_tables(\n file: str, feature_set: str, fields: dict, row_group_idx: int\n) -> List[bytes]:\n \"\"\"\n Helper function to encode a PyArrow table(s) read from parquet file(s) into\n FeatureRows.\n\n This function accepts a list of file directory pointing to many parquet\n files. All parquet files must have the same schema.\n\n Each parquet file will be read into as a table and encoded into FeatureRows\n using a pool of max_workers workers.\n\n Args:\n file (str):\n File directory of all the parquet file to encode.\n Parquet file must have more than one row group.\n\n feature_set (str):\n Feature set reference in the format f\"{project}/{name}:{version}\".\n\n fields (dict[str, enum.Enum.ValueType]):\n A mapping of field names to their value types.\n\n row_group_idx(int):\n Row group index to read and encode into byte like FeatureRow\n protobuf objects.\n\n Returns:\n List[bytes]:\n List of byte encoded FeatureRows from the parquet file.\n \"\"\"\n pq_file = pq.ParquetFile(file)\n # Read parquet file as a PyArrow table\n table = pq_file.read_row_group(row_group_idx)\n\n # Add datetime column\n datetime_col = pa_column_to_timestamp_proto_column(table.column(DATETIME_COLUMN))\n\n # Preprocess the columns by converting all its values to Proto values\n proto_columns = {\n field_name: pa_column_to_proto_column(dtype, table.column(field_name))\n for field_name, dtype in fields.items()\n }\n\n # List to store result\n feature_rows = []\n\n # Loop optimization declaration(s)\n field = FieldProto.Field\n proto_items = proto_columns.items()\n append = feature_rows.append\n\n # Iterate through the rows\n for row_idx in range(table.num_rows):\n feature_row = FeatureRow(\n event_timestamp=datetime_col[row_idx], feature_set=feature_set\n )\n # Loop optimization declaration\n ext = feature_row.fields.extend\n\n # Insert field from each column\n for k, v in proto_items:\n ext([field(name=k, value=v[row_idx])])\n\n # Append FeatureRow in byte string form\n append(feature_row.SerializeToString())\n\n return feature_rows\n\n\ndef get_feature_row_chunks(\n file: str, row_groups: List[int], fs: FeatureSet, max_workers: int\n) -> Iterable[List[bytes]]:\n \"\"\"\n Iterator function to encode a PyArrow table read from a parquet file to\n FeatureRow(s).\n\n Args:\n file (str):\n File directory of the parquet file. 
The parquet file must have more\n than one row group.\n\n row_groups (List[int]):\n Specific row group indexes to be read and transformed in the parquet\n file.\n\n fs (feast.feature_set.FeatureSet):\n FeatureSet describing parquet files.\n\n max_workers (int):\n Maximum number of workers to spawn.\n\n Returns:\n Iterable[List[bytes]]:\n Iterable list of byte encoded FeatureRow(s).\n \"\"\"\n\n feature_set = f\"{fs.project}/{fs.name}:{fs.version}\"\n\n field_map = {field.name: field.dtype for field in fs.fields.values()}\n\n pool = Pool(max_workers)\n func = partial(_encode_pa_tables, file, feature_set, field_map)\n for chunk in pool.imap(func, row_groups):\n yield chunk\n return\n\n\ndef validate_dataframe(dataframe: pd.DataFrame, feature_set: FeatureSet):\n if \"datetime\" not in dataframe.columns:\n raise ValueError(\n f'Dataframe does not contain entity \"datetime\" in columns {dataframe.columns}'\n )\n\n for entity in feature_set.entities:\n if entity.name not in dataframe.columns:\n raise ValueError(\n f\"Dataframe does not contain entity {entity.name} in columns {dataframe.columns}\"\n )\n\n for feature in feature_set.features:\n if feature.name not in dataframe.columns:\n raise ValueError(\n f\"Dataframe does not contain feature {feature.name} in columns {dataframe.columns}\"\n )\n", "path": "sdk/python/feast/loaders/ingest.py"}, {"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom feast.core.FeatureSet_pb2 import FeatureSpec as FeatureProto\nfrom feast.field import Field\nfrom feast.types import Value_pb2 as ValueTypeProto\nfrom feast.value_type import ValueType\n\n\nclass Feature(Field):\n \"\"\"Feature field type\"\"\"\n\n def to_proto(self) -> FeatureProto:\n \"\"\"Converts Feature object to its Protocol Buffer representation\"\"\"\n value_type = ValueTypeProto.ValueType.Enum.Value(self.dtype.name)\n return FeatureProto(\n name=self.name,\n value_type=value_type,\n presence=self.presence,\n group_presence=self.group_presence,\n shape=self.shape,\n value_count=self.value_count,\n domain=self.domain,\n int_domain=self.int_domain,\n float_domain=self.float_domain,\n string_domain=self.string_domain,\n bool_domain=self.bool_domain,\n struct_domain=self.struct_domain,\n natural_language_domain=self.natural_language_domain,\n image_domain=self.image_domain,\n mid_domain=self.mid_domain,\n url_domain=self.url_domain,\n time_domain=self.time_domain,\n time_of_day_domain=self.time_of_day_domain,\n )\n\n @classmethod\n def from_proto(cls, feature_proto: FeatureProto):\n \"\"\"\n\n Args:\n feature_proto: FeatureSpec protobuf object\n\n Returns:\n Feature object\n \"\"\"\n feature = cls(\n name=feature_proto.name, dtype=ValueType(feature_proto.value_type),\n )\n feature.update_presence_constraints(feature_proto)\n feature.update_shape_type(feature_proto)\n feature.update_domain_info(feature_proto)\n return feature\n", "path": "sdk/python/feast/feature.py"}]} | 2,311 | 665 |
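The core of the feast diff above is what gets handed to the worker pool: rather than binding the whole `FeatureSet` object into the `partial`, only a feature set reference string and a plain name-to-dtype dict are pickled out to the workers. A stripped-down sketch of that pattern follows — illustrative names only, not the feast API:

```python
from functools import partial
from multiprocessing import Pool

def encode_chunk(feature_set_ref: str, fields: dict, row_group_idx: int) -> str:
    # Workers only receive picklable primitives: a string and a dict.
    return f"{feature_set_ref}: encoded row group {row_group_idx} using {sorted(fields)}"

def get_chunks(feature_set_ref, fields, row_groups, max_workers=2):
    func = partial(encode_chunk, feature_set_ref, fields)  # cheap, picklable bound args
    with Pool(max_workers) as pool:
        yield from pool.imap(func, row_groups)

if __name__ == "__main__":
    for line in get_chunks("project/driver_features:1", {"trips": "INT64"}, [0, 1, 2]):
        print(line)
```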
gh_patches_debug_39661 | rasdani/github-patches | git_diff | pytorch__TensorRT-166 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Module tests fail because of segfault in cuDNN destructor
The tests pass functionality-wise, but in the cleanup, the test segfaults. It seems like this is an issue others have seen in libtorch https://github.com/pytorch/pytorch/issues/17658
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py/setup.py`
Content:
```
1 import os
2 import sys
3 import glob
4 import setuptools
5 from setuptools import setup, Extension, find_packages
6 from setuptools.command.build_ext import build_ext
7 from setuptools.command.develop import develop
8 from setuptools.command.install import install
9 from distutils.cmd import Command
10 from wheel.bdist_wheel import bdist_wheel
11
12 from torch.utils import cpp_extension
13 from shutil import copyfile, rmtree
14
15 import subprocess
16
17 dir_path = os.path.dirname(os.path.realpath(__file__))
18
19 __version__ = '0.0.3'
20
21 CXX11_ABI = False
22
23 if "--use-cxx11-abi" in sys.argv:
24 sys.argv.remove("--use-cxx11-abi")
25 CXX11_ABI = True
26
27 def which(program):
28 import os
29 def is_exe(fpath):
30 return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
31
32 fpath, fname = os.path.split(program)
33 if fpath:
34 if is_exe(program):
35 return program
36 else:
37 for path in os.environ["PATH"].split(os.pathsep):
38 exe_file = os.path.join(path, program)
39 if is_exe(exe_file):
40 return exe_file
41
42 return None
43
44 BAZEL_EXE = which("bazel")
45
46 def build_libtrtorch_pre_cxx11_abi(develop=True, use_dist_dir=True, cxx11_abi=False):
47 cmd = [BAZEL_EXE, "build"]
48 cmd.append("//cpp/api/lib:libtrtorch.so")
49 if develop:
50 cmd.append("--compilation_mode=dbg")
51 else:
52 cmd.append("--compilation_mode=opt")
53 if use_dist_dir:
54 cmd.append("--distdir=third_party/dist_dir/x86_64-linux-gnu")
55 if not cxx11_abi:
56 cmd.append("--config=python")
57 else:
58 print("using CXX11 ABI build")
59
60 print("building libtrtorch")
61 status_code = subprocess.run(cmd).returncode
62
63 if status_code != 0:
64 sys.exit(status_code)
65
66
67 def gen_version_file():
68 if not os.path.exists(dir_path + '/trtorch/_version.py'):
69 os.mknod(dir_path + '/trtorch/_version.py')
70
71 with open(dir_path + '/trtorch/_version.py', 'w') as f:
72 print("creating version file")
73 f.write("__version__ = \"" + __version__ + '\"')
74
75 def copy_libtrtorch(multilinux=False):
76 if not os.path.exists(dir_path + '/trtorch/lib'):
77 os.makedirs(dir_path + '/trtorch/lib')
78
79 print("copying library into module")
80 if multilinux:
81 copyfile(dir_path + "/build/libtrtorch_build/libtrtorch.so", dir_path + '/trtorch/lib/libtrtorch.so')
82 else:
83 copyfile(dir_path + "/../bazel-bin/cpp/api/lib/libtrtorch.so", dir_path + '/trtorch/lib/libtrtorch.so')
84
85 class DevelopCommand(develop):
86 description = "Builds the package and symlinks it into the PYTHONPATH"
87
88 def initialize_options(self):
89 develop.initialize_options(self)
90
91 def finalize_options(self):
92 develop.finalize_options(self)
93
94 def run(self):
95 global CXX11_ABI
96 build_libtrtorch_pre_cxx11_abi(develop=True, cxx11_abi=CXX11_ABI)
97 gen_version_file()
98 copy_libtrtorch()
99 develop.run(self)
100
101
102 class InstallCommand(install):
103 description = "Builds the package"
104
105 def initialize_options(self):
106 install.initialize_options(self)
107
108 def finalize_options(self):
109 install.finalize_options(self)
110
111 def run(self):
112 global CXX11_ABI
113 build_libtrtorch_pre_cxx11_abi(develop=False, cxx11_abi=CXX11_ABI)
114 gen_version_file()
115 copy_libtrtorch()
116 install.run(self)
117
118 class BdistCommand(bdist_wheel):
119 description = "Builds the package"
120
121 def initialize_options(self):
122 bdist_wheel.initialize_options(self)
123
124 def finalize_options(self):
125 bdist_wheel.finalize_options(self)
126
127 def run(self):
128 global CXX11_ABI
129 build_libtrtorch_pre_cxx11_abi(develop=False, cxx11_abi=CXX11_ABI)
130 gen_version_file()
131 copy_libtrtorch()
132 bdist_wheel.run(self)
133
134 class CleanCommand(Command):
135 """Custom clean command to tidy up the project root."""
136 PY_CLEAN_FILES = ['./build', './dist', './trtorch/__pycache__', './trtorch/lib', './*.pyc', './*.tgz', './*.egg-info']
137 description = "Command to tidy up the project root"
138 user_options = []
139
140 def initialize_options(self):
141 pass
142
143 def finalize_options(self):
144 pass
145
146 def run(self):
147 for path_spec in self.PY_CLEAN_FILES:
148 # Make paths absolute and relative to this path
149 abs_paths = glob.glob(os.path.normpath(os.path.join(dir_path, path_spec)))
150 for path in [str(p) for p in abs_paths]:
151 if not path.startswith(dir_path):
152 # Die if path in CLEAN_FILES is absolute + outside this directory
153 raise ValueError("%s is not a path inside %s" % (path, dir_path))
154 print('Removing %s' % os.path.relpath(path))
155 rmtree(path)
156
157 ext_modules = [
158 cpp_extension.CUDAExtension('trtorch._C',
159 ['trtorch/csrc/trtorch_py.cpp'],
160 library_dirs=[
161 dir_path + '/trtorch/lib/'
162 ],
163 libraries=[
164 "trtorch"
165 ],
166 include_dirs=[
167 dir_path + "/../",
168 dir_path + "/../bazel-TRTorch/external/tensorrt/include",
169 ],
170 extra_compile_args=[
171 "-Wno-deprecated",
172 "-Wno-deprecated-declarations",
173 ] + (["-D_GLIBCXX_USE_CXX11_ABI=1"] if CXX11_ABI else ["-D_GLIBCXX_USE_CXX11_ABI=0"]),
174 extra_link_args=[
175 "-Wno-deprecated",
176 "-Wno-deprecated-declarations",
177 "-Wl,--no-as-needed",
178 "-ltrtorch",
179 "-Wl,-rpath,$ORIGIN/lib"
180 ] + (["-D_GLIBCXX_USE_CXX11_ABI=1"] if CXX11_ABI else ["-D_GLIBCXX_USE_CXX11_ABI=0"]),
181 undef_macros=[ "NDEBUG" ]
182 )
183 ]
184
185 with open("README.md", "r") as fh:
186 long_description = fh.read()
187
188 setup(
189 name='trtorch',
190 version=__version__,
191 author='NVIDIA',
192 author_email='[email protected]',
193 url='https://nvidia.github.io/TRTorch',
194 description='A compiler backend for PyTorch JIT targeting NVIDIA GPUs',
195 long_description_content_type='text/markdown',
196 long_description=long_description,
197 ext_modules=ext_modules,
198 install_requires=[
199 'torch==1.5.1',
200 ],
201 setup_requires=[],
202 cmdclass={
203 'install': InstallCommand,
204 'clean': CleanCommand,
205 'develop': DevelopCommand,
206 'build_ext': cpp_extension.BuildExtension,
207 'bdist_wheel': BdistCommand,
208 },
209 zip_safe=False,
210 license="BSD",
211 packages=find_packages(),
212 classifiers=[
213 "Development Status :: 3 - Alpha",
214 "Environment :: GPU :: NVIDIA CUDA",
215 "License :: OSI Approved :: BSD License",
216 "Intended Audience :: Developers",
217 "Intended Audience :: Science/Research",
218 "Operating System :: POSIX :: Linux",
219 "Programming Language :: C++",
220 "Programming Language :: Python",
221 "Programming Language :: Python :: Implementation :: CPython",
222 "Topic :: Scientific/Engineering",
223 "Topic :: Scientific/Engineering :: Artificial Intelligence",
224 "Topic :: Software Development",
225 "Topic :: Software Development :: Libraries"
226 ],
227 python_requires='>=3.5',
228 include_package_data=True,
229 package_data={
230 'trtorch': ['lib/*.so'],
231 },
232 exclude_package_data={
233 '': ['*.cpp', '*.h'],
234 'trtorch': ['csrc/*.cpp'],
235 }
236 )
237
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/py/setup.py b/py/setup.py
--- a/py/setup.py
+++ b/py/setup.py
@@ -16,7 +16,7 @@
dir_path = os.path.dirname(os.path.realpath(__file__))
-__version__ = '0.0.3'
+__version__ = '0.1.0a0'
CXX11_ABI = False
@@ -158,7 +158,8 @@
cpp_extension.CUDAExtension('trtorch._C',
['trtorch/csrc/trtorch_py.cpp'],
library_dirs=[
- dir_path + '/trtorch/lib/'
+ (dir_path + '/trtorch/lib/'),
+ "/opt/conda/lib/python3.6/config-3.6m-x86_64-linux-gnu"
],
libraries=[
"trtorch"
@@ -176,7 +177,14 @@
"-Wno-deprecated-declarations",
"-Wl,--no-as-needed",
"-ltrtorch",
- "-Wl,-rpath,$ORIGIN/lib"
+ "-Wl,-rpath,$ORIGIN/lib",
+ "-lpthread",
+ "-ldl",
+ "-lutil",
+ "-lrt",
+ "-lm",
+ "-Xlinker",
+ "-export-dynamic"
] + (["-D_GLIBCXX_USE_CXX11_ABI=1"] if CXX11_ABI else ["-D_GLIBCXX_USE_CXX11_ABI=0"]),
undef_macros=[ "NDEBUG" ]
)
@@ -196,7 +204,7 @@
long_description=long_description,
ext_modules=ext_modules,
install_requires=[
- 'torch==1.5.1',
+ 'torch==1.6.0',
],
setup_requires=[],
cmdclass={
@@ -210,7 +218,7 @@
license="BSD",
packages=find_packages(),
classifiers=[
- "Development Status :: 3 - Alpha",
+ "Development Status :: 4 - Beta",
"Environment :: GPU :: NVIDIA CUDA",
"License :: OSI Approved :: BSD License",
"Intended Audience :: Developers",
@@ -224,7 +232,7 @@
"Topic :: Software Development",
"Topic :: Software Development :: Libraries"
],
- python_requires='>=3.5',
+ python_requires='>=3.6',
include_package_data=True,
package_data={
'trtorch': ['lib/*.so'],
| {"golden_diff": "diff --git a/py/setup.py b/py/setup.py\n--- a/py/setup.py\n+++ b/py/setup.py\n@@ -16,7 +16,7 @@\n \n dir_path = os.path.dirname(os.path.realpath(__file__))\n \n-__version__ = '0.0.3'\n+__version__ = '0.1.0a0'\n \n CXX11_ABI = False\n \n@@ -158,7 +158,8 @@\n cpp_extension.CUDAExtension('trtorch._C',\n ['trtorch/csrc/trtorch_py.cpp'],\n library_dirs=[\n- dir_path + '/trtorch/lib/'\n+ (dir_path + '/trtorch/lib/'),\n+ \"/opt/conda/lib/python3.6/config-3.6m-x86_64-linux-gnu\"\n ],\n libraries=[\n \"trtorch\"\n@@ -176,7 +177,14 @@\n \"-Wno-deprecated-declarations\",\n \"-Wl,--no-as-needed\",\n \"-ltrtorch\",\n- \"-Wl,-rpath,$ORIGIN/lib\"\n+ \"-Wl,-rpath,$ORIGIN/lib\",\n+ \"-lpthread\",\n+ \"-ldl\",\n+ \"-lutil\",\n+ \"-lrt\",\n+ \"-lm\",\n+ \"-Xlinker\",\n+ \"-export-dynamic\"\n ] + ([\"-D_GLIBCXX_USE_CXX11_ABI=1\"] if CXX11_ABI else [\"-D_GLIBCXX_USE_CXX11_ABI=0\"]),\n undef_macros=[ \"NDEBUG\" ]\n )\n@@ -196,7 +204,7 @@\n long_description=long_description,\n ext_modules=ext_modules,\n install_requires=[\n- 'torch==1.5.1',\n+ 'torch==1.6.0',\n ],\n setup_requires=[],\n cmdclass={\n@@ -210,7 +218,7 @@\n license=\"BSD\",\n packages=find_packages(),\n classifiers=[\n- \"Development Status :: 3 - Alpha\",\n+ \"Development Status :: 4 - Beta\",\n \"Environment :: GPU :: NVIDIA CUDA\",\n \"License :: OSI Approved :: BSD License\",\n \"Intended Audience :: Developers\",\n@@ -224,7 +232,7 @@\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\"\n ],\n- python_requires='>=3.5',\n+ python_requires='>=3.6',\n include_package_data=True,\n package_data={\n 'trtorch': ['lib/*.so'],\n", "issue": "Module tests fail because of segfault in cuDNN destructor\nThe tests pass functionality wise, but in the clean up, the test segfaults. 
It seems like this is a issue others have seen in libtorch https://github.com/pytorch/pytorch/issues/17658\n", "before_files": [{"content": "import os\nimport sys\nimport glob\nimport setuptools\nfrom setuptools import setup, Extension, find_packages\nfrom setuptools.command.build_ext import build_ext\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\nfrom distutils.cmd import Command\nfrom wheel.bdist_wheel import bdist_wheel\n\nfrom torch.utils import cpp_extension\nfrom shutil import copyfile, rmtree\n\nimport subprocess\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n__version__ = '0.0.3'\n\nCXX11_ABI = False\n\nif \"--use-cxx11-abi\" in sys.argv:\n sys.argv.remove(\"--use-cxx11-abi\")\n CXX11_ABI = True\n\ndef which(program):\n import os\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None\n\nBAZEL_EXE = which(\"bazel\")\n\ndef build_libtrtorch_pre_cxx11_abi(develop=True, use_dist_dir=True, cxx11_abi=False):\n cmd = [BAZEL_EXE, \"build\"]\n cmd.append(\"//cpp/api/lib:libtrtorch.so\")\n if develop:\n cmd.append(\"--compilation_mode=dbg\")\n else:\n cmd.append(\"--compilation_mode=opt\")\n if use_dist_dir:\n cmd.append(\"--distdir=third_party/dist_dir/x86_64-linux-gnu\")\n if not cxx11_abi:\n cmd.append(\"--config=python\")\n else:\n print(\"using CXX11 ABI build\")\n\n print(\"building libtrtorch\")\n status_code = subprocess.run(cmd).returncode\n\n if status_code != 0:\n sys.exit(status_code)\n\n\ndef gen_version_file():\n if not os.path.exists(dir_path + '/trtorch/_version.py'):\n os.mknod(dir_path + '/trtorch/_version.py')\n\n with open(dir_path + '/trtorch/_version.py', 'w') as f:\n print(\"creating version file\")\n f.write(\"__version__ = \\\"\" + __version__ + '\\\"')\n\ndef copy_libtrtorch(multilinux=False):\n if not os.path.exists(dir_path + '/trtorch/lib'):\n os.makedirs(dir_path + '/trtorch/lib')\n\n print(\"copying library into module\")\n if multilinux:\n copyfile(dir_path + \"/build/libtrtorch_build/libtrtorch.so\", dir_path + '/trtorch/lib/libtrtorch.so')\n else:\n copyfile(dir_path + \"/../bazel-bin/cpp/api/lib/libtrtorch.so\", dir_path + '/trtorch/lib/libtrtorch.so')\n\nclass DevelopCommand(develop):\n description = \"Builds the package and symlinks it into the PYTHONPATH\"\n\n def initialize_options(self):\n develop.initialize_options(self)\n\n def finalize_options(self):\n develop.finalize_options(self)\n\n def run(self):\n global CXX11_ABI\n build_libtrtorch_pre_cxx11_abi(develop=True, cxx11_abi=CXX11_ABI)\n gen_version_file()\n copy_libtrtorch()\n develop.run(self)\n\n\nclass InstallCommand(install):\n description = \"Builds the package\"\n\n def initialize_options(self):\n install.initialize_options(self)\n\n def finalize_options(self):\n install.finalize_options(self)\n\n def run(self):\n global CXX11_ABI\n build_libtrtorch_pre_cxx11_abi(develop=False, cxx11_abi=CXX11_ABI)\n gen_version_file()\n copy_libtrtorch()\n install.run(self)\n\nclass BdistCommand(bdist_wheel):\n description = \"Builds the package\"\n\n def initialize_options(self):\n bdist_wheel.initialize_options(self)\n\n def finalize_options(self):\n bdist_wheel.finalize_options(self)\n\n def run(self):\n global CXX11_ABI\n 
build_libtrtorch_pre_cxx11_abi(develop=False, cxx11_abi=CXX11_ABI)\n gen_version_file()\n copy_libtrtorch()\n bdist_wheel.run(self)\n\nclass CleanCommand(Command):\n \"\"\"Custom clean command to tidy up the project root.\"\"\"\n PY_CLEAN_FILES = ['./build', './dist', './trtorch/__pycache__', './trtorch/lib', './*.pyc', './*.tgz', './*.egg-info']\n description = \"Command to tidy up the project root\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n for path_spec in self.PY_CLEAN_FILES:\n # Make paths absolute and relative to this path\n abs_paths = glob.glob(os.path.normpath(os.path.join(dir_path, path_spec)))\n for path in [str(p) for p in abs_paths]:\n if not path.startswith(dir_path):\n # Die if path in CLEAN_FILES is absolute + outside this directory\n raise ValueError(\"%s is not a path inside %s\" % (path, dir_path))\n print('Removing %s' % os.path.relpath(path))\n rmtree(path)\n\next_modules = [\n cpp_extension.CUDAExtension('trtorch._C',\n ['trtorch/csrc/trtorch_py.cpp'],\n library_dirs=[\n dir_path + '/trtorch/lib/'\n ],\n libraries=[\n \"trtorch\"\n ],\n include_dirs=[\n dir_path + \"/../\",\n dir_path + \"/../bazel-TRTorch/external/tensorrt/include\",\n ],\n extra_compile_args=[\n \"-Wno-deprecated\",\n \"-Wno-deprecated-declarations\",\n ] + ([\"-D_GLIBCXX_USE_CXX11_ABI=1\"] if CXX11_ABI else [\"-D_GLIBCXX_USE_CXX11_ABI=0\"]),\n extra_link_args=[\n \"-Wno-deprecated\",\n \"-Wno-deprecated-declarations\",\n \"-Wl,--no-as-needed\",\n \"-ltrtorch\",\n \"-Wl,-rpath,$ORIGIN/lib\"\n ] + ([\"-D_GLIBCXX_USE_CXX11_ABI=1\"] if CXX11_ABI else [\"-D_GLIBCXX_USE_CXX11_ABI=0\"]),\n undef_macros=[ \"NDEBUG\" ]\n )\n]\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='trtorch',\n version=__version__,\n author='NVIDIA',\n author_email='[email protected]',\n url='https://nvidia.github.io/TRTorch',\n description='A compiler backend for PyTorch JIT targeting NVIDIA GPUs',\n long_description_content_type='text/markdown',\n long_description=long_description,\n ext_modules=ext_modules,\n install_requires=[\n 'torch==1.5.1',\n ],\n setup_requires=[],\n cmdclass={\n 'install': InstallCommand,\n 'clean': CleanCommand,\n 'develop': DevelopCommand,\n 'build_ext': cpp_extension.BuildExtension,\n 'bdist_wheel': BdistCommand,\n },\n zip_safe=False,\n license=\"BSD\",\n packages=find_packages(),\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: GPU :: NVIDIA CUDA\",\n \"License :: OSI Approved :: BSD License\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: C++\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\"\n ],\n python_requires='>=3.5',\n include_package_data=True,\n package_data={\n 'trtorch': ['lib/*.so'],\n },\n exclude_package_data={\n '': ['*.cpp', '*.h'],\n 'trtorch': ['csrc/*.cpp'],\n }\n)\n", "path": "py/setup.py"}], "after_files": [{"content": "import os\nimport sys\nimport glob\nimport setuptools\nfrom setuptools import setup, Extension, find_packages\nfrom setuptools.command.build_ext import build_ext\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\nfrom 
distutils.cmd import Command\nfrom wheel.bdist_wheel import bdist_wheel\n\nfrom torch.utils import cpp_extension\nfrom shutil import copyfile, rmtree\n\nimport subprocess\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n__version__ = '0.1.0a0'\n\nCXX11_ABI = False\n\nif \"--use-cxx11-abi\" in sys.argv:\n sys.argv.remove(\"--use-cxx11-abi\")\n CXX11_ABI = True\n\ndef which(program):\n import os\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None\n\nBAZEL_EXE = which(\"bazel\")\n\ndef build_libtrtorch_pre_cxx11_abi(develop=True, use_dist_dir=True, cxx11_abi=False):\n cmd = [BAZEL_EXE, \"build\"]\n cmd.append(\"//cpp/api/lib:libtrtorch.so\")\n if develop:\n cmd.append(\"--compilation_mode=dbg\")\n else:\n cmd.append(\"--compilation_mode=opt\")\n if use_dist_dir:\n cmd.append(\"--distdir=third_party/dist_dir/x86_64-linux-gnu\")\n if not cxx11_abi:\n cmd.append(\"--config=python\")\n else:\n print(\"using CXX11 ABI build\")\n\n print(\"building libtrtorch\")\n status_code = subprocess.run(cmd).returncode\n\n if status_code != 0:\n sys.exit(status_code)\n\n\ndef gen_version_file():\n if not os.path.exists(dir_path + '/trtorch/_version.py'):\n os.mknod(dir_path + '/trtorch/_version.py')\n\n with open(dir_path + '/trtorch/_version.py', 'w') as f:\n print(\"creating version file\")\n f.write(\"__version__ = \\\"\" + __version__ + '\\\"')\n\ndef copy_libtrtorch(multilinux=False):\n if not os.path.exists(dir_path + '/trtorch/lib'):\n os.makedirs(dir_path + '/trtorch/lib')\n\n print(\"copying library into module\")\n if multilinux:\n copyfile(dir_path + \"/build/libtrtorch_build/libtrtorch.so\", dir_path + '/trtorch/lib/libtrtorch.so')\n else:\n copyfile(dir_path + \"/../bazel-bin/cpp/api/lib/libtrtorch.so\", dir_path + '/trtorch/lib/libtrtorch.so')\n\nclass DevelopCommand(develop):\n description = \"Builds the package and symlinks it into the PYTHONPATH\"\n\n def initialize_options(self):\n develop.initialize_options(self)\n\n def finalize_options(self):\n develop.finalize_options(self)\n\n def run(self):\n global CXX11_ABI\n build_libtrtorch_pre_cxx11_abi(develop=True, cxx11_abi=CXX11_ABI)\n gen_version_file()\n copy_libtrtorch()\n develop.run(self)\n\n\nclass InstallCommand(install):\n description = \"Builds the package\"\n\n def initialize_options(self):\n install.initialize_options(self)\n\n def finalize_options(self):\n install.finalize_options(self)\n\n def run(self):\n global CXX11_ABI\n build_libtrtorch_pre_cxx11_abi(develop=False, cxx11_abi=CXX11_ABI)\n gen_version_file()\n copy_libtrtorch()\n install.run(self)\n\nclass BdistCommand(bdist_wheel):\n description = \"Builds the package\"\n\n def initialize_options(self):\n bdist_wheel.initialize_options(self)\n\n def finalize_options(self):\n bdist_wheel.finalize_options(self)\n\n def run(self):\n global CXX11_ABI\n build_libtrtorch_pre_cxx11_abi(develop=False, cxx11_abi=CXX11_ABI)\n gen_version_file()\n copy_libtrtorch()\n bdist_wheel.run(self)\n\nclass CleanCommand(Command):\n \"\"\"Custom clean command to tidy up the project root.\"\"\"\n PY_CLEAN_FILES = ['./build', './dist', './trtorch/__pycache__', './trtorch/lib', './*.pyc', './*.tgz', './*.egg-info']\n description = \"Command to tidy up the project root\"\n user_options = []\n\n def 
initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n for path_spec in self.PY_CLEAN_FILES:\n # Make paths absolute and relative to this path\n abs_paths = glob.glob(os.path.normpath(os.path.join(dir_path, path_spec)))\n for path in [str(p) for p in abs_paths]:\n if not path.startswith(dir_path):\n # Die if path in CLEAN_FILES is absolute + outside this directory\n raise ValueError(\"%s is not a path inside %s\" % (path, dir_path))\n print('Removing %s' % os.path.relpath(path))\n rmtree(path)\n\next_modules = [\n cpp_extension.CUDAExtension('trtorch._C',\n ['trtorch/csrc/trtorch_py.cpp'],\n library_dirs=[\n (dir_path + '/trtorch/lib/'),\n \"/opt/conda/lib/python3.6/config-3.6m-x86_64-linux-gnu\"\n ],\n libraries=[\n \"trtorch\"\n ],\n include_dirs=[\n dir_path + \"/../\",\n dir_path + \"/../bazel-TRTorch/external/tensorrt/include\",\n ],\n extra_compile_args=[\n \"-Wno-deprecated\",\n \"-Wno-deprecated-declarations\",\n ] + ([\"-D_GLIBCXX_USE_CXX11_ABI=1\"] if CXX11_ABI else [\"-D_GLIBCXX_USE_CXX11_ABI=0\"]),\n extra_link_args=[\n \"-Wno-deprecated\",\n \"-Wno-deprecated-declarations\",\n \"-Wl,--no-as-needed\",\n \"-ltrtorch\",\n \"-Wl,-rpath,$ORIGIN/lib\",\n \"-lpthread\",\n \"-ldl\",\n \"-lutil\",\n \"-lrt\",\n \"-lm\",\n \"-Xlinker\",\n \"-export-dynamic\"\n ] + ([\"-D_GLIBCXX_USE_CXX11_ABI=1\"] if CXX11_ABI else [\"-D_GLIBCXX_USE_CXX11_ABI=0\"]),\n undef_macros=[ \"NDEBUG\" ]\n )\n]\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='trtorch',\n version=__version__,\n author='NVIDIA',\n author_email='[email protected]',\n url='https://nvidia.github.io/TRTorch',\n description='A compiler backend for PyTorch JIT targeting NVIDIA GPUs',\n long_description_content_type='text/markdown',\n long_description=long_description,\n ext_modules=ext_modules,\n install_requires=[\n 'torch==1.6.0',\n ],\n setup_requires=[],\n cmdclass={\n 'install': InstallCommand,\n 'clean': CleanCommand,\n 'develop': DevelopCommand,\n 'build_ext': cpp_extension.BuildExtension,\n 'bdist_wheel': BdistCommand,\n },\n zip_safe=False,\n license=\"BSD\",\n packages=find_packages(),\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: GPU :: NVIDIA CUDA\",\n \"License :: OSI Approved :: BSD License\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: C++\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\"\n ],\n python_requires='>=3.6',\n include_package_data=True,\n package_data={\n 'trtorch': ['lib/*.so'],\n },\n exclude_package_data={\n '': ['*.cpp', '*.h'],\n 'trtorch': ['csrc/*.cpp'],\n }\n)\n", "path": "py/setup.py"}]} | 2,725 | 558 |
gh_patches_debug_297 | rasdani/github-patches | git_diff | beeware__toga-267 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GTK+: TypeError: on_close() takes 2 positional arguments but 3 were given
When creating a new Window and then closing it, a TypeError is raised on Linux. Once you have created an App, calling the following will reproduce the error:
window = toga.Window()
window.app = app
window.show()
Then close the window and you get:
TypeError: on_close() takes 2 positional arguments but 3 were given
--- END ISSUE ---
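For reference, GTK delivers window-close callbacks with two arguments. A minimal standalone sketch, assuming the handler is connected to the `delete-event` signal (which is what the traceback implies), looks like this:
```
# Minimal sketch, assuming the close handler is wired to GTK's "delete-event"
# signal: GTK invokes it as handler(widget, event), so a bound method needs
# two parameters besides self.
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk


class Handler:
    def on_close(self, widget, event):  # a (self, widget) signature triggers the TypeError
        return False  # returning False lets GTK proceed with destroying the window


win = Gtk.Window(title="demo")
win.connect("delete-event", Handler().on_close)
```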
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/gtk/toga_gtk/app.py`
Content:
```
1 import asyncio
2 import os
3 import signal
4 import sys
5
6 try:
7 import gi
8 except ImportError:
9 # app.py is the first module that will be imported when you import toga_gtk.
10 #
11 # If Gtk can't be imported, it may be because we're in a virtualenv,
12 # and the system python libraries aren't visible. This can be fixed by
13 # creating a symlink into the site-packages
14 # Try creating a symlink to the system library location.
15 # base_packages_dir is where the packages installed by the package manager
16 # can be found.
17 # gi_system_install_path is where gi can be found in the packages dir.
18 # installer_command is the command the user can run to install gi.
19 py_version = "%d.%d" % (sys.version_info.major, sys.version_info.minor)
20
21 if sys.version_info.major == 3:
22 if os.path.isdir('/usr/lib64/python%s/site-packages/' % (py_version,)):
23 # Fedora
24 base_packages_dir = '/usr/lib64/python%s/site-packages/' % (py_version,)
25 gi_system_install_path = '/usr/lib64/python%s/site-packages/gi' % (py_version,)
26 installer_command = 'dnf install pygobject3 python3-gobject'
27 elif os.path.isdir('/usr/lib/python3/dist-packages/'):
28 # Ubuntu, Debian
29 base_packages_dir = '/usr/lib/python3/dist-packages/'
30 gi_system_install_path = '/usr/local/lib/python3/dist-packages/gi'
31 installer_command = 'apt-get install python3-gi'
32 elif os.path.isdir('/usr/lib/python%s/site-packages/' % (py_version,)):
33 # Arch
34 base_packages_dir = '/usr/lib/python%s/site-packages/' % (py_version,)
35 gi_system_install_path = '/usr/lib/python%s/site-packages/gi' % (py_version,)
36 installer_command = 'pacman -S python-gobject'
37 else:
38 raise RuntimeError("Unable to locate your Python packages dir.")
39 else:
40 raise RuntimeError("Toga requires Python 3.")
41
42 # Use the location of this package to guide us to
43 # the location of the virtualenv.
44 gi_symlink_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gi')
45 pygtkcompat_symlink_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'pygtkcompat')
46
47 if gi_symlink_path == gi_system_install_path:
48 # If we're not in a virtualenv, just raise the original import error.
49 raise
50 else:
51 gi_path = os.path.join(base_packages_dir, 'gi')
52 pygtkcompat_path = os.path.join(base_packages_dir, 'pygtkcompat')
53 if os.path.exists(gi_path) and os.path.isdir(gi_path):
54 # If we can identify the gi library, create a symlink to it.
55 try:
56 print("Creating symlink (%s & %s) to system GTK+ libraries..." % (gi_symlink_path, pygtkcompat_symlink_path))
57 os.symlink(gi_path, gi_symlink_path)
58 os.symlink(pygtkcompat_path, pygtkcompat_symlink_path)
59
60 # The call to os.symlink will return almost immediately,
61 # but for some reason, it may not be fully flushed to
62 # the file system. One way to fix this is to start
63 # the process again. This call to os.execl restarts the
64 # program with the same arguments, replacing the original
65 # operating system process.
66 os.execl(sys.executable, sys.executable, *sys.argv)
67 except OSError:
68 raise RuntimeError("Unable to automatically create symlink to system Python GTK+ bindings.")
69 else:
70 raise RuntimeError("Unable to locate the Python GTK+ bindings. Have you run '%s'?" % installer_command)
71
72 gi.require_version('Gtk', '3.0')
73 from gi.repository import Gtk, Gio, GLib
74
75
76 from toga.command import GROUP_BREAK, SECTION_BREAK, Command, Group
77 # from .command import Command, Group
78 import toga
79 from .window import Window
80 from toga import Icon
81 from toga.utils import wrapped_handler
82
83 import gbulb
84
85
86 class MainWindow(Window):
87 _IMPL_CLASS = Gtk.ApplicationWindow
88
89 def on_close(self, widget):
90 pass
91
92
93 class App:
94 """
95 Todo:
96 * Creation of Menus is not working.
97 * Disabling of menu items is not working.
98 * App Icon is not showing up
99 """
100 def __init__(self, interface):
101 self.interface = interface
102 self.interface._impl = self
103
104 gbulb.install(gtk=True)
105 self.loop = asyncio.get_event_loop()
106
107 self.create()
108
109 def create(self):
110 Icon.app_icon = Icon.load(self.interface.icon, default=Icon.TIBERIUS_ICON)
111 # Stimulate the build of the app
112 self.native = Gtk.Application(application_id=self.interface.app_id, flags=Gio.ApplicationFlags.FLAGS_NONE)
113
114 # Connect the GTK signal that will cause app startup to occur
115 self.native.connect('startup', self.startup)
116 self.native.connect('activate', self.activate)
117 # self.native.connect('shutdown', self.shutdown)
118
119 self.actions = None
120
121 def startup(self, data=None):
122 self.interface.commands.add(
123 Command(None, 'About ' + self.interface.name, group=toga.Group.APP),
124 Command(None, 'Preferences', group=toga.Group.APP),
125 # Quit should always be the last item, in a section on it's own
126 Command(lambda s: self.exit(), 'Quit ' + self.interface.name, shortcut='q', group=toga.Group.APP, section=sys.maxsize),
127 Command(None, 'Visit homepage', group=toga.Group.HELP)
128 )
129
130 self.interface.startup()
131
132 # Create the lookup table of menu items,
133 # then force the creation of the menus.
134 self._actions = {}
135 self.create_menus()
136 # self.interface.main_window._impl.create_toolbar()
137
138 def activate(self, data=None):
139 pass
140
141 def open_document(self, fileURL):
142 '''Add a new document to this app.'''
143 print("STUB: If you want to handle opening documents, implement App.open_document(fileURL)")
144
145 def create_menus(self):
146 # Only create the menu if the menu item index has been created.
147 if hasattr(self, '_actions'):
148 self._actions = {}
149 menubar = Gio.Menu()
150 label = None
151 submenu = None
152 section = None
153 for cmd in self.interface.commands:
154 if cmd == GROUP_BREAK:
155 if section:
156 submenu.append_section(None, section)
157
158 if label == '*':
159 self.native.set_app_menu(submenu)
160 else:
161 menubar.append_submenu(label, submenu)
162
163 label = None
164 submenu = None
165 section = None
166 elif cmd == SECTION_BREAK:
167 submenu.append_section(None, section)
168 section = None
169
170 else:
171 if submenu is None:
172 label = cmd.group.label
173 submenu = Gio.Menu()
174
175 if section is None:
176 section = Gio.Menu()
177
178 try:
179 action = self._actions[cmd]
180 except KeyError:
181 cmd_id = "command-%s" % id(cmd)
182 action = Gio.SimpleAction.new(cmd_id, None)
183 if cmd.action:
184 action.connect("activate", wrapped_handler(cmd, cmd.action))
185 cmd._widgets.append(action)
186 self._actions[cmd] = action
187 self.native.add_action(action)
188
189 cmd._impl._set_enabled(cmd.enabled)
190
191 item = Gio.MenuItem.new(cmd.label, 'app.' + cmd_id)
192 if cmd.shortcut:
193 item.set_attribute_value('accel', GLib.Variant('s', '<Primary>%s' % cmd.shortcut.upper()))
194
195 # item.set_attribute_value('accel', GLib.Variant(cmd.shortcut, '<Primary>%s' % cmd.shortcut.upper()))
196
197 section.append_item(item)
198
199 if section:
200 submenu.append_section(None, section)
201
202 if submenu:
203 if label == '*':
204 self.native.set_app_menu(submenu)
205 else:
206 menubar.append_submenu(label, submenu)
207
208 # Set the menu for the app.
209 self.native.set_menubar(menubar)
210
211 def main_loop(self):
212 # Modify signal handlers to make sure Ctrl-C is caught and handled.
213 signal.signal(signal.SIGINT, signal.SIG_DFL)
214
215 self.loop.run_forever(application=self.native)
216
217 def exit(self):
218 self.native.quit()
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/gtk/toga_gtk/app.py b/src/gtk/toga_gtk/app.py
--- a/src/gtk/toga_gtk/app.py
+++ b/src/gtk/toga_gtk/app.py
@@ -86,7 +86,7 @@
class MainWindow(Window):
_IMPL_CLASS = Gtk.ApplicationWindow
- def on_close(self, widget):
+ def on_close(self, widget, data):
pass
| {"golden_diff": "diff --git a/src/gtk/toga_gtk/app.py b/src/gtk/toga_gtk/app.py\n--- a/src/gtk/toga_gtk/app.py\n+++ b/src/gtk/toga_gtk/app.py\n@@ -86,7 +86,7 @@\n class MainWindow(Window):\n _IMPL_CLASS = Gtk.ApplicationWindow\n \n- def on_close(self, widget):\n+ def on_close(self, widget, data):\n pass\n", "issue": "GTK+: TypeError: on_close() takes 2 positional arguments but 3 were given\nWhen creating a new Window and then closing it a TypeError is created in Linux. Once you have created an App, calling the following will reproduce the error:\r\n \r\n window = toga.Window()\r\n window.app = app\r\n window.show()\r\n\r\nThen close the window and you get:\r\n\r\n TypeError: on_close() takes 2 positional arguments but 3 were given\n", "before_files": [{"content": "import asyncio\nimport os\nimport signal\nimport sys\n\ntry:\n import gi\nexcept ImportError:\n # app.py is the first module that will be imported when you import toga_gtk.\n #\n # If Gtk can't be imported, it may be because we're in a virtualenv,\n # and the system python libraries aren't visible. This can be fixed by\n # creating a symlink into the site-packages\n # Try creating a symlink to the system library location.\n # base_packages_dir is where the packages installed by the package manager\n # can be found.\n # gi_system_install_path is where gi can be found in the packages dir.\n # installer_command is the command the user can run to install gi.\n py_version = \"%d.%d\" % (sys.version_info.major, sys.version_info.minor)\n\n if sys.version_info.major == 3:\n if os.path.isdir('/usr/lib64/python%s/site-packages/' % (py_version,)):\n # Fedora\n base_packages_dir = '/usr/lib64/python%s/site-packages/' % (py_version,)\n gi_system_install_path = '/usr/lib64/python%s/site-packages/gi' % (py_version,)\n installer_command = 'dnf install pygobject3 python3-gobject'\n elif os.path.isdir('/usr/lib/python3/dist-packages/'):\n # Ubuntu, Debian\n base_packages_dir = '/usr/lib/python3/dist-packages/'\n gi_system_install_path = '/usr/local/lib/python3/dist-packages/gi'\n installer_command = 'apt-get install python3-gi'\n elif os.path.isdir('/usr/lib/python%s/site-packages/' % (py_version,)):\n # Arch\n base_packages_dir = '/usr/lib/python%s/site-packages/' % (py_version,)\n gi_system_install_path = '/usr/lib/python%s/site-packages/gi' % (py_version,)\n installer_command = 'pacman -S python-gobject'\n else:\n raise RuntimeError(\"Unable to locate your Python packages dir.\")\n else:\n raise RuntimeError(\"Toga requires Python 3.\")\n\n # Use the location of this package to guide us to\n # the location of the virtualenv.\n gi_symlink_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gi')\n pygtkcompat_symlink_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'pygtkcompat')\n\n if gi_symlink_path == gi_system_install_path:\n # If we're not in a virtualenv, just raise the original import error.\n raise\n else:\n gi_path = os.path.join(base_packages_dir, 'gi')\n pygtkcompat_path = os.path.join(base_packages_dir, 'pygtkcompat')\n if os.path.exists(gi_path) and os.path.isdir(gi_path):\n # If we can identify the gi library, create a symlink to it.\n try:\n print(\"Creating symlink (%s & %s) to system GTK+ libraries...\" % (gi_symlink_path, pygtkcompat_symlink_path))\n os.symlink(gi_path, gi_symlink_path)\n os.symlink(pygtkcompat_path, pygtkcompat_symlink_path)\n\n # The call to os.symlink will return almost immediately,\n # but for some reason, it may not be fully flushed to\n # the file system. 
One way to fix this is to start\n # the process again. This call to os.execl restarts the\n # program with the same arguments, replacing the original\n # operating system process.\n os.execl(sys.executable, sys.executable, *sys.argv)\n except OSError:\n raise RuntimeError(\"Unable to automatically create symlink to system Python GTK+ bindings.\")\n else:\n raise RuntimeError(\"Unable to locate the Python GTK+ bindings. Have you run '%s'?\" % installer_command)\n\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gio, GLib\n\n\nfrom toga.command import GROUP_BREAK, SECTION_BREAK, Command, Group\n# from .command import Command, Group\nimport toga\nfrom .window import Window\nfrom toga import Icon\nfrom toga.utils import wrapped_handler\n\nimport gbulb\n\n\nclass MainWindow(Window):\n _IMPL_CLASS = Gtk.ApplicationWindow\n\n def on_close(self, widget):\n pass\n\n\nclass App:\n \"\"\"\n Todo:\n * Creation of Menus is not working.\n * Disabling of menu items is not working.\n * App Icon is not showing up\n \"\"\"\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n\n gbulb.install(gtk=True)\n self.loop = asyncio.get_event_loop()\n\n self.create()\n\n def create(self):\n Icon.app_icon = Icon.load(self.interface.icon, default=Icon.TIBERIUS_ICON)\n # Stimulate the build of the app\n self.native = Gtk.Application(application_id=self.interface.app_id, flags=Gio.ApplicationFlags.FLAGS_NONE)\n\n # Connect the GTK signal that will cause app startup to occur\n self.native.connect('startup', self.startup)\n self.native.connect('activate', self.activate)\n # self.native.connect('shutdown', self.shutdown)\n\n self.actions = None\n\n def startup(self, data=None):\n self.interface.commands.add(\n Command(None, 'About ' + self.interface.name, group=toga.Group.APP),\n Command(None, 'Preferences', group=toga.Group.APP),\n # Quit should always be the last item, in a section on it's own\n Command(lambda s: self.exit(), 'Quit ' + self.interface.name, shortcut='q', group=toga.Group.APP, section=sys.maxsize),\n Command(None, 'Visit homepage', group=toga.Group.HELP)\n )\n\n self.interface.startup()\n\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self._actions = {}\n self.create_menus()\n # self.interface.main_window._impl.create_toolbar()\n\n def activate(self, data=None):\n pass\n\n def open_document(self, fileURL):\n '''Add a new document to this app.'''\n print(\"STUB: If you want to handle opening documents, implement App.open_document(fileURL)\")\n\n def create_menus(self):\n # Only create the menu if the menu item index has been created.\n if hasattr(self, '_actions'):\n self._actions = {}\n menubar = Gio.Menu()\n label = None\n submenu = None\n section = None\n for cmd in self.interface.commands:\n if cmd == GROUP_BREAK:\n if section:\n submenu.append_section(None, section)\n\n if label == '*':\n self.native.set_app_menu(submenu)\n else:\n menubar.append_submenu(label, submenu)\n\n label = None\n submenu = None\n section = None\n elif cmd == SECTION_BREAK:\n submenu.append_section(None, section)\n section = None\n\n else:\n if submenu is None:\n label = cmd.group.label\n submenu = Gio.Menu()\n\n if section is None:\n section = Gio.Menu()\n\n try:\n action = self._actions[cmd]\n except KeyError:\n cmd_id = \"command-%s\" % id(cmd)\n action = Gio.SimpleAction.new(cmd_id, None)\n if cmd.action:\n action.connect(\"activate\", wrapped_handler(cmd, cmd.action))\n cmd._widgets.append(action)\n self._actions[cmd] = action\n 
self.native.add_action(action)\n\n cmd._impl._set_enabled(cmd.enabled)\n\n item = Gio.MenuItem.new(cmd.label, 'app.' + cmd_id)\n if cmd.shortcut:\n item.set_attribute_value('accel', GLib.Variant('s', '<Primary>%s' % cmd.shortcut.upper()))\n\n # item.set_attribute_value('accel', GLib.Variant(cmd.shortcut, '<Primary>%s' % cmd.shortcut.upper()))\n\n section.append_item(item)\n\n if section:\n submenu.append_section(None, section)\n\n if submenu:\n if label == '*':\n self.native.set_app_menu(submenu)\n else:\n menubar.append_submenu(label, submenu)\n\n # Set the menu for the app.\n self.native.set_menubar(menubar)\n\n def main_loop(self):\n # Modify signal handlers to make sure Ctrl-C is caught and handled.\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n self.loop.run_forever(application=self.native)\n\n def exit(self):\n self.native.quit()\n", "path": "src/gtk/toga_gtk/app.py"}], "after_files": [{"content": "import asyncio\nimport os\nimport signal\nimport sys\n\ntry:\n import gi\nexcept ImportError:\n # app.py is the first module that will be imported when you import toga_gtk.\n #\n # If Gtk can't be imported, it may be because we're in a virtualenv,\n # and the system python libraries aren't visible. This can be fixed by\n # creating a symlink into the site-packages\n # Try creating a symlink to the system library location.\n # base_packages_dir is where the packages installed by the package manager\n # can be found.\n # gi_system_install_path is where gi can be found in the packages dir.\n # installer_command is the command the user can run to install gi.\n py_version = \"%d.%d\" % (sys.version_info.major, sys.version_info.minor)\n\n if sys.version_info.major == 3:\n if os.path.isdir('/usr/lib64/python%s/site-packages/' % (py_version,)):\n # Fedora\n base_packages_dir = '/usr/lib64/python%s/site-packages/' % (py_version,)\n gi_system_install_path = '/usr/lib64/python%s/site-packages/gi' % (py_version,)\n installer_command = 'dnf install pygobject3 python3-gobject'\n elif os.path.isdir('/usr/lib/python3/dist-packages/'):\n # Ubuntu, Debian\n base_packages_dir = '/usr/lib/python3/dist-packages/'\n gi_system_install_path = '/usr/local/lib/python3/dist-packages/gi'\n installer_command = 'apt-get install python3-gi'\n elif os.path.isdir('/usr/lib/python%s/site-packages/' % (py_version,)):\n # Arch\n base_packages_dir = '/usr/lib/python%s/site-packages/' % (py_version,)\n gi_system_install_path = '/usr/lib/python%s/site-packages/gi' % (py_version,)\n installer_command = 'pacman -S python-gobject'\n else:\n raise RuntimeError(\"Unable to locate your Python packages dir.\")\n else:\n raise RuntimeError(\"Toga requires Python 3.\")\n\n # Use the location of this package to guide us to\n # the location of the virtualenv.\n gi_symlink_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gi')\n pygtkcompat_symlink_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'pygtkcompat')\n\n if gi_symlink_path == gi_system_install_path:\n # If we're not in a virtualenv, just raise the original import error.\n raise\n else:\n gi_path = os.path.join(base_packages_dir, 'gi')\n pygtkcompat_path = os.path.join(base_packages_dir, 'pygtkcompat')\n if os.path.exists(gi_path) and os.path.isdir(gi_path):\n # If we can identify the gi library, create a symlink to it.\n try:\n print(\"Creating symlink (%s & %s) to system GTK+ libraries...\" % (gi_symlink_path, pygtkcompat_symlink_path))\n os.symlink(gi_path, gi_symlink_path)\n os.symlink(pygtkcompat_path, pygtkcompat_symlink_path)\n\n # The 
call to os.symlink will return almost immediately,\n # but for some reason, it may not be fully flushed to\n # the file system. One way to fix this is to start\n # the process again. This call to os.execl restarts the\n # program with the same arguments, replacing the original\n # operating system process.\n os.execl(sys.executable, sys.executable, *sys.argv)\n except OSError:\n raise RuntimeError(\"Unable to automatically create symlink to system Python GTK+ bindings.\")\n else:\n raise RuntimeError(\"Unable to locate the Python GTK+ bindings. Have you run '%s'?\" % installer_command)\n\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gio, GLib\n\n\nfrom toga.command import GROUP_BREAK, SECTION_BREAK, Command, Group\n# from .command import Command, Group\nimport toga\nfrom .window import Window\nfrom toga import Icon\nfrom toga.utils import wrapped_handler\n\nimport gbulb\n\n\nclass MainWindow(Window):\n _IMPL_CLASS = Gtk.ApplicationWindow\n\n def on_close(self, widget, data):\n pass\n\n\nclass App:\n \"\"\"\n Todo:\n * Creation of Menus is not working.\n * Disabling of menu items is not working.\n * App Icon is not showing up\n \"\"\"\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n\n gbulb.install(gtk=True)\n self.loop = asyncio.get_event_loop()\n\n self.create()\n\n def create(self):\n Icon.app_icon = Icon.load(self.interface.icon, default=Icon.TIBERIUS_ICON)\n # Stimulate the build of the app\n self.native = Gtk.Application(application_id=self.interface.app_id, flags=Gio.ApplicationFlags.FLAGS_NONE)\n\n # Connect the GTK signal that will cause app startup to occur\n self.native.connect('startup', self.startup)\n self.native.connect('activate', self.activate)\n # self.native.connect('shutdown', self.shutdown)\n\n self.actions = None\n\n def startup(self, data=None):\n self.interface.commands.add(\n Command(None, 'About ' + self.interface.name, group=toga.Group.APP),\n Command(None, 'Preferences', group=toga.Group.APP),\n # Quit should always be the last item, in a section on it's own\n Command(lambda s: self.exit(), 'Quit ' + self.interface.name, shortcut='q', group=toga.Group.APP, section=sys.maxsize),\n Command(None, 'Visit homepage', group=toga.Group.HELP)\n )\n\n self.interface.startup()\n\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self._actions = {}\n self.create_menus()\n # self.interface.main_window._impl.create_toolbar()\n\n def activate(self, data=None):\n pass\n\n def open_document(self, fileURL):\n '''Add a new document to this app.'''\n print(\"STUB: If you want to handle opening documents, implement App.open_document(fileURL)\")\n\n def create_menus(self):\n # Only create the menu if the menu item index has been created.\n if hasattr(self, '_actions'):\n self._actions = {}\n menubar = Gio.Menu()\n label = None\n submenu = None\n section = None\n for cmd in self.interface.commands:\n if cmd == GROUP_BREAK:\n if section:\n submenu.append_section(None, section)\n\n if label == '*':\n self.native.set_app_menu(submenu)\n else:\n menubar.append_submenu(label, submenu)\n\n label = None\n submenu = None\n section = None\n elif cmd == SECTION_BREAK:\n submenu.append_section(None, section)\n section = None\n\n else:\n if submenu is None:\n label = cmd.group.label\n submenu = Gio.Menu()\n\n if section is None:\n section = Gio.Menu()\n\n try:\n action = self._actions[cmd]\n except KeyError:\n cmd_id = \"command-%s\" % id(cmd)\n action = Gio.SimpleAction.new(cmd_id, None)\n if 
cmd.action:\n action.connect(\"activate\", wrapped_handler(cmd, cmd.action))\n cmd._widgets.append(action)\n self._actions[cmd] = action\n self.native.add_action(action)\n\n cmd._impl._set_enabled(cmd.enabled)\n\n item = Gio.MenuItem.new(cmd.label, 'app.' + cmd_id)\n if cmd.shortcut:\n item.set_attribute_value('accel', GLib.Variant('s', '<Primary>%s' % cmd.shortcut.upper()))\n\n # item.set_attribute_value('accel', GLib.Variant(cmd.shortcut, '<Primary>%s' % cmd.shortcut.upper()))\n\n section.append_item(item)\n\n if section:\n submenu.append_section(None, section)\n\n if submenu:\n if label == '*':\n self.native.set_app_menu(submenu)\n else:\n menubar.append_submenu(label, submenu)\n\n # Set the menu for the app.\n self.native.set_menubar(menubar)\n\n def main_loop(self):\n # Modify signal handlers to make sure Ctrl-C is caught and handled.\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n self.loop.run_forever(application=self.native)\n\n def exit(self):\n self.native.quit()\n", "path": "src/gtk/toga_gtk/app.py"}]} | 2,734 | 97 |
gh_patches_debug_32151 | rasdani/github-patches | git_diff | apache__airflow-31140 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Restrict allowed characters in connection ids
### Description
I bumped into a bug where a connection id was suffixed with a whitespace, e.g. "myconn ". When referencing the connection id "myconn" (without the whitespace), you get a "connection not found" error.
To avoid such human errors, I suggest restricting the characters allowed for connection ids.
Some suggestions:
- There's an `airflow.utils.helpers.validate_key` function for validating the DAG id. Probably a good idea to reuse this (a sketch of how it could plug into a form validator follows this issue block).
- I believe variable ids are also not validated; it would be good to check those too.
### Use case/motivation
_No response_
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
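One possible shape for the suggestion above, sketched with WTForms (it mirrors the existing validators in `airflow/www/validators.py` and is an assumption, not necessarily the final patch): a validator that delegates to `airflow.utils.helpers.validate_key`, which the `conn_id` field in `forms.py` could then list alongside `InputRequired()`.
```
# Sketch: a WTForms validator that reuses airflow.utils.helpers.validate_key so
# connection ids containing whitespace or other disallowed characters are
# rejected at form-validation time. The max_length default here is an assumption.
from wtforms.validators import ValidationError

from airflow.utils import helpers


class ValidKey:
    """Validates values used as keys, e.g. connection ids."""

    def __init__(self, max_length=200):
        self.max_length = max_length

    def __call__(self, form, field):
        if field.data:
            try:
                helpers.validate_key(field.data, self.max_length)
            except Exception as e:
                raise ValidationError(str(e))
```
In `create_connection_form_class`, the `conn_id` field's validators would then become `[InputRequired(), ValidKey()]`.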
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/www/validators.py`
Content:
```
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 from __future__ import annotations
19
20 import json
21 from json import JSONDecodeError
22
23 from wtforms.validators import EqualTo, ValidationError
24
25
26 class GreaterEqualThan(EqualTo):
27 """Compares the values of two fields.
28
29 :param fieldname:
30 The name of the other field to compare to.
31 :param message:
32 Error message to raise in case of a validation error. Can be
33 interpolated with `%(other_label)s` and `%(other_name)s` to provide a
34 more helpful error.
35 """
36
37 def __call__(self, form, field):
38 try:
39 other = form[self.fieldname]
40 except KeyError:
41 raise ValidationError(field.gettext(f"Invalid field name '{self.fieldname}'."))
42
43 if field.data is None or other.data is None:
44 return
45
46 if field.data < other.data:
47 message_args = {
48 "other_label": hasattr(other, "label") and other.label.text or self.fieldname,
49 "other_name": self.fieldname,
50 }
51 message = self.message
52 if message is None:
53 message = field.gettext(
54 f"Field must be greater than or equal to {message_args['other_label']}."
55 )
56 else:
57 message = message % message_args
58
59 raise ValidationError(message)
60
61
62 class ValidJson:
63 """Validates data is valid JSON.
64
65 :param message:
66 Error message to raise in case of a validation error.
67 """
68
69 def __init__(self, message=None):
70 self.message = message
71
72 def __call__(self, form, field):
73 if field.data:
74 try:
75 json.loads(field.data)
76 except JSONDecodeError as ex:
77 message = self.message or f"JSON Validation Error: {ex}"
78 raise ValidationError(message=field.gettext(message.format(field.data)))
79
```
Path: `airflow/www/forms.py`
Content:
```
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 from __future__ import annotations
19
20 import json
21 import operator
22 from datetime import datetime as dt
23 from typing import Iterator
24
25 import pendulum
26 from flask_appbuilder.fieldwidgets import (
27 BS3PasswordFieldWidget,
28 BS3TextAreaFieldWidget,
29 BS3TextFieldWidget,
30 Select2Widget,
31 )
32 from flask_appbuilder.forms import DynamicForm
33 from flask_babel import lazy_gettext
34 from flask_wtf import FlaskForm
35 from wtforms import widgets
36 from wtforms.fields import Field, IntegerField, PasswordField, SelectField, StringField, TextAreaField
37 from wtforms.validators import InputRequired, Optional
38
39 from airflow.compat.functools import cache
40 from airflow.configuration import conf
41 from airflow.providers_manager import ProvidersManager
42 from airflow.utils import timezone
43 from airflow.utils.types import DagRunType
44 from airflow.www.widgets import (
45 AirflowDateTimePickerROWidget,
46 AirflowDateTimePickerWidget,
47 BS3TextAreaROWidget,
48 BS3TextFieldROWidget,
49 )
50
51
52 class DateTimeWithTimezoneField(Field):
53 """A text field which stores a `datetime.datetime` matching a format."""
54
55 widget = widgets.TextInput()
56
57 def __init__(self, label=None, validators=None, datetime_format="%Y-%m-%d %H:%M:%S%Z", **kwargs):
58 super().__init__(label, validators, **kwargs)
59 self.format = datetime_format
60 self.data = None
61
62 def _value(self):
63 if self.raw_data:
64 return " ".join(self.raw_data)
65 if self.data:
66 return self.data.strftime(self.format)
67 return ""
68
69 def process_formdata(self, valuelist):
70 if not valuelist:
71 return
72 date_str = " ".join(valuelist)
73 try:
74 # Check if the datetime string is in the format without timezone, if so convert it to the
75 # default timezone
76 if len(date_str) == 19:
77 parsed_datetime = dt.strptime(date_str, "%Y-%m-%d %H:%M:%S")
78 default_timezone = self._get_default_timezone()
79 self.data = default_timezone.convert(parsed_datetime)
80 else:
81 self.data = pendulum.parse(date_str)
82 except ValueError:
83 self.data = None
84 raise ValueError(self.gettext("Not a valid datetime value"))
85
86 def _get_default_timezone(self):
87 current_timezone = conf.get("core", "default_timezone")
88 if current_timezone == "system":
89 default_timezone = pendulum.local_timezone()
90 else:
91 default_timezone = pendulum.timezone(current_timezone)
92 return default_timezone
93
94
95 class DateTimeForm(FlaskForm):
96 """Date filter form needed for task views"""
97
98 execution_date = DateTimeWithTimezoneField("Logical date", widget=AirflowDateTimePickerWidget())
99
100
101 class DateTimeWithNumRunsForm(FlaskForm):
102 """
103 Date time and number of runs form for tree view, task duration
104 and landing times
105 """
106
107 base_date = DateTimeWithTimezoneField(
108 "Anchor date", widget=AirflowDateTimePickerWidget(), default=timezone.utcnow()
109 )
110 num_runs = SelectField(
111 "Number of runs",
112 default=25,
113 choices=(
114 (5, "5"),
115 (25, "25"),
116 (50, "50"),
117 (100, "100"),
118 (365, "365"),
119 ),
120 )
121
122
123 class DateTimeWithNumRunsWithDagRunsForm(DateTimeWithNumRunsForm):
124 """Date time and number of runs and dag runs form for graph and gantt view"""
125
126 execution_date = SelectField("DAG run")
127
128
129 class DagRunEditForm(DynamicForm):
130 """Form for editing DAG Run.
131
132 We don't actually want to allow editing, so everything is read-only here.
133 """
134
135 dag_id = StringField(lazy_gettext("Dag Id"), widget=BS3TextFieldROWidget())
136 start_date = DateTimeWithTimezoneField(lazy_gettext("Start Date"), widget=AirflowDateTimePickerROWidget())
137 end_date = DateTimeWithTimezoneField(lazy_gettext("End Date"), widget=AirflowDateTimePickerROWidget())
138 run_id = StringField(lazy_gettext("Run Id"), widget=BS3TextFieldROWidget())
139 state = StringField(lazy_gettext("State"), widget=BS3TextFieldROWidget())
140 execution_date = DateTimeWithTimezoneField(
141 lazy_gettext("Logical Date"),
142 widget=AirflowDateTimePickerROWidget(),
143 )
144 conf = TextAreaField(lazy_gettext("Conf"), widget=BS3TextAreaROWidget())
145 note = TextAreaField(lazy_gettext("User Note"), widget=BS3TextAreaFieldWidget())
146
147 def populate_obj(self, item):
148 """Populates the attributes of the passed obj with data from the form's fields."""
149 super().populate_obj(item)
150 item.run_type = DagRunType.from_run_id(item.run_id)
151 if item.conf:
152 item.conf = json.loads(item.conf)
153
154
155 class TaskInstanceEditForm(DynamicForm):
156 """Form for editing TaskInstance"""
157
158 dag_id = StringField(lazy_gettext("Dag Id"), validators=[InputRequired()], widget=BS3TextFieldROWidget())
159 task_id = StringField(
160 lazy_gettext("Task Id"), validators=[InputRequired()], widget=BS3TextFieldROWidget()
161 )
162 start_date = DateTimeWithTimezoneField(lazy_gettext("Start Date"), widget=AirflowDateTimePickerROWidget())
163 end_date = DateTimeWithTimezoneField(lazy_gettext("End Date"), widget=AirflowDateTimePickerROWidget())
164 state = SelectField(
165 lazy_gettext("State"),
166 choices=(
167 ("success", "success"),
168 ("running", "running"),
169 ("failed", "failed"),
170 ("up_for_retry", "up_for_retry"),
171 ),
172 widget=Select2Widget(),
173 validators=[InputRequired()],
174 )
175 execution_date = DateTimeWithTimezoneField(
176 lazy_gettext("Logical Date"),
177 widget=AirflowDateTimePickerROWidget(),
178 validators=[InputRequired()],
179 )
180 note = TextAreaField(lazy_gettext("User Note"), widget=BS3TextAreaFieldWidget())
181
182
183 @cache
184 def create_connection_form_class() -> type[DynamicForm]:
185 """Create a form class for editing and adding Connection.
186
187 This class is created dynamically because it relies heavily on run-time
188 provider discovery, which slows down webserver startup a lot.
189 By creating the class at runtime, we can delay loading the providers until
190 when the connection form is first used, which may as well be never for a
191 short-lived server.
192 """
193 providers_manager = ProvidersManager()
194
195 def _iter_connection_types() -> Iterator[tuple[str, str]]:
196 """List available connection types."""
197 yield ("email", "Email")
198 yield ("fs", "File (path)")
199 yield ("generic", "Generic")
200 yield ("mesos_framework-id", "Mesos Framework ID")
201 for connection_type, provider_info in providers_manager.hooks.items():
202 if provider_info:
203 yield (connection_type, provider_info.hook_name)
204
205 class ConnectionForm(DynamicForm):
206 conn_id = StringField(
207 lazy_gettext("Connection Id"),
208 validators=[InputRequired()],
209 widget=BS3TextFieldWidget(),
210 )
211 conn_type = SelectField(
212 lazy_gettext("Connection Type"),
213 choices=sorted(_iter_connection_types(), key=operator.itemgetter(1)),
214 widget=Select2Widget(),
215 validators=[InputRequired()],
216 description=(
217 "Connection Type missing? Make sure you've installed the "
218 "corresponding Airflow Provider Package."
219 ),
220 )
221 description = StringField(lazy_gettext("Description"), widget=BS3TextAreaFieldWidget())
222 host = StringField(lazy_gettext("Host"), widget=BS3TextFieldWidget())
223 schema = StringField(lazy_gettext("Schema"), widget=BS3TextFieldWidget())
224 login = StringField(lazy_gettext("Login"), widget=BS3TextFieldWidget())
225 password = PasswordField(lazy_gettext("Password"), widget=BS3PasswordFieldWidget())
226 port = IntegerField(lazy_gettext("Port"), validators=[Optional()], widget=BS3TextFieldWidget())
227 extra = TextAreaField(lazy_gettext("Extra"), widget=BS3TextAreaFieldWidget())
228
229 for key, value in providers_manager.connection_form_widgets.items():
230 setattr(ConnectionForm, key, value.field)
231
232 return ConnectionForm
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/airflow/www/forms.py b/airflow/www/forms.py
--- a/airflow/www/forms.py
+++ b/airflow/www/forms.py
@@ -41,6 +41,7 @@
from airflow.providers_manager import ProvidersManager
from airflow.utils import timezone
from airflow.utils.types import DagRunType
+from airflow.www.validators import ValidKey
from airflow.www.widgets import (
AirflowDateTimePickerROWidget,
AirflowDateTimePickerWidget,
@@ -205,7 +206,7 @@
class ConnectionForm(DynamicForm):
conn_id = StringField(
lazy_gettext("Connection Id"),
- validators=[InputRequired()],
+ validators=[InputRequired(), ValidKey()],
widget=BS3TextFieldWidget(),
)
conn_type = SelectField(
diff --git a/airflow/www/validators.py b/airflow/www/validators.py
--- a/airflow/www/validators.py
+++ b/airflow/www/validators.py
@@ -22,6 +22,8 @@
from wtforms.validators import EqualTo, ValidationError
+from airflow.utils import helpers
+
class GreaterEqualThan(EqualTo):
"""Compares the values of two fields.
@@ -76,3 +78,22 @@
except JSONDecodeError as ex:
message = self.message or f"JSON Validation Error: {ex}"
raise ValidationError(message=field.gettext(message.format(field.data)))
+
+
+class ValidKey:
+ """
+ Validates values that will be used as keys
+
+ :param max_length:
+ The maximum length of the given key
+ """
+
+ def __init__(self, max_length=200):
+ self.max_length = max_length
+
+ def __call__(self, form, field):
+ if field.data:
+ try:
+ helpers.validate_key(field.data, self.max_length)
+ except Exception as e:
+ raise ValidationError(str(e))
| {"golden_diff": "diff --git a/airflow/www/forms.py b/airflow/www/forms.py\n--- a/airflow/www/forms.py\n+++ b/airflow/www/forms.py\n@@ -41,6 +41,7 @@\n from airflow.providers_manager import ProvidersManager\n from airflow.utils import timezone\n from airflow.utils.types import DagRunType\n+from airflow.www.validators import ValidKey\n from airflow.www.widgets import (\n AirflowDateTimePickerROWidget,\n AirflowDateTimePickerWidget,\n@@ -205,7 +206,7 @@\n class ConnectionForm(DynamicForm):\n conn_id = StringField(\n lazy_gettext(\"Connection Id\"),\n- validators=[InputRequired()],\n+ validators=[InputRequired(), ValidKey()],\n widget=BS3TextFieldWidget(),\n )\n conn_type = SelectField(\ndiff --git a/airflow/www/validators.py b/airflow/www/validators.py\n--- a/airflow/www/validators.py\n+++ b/airflow/www/validators.py\n@@ -22,6 +22,8 @@\n \n from wtforms.validators import EqualTo, ValidationError\n \n+from airflow.utils import helpers\n+\n \n class GreaterEqualThan(EqualTo):\n \"\"\"Compares the values of two fields.\n@@ -76,3 +78,22 @@\n except JSONDecodeError as ex:\n message = self.message or f\"JSON Validation Error: {ex}\"\n raise ValidationError(message=field.gettext(message.format(field.data)))\n+\n+\n+class ValidKey:\n+ \"\"\"\n+ Validates values that will be used as keys\n+\n+ :param max_length:\n+ The maximum length of the given key\n+ \"\"\"\n+\n+ def __init__(self, max_length=200):\n+ self.max_length = max_length\n+\n+ def __call__(self, form, field):\n+ if field.data:\n+ try:\n+ helpers.validate_key(field.data, self.max_length)\n+ except Exception as e:\n+ raise ValidationError(str(e))\n", "issue": "Restrict allowed characters in connection ids\n### Description\r\n\r\nI bumped into a bug where a connection id was suffixed with a whitespace e.g. \"myconn \". When referencing the connection id \"myconn\" (without whitespace), you get a connection not found error.\r\n\r\nTo avoid such human errors, I suggest restricting the characters allowed for connection ids.\r\n\r\nSome suggestions:\r\n- There's an `airflow.utils.helpers.validate_key` function for validating the DAG id. Probably a good idea to reuse this.\r\n- I believe variable ids are also not validated, would be good to check those too.\r\n\r\n### Use case/motivation\r\n\r\n_No response_\r\n\r\n### Related issues\r\n\r\n_No response_\r\n\r\n### Are you willing to submit a PR?\r\n\r\n- [X] Yes I am willing to submit a PR!\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\r\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nimport json\nfrom json import JSONDecodeError\n\nfrom wtforms.validators import EqualTo, ValidationError\n\n\nclass GreaterEqualThan(EqualTo):\n \"\"\"Compares the values of two fields.\n\n :param fieldname:\n The name of the other field to compare to.\n :param message:\n Error message to raise in case of a validation error. Can be\n interpolated with `%(other_label)s` and `%(other_name)s` to provide a\n more helpful error.\n \"\"\"\n\n def __call__(self, form, field):\n try:\n other = form[self.fieldname]\n except KeyError:\n raise ValidationError(field.gettext(f\"Invalid field name '{self.fieldname}'.\"))\n\n if field.data is None or other.data is None:\n return\n\n if field.data < other.data:\n message_args = {\n \"other_label\": hasattr(other, \"label\") and other.label.text or self.fieldname,\n \"other_name\": self.fieldname,\n }\n message = self.message\n if message is None:\n message = field.gettext(\n f\"Field must be greater than or equal to {message_args['other_label']}.\"\n )\n else:\n message = message % message_args\n\n raise ValidationError(message)\n\n\nclass ValidJson:\n \"\"\"Validates data is valid JSON.\n\n :param message:\n Error message to raise in case of a validation error.\n \"\"\"\n\n def __init__(self, message=None):\n self.message = message\n\n def __call__(self, form, field):\n if field.data:\n try:\n json.loads(field.data)\n except JSONDecodeError as ex:\n message = self.message or f\"JSON Validation Error: {ex}\"\n raise ValidationError(message=field.gettext(message.format(field.data)))\n", "path": "airflow/www/validators.py"}, {"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nimport json\nimport operator\nfrom datetime import datetime as dt\nfrom typing import Iterator\n\nimport pendulum\nfrom flask_appbuilder.fieldwidgets import (\n BS3PasswordFieldWidget,\n BS3TextAreaFieldWidget,\n BS3TextFieldWidget,\n Select2Widget,\n)\nfrom flask_appbuilder.forms import DynamicForm\nfrom flask_babel import lazy_gettext\nfrom flask_wtf import FlaskForm\nfrom wtforms import widgets\nfrom wtforms.fields import Field, IntegerField, PasswordField, SelectField, StringField, TextAreaField\nfrom wtforms.validators import InputRequired, Optional\n\nfrom airflow.compat.functools import cache\nfrom airflow.configuration import conf\nfrom airflow.providers_manager import ProvidersManager\nfrom airflow.utils import timezone\nfrom airflow.utils.types import DagRunType\nfrom airflow.www.widgets import (\n AirflowDateTimePickerROWidget,\n AirflowDateTimePickerWidget,\n BS3TextAreaROWidget,\n BS3TextFieldROWidget,\n)\n\n\nclass DateTimeWithTimezoneField(Field):\n \"\"\"A text field which stores a `datetime.datetime` matching a format.\"\"\"\n\n widget = widgets.TextInput()\n\n def __init__(self, label=None, validators=None, datetime_format=\"%Y-%m-%d %H:%M:%S%Z\", **kwargs):\n super().__init__(label, validators, **kwargs)\n self.format = datetime_format\n self.data = None\n\n def _value(self):\n if self.raw_data:\n return \" \".join(self.raw_data)\n if self.data:\n return self.data.strftime(self.format)\n return \"\"\n\n def process_formdata(self, valuelist):\n if not valuelist:\n return\n date_str = \" \".join(valuelist)\n try:\n # Check if the datetime string is in the format without timezone, if so convert it to the\n # default timezone\n if len(date_str) == 19:\n parsed_datetime = dt.strptime(date_str, \"%Y-%m-%d %H:%M:%S\")\n default_timezone = self._get_default_timezone()\n self.data = default_timezone.convert(parsed_datetime)\n else:\n self.data = pendulum.parse(date_str)\n except ValueError:\n self.data = None\n raise ValueError(self.gettext(\"Not a valid datetime value\"))\n\n def _get_default_timezone(self):\n current_timezone = conf.get(\"core\", \"default_timezone\")\n if current_timezone == \"system\":\n default_timezone = pendulum.local_timezone()\n else:\n default_timezone = pendulum.timezone(current_timezone)\n return default_timezone\n\n\nclass DateTimeForm(FlaskForm):\n \"\"\"Date filter form needed for task views\"\"\"\n\n execution_date = DateTimeWithTimezoneField(\"Logical date\", widget=AirflowDateTimePickerWidget())\n\n\nclass DateTimeWithNumRunsForm(FlaskForm):\n \"\"\"\n Date time and number of runs form for tree view, task duration\n and landing times\n \"\"\"\n\n base_date = DateTimeWithTimezoneField(\n \"Anchor date\", widget=AirflowDateTimePickerWidget(), default=timezone.utcnow()\n )\n num_runs = SelectField(\n \"Number of runs\",\n default=25,\n choices=(\n (5, \"5\"),\n (25, \"25\"),\n (50, \"50\"),\n (100, \"100\"),\n (365, \"365\"),\n ),\n )\n\n\nclass DateTimeWithNumRunsWithDagRunsForm(DateTimeWithNumRunsForm):\n \"\"\"Date time and number of runs and dag runs form for graph and gantt view\"\"\"\n\n execution_date = SelectField(\"DAG run\")\n\n\nclass DagRunEditForm(DynamicForm):\n \"\"\"Form for editing DAG Run.\n\n We don't actually want to allow editing, so everything is read-only here.\n \"\"\"\n\n dag_id = StringField(lazy_gettext(\"Dag Id\"), widget=BS3TextFieldROWidget())\n start_date = 
DateTimeWithTimezoneField(lazy_gettext(\"Start Date\"), widget=AirflowDateTimePickerROWidget())\n end_date = DateTimeWithTimezoneField(lazy_gettext(\"End Date\"), widget=AirflowDateTimePickerROWidget())\n run_id = StringField(lazy_gettext(\"Run Id\"), widget=BS3TextFieldROWidget())\n state = StringField(lazy_gettext(\"State\"), widget=BS3TextFieldROWidget())\n execution_date = DateTimeWithTimezoneField(\n lazy_gettext(\"Logical Date\"),\n widget=AirflowDateTimePickerROWidget(),\n )\n conf = TextAreaField(lazy_gettext(\"Conf\"), widget=BS3TextAreaROWidget())\n note = TextAreaField(lazy_gettext(\"User Note\"), widget=BS3TextAreaFieldWidget())\n\n def populate_obj(self, item):\n \"\"\"Populates the attributes of the passed obj with data from the form's fields.\"\"\"\n super().populate_obj(item)\n item.run_type = DagRunType.from_run_id(item.run_id)\n if item.conf:\n item.conf = json.loads(item.conf)\n\n\nclass TaskInstanceEditForm(DynamicForm):\n \"\"\"Form for editing TaskInstance\"\"\"\n\n dag_id = StringField(lazy_gettext(\"Dag Id\"), validators=[InputRequired()], widget=BS3TextFieldROWidget())\n task_id = StringField(\n lazy_gettext(\"Task Id\"), validators=[InputRequired()], widget=BS3TextFieldROWidget()\n )\n start_date = DateTimeWithTimezoneField(lazy_gettext(\"Start Date\"), widget=AirflowDateTimePickerROWidget())\n end_date = DateTimeWithTimezoneField(lazy_gettext(\"End Date\"), widget=AirflowDateTimePickerROWidget())\n state = SelectField(\n lazy_gettext(\"State\"),\n choices=(\n (\"success\", \"success\"),\n (\"running\", \"running\"),\n (\"failed\", \"failed\"),\n (\"up_for_retry\", \"up_for_retry\"),\n ),\n widget=Select2Widget(),\n validators=[InputRequired()],\n )\n execution_date = DateTimeWithTimezoneField(\n lazy_gettext(\"Logical Date\"),\n widget=AirflowDateTimePickerROWidget(),\n validators=[InputRequired()],\n )\n note = TextAreaField(lazy_gettext(\"User Note\"), widget=BS3TextAreaFieldWidget())\n\n\n@cache\ndef create_connection_form_class() -> type[DynamicForm]:\n \"\"\"Create a form class for editing and adding Connection.\n\n This class is created dynamically because it relies heavily on run-time\n provider discovery, which slows down webserver startup a lot.\n By creating the class at runtime, we can delay loading the providers until\n when the connection form is first used, which may as well be never for a\n short-lived server.\n \"\"\"\n providers_manager = ProvidersManager()\n\n def _iter_connection_types() -> Iterator[tuple[str, str]]:\n \"\"\"List available connection types.\"\"\"\n yield (\"email\", \"Email\")\n yield (\"fs\", \"File (path)\")\n yield (\"generic\", \"Generic\")\n yield (\"mesos_framework-id\", \"Mesos Framework ID\")\n for connection_type, provider_info in providers_manager.hooks.items():\n if provider_info:\n yield (connection_type, provider_info.hook_name)\n\n class ConnectionForm(DynamicForm):\n conn_id = StringField(\n lazy_gettext(\"Connection Id\"),\n validators=[InputRequired()],\n widget=BS3TextFieldWidget(),\n )\n conn_type = SelectField(\n lazy_gettext(\"Connection Type\"),\n choices=sorted(_iter_connection_types(), key=operator.itemgetter(1)),\n widget=Select2Widget(),\n validators=[InputRequired()],\n description=(\n \"Connection Type missing? 
Make sure you've installed the \"\n \"corresponding Airflow Provider Package.\"\n ),\n )\n description = StringField(lazy_gettext(\"Description\"), widget=BS3TextAreaFieldWidget())\n host = StringField(lazy_gettext(\"Host\"), widget=BS3TextFieldWidget())\n schema = StringField(lazy_gettext(\"Schema\"), widget=BS3TextFieldWidget())\n login = StringField(lazy_gettext(\"Login\"), widget=BS3TextFieldWidget())\n password = PasswordField(lazy_gettext(\"Password\"), widget=BS3PasswordFieldWidget())\n port = IntegerField(lazy_gettext(\"Port\"), validators=[Optional()], widget=BS3TextFieldWidget())\n extra = TextAreaField(lazy_gettext(\"Extra\"), widget=BS3TextAreaFieldWidget())\n\n for key, value in providers_manager.connection_form_widgets.items():\n setattr(ConnectionForm, key, value.field)\n\n return ConnectionForm\n", "path": "airflow/www/forms.py"}], "after_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nimport json\nfrom json import JSONDecodeError\n\nfrom wtforms.validators import EqualTo, ValidationError\n\nfrom airflow.utils import helpers\n\n\nclass GreaterEqualThan(EqualTo):\n \"\"\"Compares the values of two fields.\n\n :param fieldname:\n The name of the other field to compare to.\n :param message:\n Error message to raise in case of a validation error. 
Can be\n interpolated with `%(other_label)s` and `%(other_name)s` to provide a\n more helpful error.\n \"\"\"\n\n def __call__(self, form, field):\n try:\n other = form[self.fieldname]\n except KeyError:\n raise ValidationError(field.gettext(f\"Invalid field name '{self.fieldname}'.\"))\n\n if field.data is None or other.data is None:\n return\n\n if field.data < other.data:\n message_args = {\n \"other_label\": hasattr(other, \"label\") and other.label.text or self.fieldname,\n \"other_name\": self.fieldname,\n }\n message = self.message\n if message is None:\n message = field.gettext(\n f\"Field must be greater than or equal to {message_args['other_label']}.\"\n )\n else:\n message = message % message_args\n\n raise ValidationError(message)\n\n\nclass ValidJson:\n \"\"\"Validates data is valid JSON.\n\n :param message:\n Error message to raise in case of a validation error.\n \"\"\"\n\n def __init__(self, message=None):\n self.message = message\n\n def __call__(self, form, field):\n if field.data:\n try:\n json.loads(field.data)\n except JSONDecodeError as ex:\n message = self.message or f\"JSON Validation Error: {ex}\"\n raise ValidationError(message=field.gettext(message.format(field.data)))\n\n\nclass ValidKey:\n \"\"\"\n Validates values that will be used as keys\n\n :param max_length:\n The maximum length of the given key\n \"\"\"\n\n def __init__(self, max_length=200):\n self.max_length = max_length\n\n def __call__(self, form, field):\n if field.data:\n try:\n helpers.validate_key(field.data, self.max_length)\n except Exception as e:\n raise ValidationError(str(e))\n", "path": "airflow/www/validators.py"}, {"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nimport json\nimport operator\nfrom datetime import datetime as dt\nfrom typing import Iterator\n\nimport pendulum\nfrom flask_appbuilder.fieldwidgets import (\n BS3PasswordFieldWidget,\n BS3TextAreaFieldWidget,\n BS3TextFieldWidget,\n Select2Widget,\n)\nfrom flask_appbuilder.forms import DynamicForm\nfrom flask_babel import lazy_gettext\nfrom flask_wtf import FlaskForm\nfrom wtforms import widgets\nfrom wtforms.fields import Field, IntegerField, PasswordField, SelectField, StringField, TextAreaField\nfrom wtforms.validators import InputRequired, Optional\n\nfrom airflow.compat.functools import cache\nfrom airflow.configuration import conf\nfrom airflow.providers_manager import ProvidersManager\nfrom airflow.utils import timezone\nfrom airflow.utils.types import DagRunType\nfrom airflow.www.validators import ValidKey\nfrom airflow.www.widgets import (\n AirflowDateTimePickerROWidget,\n AirflowDateTimePickerWidget,\n BS3TextAreaROWidget,\n BS3TextFieldROWidget,\n)\n\n\nclass DateTimeWithTimezoneField(Field):\n \"\"\"A text field which stores a `datetime.datetime` matching a format.\"\"\"\n\n widget = widgets.TextInput()\n\n def __init__(self, label=None, validators=None, datetime_format=\"%Y-%m-%d %H:%M:%S%Z\", **kwargs):\n super().__init__(label, validators, **kwargs)\n self.format = datetime_format\n self.data = None\n\n def _value(self):\n if self.raw_data:\n return \" \".join(self.raw_data)\n if self.data:\n return self.data.strftime(self.format)\n return \"\"\n\n def process_formdata(self, valuelist):\n if not valuelist:\n return\n date_str = \" \".join(valuelist)\n try:\n # Check if the datetime string is in the format without timezone, if so convert it to the\n # default timezone\n if len(date_str) == 19:\n parsed_datetime = dt.strptime(date_str, \"%Y-%m-%d %H:%M:%S\")\n default_timezone = self._get_default_timezone()\n self.data = default_timezone.convert(parsed_datetime)\n else:\n self.data = pendulum.parse(date_str)\n except ValueError:\n self.data = None\n raise ValueError(self.gettext(\"Not a valid datetime value\"))\n\n def _get_default_timezone(self):\n current_timezone = conf.get(\"core\", \"default_timezone\")\n if current_timezone == \"system\":\n default_timezone = pendulum.local_timezone()\n else:\n default_timezone = pendulum.timezone(current_timezone)\n return default_timezone\n\n\nclass DateTimeForm(FlaskForm):\n \"\"\"Date filter form needed for task views\"\"\"\n\n execution_date = DateTimeWithTimezoneField(\"Logical date\", widget=AirflowDateTimePickerWidget())\n\n\nclass DateTimeWithNumRunsForm(FlaskForm):\n \"\"\"\n Date time and number of runs form for tree view, task duration\n and landing times\n \"\"\"\n\n base_date = DateTimeWithTimezoneField(\n \"Anchor date\", widget=AirflowDateTimePickerWidget(), default=timezone.utcnow()\n )\n num_runs = SelectField(\n \"Number of runs\",\n default=25,\n choices=(\n (5, \"5\"),\n (25, \"25\"),\n (50, \"50\"),\n (100, \"100\"),\n (365, \"365\"),\n ),\n )\n\n\nclass DateTimeWithNumRunsWithDagRunsForm(DateTimeWithNumRunsForm):\n \"\"\"Date time and number of runs and dag runs form for graph and gantt view\"\"\"\n\n execution_date = SelectField(\"DAG run\")\n\n\nclass DagRunEditForm(DynamicForm):\n \"\"\"Form for editing DAG Run.\n\n We don't actually want to allow editing, so everything is read-only here.\n \"\"\"\n\n dag_id = StringField(lazy_gettext(\"Dag Id\"), 
widget=BS3TextFieldROWidget())\n start_date = DateTimeWithTimezoneField(lazy_gettext(\"Start Date\"), widget=AirflowDateTimePickerROWidget())\n end_date = DateTimeWithTimezoneField(lazy_gettext(\"End Date\"), widget=AirflowDateTimePickerROWidget())\n run_id = StringField(lazy_gettext(\"Run Id\"), widget=BS3TextFieldROWidget())\n state = StringField(lazy_gettext(\"State\"), widget=BS3TextFieldROWidget())\n execution_date = DateTimeWithTimezoneField(\n lazy_gettext(\"Logical Date\"),\n widget=AirflowDateTimePickerROWidget(),\n )\n conf = TextAreaField(lazy_gettext(\"Conf\"), widget=BS3TextAreaROWidget())\n note = TextAreaField(lazy_gettext(\"User Note\"), widget=BS3TextAreaFieldWidget())\n\n def populate_obj(self, item):\n \"\"\"Populates the attributes of the passed obj with data from the form's fields.\"\"\"\n super().populate_obj(item)\n item.run_type = DagRunType.from_run_id(item.run_id)\n if item.conf:\n item.conf = json.loads(item.conf)\n\n\nclass TaskInstanceEditForm(DynamicForm):\n \"\"\"Form for editing TaskInstance\"\"\"\n\n dag_id = StringField(lazy_gettext(\"Dag Id\"), validators=[InputRequired()], widget=BS3TextFieldROWidget())\n task_id = StringField(\n lazy_gettext(\"Task Id\"), validators=[InputRequired()], widget=BS3TextFieldROWidget()\n )\n start_date = DateTimeWithTimezoneField(lazy_gettext(\"Start Date\"), widget=AirflowDateTimePickerROWidget())\n end_date = DateTimeWithTimezoneField(lazy_gettext(\"End Date\"), widget=AirflowDateTimePickerROWidget())\n state = SelectField(\n lazy_gettext(\"State\"),\n choices=(\n (\"success\", \"success\"),\n (\"running\", \"running\"),\n (\"failed\", \"failed\"),\n (\"up_for_retry\", \"up_for_retry\"),\n ),\n widget=Select2Widget(),\n validators=[InputRequired()],\n )\n execution_date = DateTimeWithTimezoneField(\n lazy_gettext(\"Logical Date\"),\n widget=AirflowDateTimePickerROWidget(),\n validators=[InputRequired()],\n )\n note = TextAreaField(lazy_gettext(\"User Note\"), widget=BS3TextAreaFieldWidget())\n\n\n@cache\ndef create_connection_form_class() -> type[DynamicForm]:\n \"\"\"Create a form class for editing and adding Connection.\n\n This class is created dynamically because it relies heavily on run-time\n provider discovery, which slows down webserver startup a lot.\n By creating the class at runtime, we can delay loading the providers until\n when the connection form is first used, which may as well be never for a\n short-lived server.\n \"\"\"\n providers_manager = ProvidersManager()\n\n def _iter_connection_types() -> Iterator[tuple[str, str]]:\n \"\"\"List available connection types.\"\"\"\n yield (\"email\", \"Email\")\n yield (\"fs\", \"File (path)\")\n yield (\"generic\", \"Generic\")\n yield (\"mesos_framework-id\", \"Mesos Framework ID\")\n for connection_type, provider_info in providers_manager.hooks.items():\n if provider_info:\n yield (connection_type, provider_info.hook_name)\n\n class ConnectionForm(DynamicForm):\n conn_id = StringField(\n lazy_gettext(\"Connection Id\"),\n validators=[InputRequired(), ValidKey()],\n widget=BS3TextFieldWidget(),\n )\n conn_type = SelectField(\n lazy_gettext(\"Connection Type\"),\n choices=sorted(_iter_connection_types(), key=operator.itemgetter(1)),\n widget=Select2Widget(),\n validators=[InputRequired()],\n description=(\n \"Connection Type missing? 
Make sure you've installed the \"\n \"corresponding Airflow Provider Package.\"\n ),\n )\n description = StringField(lazy_gettext(\"Description\"), widget=BS3TextAreaFieldWidget())\n host = StringField(lazy_gettext(\"Host\"), widget=BS3TextFieldWidget())\n schema = StringField(lazy_gettext(\"Schema\"), widget=BS3TextFieldWidget())\n login = StringField(lazy_gettext(\"Login\"), widget=BS3TextFieldWidget())\n password = PasswordField(lazy_gettext(\"Password\"), widget=BS3PasswordFieldWidget())\n port = IntegerField(lazy_gettext(\"Port\"), validators=[Optional()], widget=BS3TextFieldWidget())\n extra = TextAreaField(lazy_gettext(\"Extra\"), widget=BS3TextAreaFieldWidget())\n\n for key, value in providers_manager.connection_form_widgets.items():\n setattr(ConnectionForm, key, value.field)\n\n return ConnectionForm\n", "path": "airflow/www/forms.py"}]} | 3,741 | 425 |
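A rough, self-contained sketch of the kind of check the `ValidKey` patch above delegates to `airflow.utils.helpers.validate_key`. The allowed character set, messages, and function name below are illustrative assumptions, not Airflow's exact rules:

```python
import re

# Illustrative stand-in for airflow.utils.helpers.validate_key; the allowed
# characters and messages here are assumptions, not Airflow's actual rules.
KEY_PATTERN = re.compile(r"^[A-Za-z0-9_.-]+$")


def validate_key_sketch(value, max_length=200):
    """Raise ValueError if `value` does not look like a safe key."""
    if not isinstance(value, str):
        raise ValueError(f"Expected a string key, got {type(value).__name__}")
    if len(value) > max_length:
        raise ValueError(f"Key exceeds the maximum length of {max_length}")
    if not KEY_PATTERN.match(value):
        raise ValueError("Key may only contain letters, digits, '_', '.' and '-'")


if __name__ == "__main__":
    validate_key_sketch("my_conn")      # accepted
    try:
        validate_key_sketch("myconn ")  # trailing whitespace, as in the issue report
    except ValueError as exc:
        print(f"rejected: {exc}")
```

Wrapping such an exception in a WTForms `ValidationError`, as the patch does, is what surfaces the message next to the Connection Id field in the form.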
gh_patches_debug_20585 | rasdani/github-patches | git_diff | e-valuation__EvaP-794 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deal with update.sh
update.sh is out of date. We can either
- remove it
- update it
- replace it with something else
Having a script would be pretty cool to document what needs to be done when updating a production server.
Maybe this can go into a management command.
An idea for additional automation would be a git post-commit hook that checks out the release branch and updates everything when someone pushes to the production server via SSH. Logs of the update could be sent via email to the admins.
--- END ISSUE ---
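As a rough illustration of the management-command idea raised in the issue, the sketch below wraps a few typical update steps; the command layout and the individual steps are hypothetical placeholders, not EvaP's actual update procedure:

```python
# Hypothetical sketch of an "update" management command; every step listed
# here is a placeholder and would need to match the real deployment procedure.
from django.core.management import call_command
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    help = "Runs the steps needed to update a production instance (illustrative only)"

    def handle(self, *args, **options):
        self.stdout.write("Applying database migrations...")
        call_command("migrate", interactive=False)

        self.stdout.write("Collecting static files...")
        call_command("collectstatic", interactive=False)

        self.stdout.write("Refreshing the results cache...")
        call_command("refresh_results_cache")

        self.stdout.write("Update finished.")
```

Such a command would double as documentation of the update steps and could be triggered from a server-side git hook, with its output mailed to the admins.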
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/evaluation/management/commands/refresh_results_cache.py`
Content:
```
1 from django.core.management.base import BaseCommand
2 from django.core.cache import cache
3
4 from evap.evaluation.models import Course
5 from evap.evaluation.tools import calculate_results
6
7
8 class Command(BaseCommand):
9 args = ''
10 help = 'Clears the cache and pre-warms it with the results of all courses'
11
12 def handle(self, *args, **options):
13 self.stdout.write("Clearing cache...")
14 cache.clear()
15
16 self.stdout.write("Calculating results for all courses...")
17 for course in Course.objects.all():
18 calculate_results(course)
19
20 self.stdout.write("Done with updating cache.")
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evap/evaluation/management/commands/refresh_results_cache.py b/evap/evaluation/management/commands/refresh_results_cache.py
--- a/evap/evaluation/management/commands/refresh_results_cache.py
+++ b/evap/evaluation/management/commands/refresh_results_cache.py
@@ -1,4 +1,5 @@
from django.core.management.base import BaseCommand
+from django.core.serializers.base import ProgressBar
from django.core.cache import cache
from evap.evaluation.models import Course
@@ -12,9 +13,15 @@
def handle(self, *args, **options):
self.stdout.write("Clearing cache...")
cache.clear()
+ total_count = Course.objects.count()
self.stdout.write("Calculating results for all courses...")
- for course in Course.objects.all():
+
+ self.stdout.ending = None
+ progress_bar = ProgressBar(self.stdout, total_count)
+
+ for counter, course in enumerate(Course.objects.all()):
+ progress_bar.update(counter + 1)
calculate_results(course)
- self.stdout.write("Done with updating cache.")
+ self.stdout.write("Done with updating cache.\n")
| {"golden_diff": "diff --git a/evap/evaluation/management/commands/refresh_results_cache.py b/evap/evaluation/management/commands/refresh_results_cache.py\n--- a/evap/evaluation/management/commands/refresh_results_cache.py\n+++ b/evap/evaluation/management/commands/refresh_results_cache.py\n@@ -1,4 +1,5 @@\n from django.core.management.base import BaseCommand\n+from django.core.serializers.base import ProgressBar\n from django.core.cache import cache\n \n from evap.evaluation.models import Course\n@@ -12,9 +13,15 @@\n def handle(self, *args, **options):\n self.stdout.write(\"Clearing cache...\")\n cache.clear()\n+ total_count = Course.objects.count()\n \n self.stdout.write(\"Calculating results for all courses...\")\n- for course in Course.objects.all():\n+\n+ self.stdout.ending = None\n+ progress_bar = ProgressBar(self.stdout, total_count)\n+\n+ for counter, course in enumerate(Course.objects.all()):\n+ progress_bar.update(counter + 1)\n calculate_results(course)\n \n- self.stdout.write(\"Done with updating cache.\")\n+ self.stdout.write(\"Done with updating cache.\\n\")\n", "issue": "Deal with update.sh\nupdate.sh is out of date. We can either\n- remove it\n- update it\n- replace it with something else\n\nhaving a script would be pretty cool to document what needs to be done when updating a production server.\n\nmaybe this can go into a management command.\n\nan idea for additional automation would be a git post-commit-hook that checks out the release branch and updates everything when someone pushes to the production server via ssh. logs of the update could be sent via email to the admins.\n\nDeal with update.sh\nupdate.sh is out of date. We can either\n- remove it\n- update it\n- replace it with something else\n\nhaving a script would be pretty cool to document what needs to be done when updating a production server.\n\nmaybe this can go into a management command.\n\nan idea for additional automation would be a git post-commit-hook that checks out the release branch and updates everything when someone pushes to the production server via ssh. 
logs of the update could be sent via email to the admins.\n\n", "before_files": [{"content": "from django.core.management.base import BaseCommand\nfrom django.core.cache import cache\n\nfrom evap.evaluation.models import Course\nfrom evap.evaluation.tools import calculate_results\n\n\nclass Command(BaseCommand):\n args = ''\n help = 'Clears the cache and pre-warms it with the results of all courses'\n\n def handle(self, *args, **options):\n self.stdout.write(\"Clearing cache...\")\n cache.clear()\n\n self.stdout.write(\"Calculating results for all courses...\")\n for course in Course.objects.all():\n calculate_results(course)\n\n self.stdout.write(\"Done with updating cache.\")\n", "path": "evap/evaluation/management/commands/refresh_results_cache.py"}], "after_files": [{"content": "from django.core.management.base import BaseCommand\nfrom django.core.serializers.base import ProgressBar\nfrom django.core.cache import cache\n\nfrom evap.evaluation.models import Course\nfrom evap.evaluation.tools import calculate_results\n\n\nclass Command(BaseCommand):\n args = ''\n help = 'Clears the cache and pre-warms it with the results of all courses'\n\n def handle(self, *args, **options):\n self.stdout.write(\"Clearing cache...\")\n cache.clear()\n total_count = Course.objects.count()\n\n self.stdout.write(\"Calculating results for all courses...\")\n\n self.stdout.ending = None\n progress_bar = ProgressBar(self.stdout, total_count)\n\n for counter, course in enumerate(Course.objects.all()):\n progress_bar.update(counter + 1)\n calculate_results(course)\n\n self.stdout.write(\"Done with updating cache.\\n\")\n", "path": "evap/evaluation/management/commands/refresh_results_cache.py"}]} | 637 | 257 |
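The progress reporting added in the patch above is driven by `django.core.serializers.base.ProgressBar`. A minimal standalone sketch of that pattern, assuming an environment with Django installed and a made-up list standing in for the courses:

```python
import sys

from django.core.serializers.base import ProgressBar

# A plain list stands in for Course.objects.all() so the snippet runs outside
# of EvaP; the real command would call calculate_results(course) in the loop.
items = [f"course-{i}" for i in range(40)]

progress_bar = ProgressBar(sys.stdout, len(items))
for counter, item in enumerate(items):
    progress_bar.update(counter + 1)  # redraws a simple textual progress bar

sys.stdout.write("\nDone.\n")
```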
gh_patches_debug_19815 | rasdani/github-patches | git_diff | e2nIEE__pandapower-870 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug in rundcpp failed to initialize result table
```python
import pandapower as pp
import pandapower.networks as nw
net = nw.case57()
pp.rundcpp(net)
net.pop("res_gen")
# Problem when executing this
pp.rundcpp(net)
# No problem doing this
pp.runpp(net)
```
Can anyone recreate this? I tried pp 2.2.2 and pp 2.3.0.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandapower/results.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
4 # and Energy System Technology (IEE), Kassel. All rights reserved.
5
6
7 import numpy as np
8 import pandas as pd
9
10 from pandapower.results_branch import _get_branch_results, _get_branch_results_3ph
11 from pandapower.results_bus import _get_bus_results, _set_buses_out_of_service, \
12 _get_shunt_results, _get_p_q_results, _get_bus_v_results, _get_bus_v_results_3ph, _get_p_q_results_3ph, \
13 _get_bus_results_3ph
14 from pandapower.results_gen import _get_gen_results, _get_gen_results_3ph
15
16 suffix_mode = {"sc": "sc", "se": "est", "pf_3ph": "3ph"}
17
18
19 def _extract_results(net, ppc):
20 _set_buses_out_of_service(ppc)
21 bus_lookup_aranged = _get_aranged_lookup(net)
22 _get_bus_v_results(net, ppc)
23 bus_pq = _get_p_q_results(net, ppc, bus_lookup_aranged)
24 _get_shunt_results(net, ppc, bus_lookup_aranged, bus_pq)
25 _get_branch_results(net, ppc, bus_lookup_aranged, bus_pq)
26 _get_gen_results(net, ppc, bus_lookup_aranged, bus_pq)
27 _get_bus_results(net, ppc, bus_pq)
28 if net._options["mode"] == "opf":
29 _get_costs(net, ppc)
30
31
32 def _extract_results_3ph(net, ppc0, ppc1, ppc2):
33 # reset_results(net, False)
34 _set_buses_out_of_service(ppc0)
35 _set_buses_out_of_service(ppc1)
36 _set_buses_out_of_service(ppc2)
37 bus_lookup_aranged = _get_aranged_lookup(net)
38
39 _get_bus_v_results_3ph(net, ppc0, ppc1, ppc2)
40 bus_pq = _get_p_q_results_3ph(net, bus_lookup_aranged)
41 # _get_shunt_results(net, ppc, bus_lookup_aranged, bus_pq)
42 _get_branch_results_3ph(net, ppc0, ppc1, ppc2, bus_lookup_aranged, bus_pq)
43 _get_gen_results_3ph(net, ppc0, ppc1, ppc2, bus_lookup_aranged, bus_pq)
44 _get_bus_results_3ph(net, bus_pq)
45
46
47 def _extract_results_se(net, ppc):
48 _set_buses_out_of_service(ppc)
49 bus_lookup_aranged = _get_aranged_lookup(net)
50 _get_bus_v_results(net, ppc, suffix="_est")
51 bus_pq = np.zeros(shape=(len(net["bus"].index), 2), dtype=np.float)
52 _get_branch_results(net, ppc, bus_lookup_aranged, bus_pq, suffix="_est")
53
54
55 def _get_costs(net, ppc):
56 net.res_cost = ppc['obj']
57
58
59 def _get_aranged_lookup(net):
60 # generate bus_lookup net -> consecutive ordering
61 maxBus = max(net["bus"].index.values)
62 bus_lookup_aranged = -np.ones(maxBus + 1, dtype=int)
63 bus_lookup_aranged[net["bus"].index.values] = np.arange(len(net["bus"].index.values))
64
65 return bus_lookup_aranged
66
67
68 def verify_results(net, mode="pf"):
69 elements = get_relevant_elements(mode)
70 suffix = suffix_mode.get(mode, None)
71 for element in elements:
72 res_element, res_empty_element = get_result_tables(element, suffix)
73 if len(net[element]) != len(net[res_element]):
74 init_element(net, element)
75 if element == "bus":
76 net._options["init_vm_pu"] = "auto"
77 net._options["init_va_degree"] = "auto"
78
79
80 def get_result_tables(element, suffix=None):
81 res_element = "res_" + element
82 res_element_with_suffix = res_element if suffix is None else res_element + "_%s" % suffix
83
84 if suffix == suffix_mode.get("se", None):
85 # State estimation used default result table
86 return res_element_with_suffix, "_empty_%s" % res_element
87 else:
88 return res_element_with_suffix, "_empty_%s" % res_element_with_suffix
89
90
91 def empty_res_element(net, element, suffix=None):
92 res_element, res_empty_element = get_result_tables(element, suffix)
93 if res_empty_element in net:
94 net[res_element] = net[res_empty_element].copy()
95 else:
96 net[res_element] = pd.DataFrame()
97
98
99 def init_element(net, element, suffix=None):
100 res_element, res_empty_element = get_result_tables(element, suffix)
101 index = net[element].index
102 if len(index):
103 # init empty dataframe
104 if res_empty_element in net:
105 columns = net[res_empty_element].columns
106 net[res_element] = pd.DataFrame(np.nan, index=index,
107 columns=columns, dtype='float')
108 else:
109 net[res_element] = pd.DataFrame(index=index, dtype='float')
110 else:
111 empty_res_element(net, element, suffix)
112
113
114 def get_relevant_elements(mode="pf"):
115 if mode == "pf" or mode == "opf":
116 return ["bus", "line", "trafo", "trafo3w", "impedance", "ext_grid",
117 "load", "sgen", "storage", "shunt", "gen", "ward", "xward",
118 "dcline"]
119 elif mode == "sc":
120 return ["bus", "line", "trafo", "trafo3w", "ext_grid", "gen", "sgen"]
121 elif mode == "se":
122 return ["bus", "line", "trafo", "trafo3w"]
123 elif mode == "pf_3ph":
124 return ["bus", "line", "trafo", "ext_grid", "shunt",
125 "load", "sgen", "storage", "asymmetric_load", "asymmetric_sgen"]
126
127
128 def init_results(net, mode="pf"):
129 elements = get_relevant_elements(mode)
130 suffix = suffix_mode.get(mode, None)
131 for element in elements:
132 init_element(net, element, suffix)
133
134
135 def reset_results(net, mode="pf"):
136 elements = get_relevant_elements(mode)
137 suffix = suffix_mode.get(mode, None)
138 for element in elements:
139 empty_res_element(net, element, suffix)
140
141
142 def _ppci_bus_to_ppc(result, ppc):
143 # result is the ppci (ppc without out of service buses)
144 # busses are sorted (REF, PV, PQ, NONE) -> results are the first 3 types
145 n_buses, bus_cols = np.shape(ppc['bus'])
146 n_rows_result, bus_cols_result = np.shape(result['bus'])
147 # create matrix of proper size
148 updated_bus = np.empty((n_buses, bus_cols_result))
149 # fill in results (first 3 types)
150 updated_bus[:n_rows_result, :] = result['bus']
151 if n_buses > n_rows_result:
152 # keep rows for busses of type NONE
153 updated_bus[n_rows_result:, :bus_cols] = ppc['bus'][n_rows_result:, :]
154 ppc['bus'] = updated_bus
155
156
157 def _ppci_branch_to_ppc(result, ppc):
158 # in service branches and gens are taken from 'internal'
159 branch_cols = np.shape(ppc['branch'])[1]
160 ppc['branch'][result["internal"]['branch_is'], :branch_cols] = result['branch'][:, :branch_cols]
161
162
163 def _ppci_gen_to_ppc(result, ppc):
164 gen_cols = np.shape(ppc['gen'])[1]
165 ppc['gen'][result["internal"]['gen_is'], :gen_cols] = result['gen'][:, :gen_cols]
166
167
168 def _ppci_other_to_ppc(result, ppc, mode):
169 ppc['internal'] = result['internal']
170
171 if mode != "sc" and mode != "se":
172 ppc['success'] = result['success']
173 ppc['et'] = result['et']
174
175 if mode == 'opf':
176 ppc['obj'] = result['f']
177 ppc['internal_gencost'] = result['gencost']
178
179 if "iterations" in result:
180 ppc["iterations"] = result["iterations"]
181
182
183 def _copy_results_ppci_to_ppc(result, ppc, mode):
184 """
185 result contains results for all in service elements
186 ppc gets the results for in- and out of service elements
187 -> results must be copied
188
189 ppc and ppci are structured as follows:
190 [in_service elements]
191 ppc = [out_of_service elements]
192 result = [in_service elements]
193
194 Parameters
195 ----------
196 result - ppci with results
197 ppc - ppc without results
198 mode - "pf","opf", "sc"...
199
200 Returns
201 -------
202 ppc with results
203 """
204
205 # copy the results for bus, gen and branch and some additional values like "success"
206 _ppci_bus_to_ppc(result, ppc)
207 _ppci_branch_to_ppc(result, ppc)
208 _ppci_gen_to_ppc(result, ppc)
209 _ppci_other_to_ppc(result, ppc, mode)
210
211 result = ppc
212 return result
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pandapower/results.py b/pandapower/results.py
--- a/pandapower/results.py
+++ b/pandapower/results.py
@@ -70,7 +70,15 @@
suffix = suffix_mode.get(mode, None)
for element in elements:
res_element, res_empty_element = get_result_tables(element, suffix)
- if len(net[element]) != len(net[res_element]):
+
+ index_equal = False if res_element not in net else net[element].index.equals(net[res_element].index)
+ if not index_equal:
+ if net["_options"]["init_results"] and element == "bus":
+ # if the indices of bus and res_bus are not equal, but init_results is set, the voltage vector
+ # is wrong. A UserWarning is raised in this case. For all other elements the result table is emptied.
+ raise UserWarning("index of result table '{}' is not equal to the element table '{}'. The init result"
+ " option may lead to a non-converged power flow.".format(res_element, element))
+ # init result table for
init_element(net, element)
if element == "bus":
net._options["init_vm_pu"] = "auto"
| {"golden_diff": "diff --git a/pandapower/results.py b/pandapower/results.py\n--- a/pandapower/results.py\n+++ b/pandapower/results.py\n@@ -70,7 +70,15 @@\n suffix = suffix_mode.get(mode, None)\n for element in elements:\n res_element, res_empty_element = get_result_tables(element, suffix)\n- if len(net[element]) != len(net[res_element]):\n+\n+ index_equal = False if res_element not in net else net[element].index.equals(net[res_element].index)\n+ if not index_equal:\n+ if net[\"_options\"][\"init_results\"] and element == \"bus\":\n+ # if the indices of bus and res_bus are not equal, but init_results is set, the voltage vector\n+ # is wrong. A UserWarning is raised in this case. For all other elements the result table is emptied.\n+ raise UserWarning(\"index of result table '{}' is not equal to the element table '{}'. The init result\"\n+ \" option may lead to a non-converged power flow.\".format(res_element, element))\n+ # init result table for\n init_element(net, element)\n if element == \"bus\":\n net._options[\"init_vm_pu\"] = \"auto\"\n", "issue": "Bug in rundcpp failed to initialize result table\n```python\r\nimport pandapower as pp\r\nimport pandapower.networks as nw\r\n\r\nnet = nw.case57()\r\npp.rundcpp(net) \r\nnet.pop(\"res_gen\")\r\n\r\n# Problem by executing this\r\npp.rundcpp(net)\r\n\r\n# No problem doing this\r\npp.runpp(net) \r\n```\r\n\r\nCan anyone recreate this? I tries pp 2.2.2, pp 2.3.0.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\nimport numpy as np\nimport pandas as pd\n\nfrom pandapower.results_branch import _get_branch_results, _get_branch_results_3ph\nfrom pandapower.results_bus import _get_bus_results, _set_buses_out_of_service, \\\n _get_shunt_results, _get_p_q_results, _get_bus_v_results, _get_bus_v_results_3ph, _get_p_q_results_3ph, \\\n _get_bus_results_3ph\nfrom pandapower.results_gen import _get_gen_results, _get_gen_results_3ph\n\nsuffix_mode = {\"sc\": \"sc\", \"se\": \"est\", \"pf_3ph\": \"3ph\"}\n\n\ndef _extract_results(net, ppc):\n _set_buses_out_of_service(ppc)\n bus_lookup_aranged = _get_aranged_lookup(net)\n _get_bus_v_results(net, ppc)\n bus_pq = _get_p_q_results(net, ppc, bus_lookup_aranged)\n _get_shunt_results(net, ppc, bus_lookup_aranged, bus_pq)\n _get_branch_results(net, ppc, bus_lookup_aranged, bus_pq)\n _get_gen_results(net, ppc, bus_lookup_aranged, bus_pq)\n _get_bus_results(net, ppc, bus_pq)\n if net._options[\"mode\"] == \"opf\":\n _get_costs(net, ppc)\n\n\ndef _extract_results_3ph(net, ppc0, ppc1, ppc2):\n # reset_results(net, False)\n _set_buses_out_of_service(ppc0)\n _set_buses_out_of_service(ppc1)\n _set_buses_out_of_service(ppc2)\n bus_lookup_aranged = _get_aranged_lookup(net)\n\n _get_bus_v_results_3ph(net, ppc0, ppc1, ppc2)\n bus_pq = _get_p_q_results_3ph(net, bus_lookup_aranged)\n # _get_shunt_results(net, ppc, bus_lookup_aranged, bus_pq)\n _get_branch_results_3ph(net, ppc0, ppc1, ppc2, bus_lookup_aranged, bus_pq)\n _get_gen_results_3ph(net, ppc0, ppc1, ppc2, bus_lookup_aranged, bus_pq)\n _get_bus_results_3ph(net, bus_pq)\n\n\ndef _extract_results_se(net, ppc):\n _set_buses_out_of_service(ppc)\n bus_lookup_aranged = _get_aranged_lookup(net)\n _get_bus_v_results(net, ppc, suffix=\"_est\")\n bus_pq = np.zeros(shape=(len(net[\"bus\"].index), 2), dtype=np.float)\n _get_branch_results(net, ppc, bus_lookup_aranged, bus_pq, 
suffix=\"_est\")\n\n\ndef _get_costs(net, ppc):\n net.res_cost = ppc['obj']\n\n\ndef _get_aranged_lookup(net):\n # generate bus_lookup net -> consecutive ordering\n maxBus = max(net[\"bus\"].index.values)\n bus_lookup_aranged = -np.ones(maxBus + 1, dtype=int)\n bus_lookup_aranged[net[\"bus\"].index.values] = np.arange(len(net[\"bus\"].index.values))\n\n return bus_lookup_aranged\n\n\ndef verify_results(net, mode=\"pf\"):\n elements = get_relevant_elements(mode)\n suffix = suffix_mode.get(mode, None)\n for element in elements:\n res_element, res_empty_element = get_result_tables(element, suffix)\n if len(net[element]) != len(net[res_element]):\n init_element(net, element)\n if element == \"bus\":\n net._options[\"init_vm_pu\"] = \"auto\"\n net._options[\"init_va_degree\"] = \"auto\"\n\n\ndef get_result_tables(element, suffix=None):\n res_element = \"res_\" + element\n res_element_with_suffix = res_element if suffix is None else res_element + \"_%s\" % suffix\n\n if suffix == suffix_mode.get(\"se\", None):\n # State estimation used default result table\n return res_element_with_suffix, \"_empty_%s\" % res_element\n else:\n return res_element_with_suffix, \"_empty_%s\" % res_element_with_suffix\n\n\ndef empty_res_element(net, element, suffix=None):\n res_element, res_empty_element = get_result_tables(element, suffix)\n if res_empty_element in net:\n net[res_element] = net[res_empty_element].copy()\n else:\n net[res_element] = pd.DataFrame()\n\n\ndef init_element(net, element, suffix=None):\n res_element, res_empty_element = get_result_tables(element, suffix)\n index = net[element].index\n if len(index):\n # init empty dataframe\n if res_empty_element in net:\n columns = net[res_empty_element].columns\n net[res_element] = pd.DataFrame(np.nan, index=index,\n columns=columns, dtype='float')\n else:\n net[res_element] = pd.DataFrame(index=index, dtype='float')\n else:\n empty_res_element(net, element, suffix)\n\n\ndef get_relevant_elements(mode=\"pf\"):\n if mode == \"pf\" or mode == \"opf\":\n return [\"bus\", \"line\", \"trafo\", \"trafo3w\", \"impedance\", \"ext_grid\",\n \"load\", \"sgen\", \"storage\", \"shunt\", \"gen\", \"ward\", \"xward\",\n \"dcline\"]\n elif mode == \"sc\":\n return [\"bus\", \"line\", \"trafo\", \"trafo3w\", \"ext_grid\", \"gen\", \"sgen\"]\n elif mode == \"se\":\n return [\"bus\", \"line\", \"trafo\", \"trafo3w\"] \n elif mode == \"pf_3ph\":\n return [\"bus\", \"line\", \"trafo\", \"ext_grid\", \"shunt\",\n \"load\", \"sgen\", \"storage\", \"asymmetric_load\", \"asymmetric_sgen\"]\n\n\ndef init_results(net, mode=\"pf\"):\n elements = get_relevant_elements(mode)\n suffix = suffix_mode.get(mode, None)\n for element in elements:\n init_element(net, element, suffix)\n\n\ndef reset_results(net, mode=\"pf\"):\n elements = get_relevant_elements(mode)\n suffix = suffix_mode.get(mode, None)\n for element in elements:\n empty_res_element(net, element, suffix)\n\n\ndef _ppci_bus_to_ppc(result, ppc):\n # result is the ppci (ppc without out of service buses)\n # busses are sorted (REF, PV, PQ, NONE) -> results are the first 3 types\n n_buses, bus_cols = np.shape(ppc['bus'])\n n_rows_result, bus_cols_result = np.shape(result['bus'])\n # create matrix of proper size\n updated_bus = np.empty((n_buses, bus_cols_result))\n # fill in results (first 3 types)\n updated_bus[:n_rows_result, :] = result['bus']\n if n_buses > n_rows_result:\n # keep rows for busses of type NONE\n updated_bus[n_rows_result:, :bus_cols] = ppc['bus'][n_rows_result:, :]\n ppc['bus'] = updated_bus\n\n\ndef 
_ppci_branch_to_ppc(result, ppc):\n # in service branches and gens are taken from 'internal'\n branch_cols = np.shape(ppc['branch'])[1]\n ppc['branch'][result[\"internal\"]['branch_is'], :branch_cols] = result['branch'][:, :branch_cols]\n\n\ndef _ppci_gen_to_ppc(result, ppc):\n gen_cols = np.shape(ppc['gen'])[1]\n ppc['gen'][result[\"internal\"]['gen_is'], :gen_cols] = result['gen'][:, :gen_cols]\n\n\ndef _ppci_other_to_ppc(result, ppc, mode):\n ppc['internal'] = result['internal']\n\n if mode != \"sc\" and mode != \"se\":\n ppc['success'] = result['success']\n ppc['et'] = result['et']\n\n if mode == 'opf':\n ppc['obj'] = result['f']\n ppc['internal_gencost'] = result['gencost']\n\n if \"iterations\" in result:\n ppc[\"iterations\"] = result[\"iterations\"]\n\n\ndef _copy_results_ppci_to_ppc(result, ppc, mode):\n \"\"\"\n result contains results for all in service elements\n ppc gets the results for in- and out of service elements\n -> results must be copied\n\n ppc and ppci are structured as follows:\n [in_service elements]\n ppc = [out_of_service elements]\n result = [in_service elements]\n\n Parameters\n ----------\n result - ppci with results\n ppc - ppc without results\n mode - \"pf\",\"opf\", \"sc\"...\n\n Returns\n -------\n ppc with results\n \"\"\"\n\n # copy the results for bus, gen and branch and some additional values like \"success\"\n _ppci_bus_to_ppc(result, ppc)\n _ppci_branch_to_ppc(result, ppc)\n _ppci_gen_to_ppc(result, ppc)\n _ppci_other_to_ppc(result, ppc, mode)\n\n result = ppc\n return result\n", "path": "pandapower/results.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. 
All rights reserved.\n\n\nimport numpy as np\nimport pandas as pd\n\nfrom pandapower.results_branch import _get_branch_results, _get_branch_results_3ph\nfrom pandapower.results_bus import _get_bus_results, _set_buses_out_of_service, \\\n _get_shunt_results, _get_p_q_results, _get_bus_v_results, _get_bus_v_results_3ph, _get_p_q_results_3ph, \\\n _get_bus_results_3ph\nfrom pandapower.results_gen import _get_gen_results, _get_gen_results_3ph\n\nsuffix_mode = {\"sc\": \"sc\", \"se\": \"est\", \"pf_3ph\": \"3ph\"}\n\n\ndef _extract_results(net, ppc):\n _set_buses_out_of_service(ppc)\n bus_lookup_aranged = _get_aranged_lookup(net)\n _get_bus_v_results(net, ppc)\n bus_pq = _get_p_q_results(net, ppc, bus_lookup_aranged)\n _get_shunt_results(net, ppc, bus_lookup_aranged, bus_pq)\n _get_branch_results(net, ppc, bus_lookup_aranged, bus_pq)\n _get_gen_results(net, ppc, bus_lookup_aranged, bus_pq)\n _get_bus_results(net, ppc, bus_pq)\n if net._options[\"mode\"] == \"opf\":\n _get_costs(net, ppc)\n\n\ndef _extract_results_3ph(net, ppc0, ppc1, ppc2):\n # reset_results(net, False)\n _set_buses_out_of_service(ppc0)\n _set_buses_out_of_service(ppc1)\n _set_buses_out_of_service(ppc2)\n bus_lookup_aranged = _get_aranged_lookup(net)\n\n _get_bus_v_results_3ph(net, ppc0, ppc1, ppc2)\n bus_pq = _get_p_q_results_3ph(net, bus_lookup_aranged)\n # _get_shunt_results(net, ppc, bus_lookup_aranged, bus_pq)\n _get_branch_results_3ph(net, ppc0, ppc1, ppc2, bus_lookup_aranged, bus_pq)\n _get_gen_results_3ph(net, ppc0, ppc1, ppc2, bus_lookup_aranged, bus_pq)\n _get_bus_results_3ph(net, bus_pq)\n\n\ndef _extract_results_se(net, ppc):\n _set_buses_out_of_service(ppc)\n bus_lookup_aranged = _get_aranged_lookup(net)\n _get_bus_v_results(net, ppc, suffix=\"_est\")\n bus_pq = np.zeros(shape=(len(net[\"bus\"].index), 2), dtype=np.float)\n _get_branch_results(net, ppc, bus_lookup_aranged, bus_pq, suffix=\"_est\")\n\n\ndef _get_costs(net, ppc):\n net.res_cost = ppc['obj']\n\n\ndef _get_aranged_lookup(net):\n # generate bus_lookup net -> consecutive ordering\n maxBus = max(net[\"bus\"].index.values)\n bus_lookup_aranged = -np.ones(maxBus + 1, dtype=int)\n bus_lookup_aranged[net[\"bus\"].index.values] = np.arange(len(net[\"bus\"].index.values))\n\n return bus_lookup_aranged\n\n\ndef verify_results(net, mode=\"pf\"):\n elements = get_relevant_elements(mode)\n suffix = suffix_mode.get(mode, None)\n for element in elements:\n res_element, res_empty_element = get_result_tables(element, suffix)\n\n index_equal = False if res_element not in net else net[element].index.equals(net[res_element].index)\n if not index_equal:\n if net[\"_options\"][\"init_results\"] and element == \"bus\":\n # if the indices of bus and res_bus are not equal, but init_results is set, the voltage vector\n # is wrong. A UserWarning is raised in this case. For all other elements the result table is emptied.\n raise UserWarning(\"index of result table '{}' is not equal to the element table '{}'. 
The init result\"\n \" option may lead to a non-converged power flow.\".format(res_element, element))\n # init result table for\n init_element(net, element)\n if element == \"bus\":\n net._options[\"init_vm_pu\"] = \"auto\"\n net._options[\"init_va_degree\"] = \"auto\"\n\n\ndef get_result_tables(element, suffix=None):\n res_element = \"res_\" + element\n res_element_with_suffix = res_element if suffix is None else res_element + \"_%s\" % suffix\n\n if suffix == suffix_mode.get(\"se\", None):\n # State estimation used default result table\n return res_element_with_suffix, \"_empty_%s\" % res_element\n else:\n return res_element_with_suffix, \"_empty_%s\" % res_element_with_suffix\n\n\ndef empty_res_element(net, element, suffix=None):\n res_element, res_empty_element = get_result_tables(element, suffix)\n if res_empty_element in net:\n net[res_element] = net[res_empty_element].copy()\n else:\n net[res_element] = pd.DataFrame()\n\n\ndef init_element(net, element, suffix=None):\n res_element, res_empty_element = get_result_tables(element, suffix)\n index = net[element].index\n if len(index):\n # init empty dataframe\n if res_empty_element in net:\n columns = net[res_empty_element].columns\n net[res_element] = pd.DataFrame(np.nan, index=index,\n columns=columns, dtype='float')\n else:\n net[res_element] = pd.DataFrame(index=index, dtype='float')\n else:\n empty_res_element(net, element, suffix)\n\n\ndef get_relevant_elements(mode=\"pf\"):\n if mode == \"pf\" or mode == \"opf\":\n return [\"bus\", \"line\", \"trafo\", \"trafo3w\", \"impedance\", \"ext_grid\",\n \"load\", \"sgen\", \"storage\", \"shunt\", \"gen\", \"ward\", \"xward\",\n \"dcline\"]\n elif mode == \"sc\":\n return [\"bus\", \"line\", \"trafo\", \"trafo3w\", \"ext_grid\", \"gen\", \"sgen\"]\n elif mode == \"se\":\n return [\"bus\", \"line\", \"trafo\", \"trafo3w\"] \n elif mode == \"pf_3ph\":\n return [\"bus\", \"line\", \"trafo\", \"ext_grid\", \"shunt\",\n \"load\", \"sgen\", \"storage\", \"asymmetric_load\", \"asymmetric_sgen\"]\n\n\ndef init_results(net, mode=\"pf\"):\n elements = get_relevant_elements(mode)\n suffix = suffix_mode.get(mode, None)\n for element in elements:\n init_element(net, element, suffix)\n\n\ndef reset_results(net, mode=\"pf\"):\n elements = get_relevant_elements(mode)\n suffix = suffix_mode.get(mode, None)\n for element in elements:\n empty_res_element(net, element, suffix)\n\n\ndef _ppci_bus_to_ppc(result, ppc):\n # result is the ppci (ppc without out of service buses)\n # busses are sorted (REF, PV, PQ, NONE) -> results are the first 3 types\n n_buses, bus_cols = np.shape(ppc['bus'])\n n_rows_result, bus_cols_result = np.shape(result['bus'])\n # create matrix of proper size\n updated_bus = np.empty((n_buses, bus_cols_result))\n # fill in results (first 3 types)\n updated_bus[:n_rows_result, :] = result['bus']\n if n_buses > n_rows_result:\n # keep rows for busses of type NONE\n updated_bus[n_rows_result:, :bus_cols] = ppc['bus'][n_rows_result:, :]\n ppc['bus'] = updated_bus\n\n\ndef _ppci_branch_to_ppc(result, ppc):\n # in service branches and gens are taken from 'internal'\n branch_cols = np.shape(ppc['branch'])[1]\n ppc['branch'][result[\"internal\"]['branch_is'], :branch_cols] = result['branch'][:, :branch_cols]\n\n\ndef _ppci_gen_to_ppc(result, ppc):\n gen_cols = np.shape(ppc['gen'])[1]\n ppc['gen'][result[\"internal\"]['gen_is'], :gen_cols] = result['gen'][:, :gen_cols]\n\n\ndef _ppci_other_to_ppc(result, ppc, mode):\n ppc['internal'] = result['internal']\n\n if mode != \"sc\" and mode != \"se\":\n 
ppc['success'] = result['success']\n ppc['et'] = result['et']\n\n if mode == 'opf':\n ppc['obj'] = result['f']\n ppc['internal_gencost'] = result['gencost']\n\n if \"iterations\" in result:\n ppc[\"iterations\"] = result[\"iterations\"]\n\n\ndef _copy_results_ppci_to_ppc(result, ppc, mode):\n \"\"\"\n result contains results for all in service elements\n ppc gets the results for in- and out of service elements\n -> results must be copied\n\n ppc and ppci are structured as follows:\n [in_service elements]\n ppc = [out_of_service elements]\n result = [in_service elements]\n\n Parameters\n ----------\n result - ppci with results\n ppc - ppc without results\n mode - \"pf\",\"opf\", \"sc\"...\n\n Returns\n -------\n ppc with results\n \"\"\"\n\n # copy the results for bus, gen and branch and some additional values like \"success\"\n _ppci_bus_to_ppc(result, ppc)\n _ppci_branch_to_ppc(result, ppc)\n _ppci_gen_to_ppc(result, ppc)\n _ppci_other_to_ppc(result, ppc, mode)\n\n result = ppc\n return result\n", "path": "pandapower/results.py"}]} | 3,001 | 272 |
gh_patches_debug_3099 | rasdani/github-patches | git_diff | MongoEngine__mongoengine-2431 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Version restriction on pillow
Do we still need to restrict pillow to less than 7.0.0? This looks to have been implemented because of python2 support, which mongoengine dropped with version 0.20.0.
https://github.com/MongoEngine/mongoengine/blob/277b827d4dab4630145bc747fdab0df48a045273/setup.py#L118
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import sys
3
4 from pkg_resources import normalize_path
5 from setuptools import find_packages, setup
6 from setuptools.command.test import test as TestCommand
7
8 # Hack to silence atexit traceback in newer python versions
9 try:
10 import multiprocessing
11 except ImportError:
12 pass
13
14 DESCRIPTION = "MongoEngine is a Python Object-Document Mapper for working with MongoDB."
15
16 try:
17 with open("README.rst") as fin:
18 LONG_DESCRIPTION = fin.read()
19 except Exception:
20 LONG_DESCRIPTION = None
21
22
23 def get_version(version_tuple):
24 """Return the version tuple as a string, e.g. for (0, 10, 7),
25 return '0.10.7'.
26 """
27 return ".".join(map(str, version_tuple))
28
29
30 class PyTest(TestCommand):
31 """Will force pytest to search for tests inside the build directory
32 for 2to3 converted code (used by tox), instead of the current directory.
33 Required as long as we need 2to3
34
35 Known Limitation: https://tox.readthedocs.io/en/latest/example/pytest.html#known-issues-and-limitations
36 Source: https://www.hackzine.org/python-testing-with-pytest-and-2to3-plus-tox-and-travis-ci.html
37 """
38
39 # https://pytest.readthedocs.io/en/2.7.3/goodpractises.html#integration-with-setuptools-test-commands
40 # Allows to provide pytest command argument through the test runner command `python setup.py test`
41 # e.g: `python setup.py test -a "-k=test"`
42 # This only works for 1 argument though
43 user_options = [("pytest-args=", "a", "Arguments to pass to py.test")]
44
45 def initialize_options(self):
46 TestCommand.initialize_options(self)
47 self.pytest_args = ""
48
49 def finalize_options(self):
50 TestCommand.finalize_options(self)
51 self.test_args = ["tests"]
52 self.test_suite = True
53
54 def run_tests(self):
55 # import here, cause outside the eggs aren't loaded
56 from pkg_resources import _namespace_packages
57 import pytest
58
59 # Purge modules under test from sys.modules. The test loader will
60 # re-import them from the build location. Required when 2to3 is used
61 # with namespace packages.
62 if sys.version_info >= (3,) and getattr(self.distribution, "use_2to3", False):
63 module = self.test_args[-1].split(".")[0]
64 if module in _namespace_packages:
65 del_modules = []
66 if module in sys.modules:
67 del_modules.append(module)
68 module += "."
69 for name in sys.modules:
70 if name.startswith(module):
71 del_modules.append(name)
72 map(sys.modules.__delitem__, del_modules)
73
74 # Run on the build directory for 2to3-built code
75 # This will prevent the old 2.x code from being found
76 # by py.test discovery mechanism, that apparently
77 # ignores sys.path..
78 ei_cmd = self.get_finalized_command("egg_info")
79 self.test_args = [normalize_path(ei_cmd.egg_base)]
80
81 cmd_args = self.test_args + ([self.pytest_args] if self.pytest_args else [])
82 errno = pytest.main(cmd_args)
83 sys.exit(errno)
84
85
86 # Dirty hack to get version number from monogengine/__init__.py - we can't
87 # import it as it depends on PyMongo and PyMongo isn't installed until this
88 # file is read
89 init = os.path.join(os.path.dirname(__file__), "mongoengine", "__init__.py")
90 version_line = list(filter(lambda l: l.startswith("VERSION"), open(init)))[0]
91
92 VERSION = get_version(eval(version_line.split("=")[-1]))
93
94 CLASSIFIERS = [
95 "Development Status :: 5 - Production/Stable",
96 "Intended Audience :: Developers",
97 "License :: OSI Approved :: MIT License",
98 "Operating System :: OS Independent",
99 "Programming Language :: Python",
100 "Programming Language :: Python :: 3",
101 "Programming Language :: Python :: 3.5",
102 "Programming Language :: Python :: 3.6",
103 "Programming Language :: Python :: 3.7",
104 "Programming Language :: Python :: 3.8",
105 "Programming Language :: Python :: Implementation :: CPython",
106 "Programming Language :: Python :: Implementation :: PyPy",
107 "Topic :: Database",
108 "Topic :: Software Development :: Libraries :: Python Modules",
109 ]
110
111 extra_opts = {
112 "packages": find_packages(exclude=["tests", "tests.*"]),
113 "tests_require": [
114 "pytest<5.0",
115 "pytest-cov",
116 "coverage<5.0", # recent coverage switched to sqlite format for the .coverage file which isn't handled properly by coveralls
117 "blinker",
118 "Pillow>=2.0.0, <7.0.0", # 7.0.0 dropped Python2 support
119 ],
120 }
121
122 if "test" in sys.argv:
123 extra_opts["packages"] = find_packages()
124 extra_opts["package_data"] = {
125 "tests": ["fields/mongoengine.png", "fields/mongodb_leaf.png"]
126 }
127
128 setup(
129 name="mongoengine",
130 version=VERSION,
131 author="Harry Marr",
132 author_email="[email protected]",
133 maintainer="Stefan Wojcik",
134 maintainer_email="[email protected]",
135 url="http://mongoengine.org/",
136 download_url="https://github.com/MongoEngine/mongoengine/tarball/master",
137 license="MIT",
138 include_package_data=True,
139 description=DESCRIPTION,
140 long_description=LONG_DESCRIPTION,
141 platforms=["any"],
142 classifiers=CLASSIFIERS,
143 python_requires=">=3.5",
144 install_requires=["pymongo>=3.4, <4.0"],
145 cmdclass={"test": PyTest},
146 **extra_opts
147 )
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -115,7 +115,7 @@
"pytest-cov",
"coverage<5.0", # recent coverage switched to sqlite format for the .coverage file which isn't handled properly by coveralls
"blinker",
- "Pillow>=2.0.0, <7.0.0", # 7.0.0 dropped Python2 support
+ "Pillow>=7.0.0",
],
}
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -115,7 +115,7 @@\n \"pytest-cov\",\n \"coverage<5.0\", # recent coverage switched to sqlite format for the .coverage file which isn't handled properly by coveralls\n \"blinker\",\n- \"Pillow>=2.0.0, <7.0.0\", # 7.0.0 dropped Python2 support\n+ \"Pillow>=7.0.0\",\n ],\n }\n", "issue": "Version restriction on pillow\nDo we still need to restrict pillow to less then 7.0.0? This looks to have been implemented because of python2 support, which mongoengine dropped with version 0.20.0\r\n\r\nhttps://github.com/MongoEngine/mongoengine/blob/277b827d4dab4630145bc747fdab0df48a045273/setup.py#L118\n", "before_files": [{"content": "import os\nimport sys\n\nfrom pkg_resources import normalize_path\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\n# Hack to silence atexit traceback in newer python versions\ntry:\n import multiprocessing\nexcept ImportError:\n pass\n\nDESCRIPTION = \"MongoEngine is a Python Object-Document Mapper for working with MongoDB.\"\n\ntry:\n with open(\"README.rst\") as fin:\n LONG_DESCRIPTION = fin.read()\nexcept Exception:\n LONG_DESCRIPTION = None\n\n\ndef get_version(version_tuple):\n \"\"\"Return the version tuple as a string, e.g. for (0, 10, 7),\n return '0.10.7'.\n \"\"\"\n return \".\".join(map(str, version_tuple))\n\n\nclass PyTest(TestCommand):\n \"\"\"Will force pytest to search for tests inside the build directory\n for 2to3 converted code (used by tox), instead of the current directory.\n Required as long as we need 2to3\n\n Known Limitation: https://tox.readthedocs.io/en/latest/example/pytest.html#known-issues-and-limitations\n Source: https://www.hackzine.org/python-testing-with-pytest-and-2to3-plus-tox-and-travis-ci.html\n \"\"\"\n\n # https://pytest.readthedocs.io/en/2.7.3/goodpractises.html#integration-with-setuptools-test-commands\n # Allows to provide pytest command argument through the test runner command `python setup.py test`\n # e.g: `python setup.py test -a \"-k=test\"`\n # This only works for 1 argument though\n user_options = [(\"pytest-args=\", \"a\", \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = \"\"\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = [\"tests\"]\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n from pkg_resources import _namespace_packages\n import pytest\n\n # Purge modules under test from sys.modules. The test loader will\n # re-import them from the build location. 
Required when 2to3 is used\n # with namespace packages.\n if sys.version_info >= (3,) and getattr(self.distribution, \"use_2to3\", False):\n module = self.test_args[-1].split(\".\")[0]\n if module in _namespace_packages:\n del_modules = []\n if module in sys.modules:\n del_modules.append(module)\n module += \".\"\n for name in sys.modules:\n if name.startswith(module):\n del_modules.append(name)\n map(sys.modules.__delitem__, del_modules)\n\n # Run on the build directory for 2to3-built code\n # This will prevent the old 2.x code from being found\n # by py.test discovery mechanism, that apparently\n # ignores sys.path..\n ei_cmd = self.get_finalized_command(\"egg_info\")\n self.test_args = [normalize_path(ei_cmd.egg_base)]\n\n cmd_args = self.test_args + ([self.pytest_args] if self.pytest_args else [])\n errno = pytest.main(cmd_args)\n sys.exit(errno)\n\n\n# Dirty hack to get version number from monogengine/__init__.py - we can't\n# import it as it depends on PyMongo and PyMongo isn't installed until this\n# file is read\ninit = os.path.join(os.path.dirname(__file__), \"mongoengine\", \"__init__.py\")\nversion_line = list(filter(lambda l: l.startswith(\"VERSION\"), open(init)))[0]\n\nVERSION = get_version(eval(version_line.split(\"=\")[-1]))\n\nCLASSIFIERS = [\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Database\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nextra_opts = {\n \"packages\": find_packages(exclude=[\"tests\", \"tests.*\"]),\n \"tests_require\": [\n \"pytest<5.0\",\n \"pytest-cov\",\n \"coverage<5.0\", # recent coverage switched to sqlite format for the .coverage file which isn't handled properly by coveralls\n \"blinker\",\n \"Pillow>=2.0.0, <7.0.0\", # 7.0.0 dropped Python2 support\n ],\n}\n\nif \"test\" in sys.argv:\n extra_opts[\"packages\"] = find_packages()\n extra_opts[\"package_data\"] = {\n \"tests\": [\"fields/mongoengine.png\", \"fields/mongodb_leaf.png\"]\n }\n\nsetup(\n name=\"mongoengine\",\n version=VERSION,\n author=\"Harry Marr\",\n author_email=\"[email protected]\",\n maintainer=\"Stefan Wojcik\",\n maintainer_email=\"[email protected]\",\n url=\"http://mongoengine.org/\",\n download_url=\"https://github.com/MongoEngine/mongoengine/tarball/master\",\n license=\"MIT\",\n include_package_data=True,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n platforms=[\"any\"],\n classifiers=CLASSIFIERS,\n python_requires=\">=3.5\",\n install_requires=[\"pymongo>=3.4, <4.0\"],\n cmdclass={\"test\": PyTest},\n **extra_opts\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport sys\n\nfrom pkg_resources import normalize_path\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\n# Hack to silence atexit traceback in newer python versions\ntry:\n import multiprocessing\nexcept ImportError:\n pass\n\nDESCRIPTION = \"MongoEngine is a Python Object-Document Mapper for working with MongoDB.\"\n\ntry:\n with open(\"README.rst\") as 
fin:\n LONG_DESCRIPTION = fin.read()\nexcept Exception:\n LONG_DESCRIPTION = None\n\n\ndef get_version(version_tuple):\n \"\"\"Return the version tuple as a string, e.g. for (0, 10, 7),\n return '0.10.7'.\n \"\"\"\n return \".\".join(map(str, version_tuple))\n\n\nclass PyTest(TestCommand):\n \"\"\"Will force pytest to search for tests inside the build directory\n for 2to3 converted code (used by tox), instead of the current directory.\n Required as long as we need 2to3\n\n Known Limitation: https://tox.readthedocs.io/en/latest/example/pytest.html#known-issues-and-limitations\n Source: https://www.hackzine.org/python-testing-with-pytest-and-2to3-plus-tox-and-travis-ci.html\n \"\"\"\n\n # https://pytest.readthedocs.io/en/2.7.3/goodpractises.html#integration-with-setuptools-test-commands\n # Allows to provide pytest command argument through the test runner command `python setup.py test`\n # e.g: `python setup.py test -a \"-k=test\"`\n # This only works for 1 argument though\n user_options = [(\"pytest-args=\", \"a\", \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = \"\"\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = [\"tests\"]\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n from pkg_resources import _namespace_packages\n import pytest\n\n # Purge modules under test from sys.modules. The test loader will\n # re-import them from the build location. Required when 2to3 is used\n # with namespace packages.\n if sys.version_info >= (3,) and getattr(self.distribution, \"use_2to3\", False):\n module = self.test_args[-1].split(\".\")[0]\n if module in _namespace_packages:\n del_modules = []\n if module in sys.modules:\n del_modules.append(module)\n module += \".\"\n for name in sys.modules:\n if name.startswith(module):\n del_modules.append(name)\n map(sys.modules.__delitem__, del_modules)\n\n # Run on the build directory for 2to3-built code\n # This will prevent the old 2.x code from being found\n # by py.test discovery mechanism, that apparently\n # ignores sys.path..\n ei_cmd = self.get_finalized_command(\"egg_info\")\n self.test_args = [normalize_path(ei_cmd.egg_base)]\n\n cmd_args = self.test_args + ([self.pytest_args] if self.pytest_args else [])\n errno = pytest.main(cmd_args)\n sys.exit(errno)\n\n\n# Dirty hack to get version number from monogengine/__init__.py - we can't\n# import it as it depends on PyMongo and PyMongo isn't installed until this\n# file is read\ninit = os.path.join(os.path.dirname(__file__), \"mongoengine\", \"__init__.py\")\nversion_line = list(filter(lambda l: l.startswith(\"VERSION\"), open(init)))[0]\n\nVERSION = get_version(eval(version_line.split(\"=\")[-1]))\n\nCLASSIFIERS = [\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Database\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nextra_opts = {\n \"packages\": 
find_packages(exclude=[\"tests\", \"tests.*\"]),\n \"tests_require\": [\n \"pytest<5.0\",\n \"pytest-cov\",\n \"coverage<5.0\", # recent coverage switched to sqlite format for the .coverage file which isn't handled properly by coveralls\n \"blinker\",\n \"Pillow>=7.0.0\",\n ],\n}\n\nif \"test\" in sys.argv:\n extra_opts[\"packages\"] = find_packages()\n extra_opts[\"package_data\"] = {\n \"tests\": [\"fields/mongoengine.png\", \"fields/mongodb_leaf.png\"]\n }\n\nsetup(\n name=\"mongoengine\",\n version=VERSION,\n author=\"Harry Marr\",\n author_email=\"[email protected]\",\n maintainer=\"Stefan Wojcik\",\n maintainer_email=\"[email protected]\",\n url=\"http://mongoengine.org/\",\n download_url=\"https://github.com/MongoEngine/mongoengine/tarball/master\",\n license=\"MIT\",\n include_package_data=True,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n platforms=[\"any\"],\n classifiers=CLASSIFIERS,\n python_requires=\">=3.5\",\n install_requires=[\"pymongo>=3.4, <4.0\"],\n cmdclass={\"test\": PyTest},\n **extra_opts\n)\n", "path": "setup.py"}]} | 1,969 | 122 |
gh_patches_debug_22111 | rasdani/github-patches | git_diff | internetarchive__openlibrary-7946 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unique visitors graph on homepage numbers appear too large
<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->
### Evidence / Screenshot (if possible)

### Relevant url?
https://openlibrary.org/
### Proposal & Constraints
Mek thinks these might be off by a factor of something? Drini thinks they might be correct and just include a tooooon of requests we weren't including before (images, script tags).
### Related files
<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->
### Stakeholders
@mekarpeles @cclauss
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openlibrary/core/admin.py`
Content:
```
1 """Admin functionality."""
2
3 import calendar
4 import datetime
5
6 import requests
7 import web
8 from infogami import config
9
10 from openlibrary.core import cache
11
12
13 class Stats:
14 def __init__(self, docs, key, total_key):
15 self.key = key
16 self.docs = docs
17 try:
18 self.latest = docs[-1].get(key, 0)
19 except IndexError:
20 self.latest = 0
21
22 try:
23 self.previous = docs[-2].get(key, 0)
24 except IndexError:
25 self.previous = 0
26
27 try:
28 # Last available total count
29 self.total = next(x for x in reversed(docs) if total_key in x)[total_key]
30 except (KeyError, StopIteration):
31 self.total = ""
32
33 def get_counts(self, ndays=28, times=False):
34 """Returns the stats for last n days as an array useful for
35 plotting. i.e. an array of [x, y] tuples where y is the value
36 and `x` the x coordinate.
37
38 If times is True, the x coordinate in the tuple will be
39 timestamps for the day.
40 """
41
42 def _convert_to_milli_timestamp(d):
43 """Uses the `_id` of the document `d` to create a UNIX
44 timestamp and coverts it to milliseconds"""
45 t = datetime.datetime.strptime(d, "counts-%Y-%m-%d")
46 return calendar.timegm(t.timetuple()) * 1000
47
48 if times:
49 return [
50 [_convert_to_milli_timestamp(x['_key']), x.get(self.key, 0)]
51 for x in self.docs[-ndays:]
52 ]
53 else:
54 return zip(
55 range(0, ndays * 5, 5), (x.get(self.key, 0) for x in self.docs[-ndays:])
56 ) # The *5 and 5 are for the bar widths
57
58 def get_summary(self, ndays=28):
59 """Returns the summary of counts for past n days.
60
61 Summary can be either sum or average depending on the type of stats.
62 This is used to find counts for last 7 days and last 28 days.
63 """
64 return sum(x[1] for x in self.get_counts(ndays))
65
66
67 @cache.memoize(
68 engine="memcache", key="admin._get_loan_counts_from_graphite", expires=5 * 60
69 )
70 def _get_loan_counts_from_graphite(ndays: int) -> list[list[int]] | None:
71 try:
72 r = requests.get(
73 'http://graphite.us.archive.org/render',
74 params={
75 'target': 'hitcount(stats.ol.loans.bookreader, "1d")',
76 'from': '-%ddays' % ndays,
77 'tz': 'UTC',
78 'format': 'json',
79 },
80 )
81 return r.json()[0]['datapoints']
82 except (requests.exceptions.RequestException, ValueError, AttributeError):
83 return None
84
85
86 class LoanStats(Stats):
87 """
88 Temporary (2020-03-19) override of Stats for loans, due to bug
89 which caused 1mo of loans stats to be missing from regular
90 stats db. This implementation uses graphite, but only on prod,
91 so that we don't forget.
92 """
93
94 def get_counts(self, ndays=28, times=False):
95 # Let dev.openlibrary.org show the true state of things
96 if 'dev' in config.features:
97 return Stats.get_counts(self, ndays, times)
98
99 if graphite_data := _get_loan_counts_from_graphite(ndays):
100 # convert timestamp seconds to ms (as required by API)
101 return [[timestamp * 1000, count] for [count, timestamp] in graphite_data]
102 else:
103 return Stats.get_counts(self, ndays, times)
104
105
106 @cache.memoize(
107 engine="memcache", key="admin._get_visitor_counts_from_graphite", expires=5 * 60
108 )
109 def _get_visitor_counts_from_graphite(self, ndays: int = 28) -> list[list[int]]:
110 """
111 Read the unique visitors (IP addresses) per day for the last ndays from graphite.
112 :param ndays: number of days to read
113 :return: list containing [count, timestamp] for ndays
114 """
115 try:
116 response = requests.get(
117 "http://graphite.us.archive.org/render/",
118 params={
119 "target": "hitcount(stats.uniqueips.openlibrary, '1d')",
120 "from": f"-{ndays}days",
121 "tz": "UTC",
122 "format": "json",
123 },
124 )
125 response.raise_for_status()
126 visitors = response.json()[0]['datapoints']
127 except requests.exceptions.RequestException:
128 visitors = []
129 return visitors
130
131
132 class VisitorStats(Stats):
133 def get_counts(self, ndays: int = 28, times: bool = False) -> list[tuple[int, int]]:
134 visitors = _get_visitor_counts_from_graphite(ndays)
135 # Flip the order, convert timestamp to msec and convert count==None to zero
136 return [
137 (int(timestamp * 1000), int(count or 0)) for count, timestamp in visitors
138 ]
139
140
141 @cache.memoize(engine="memcache", key="admin._get_count_docs", expires=5 * 60)
142 def _get_count_docs(ndays):
143 """Returns the count docs from admin stats database.
144
145 This function is memoized to avoid accessing the db for every request.
146 """
147 today = datetime.datetime.utcnow().date()
148 dates = [today - datetime.timedelta(days=i) for i in range(ndays)]
149
150 # we want the dates in reverse order
151 dates = dates[::-1]
152
153 docs = [web.ctx.site.store.get(d.strftime("counts-%Y-%m-%d")) for d in dates]
154 return [d for d in docs if d]
155
156
157 def get_stats(ndays=30):
158 """Returns the stats for the past `ndays`"""
159 docs = _get_count_docs(ndays)
160 return {
161 'human_edits': Stats(docs, "human_edits", "human_edits"),
162 'bot_edits': Stats(docs, "bot_edits", "bot_edits"),
163 'lists': Stats(docs, "lists", "total_lists"),
164 'visitors': VisitorStats(docs, "visitors", "visitors"),
165 'loans': LoanStats(docs, "loans", "loans"),
166 'members': Stats(docs, "members", "total_members"),
167 'works': Stats(docs, "works", "total_works"),
168 'editions': Stats(docs, "editions", "total_editions"),
169 'ebooks': Stats(docs, "ebooks", "total_ebooks"),
170 'covers': Stats(docs, "covers", "total_covers"),
171 'authors': Stats(docs, "authors", "total_authors"),
172 'subjects': Stats(docs, "subjects", "total_subjects"),
173 }
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openlibrary/core/admin.py b/openlibrary/core/admin.py
--- a/openlibrary/core/admin.py
+++ b/openlibrary/core/admin.py
@@ -116,7 +116,7 @@
response = requests.get(
"http://graphite.us.archive.org/render/",
params={
- "target": "hitcount(stats.uniqueips.openlibrary, '1d')",
+ "target": "summarize(stats.uniqueips.openlibrary, '1d')",
"from": f"-{ndays}days",
"tz": "UTC",
"format": "json",
@@ -132,7 +132,7 @@
class VisitorStats(Stats):
def get_counts(self, ndays: int = 28, times: bool = False) -> list[tuple[int, int]]:
visitors = _get_visitor_counts_from_graphite(ndays)
- # Flip the order, convert timestamp to msec and convert count==None to zero
+ # Flip the order, convert timestamp to msec, and convert count==None to zero
return [
(int(timestamp * 1000), int(count or 0)) for count, timestamp in visitors
]
| {"golden_diff": "diff --git a/openlibrary/core/admin.py b/openlibrary/core/admin.py\n--- a/openlibrary/core/admin.py\n+++ b/openlibrary/core/admin.py\n@@ -116,7 +116,7 @@\n response = requests.get(\n \"http://graphite.us.archive.org/render/\",\n params={\n- \"target\": \"hitcount(stats.uniqueips.openlibrary, '1d')\",\n+ \"target\": \"summarize(stats.uniqueips.openlibrary, '1d')\",\n \"from\": f\"-{ndays}days\",\n \"tz\": \"UTC\",\n \"format\": \"json\",\n@@ -132,7 +132,7 @@\n class VisitorStats(Stats):\n def get_counts(self, ndays: int = 28, times: bool = False) -> list[tuple[int, int]]:\n visitors = _get_visitor_counts_from_graphite(ndays)\n- # Flip the order, convert timestamp to msec and convert count==None to zero\n+ # Flip the order, convert timestamp to msec, and convert count==None to zero\n return [\n (int(timestamp * 1000), int(count or 0)) for count, timestamp in visitors\n ]\n", "issue": "Unique visitors graph on homepage numbers appear too large\n<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->\r\n\r\n### Evidence / Screenshot (if possible)\r\n\r\n\r\n### Relevant url?\r\nhttps://openlibrary.org/\r\n\r\n### Proposal & Constraints\r\nMek thinks these might be off by a factor of something? Drini thinks they might be correct just include a tooooon of requests we weren't including before (images, script tags).\r\n\r\n### Related files\r\n<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->\r\n\r\n### Stakeholders\r\n@mekarpeles @cclauss \n", "before_files": [{"content": "\"\"\"Admin functionality.\"\"\"\n\nimport calendar\nimport datetime\n\nimport requests\nimport web\nfrom infogami import config\n\nfrom openlibrary.core import cache\n\n\nclass Stats:\n def __init__(self, docs, key, total_key):\n self.key = key\n self.docs = docs\n try:\n self.latest = docs[-1].get(key, 0)\n except IndexError:\n self.latest = 0\n\n try:\n self.previous = docs[-2].get(key, 0)\n except IndexError:\n self.previous = 0\n\n try:\n # Last available total count\n self.total = next(x for x in reversed(docs) if total_key in x)[total_key]\n except (KeyError, StopIteration):\n self.total = \"\"\n\n def get_counts(self, ndays=28, times=False):\n \"\"\"Returns the stats for last n days as an array useful for\n plotting. i.e. 
an array of [x, y] tuples where y is the value\n and `x` the x coordinate.\n\n If times is True, the x coordinate in the tuple will be\n timestamps for the day.\n \"\"\"\n\n def _convert_to_milli_timestamp(d):\n \"\"\"Uses the `_id` of the document `d` to create a UNIX\n timestamp and coverts it to milliseconds\"\"\"\n t = datetime.datetime.strptime(d, \"counts-%Y-%m-%d\")\n return calendar.timegm(t.timetuple()) * 1000\n\n if times:\n return [\n [_convert_to_milli_timestamp(x['_key']), x.get(self.key, 0)]\n for x in self.docs[-ndays:]\n ]\n else:\n return zip(\n range(0, ndays * 5, 5), (x.get(self.key, 0) for x in self.docs[-ndays:])\n ) # The *5 and 5 are for the bar widths\n\n def get_summary(self, ndays=28):\n \"\"\"Returns the summary of counts for past n days.\n\n Summary can be either sum or average depending on the type of stats.\n This is used to find counts for last 7 days and last 28 days.\n \"\"\"\n return sum(x[1] for x in self.get_counts(ndays))\n\n\[email protected](\n engine=\"memcache\", key=\"admin._get_loan_counts_from_graphite\", expires=5 * 60\n)\ndef _get_loan_counts_from_graphite(ndays: int) -> list[list[int]] | None:\n try:\n r = requests.get(\n 'http://graphite.us.archive.org/render',\n params={\n 'target': 'hitcount(stats.ol.loans.bookreader, \"1d\")',\n 'from': '-%ddays' % ndays,\n 'tz': 'UTC',\n 'format': 'json',\n },\n )\n return r.json()[0]['datapoints']\n except (requests.exceptions.RequestException, ValueError, AttributeError):\n return None\n\n\nclass LoanStats(Stats):\n \"\"\"\n Temporary (2020-03-19) override of Stats for loans, due to bug\n which caused 1mo of loans stats to be missing from regular\n stats db. This implementation uses graphite, but only on prod,\n so that we don't forget.\n \"\"\"\n\n def get_counts(self, ndays=28, times=False):\n # Let dev.openlibrary.org show the true state of things\n if 'dev' in config.features:\n return Stats.get_counts(self, ndays, times)\n\n if graphite_data := _get_loan_counts_from_graphite(ndays):\n # convert timestamp seconds to ms (as required by API)\n return [[timestamp * 1000, count] for [count, timestamp] in graphite_data]\n else:\n return Stats.get_counts(self, ndays, times)\n\n\[email protected](\n engine=\"memcache\", key=\"admin._get_visitor_counts_from_graphite\", expires=5 * 60\n)\ndef _get_visitor_counts_from_graphite(self, ndays: int = 28) -> list[list[int]]:\n \"\"\"\n Read the unique visitors (IP addresses) per day for the last ndays from graphite.\n :param ndays: number of days to read\n :return: list containing [count, timestamp] for ndays\n \"\"\"\n try:\n response = requests.get(\n \"http://graphite.us.archive.org/render/\",\n params={\n \"target\": \"hitcount(stats.uniqueips.openlibrary, '1d')\",\n \"from\": f\"-{ndays}days\",\n \"tz\": \"UTC\",\n \"format\": \"json\",\n },\n )\n response.raise_for_status()\n visitors = response.json()[0]['datapoints']\n except requests.exceptions.RequestException:\n visitors = []\n return visitors\n\n\nclass VisitorStats(Stats):\n def get_counts(self, ndays: int = 28, times: bool = False) -> list[tuple[int, int]]:\n visitors = _get_visitor_counts_from_graphite(ndays)\n # Flip the order, convert timestamp to msec and convert count==None to zero\n return [\n (int(timestamp * 1000), int(count or 0)) for count, timestamp in visitors\n ]\n\n\[email protected](engine=\"memcache\", key=\"admin._get_count_docs\", expires=5 * 60)\ndef _get_count_docs(ndays):\n \"\"\"Returns the count docs from admin stats database.\n\n This function is memoized to avoid accessing the 
db for every request.\n \"\"\"\n today = datetime.datetime.utcnow().date()\n dates = [today - datetime.timedelta(days=i) for i in range(ndays)]\n\n # we want the dates in reverse order\n dates = dates[::-1]\n\n docs = [web.ctx.site.store.get(d.strftime(\"counts-%Y-%m-%d\")) for d in dates]\n return [d for d in docs if d]\n\n\ndef get_stats(ndays=30):\n \"\"\"Returns the stats for the past `ndays`\"\"\"\n docs = _get_count_docs(ndays)\n return {\n 'human_edits': Stats(docs, \"human_edits\", \"human_edits\"),\n 'bot_edits': Stats(docs, \"bot_edits\", \"bot_edits\"),\n 'lists': Stats(docs, \"lists\", \"total_lists\"),\n 'visitors': VisitorStats(docs, \"visitors\", \"visitors\"),\n 'loans': LoanStats(docs, \"loans\", \"loans\"),\n 'members': Stats(docs, \"members\", \"total_members\"),\n 'works': Stats(docs, \"works\", \"total_works\"),\n 'editions': Stats(docs, \"editions\", \"total_editions\"),\n 'ebooks': Stats(docs, \"ebooks\", \"total_ebooks\"),\n 'covers': Stats(docs, \"covers\", \"total_covers\"),\n 'authors': Stats(docs, \"authors\", \"total_authors\"),\n 'subjects': Stats(docs, \"subjects\", \"total_subjects\"),\n }\n", "path": "openlibrary/core/admin.py"}], "after_files": [{"content": "\"\"\"Admin functionality.\"\"\"\n\nimport calendar\nimport datetime\n\nimport requests\nimport web\nfrom infogami import config\n\nfrom openlibrary.core import cache\n\n\nclass Stats:\n def __init__(self, docs, key, total_key):\n self.key = key\n self.docs = docs\n try:\n self.latest = docs[-1].get(key, 0)\n except IndexError:\n self.latest = 0\n\n try:\n self.previous = docs[-2].get(key, 0)\n except IndexError:\n self.previous = 0\n\n try:\n # Last available total count\n self.total = next(x for x in reversed(docs) if total_key in x)[total_key]\n except (KeyError, StopIteration):\n self.total = \"\"\n\n def get_counts(self, ndays=28, times=False):\n \"\"\"Returns the stats for last n days as an array useful for\n plotting. i.e. 
an array of [x, y] tuples where y is the value\n and `x` the x coordinate.\n\n If times is True, the x coordinate in the tuple will be\n timestamps for the day.\n \"\"\"\n\n def _convert_to_milli_timestamp(d):\n \"\"\"Uses the `_id` of the document `d` to create a UNIX\n timestamp and coverts it to milliseconds\"\"\"\n t = datetime.datetime.strptime(d, \"counts-%Y-%m-%d\")\n return calendar.timegm(t.timetuple()) * 1000\n\n if times:\n return [\n [_convert_to_milli_timestamp(x['_key']), x.get(self.key, 0)]\n for x in self.docs[-ndays:]\n ]\n else:\n return zip(\n range(0, ndays * 5, 5), (x.get(self.key, 0) for x in self.docs[-ndays:])\n ) # The *5 and 5 are for the bar widths\n\n def get_summary(self, ndays=28):\n \"\"\"Returns the summary of counts for past n days.\n\n Summary can be either sum or average depending on the type of stats.\n This is used to find counts for last 7 days and last 28 days.\n \"\"\"\n return sum(x[1] for x in self.get_counts(ndays))\n\n\[email protected](\n engine=\"memcache\", key=\"admin._get_loan_counts_from_graphite\", expires=5 * 60\n)\ndef _get_loan_counts_from_graphite(ndays: int) -> list[list[int]] | None:\n try:\n r = requests.get(\n 'http://graphite.us.archive.org/render',\n params={\n 'target': 'hitcount(stats.ol.loans.bookreader, \"1d\")',\n 'from': '-%ddays' % ndays,\n 'tz': 'UTC',\n 'format': 'json',\n },\n )\n return r.json()[0]['datapoints']\n except (requests.exceptions.RequestException, ValueError, AttributeError):\n return None\n\n\nclass LoanStats(Stats):\n \"\"\"\n Temporary (2020-03-19) override of Stats for loans, due to bug\n which caused 1mo of loans stats to be missing from regular\n stats db. This implementation uses graphite, but only on prod,\n so that we don't forget.\n \"\"\"\n\n def get_counts(self, ndays=28, times=False):\n # Let dev.openlibrary.org show the true state of things\n if 'dev' in config.features:\n return Stats.get_counts(self, ndays, times)\n\n if graphite_data := _get_loan_counts_from_graphite(ndays):\n # convert timestamp seconds to ms (as required by API)\n return [[timestamp * 1000, count] for [count, timestamp] in graphite_data]\n else:\n return Stats.get_counts(self, ndays, times)\n\n\[email protected](\n engine=\"memcache\", key=\"admin._get_visitor_counts_from_graphite\", expires=5 * 60\n)\ndef _get_visitor_counts_from_graphite(self, ndays: int = 28) -> list[list[int]]:\n \"\"\"\n Read the unique visitors (IP addresses) per day for the last ndays from graphite.\n :param ndays: number of days to read\n :return: list containing [count, timestamp] for ndays\n \"\"\"\n try:\n response = requests.get(\n \"http://graphite.us.archive.org/render/\",\n params={\n \"target\": \"summarize(stats.uniqueips.openlibrary, '1d')\",\n \"from\": f\"-{ndays}days\",\n \"tz\": \"UTC\",\n \"format\": \"json\",\n },\n )\n response.raise_for_status()\n visitors = response.json()[0]['datapoints']\n except requests.exceptions.RequestException:\n visitors = []\n return visitors\n\n\nclass VisitorStats(Stats):\n def get_counts(self, ndays: int = 28, times: bool = False) -> list[tuple[int, int]]:\n visitors = _get_visitor_counts_from_graphite(ndays)\n # Flip the order, convert timestamp to msec, and convert count==None to zero\n return [\n (int(timestamp * 1000), int(count or 0)) for count, timestamp in visitors\n ]\n\n\[email protected](engine=\"memcache\", key=\"admin._get_count_docs\", expires=5 * 60)\ndef _get_count_docs(ndays):\n \"\"\"Returns the count docs from admin stats database.\n\n This function is memoized to avoid accessing 
the db for every request.\n \"\"\"\n today = datetime.datetime.utcnow().date()\n dates = [today - datetime.timedelta(days=i) for i in range(ndays)]\n\n # we want the dates in reverse order\n dates = dates[::-1]\n\n docs = [web.ctx.site.store.get(d.strftime(\"counts-%Y-%m-%d\")) for d in dates]\n return [d for d in docs if d]\n\n\ndef get_stats(ndays=30):\n \"\"\"Returns the stats for the past `ndays`\"\"\"\n docs = _get_count_docs(ndays)\n return {\n 'human_edits': Stats(docs, \"human_edits\", \"human_edits\"),\n 'bot_edits': Stats(docs, \"bot_edits\", \"bot_edits\"),\n 'lists': Stats(docs, \"lists\", \"total_lists\"),\n 'visitors': VisitorStats(docs, \"visitors\", \"visitors\"),\n 'loans': LoanStats(docs, \"loans\", \"loans\"),\n 'members': Stats(docs, \"members\", \"total_members\"),\n 'works': Stats(docs, \"works\", \"total_works\"),\n 'editions': Stats(docs, \"editions\", \"total_editions\"),\n 'ebooks': Stats(docs, \"ebooks\", \"total_ebooks\"),\n 'covers': Stats(docs, \"covers\", \"total_covers\"),\n 'authors': Stats(docs, \"authors\", \"total_authors\"),\n 'subjects': Stats(docs, \"subjects\", \"total_subjects\"),\n }\n", "path": "openlibrary/core/admin.py"}]} | 2,406 | 261 |
gh_patches_debug_13428 | rasdani/github-patches | git_diff | pallets__werkzeug-2074 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
User agents not being detected
While using your utility to parse user agents for calls to my site, I have a few that don't get parsed correctly:
Version not getting parsed on the following user agents:
* `Mozilla/5.0 (Linux; Android 10; motorola edge) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.101 Mobile Safari/537.36`
* `Mozilla/5.0 (Linux; Android 10; motorola edge) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.81 Mobile Safari/537.36`
* `Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322)`
Detected as 'mozilla' but is actually msie v11:
* `Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/werkzeug/useragents.py`
Content:
```
1 import re
2 import typing as t
3
4 if t.TYPE_CHECKING:
5 from wsgiref.types import WSGIEnvironment
6
7
8 class UserAgentParser:
9 """A simple user agent parser. Used by the `UserAgent`."""
10
11 platform_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (
12 (" cros ", "chromeos"),
13 ("iphone|ios", "iphone"),
14 ("ipad", "ipad"),
15 (r"darwin\b|mac\b|os\s*x", "macos"),
16 ("win", "windows"),
17 (r"android", "android"),
18 ("netbsd", "netbsd"),
19 ("openbsd", "openbsd"),
20 ("freebsd", "freebsd"),
21 ("dragonfly", "dragonflybsd"),
22 ("(sun|i86)os", "solaris"),
23 (r"x11\b|lin(\b|ux)?", "linux"),
24 (r"nintendo\s+wii", "wii"),
25 ("irix", "irix"),
26 ("hp-?ux", "hpux"),
27 ("aix", "aix"),
28 ("sco|unix_sv", "sco"),
29 ("bsd", "bsd"),
30 ("amiga", "amiga"),
31 ("blackberry|playbook", "blackberry"),
32 ("symbian", "symbian"),
33 )
34 browser_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (
35 ("googlebot", "google"),
36 ("msnbot", "msn"),
37 ("yahoo", "yahoo"),
38 ("ask jeeves", "ask"),
39 (r"aol|america\s+online\s+browser", "aol"),
40 (r"opera|opr", "opera"),
41 ("edge|edg", "edge"),
42 ("chrome|crios", "chrome"),
43 ("seamonkey", "seamonkey"),
44 ("firefox|firebird|phoenix|iceweasel", "firefox"),
45 ("galeon", "galeon"),
46 ("safari|version", "safari"),
47 ("webkit", "webkit"),
48 ("camino", "camino"),
49 ("konqueror", "konqueror"),
50 ("k-meleon", "kmeleon"),
51 ("netscape", "netscape"),
52 (r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"),
53 ("lynx", "lynx"),
54 ("links", "links"),
55 ("Baiduspider", "baidu"),
56 ("bingbot", "bing"),
57 ("mozilla", "mozilla"),
58 )
59
60 _browser_version_re = r"(?:{pattern})[/\sa-z(]*(\d+[.\da-z]+)?"
61 _language_re = re.compile(
62 r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|"
63 r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)"
64 )
65
66 def __init__(self) -> None:
67 self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platform_rules]
68 self.browsers = [
69 (b, re.compile(self._browser_version_re.format(pattern=a), re.I))
70 for a, b in self.browser_rules
71 ]
72
73 def __call__(
74 self, user_agent: str
75 ) -> t.Tuple[t.Optional[str], t.Optional[str], t.Optional[str], t.Optional[str]]:
76 platform: t.Optional[str]
77 browser: t.Optional[str]
78 version: t.Optional[str]
79 language: t.Optional[str]
80
81 for platform, regex in self.platforms: # noqa: B007
82 match = regex.search(user_agent)
83 if match is not None:
84 break
85 else:
86 platform = None
87 for browser, regex in self.browsers: # noqa: B007
88 match = regex.search(user_agent)
89 if match is not None:
90 version = match.group(1)
91 break
92 else:
93 browser = version = None
94 match = self._language_re.search(user_agent)
95 if match is not None:
96 language = match.group(1) or match.group(2)
97 else:
98 language = None
99 return platform, browser, version, language
100
101
102 class UserAgent:
103 """Represents a user agent. Pass it a WSGI environment or a user agent
104 string and you can inspect some of the details from the user agent
105 string via the attributes. The following attributes exist:
106
107 .. attribute:: string
108
109 the raw user agent string
110
111 .. attribute:: platform
112
113 the browser platform. ``None`` if not recognized.
114 The following platforms are currently recognized:
115
116 - `aix`
117 - `amiga`
118 - `android`
119 - `blackberry`
120 - `bsd`
121 - `chromeos`
122 - `dragonflybsd`
123 - `freebsd`
124 - `hpux`
125 - `ipad`
126 - `iphone`
127 - `irix`
128 - `linux`
129 - `macos`
130 - `netbsd`
131 - `openbsd`
132 - `sco`
133 - `solaris`
134 - `symbian`
135 - `wii`
136 - `windows`
137
138 .. attribute:: browser
139
140 the name of the browser. ``None`` if not recognized.
141 The following browsers are currently recognized:
142
143 - `aol` *
144 - `ask` *
145 - `baidu` *
146 - `bing` *
147 - `camino`
148 - `chrome`
149 - `edge`
150 - `firefox`
151 - `galeon`
152 - `google` *
153 - `kmeleon`
154 - `konqueror`
155 - `links`
156 - `lynx`
157 - `mozilla`
158 - `msie`
159 - `msn`
160 - `netscape`
161 - `opera`
162 - `safari`
163 - `seamonkey`
164 - `webkit`
165 - `yahoo` *
166
167 (Browsers marked with a star (``*``) are crawlers.)
168
169 .. attribute:: version
170
171 the version of the browser. ``None`` if not recognized.
172
173 .. attribute:: language
174
175 the language of the browser. ``None`` if not recognized.
176 """
177
178 _parser = UserAgentParser()
179
180 def __init__(self, environ_or_string: t.Union["WSGIEnvironment", str]) -> None:
181 if isinstance(environ_or_string, dict):
182 self.string = environ_or_string.get("HTTP_USER_AGENT", "")
183 else:
184 self.string = environ_or_string
185
186 self.platform, self.browser, self.version, self.language = self._parser(
187 self.string
188 )
189
190 def to_header(self) -> str:
191 return self.string
192
193 def __str__(self) -> str:
194 return self.string
195
196 def __bool__(self) -> bool:
197 return bool(self.browser)
198
199 def __repr__(self):
200 return f"<{type(self).__name__} {self.browser!r}/{self.version}>"
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/werkzeug/useragents.py b/src/werkzeug/useragents.py
--- a/src/werkzeug/useragents.py
+++ b/src/werkzeug/useragents.py
@@ -84,8 +84,18 @@
break
else:
platform = None
+
+ # Except for Trident, all browser key words come after the last ')'
+ last_closing_paren = 0
+ if (
+ not re.compile(r"trident/.+? rv:", re.I).search(user_agent)
+ and ")" in user_agent
+ and user_agent[-1] != ")"
+ ):
+ last_closing_paren = user_agent.rindex(")")
+
for browser, regex in self.browsers: # noqa: B007
- match = regex.search(user_agent)
+ match = regex.search(user_agent[last_closing_paren:])
if match is not None:
version = match.group(1)
break
| {"golden_diff": "diff --git a/src/werkzeug/useragents.py b/src/werkzeug/useragents.py\n--- a/src/werkzeug/useragents.py\n+++ b/src/werkzeug/useragents.py\n@@ -84,8 +84,18 @@\n break\n else:\n platform = None\n+\n+ # Except for Trident, all browser key words come after the last ')'\n+ last_closing_paren = 0\n+ if (\n+ not re.compile(r\"trident/.+? rv:\", re.I).search(user_agent)\n+ and \")\" in user_agent\n+ and user_agent[-1] != \")\"\n+ ):\n+ last_closing_paren = user_agent.rindex(\")\")\n+\n for browser, regex in self.browsers: # noqa: B007\n- match = regex.search(user_agent)\n+ match = regex.search(user_agent[last_closing_paren:])\n if match is not None:\n version = match.group(1)\n break\n", "issue": "User agents not being detected\nWhile using your utility to parse user agents for calls to my site, I have a few that don't get parsed correctly:\r\n\r\nVersion not getting parsed on the following user agents:\r\n\r\n* `Mozilla/5.0 (Linux; Android 10; motorola edge) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.101 Mobile Safari/537.36`\r\n* `Mozilla/5.0 (Linux; Android 10; motorola edge) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.81 Mobile Safari/537.36`\r\n* `Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322)`\r\n\r\nDetected as 'mozilla' but is actually msie v11:\r\n\r\n* `Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko`\n", "before_files": [{"content": "import re\nimport typing as t\n\nif t.TYPE_CHECKING:\n from wsgiref.types import WSGIEnvironment\n\n\nclass UserAgentParser:\n \"\"\"A simple user agent parser. Used by the `UserAgent`.\"\"\"\n\n platform_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (\n (\" cros \", \"chromeos\"),\n (\"iphone|ios\", \"iphone\"),\n (\"ipad\", \"ipad\"),\n (r\"darwin\\b|mac\\b|os\\s*x\", \"macos\"),\n (\"win\", \"windows\"),\n (r\"android\", \"android\"),\n (\"netbsd\", \"netbsd\"),\n (\"openbsd\", \"openbsd\"),\n (\"freebsd\", \"freebsd\"),\n (\"dragonfly\", \"dragonflybsd\"),\n (\"(sun|i86)os\", \"solaris\"),\n (r\"x11\\b|lin(\\b|ux)?\", \"linux\"),\n (r\"nintendo\\s+wii\", \"wii\"),\n (\"irix\", \"irix\"),\n (\"hp-?ux\", \"hpux\"),\n (\"aix\", \"aix\"),\n (\"sco|unix_sv\", \"sco\"),\n (\"bsd\", \"bsd\"),\n (\"amiga\", \"amiga\"),\n (\"blackberry|playbook\", \"blackberry\"),\n (\"symbian\", \"symbian\"),\n )\n browser_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (\n (\"googlebot\", \"google\"),\n (\"msnbot\", \"msn\"),\n (\"yahoo\", \"yahoo\"),\n (\"ask jeeves\", \"ask\"),\n (r\"aol|america\\s+online\\s+browser\", \"aol\"),\n (r\"opera|opr\", \"opera\"),\n (\"edge|edg\", \"edge\"),\n (\"chrome|crios\", \"chrome\"),\n (\"seamonkey\", \"seamonkey\"),\n (\"firefox|firebird|phoenix|iceweasel\", \"firefox\"),\n (\"galeon\", \"galeon\"),\n (\"safari|version\", \"safari\"),\n (\"webkit\", \"webkit\"),\n (\"camino\", \"camino\"),\n (\"konqueror\", \"konqueror\"),\n (\"k-meleon\", \"kmeleon\"),\n (\"netscape\", \"netscape\"),\n (r\"msie|microsoft\\s+internet\\s+explorer|trident/.+? 
rv:\", \"msie\"),\n (\"lynx\", \"lynx\"),\n (\"links\", \"links\"),\n (\"Baiduspider\", \"baidu\"),\n (\"bingbot\", \"bing\"),\n (\"mozilla\", \"mozilla\"),\n )\n\n _browser_version_re = r\"(?:{pattern})[/\\sa-z(]*(\\d+[.\\da-z]+)?\"\n _language_re = re.compile(\n r\"(?:;\\s*|\\s+)(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*;|\"\n r\"(?:\\(|\\[|;)\\s*(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*(?:\\]|\\)|;)\"\n )\n\n def __init__(self) -> None:\n self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platform_rules]\n self.browsers = [\n (b, re.compile(self._browser_version_re.format(pattern=a), re.I))\n for a, b in self.browser_rules\n ]\n\n def __call__(\n self, user_agent: str\n ) -> t.Tuple[t.Optional[str], t.Optional[str], t.Optional[str], t.Optional[str]]:\n platform: t.Optional[str]\n browser: t.Optional[str]\n version: t.Optional[str]\n language: t.Optional[str]\n\n for platform, regex in self.platforms: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n break\n else:\n platform = None\n for browser, regex in self.browsers: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n version = match.group(1)\n break\n else:\n browser = version = None\n match = self._language_re.search(user_agent)\n if match is not None:\n language = match.group(1) or match.group(2)\n else:\n language = None\n return platform, browser, version, language\n\n\nclass UserAgent:\n \"\"\"Represents a user agent. Pass it a WSGI environment or a user agent\n string and you can inspect some of the details from the user agent\n string via the attributes. The following attributes exist:\n\n .. attribute:: string\n\n the raw user agent string\n\n .. attribute:: platform\n\n the browser platform. ``None`` if not recognized.\n The following platforms are currently recognized:\n\n - `aix`\n - `amiga`\n - `android`\n - `blackberry`\n - `bsd`\n - `chromeos`\n - `dragonflybsd`\n - `freebsd`\n - `hpux`\n - `ipad`\n - `iphone`\n - `irix`\n - `linux`\n - `macos`\n - `netbsd`\n - `openbsd`\n - `sco`\n - `solaris`\n - `symbian`\n - `wii`\n - `windows`\n\n .. attribute:: browser\n\n the name of the browser. ``None`` if not recognized.\n The following browsers are currently recognized:\n\n - `aol` *\n - `ask` *\n - `baidu` *\n - `bing` *\n - `camino`\n - `chrome`\n - `edge`\n - `firefox`\n - `galeon`\n - `google` *\n - `kmeleon`\n - `konqueror`\n - `links`\n - `lynx`\n - `mozilla`\n - `msie`\n - `msn`\n - `netscape`\n - `opera`\n - `safari`\n - `seamonkey`\n - `webkit`\n - `yahoo` *\n\n (Browsers marked with a star (``*``) are crawlers.)\n\n .. attribute:: version\n\n the version of the browser. ``None`` if not recognized.\n\n .. attribute:: language\n\n the language of the browser. 
``None`` if not recognized.\n \"\"\"\n\n _parser = UserAgentParser()\n\n def __init__(self, environ_or_string: t.Union[\"WSGIEnvironment\", str]) -> None:\n if isinstance(environ_or_string, dict):\n self.string = environ_or_string.get(\"HTTP_USER_AGENT\", \"\")\n else:\n self.string = environ_or_string\n\n self.platform, self.browser, self.version, self.language = self._parser(\n self.string\n )\n\n def to_header(self) -> str:\n return self.string\n\n def __str__(self) -> str:\n return self.string\n\n def __bool__(self) -> bool:\n return bool(self.browser)\n\n def __repr__(self):\n return f\"<{type(self).__name__} {self.browser!r}/{self.version}>\"\n", "path": "src/werkzeug/useragents.py"}], "after_files": [{"content": "import re\nimport typing as t\n\nif t.TYPE_CHECKING:\n from wsgiref.types import WSGIEnvironment\n\n\nclass UserAgentParser:\n \"\"\"A simple user agent parser. Used by the `UserAgent`.\"\"\"\n\n platform_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (\n (\" cros \", \"chromeos\"),\n (\"iphone|ios\", \"iphone\"),\n (\"ipad\", \"ipad\"),\n (r\"darwin\\b|mac\\b|os\\s*x\", \"macos\"),\n (\"win\", \"windows\"),\n (r\"android\", \"android\"),\n (\"netbsd\", \"netbsd\"),\n (\"openbsd\", \"openbsd\"),\n (\"freebsd\", \"freebsd\"),\n (\"dragonfly\", \"dragonflybsd\"),\n (\"(sun|i86)os\", \"solaris\"),\n (r\"x11\\b|lin(\\b|ux)?\", \"linux\"),\n (r\"nintendo\\s+wii\", \"wii\"),\n (\"irix\", \"irix\"),\n (\"hp-?ux\", \"hpux\"),\n (\"aix\", \"aix\"),\n (\"sco|unix_sv\", \"sco\"),\n (\"bsd\", \"bsd\"),\n (\"amiga\", \"amiga\"),\n (\"blackberry|playbook\", \"blackberry\"),\n (\"symbian\", \"symbian\"),\n )\n browser_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (\n (\"googlebot\", \"google\"),\n (\"msnbot\", \"msn\"),\n (\"yahoo\", \"yahoo\"),\n (\"ask jeeves\", \"ask\"),\n (r\"aol|america\\s+online\\s+browser\", \"aol\"),\n (r\"opera|opr\", \"opera\"),\n (\"edge|edg\", \"edge\"),\n (\"chrome|crios\", \"chrome\"),\n (\"seamonkey\", \"seamonkey\"),\n (\"firefox|firebird|phoenix|iceweasel\", \"firefox\"),\n (\"galeon\", \"galeon\"),\n (\"safari|version\", \"safari\"),\n (\"webkit\", \"webkit\"),\n (\"camino\", \"camino\"),\n (\"konqueror\", \"konqueror\"),\n (\"k-meleon\", \"kmeleon\"),\n (\"netscape\", \"netscape\"),\n (r\"msie|microsoft\\s+internet\\s+explorer|trident/.+? rv:\", \"msie\"),\n (\"lynx\", \"lynx\"),\n (\"links\", \"links\"),\n (\"Baiduspider\", \"baidu\"),\n (\"bingbot\", \"bing\"),\n (\"mozilla\", \"mozilla\"),\n )\n\n _browser_version_re = r\"(?:{pattern})[/\\sa-z(]*(\\d+[.\\da-z]+)?\"\n _language_re = re.compile(\n r\"(?:;\\s*|\\s+)(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*;|\"\n r\"(?:\\(|\\[|;)\\s*(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*(?:\\]|\\)|;)\"\n )\n\n def __init__(self) -> None:\n self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platform_rules]\n self.browsers = [\n (b, re.compile(self._browser_version_re.format(pattern=a), re.I))\n for a, b in self.browser_rules\n ]\n\n def __call__(\n self, user_agent: str\n ) -> t.Tuple[t.Optional[str], t.Optional[str], t.Optional[str], t.Optional[str]]:\n platform: t.Optional[str]\n browser: t.Optional[str]\n version: t.Optional[str]\n language: t.Optional[str]\n\n for platform, regex in self.platforms: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n break\n else:\n platform = None\n\n # Except for Trident, all browser key words come after the last ')'\n last_closing_paren = 0\n if (\n not re.compile(r\"trident/.+? 
rv:\", re.I).search(user_agent)\n and \")\" in user_agent\n and user_agent[-1] != \")\"\n ):\n last_closing_paren = user_agent.rindex(\")\")\n\n for browser, regex in self.browsers: # noqa: B007\n match = regex.search(user_agent[last_closing_paren:])\n if match is not None:\n version = match.group(1)\n break\n else:\n browser = version = None\n match = self._language_re.search(user_agent)\n if match is not None:\n language = match.group(1) or match.group(2)\n else:\n language = None\n return platform, browser, version, language\n\n\nclass UserAgent:\n \"\"\"Represents a user agent. Pass it a WSGI environment or a user agent\n string and you can inspect some of the details from the user agent\n string via the attributes. The following attributes exist:\n\n .. attribute:: string\n\n the raw user agent string\n\n .. attribute:: platform\n\n the browser platform. ``None`` if not recognized.\n The following platforms are currently recognized:\n\n - `aix`\n - `amiga`\n - `android`\n - `blackberry`\n - `bsd`\n - `chromeos`\n - `dragonflybsd`\n - `freebsd`\n - `hpux`\n - `ipad`\n - `iphone`\n - `irix`\n - `linux`\n - `macos`\n - `netbsd`\n - `openbsd`\n - `sco`\n - `solaris`\n - `symbian`\n - `wii`\n - `windows`\n\n .. attribute:: browser\n\n the name of the browser. ``None`` if not recognized.\n The following browsers are currently recognized:\n\n - `aol` *\n - `ask` *\n - `baidu` *\n - `bing` *\n - `camino`\n - `chrome`\n - `edge`\n - `firefox`\n - `galeon`\n - `google` *\n - `kmeleon`\n - `konqueror`\n - `links`\n - `lynx`\n - `mozilla`\n - `msie`\n - `msn`\n - `netscape`\n - `opera`\n - `safari`\n - `seamonkey`\n - `webkit`\n - `yahoo` *\n\n (Browsers marked with a star (``*``) are crawlers.)\n\n .. attribute:: version\n\n the version of the browser. ``None`` if not recognized.\n\n .. attribute:: language\n\n the language of the browser. ``None`` if not recognized.\n \"\"\"\n\n _parser = UserAgentParser()\n\n def __init__(self, environ_or_string: t.Union[\"WSGIEnvironment\", str]) -> None:\n if isinstance(environ_or_string, dict):\n self.string = environ_or_string.get(\"HTTP_USER_AGENT\", \"\")\n else:\n self.string = environ_or_string\n\n self.platform, self.browser, self.version, self.language = self._parser(\n self.string\n )\n\n def to_header(self) -> str:\n return self.string\n\n def __str__(self) -> str:\n return self.string\n\n def __bool__(self) -> bool:\n return bool(self.browser)\n\n def __repr__(self):\n return f\"<{type(self).__name__} {self.browser!r}/{self.version}>\"\n", "path": "src/werkzeug/useragents.py"}]} | 2,652 | 214 |
gh_patches_debug_14107 | rasdani/github-patches | git_diff | spack__spack-17427 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Errors untarring source tarballs in Docker
Some source tarballs are created in a way that tries to preserve user and group perms.
Yes, it's bad to create a source tarball for release with these properties -- but I suspect it's easy for this to go undetected until you run in a security-constrained Linux.
Cython is one example I hit when trying to build in a Docker container using ubuntu:latest
```
/bin/tar: Cython-0.25.2/.gitrev: Cannot change ownership to uid 96258, gid 5000: Invalid argument
.... (many many more errors)
/bin/tar: Cython-0.25.2/USAGE.txt: Cannot change ownership to uid 96258, gid 5000: Invalid argument
/bin/tar: Cython-0.25.2: Cannot change ownership to uid 96258, gid 5000: Invalid argument
/bin/tar: Exiting with failure status due to previous errors
```
The tar errors cause spack to stop. I am not sure if there is any way to defensively avoid this, but if it's possible it would be a good enhancement to spack.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/spack/spack/util/compression.py`
Content:
```
1 # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 import re
7 import os
8 from itertools import product
9 from spack.util.executable import which
10
11 # Supported archive extensions.
12 PRE_EXTS = ["tar", "TAR"]
13 EXTS = ["gz", "bz2", "xz", "Z"]
14 NOTAR_EXTS = ["zip", "tgz", "tbz2", "txz"]
15
16 # Add PRE_EXTS and EXTS last so that .tar.gz is matched *before* .tar or .gz
17 ALLOWED_ARCHIVE_TYPES = [".".join(l) for l in product(
18 PRE_EXTS, EXTS)] + PRE_EXTS + EXTS + NOTAR_EXTS
19
20
21 def allowed_archive(path):
22 return any(path.endswith(t) for t in ALLOWED_ARCHIVE_TYPES)
23
24
25 def decompressor_for(path, extension=None):
26 """Get the appropriate decompressor for a path."""
27 if ((extension and re.match(r'\.?zip$', extension)) or
28 path.endswith('.zip')):
29 unzip = which('unzip', required=True)
30 unzip.add_default_arg('-q')
31 return unzip
32 if extension and re.match(r'gz', extension):
33 gunzip = which('gunzip', required=True)
34 return gunzip
35 if extension and re.match(r'bz2', extension):
36 bunzip2 = which('bunzip2', required=True)
37 return bunzip2
38 tar = which('tar', required=True)
39 tar.add_default_arg('-xf')
40 return tar
41
42
43 def strip_extension(path):
44 """Get the part of a path that does not include its compressed
45 type extension."""
46 for type in ALLOWED_ARCHIVE_TYPES:
47 suffix = r'\.%s$' % type
48 if re.search(suffix, path):
49 return re.sub(suffix, "", path)
50 return path
51
52
53 def extension(path):
54 """Get the archive extension for a path."""
55 if path is None:
56 raise ValueError("Can't call extension() on None")
57
58 # Strip sourceforge suffix.
59 if re.search(r'((?:sourceforge.net|sf.net)/.*)/download$', path):
60 path = os.path.dirname(path)
61
62 for t in ALLOWED_ARCHIVE_TYPES:
63 suffix = r'\.%s$' % t
64 if re.search(suffix, path):
65 return t
66 return None
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/spack/spack/util/compression.py b/lib/spack/spack/util/compression.py
--- a/lib/spack/spack/util/compression.py
+++ b/lib/spack/spack/util/compression.py
@@ -14,7 +14,7 @@
NOTAR_EXTS = ["zip", "tgz", "tbz2", "txz"]
# Add PRE_EXTS and EXTS last so that .tar.gz is matched *before* .tar or .gz
-ALLOWED_ARCHIVE_TYPES = [".".join(l) for l in product(
+ALLOWED_ARCHIVE_TYPES = [".".join(ext) for ext in product(
PRE_EXTS, EXTS)] + PRE_EXTS + EXTS + NOTAR_EXTS
@@ -36,7 +36,7 @@
bunzip2 = which('bunzip2', required=True)
return bunzip2
tar = which('tar', required=True)
- tar.add_default_arg('-xf')
+ tar.add_default_arg('-oxf')
return tar
| {"golden_diff": "diff --git a/lib/spack/spack/util/compression.py b/lib/spack/spack/util/compression.py\n--- a/lib/spack/spack/util/compression.py\n+++ b/lib/spack/spack/util/compression.py\n@@ -14,7 +14,7 @@\n NOTAR_EXTS = [\"zip\", \"tgz\", \"tbz2\", \"txz\"]\n \n # Add PRE_EXTS and EXTS last so that .tar.gz is matched *before* .tar or .gz\n-ALLOWED_ARCHIVE_TYPES = [\".\".join(l) for l in product(\n+ALLOWED_ARCHIVE_TYPES = [\".\".join(ext) for ext in product(\n PRE_EXTS, EXTS)] + PRE_EXTS + EXTS + NOTAR_EXTS\n \n \n@@ -36,7 +36,7 @@\n bunzip2 = which('bunzip2', required=True)\n return bunzip2\n tar = which('tar', required=True)\n- tar.add_default_arg('-xf')\n+ tar.add_default_arg('-oxf')\n return tar\n", "issue": "Errors untaring source tarballs Docker\nSome source tarballs are created in a way that tries to preserve user and group perms. \r\nYes, it's a bad to create a source tarball for release with these properties -- but I suspect it's easy to go undetected until you run in a security constrained linux.\r\n\r\nCython is one example example I hit when trying to build in a Docker container using ubuntu:latest\r\n\r\n```\r\n/bin/tar: Cython-0.25.2/.gitrev: Cannot change ownership to uid 96258, gid 5000: Invalid argument\r\n.... (many many more errors)\r\n/bin/tar: Cython-0.25.2/USAGE.txt: Cannot change ownership to uid 96258, gid 5000: Invalid argument\r\n/bin/tar: Cython-0.25.2: Cannot change ownership to uid 96258, gid 5000: Invalid argument\r\n/bin/tar: Exiting with failure status due to previous errors\r\n```\r\nThe tar errors cause spack to stop. I am not sure if there is anyway to defensively avoid this, but if its possible it would be a good enhancement to spack. \r\n\n", "before_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nimport re\nimport os\nfrom itertools import product\nfrom spack.util.executable import which\n\n# Supported archive extensions.\nPRE_EXTS = [\"tar\", \"TAR\"]\nEXTS = [\"gz\", \"bz2\", \"xz\", \"Z\"]\nNOTAR_EXTS = [\"zip\", \"tgz\", \"tbz2\", \"txz\"]\n\n# Add PRE_EXTS and EXTS last so that .tar.gz is matched *before* .tar or .gz\nALLOWED_ARCHIVE_TYPES = [\".\".join(l) for l in product(\n PRE_EXTS, EXTS)] + PRE_EXTS + EXTS + NOTAR_EXTS\n\n\ndef allowed_archive(path):\n return any(path.endswith(t) for t in ALLOWED_ARCHIVE_TYPES)\n\n\ndef decompressor_for(path, extension=None):\n \"\"\"Get the appropriate decompressor for a path.\"\"\"\n if ((extension and re.match(r'\\.?zip$', extension)) or\n path.endswith('.zip')):\n unzip = which('unzip', required=True)\n unzip.add_default_arg('-q')\n return unzip\n if extension and re.match(r'gz', extension):\n gunzip = which('gunzip', required=True)\n return gunzip\n if extension and re.match(r'bz2', extension):\n bunzip2 = which('bunzip2', required=True)\n return bunzip2\n tar = which('tar', required=True)\n tar.add_default_arg('-xf')\n return tar\n\n\ndef strip_extension(path):\n \"\"\"Get the part of a path that does not include its compressed\n type extension.\"\"\"\n for type in ALLOWED_ARCHIVE_TYPES:\n suffix = r'\\.%s$' % type\n if re.search(suffix, path):\n return re.sub(suffix, \"\", path)\n return path\n\n\ndef extension(path):\n \"\"\"Get the archive extension for a path.\"\"\"\n if path is None:\n raise ValueError(\"Can't call extension() on None\")\n\n # Strip sourceforge suffix.\n if re.search(r'((?:sourceforge.net|sf.net)/.*)/download$', path):\n path = os.path.dirname(path)\n\n for t in ALLOWED_ARCHIVE_TYPES:\n suffix = r'\\.%s$' % t\n if re.search(suffix, path):\n return t\n return None\n", "path": "lib/spack/spack/util/compression.py"}], "after_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nimport re\nimport os\nfrom itertools import product\nfrom spack.util.executable import which\n\n# Supported archive extensions.\nPRE_EXTS = [\"tar\", \"TAR\"]\nEXTS = [\"gz\", \"bz2\", \"xz\", \"Z\"]\nNOTAR_EXTS = [\"zip\", \"tgz\", \"tbz2\", \"txz\"]\n\n# Add PRE_EXTS and EXTS last so that .tar.gz is matched *before* .tar or .gz\nALLOWED_ARCHIVE_TYPES = [\".\".join(ext) for ext in product(\n PRE_EXTS, EXTS)] + PRE_EXTS + EXTS + NOTAR_EXTS\n\n\ndef allowed_archive(path):\n return any(path.endswith(t) for t in ALLOWED_ARCHIVE_TYPES)\n\n\ndef decompressor_for(path, extension=None):\n \"\"\"Get the appropriate decompressor for a path.\"\"\"\n if ((extension and re.match(r'\\.?zip$', extension)) or\n path.endswith('.zip')):\n unzip = which('unzip', required=True)\n unzip.add_default_arg('-q')\n return unzip\n if extension and re.match(r'gz', extension):\n gunzip = which('gunzip', required=True)\n return gunzip\n if extension and re.match(r'bz2', extension):\n bunzip2 = which('bunzip2', required=True)\n return bunzip2\n tar = which('tar', required=True)\n tar.add_default_arg('-oxf')\n return tar\n\n\ndef strip_extension(path):\n \"\"\"Get the part of a path that does not include its compressed\n type extension.\"\"\"\n for type in ALLOWED_ARCHIVE_TYPES:\n suffix = r'\\.%s$' % type\n if re.search(suffix, path):\n return re.sub(suffix, \"\", path)\n return path\n\n\ndef extension(path):\n \"\"\"Get the archive extension for a path.\"\"\"\n if path is None:\n raise ValueError(\"Can't call extension() on None\")\n\n # Strip sourceforge suffix.\n if re.search(r'((?:sourceforge.net|sf.net)/.*)/download$', path):\n path = os.path.dirname(path)\n\n for t in ALLOWED_ARCHIVE_TYPES:\n suffix = r'\\.%s$' % t\n if re.search(suffix, path):\n return t\n return None\n", "path": "lib/spack/spack/util/compression.py"}]} | 1,194 | 227 |
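The substance of the golden diff above is the switch from `-xf` to `-oxf` (plus renaming the comprehension variable `l` to `ext`): for GNU tar, `-o` during extraction is equivalent to `--no-same-owner`, so files are extracted as the invoking user instead of tar trying to restore the uid/gid stored in the archive, which is exactly what fails inside the container. A rough standalone equivalent using only the standard library (Spack's `which`/`Executable` helpers are not reproduced here) could look like this:

```python
import shutil
import subprocess


def untar(archive_path, dest_dir="."):
    """Extract a tarball without trying to restore archived ownership.

    Mirrors the patch above: with GNU tar, `-o` on extraction acts like
    --no-same-owner, avoiding "Cannot change ownership" failures in
    containers and other permission-restricted environments.
    """
    tar = shutil.which("tar")
    if tar is None:
        raise RuntimeError("tar not found on PATH")
    subprocess.run([tar, "-oxf", archive_path, "-C", dest_dir], check=True)


# Example call, assuming the tarball from the issue report is present locally:
# untar("Cython-0.25.2.tar.gz", "/tmp/build")
```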
gh_patches_debug_27257 | rasdani/github-patches | git_diff | 3cn-ecn__nantralPlatform-177 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Frontend: planned events and archived events do not work
Archived events return a 500 error.
Planned events return nothing.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `server/apps/event/api_views.py`
Content:
```
1 from datetime import datetime
2
3 from rest_framework import generics
4
5 from .models import BaseEvent
6 from .serializers import BaseEventSerializer
7
8
9 class ListEventsGroupAPIView(generics.ListAPIView):
10 """List events for a group depending on the chosen
11 time window. By default only returns current events."""
12 serializer_class = BaseEventSerializer
13
14 def get_queryset(self):
15 if self.request.method == 'GET':
16 if self.request.GET.get('view') == 'archives':
17 return BaseEvent.objects.filter(group=self.kwargs['group'], date__lt=datetime.today())
18 elif self.request.get('view') == 'all':
19 return BaseEvent.objects.filter(group=self.kwargs['group'])
20 return BaseEvent.objects.filter(group=self.kwargs['group'], date__gte=datetime.today())
21
22
23 class UpdateEventAPIView(generics.RetrieveDestroyAPIView):
24 serializer_class = BaseEventSerializer
25 lookup_field = 'slug'
26 lookup_url_kwarg = 'event_slug'
27
28 def get_queryset(self):
29 return BaseEvent.objects.filter(slug=self.kwargs['event_slug'])
30
```
Path: `server/apps/event/views.py`
Content:
```
1 from datetime import date
2
3 from django.shortcuts import redirect, render
4 from django.contrib import messages
5 from django.contrib.auth.models import User
6 from django.views.generic.base import TemplateView, View
7 from django.views.generic import UpdateView, FormView
8 from django.contrib.auth.decorators import login_required
9
10 from .models import *
11 from .forms import EventForm, EventFormSet
12
13 from apps.group.models import Group
14 from apps.utils.accessMixins import LoginRequiredAccessMixin, UserIsAdmin
15
16
17 class EventDetailView(LoginRequiredAccessMixin, TemplateView):
18 template_name = 'event/detail.html'
19
20 def get_context_data(self, **kwargs):
21 context = super().get_context_data(**kwargs)
22 self.object = BaseEvent.get_event_by_slug(self.kwargs['event_slug'])
23 context['object'] = self.object
24 context['group'] = self.object.get_group
25 context['is_participating'] = self.object.is_participating(
26 self.request.user)
27 return context
28
29
30 class UpdateGroupCreateEventView(UserIsAdmin, FormView):
31 """In the context of a group, create event view."""
32 template_name = 'group/event/create.html'
33 form_class = EventForm
34
35 def get_context_data(self, **kwargs):
36 context = super().get_context_data(**kwargs)
37 context['object'] = Group.get_group_by_slug(self.kwargs['group_slug'])
38 return context
39
40 def form_valid(self, form, **kwargs):
41 event = form.save(commit=False)
42 event.group = Group.get_group_by_slug(
43 slug=self.kwargs['group_slug']).slug
44 event.save()
45 return redirect('group:create-event', self.kwargs['group_slug'])
46
47
48 class EventUpdateView(UserIsAdmin, UpdateView):
49 template_name = 'event/update.html'
50 fields = ['title', 'description', 'location',
51 'date', 'publicity', 'color', 'image']
52
53 def test_func(self) -> bool:
54 self.kwargs['group_slug'] = self.object.get_group.slug
55 return super().test_func()
56
57 def get_context_data(self, **kwargs):
58 context = super().get_context_data(**kwargs)
59 context['object'] = self.object.get_group
60 context['event'] = self.object
61 return context
62
63 def get_object(self, **kwargs):
64 return BaseEvent.get_event_by_slug(self.kwargs['event_slug'])
65
66 def dispatch(self, request, *args, **kwargs):
67 self.object = BaseEvent.get_event_by_slug(self.kwargs['event_slug'])
68 self.kwargs['group_slug'] = self.object.get_group.slug
69 if isinstance(self.object, EatingEvent):
70 self.fields = ['title', 'description', 'location',
71 'date', 'publicity', 'color', 'image', 'menu']
72 return super().dispatch(request, *args, **kwargs)
73
74
75 class UpdateGroupEventsView(UserIsAdmin, View):
76 template_name = 'group/event/planned_edit.html'
77
78 def get_context_data(self, **kwargs):
79 context = {}
80 context['object'] = Group.get_group_by_slug(kwargs['group_slug'])
81 context['events'] = BaseEvent.objects.filter(
82 group=kwargs['group_slug'], date__gte=date.today())
83 context['form'] = EventFormSet(queryset=context['events'])
84 return context
85
86 def get(self, request, group_slug):
87 return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))
88
89 def post(self, request, group_slug):
90 return edit_events(request, group_slug)
91
92
93 class UpdateGroupArchivedEventsView(UserIsAdmin, View):
94 template_name = 'group/archived_edit.html'
95
96 def get_context_data(self, **kwargs):
97 context = {}
98 context['object'] = Group.get_group_by_slug(kwargs['group_slug'])
99 context['events'] = BaseEvent.objects.filter(
100 group=kwargs['group_slug'], date__lt=date.today())
101 context['form'] = EventFormSet(queryset=context['events'])
102 return context
103
104 def get(self, request, group_slug):
105 return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))
106
107 def post(self, request, group_slug):
108 return edit_events(request, group_slug)
109
110
111 @login_required
112 def add_participant(request, event_slug):
113 """Adds the user to the list of participants."""
114 event = BaseEvent.get_event_by_slug(event_slug)
115 event.participants.add(request.user.student)
116 if request.GET.get('redirect'):
117 return redirect('home:home')
118 return redirect(event.get_absolute_url())
119
120
121 @login_required
122 def remove_participant(request, event_slug):
123 """Removes the user from the list of participants."""
124 event = BaseEvent.get_event_by_slug(event_slug)
125 event.participants.remove(request.user.student)
126 if request.GET.get('redirect'):
127 return redirect('home:home')
128 return redirect(event.get_absolute_url())
129
130
131 @login_required
132 def edit_events(request, group_slug):
133 group = Group.get_group_by_slug(group_slug)
134 form = EventFormSet(request.POST)
135 if form.is_valid():
136 events = form.save(commit=False)
137 # Link each event to the group
138 for event in events:
139 event.group = group.slug
140 event.save()
141 # Delete missing events
142 for event in form.deleted_objects:
143 event.delete()
144 messages.success(request, 'Events modifies')
145 return redirect('group:update-events', group_slug)
146 else:
147 messages.warning(request, form.errors)
148 return redirect('group:update-events', group_slug)
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/server/apps/event/api_views.py b/server/apps/event/api_views.py
--- a/server/apps/event/api_views.py
+++ b/server/apps/event/api_views.py
@@ -15,7 +15,7 @@
if self.request.method == 'GET':
if self.request.GET.get('view') == 'archives':
return BaseEvent.objects.filter(group=self.kwargs['group'], date__lt=datetime.today())
- elif self.request.get('view') == 'all':
+ elif self.request.GET.get('view') == 'all':
return BaseEvent.objects.filter(group=self.kwargs['group'])
return BaseEvent.objects.filter(group=self.kwargs['group'], date__gte=datetime.today())
diff --git a/server/apps/event/views.py b/server/apps/event/views.py
--- a/server/apps/event/views.py
+++ b/server/apps/event/views.py
@@ -91,13 +91,13 @@
class UpdateGroupArchivedEventsView(UserIsAdmin, View):
- template_name = 'group/archived_edit.html'
+ template_name = 'group/event/archived_edit.html'
def get_context_data(self, **kwargs):
context = {}
context['object'] = Group.get_group_by_slug(kwargs['group_slug'])
context['events'] = BaseEvent.objects.filter(
- group=kwargs['group_slug'], date__lt=date.today())
+ group=kwargs['group_slug'], date__lte=date.today())
context['form'] = EventFormSet(queryset=context['events'])
return context
| {"golden_diff": "diff --git a/server/apps/event/api_views.py b/server/apps/event/api_views.py\n--- a/server/apps/event/api_views.py\n+++ b/server/apps/event/api_views.py\n@@ -15,7 +15,7 @@\n if self.request.method == 'GET':\n if self.request.GET.get('view') == 'archives':\n return BaseEvent.objects.filter(group=self.kwargs['group'], date__lt=datetime.today())\n- elif self.request.get('view') == 'all':\n+ elif self.request.GET.get('view') == 'all':\n return BaseEvent.objects.filter(group=self.kwargs['group'])\n return BaseEvent.objects.filter(group=self.kwargs['group'], date__gte=datetime.today())\n \ndiff --git a/server/apps/event/views.py b/server/apps/event/views.py\n--- a/server/apps/event/views.py\n+++ b/server/apps/event/views.py\n@@ -91,13 +91,13 @@\n \n \n class UpdateGroupArchivedEventsView(UserIsAdmin, View):\n- template_name = 'group/archived_edit.html'\n+ template_name = 'group/event/archived_edit.html'\n \n def get_context_data(self, **kwargs):\n context = {}\n context['object'] = Group.get_group_by_slug(kwargs['group_slug'])\n context['events'] = BaseEvent.objects.filter(\n- group=kwargs['group_slug'], date__lt=date.today())\n+ group=kwargs['group_slug'], date__lte=date.today())\n context['form'] = EventFormSet(queryset=context['events'])\n return context\n", "issue": "Frontend events planif\u00e9s et events archiv\u00e9s ne fonctionnent pas\nEvents archiv\u00e9s renvoit une erreur 500.\r\nEvent planifi\u00e9s ne renvoit rien\n", "before_files": [{"content": "from datetime import datetime\n\nfrom rest_framework import generics\n\nfrom .models import BaseEvent\nfrom .serializers import BaseEventSerializer\n\n\nclass ListEventsGroupAPIView(generics.ListAPIView):\n \"\"\"List events for a group depending on the chosen\n time window. By default only returns current events.\"\"\"\n serializer_class = BaseEventSerializer\n\n def get_queryset(self):\n if self.request.method == 'GET':\n if self.request.GET.get('view') == 'archives':\n return BaseEvent.objects.filter(group=self.kwargs['group'], date__lt=datetime.today())\n elif self.request.get('view') == 'all':\n return BaseEvent.objects.filter(group=self.kwargs['group'])\n return BaseEvent.objects.filter(group=self.kwargs['group'], date__gte=datetime.today())\n\n\nclass UpdateEventAPIView(generics.RetrieveDestroyAPIView):\n serializer_class = BaseEventSerializer\n lookup_field = 'slug'\n lookup_url_kwarg = 'event_slug'\n\n def get_queryset(self):\n return BaseEvent.objects.filter(slug=self.kwargs['event_slug'])\n", "path": "server/apps/event/api_views.py"}, {"content": "from datetime import date\n\nfrom django.shortcuts import redirect, render\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.views.generic.base import TemplateView, View\nfrom django.views.generic import UpdateView, FormView\nfrom django.contrib.auth.decorators import login_required\n\nfrom .models import *\nfrom .forms import EventForm, EventFormSet\n\nfrom apps.group.models import Group\nfrom apps.utils.accessMixins import LoginRequiredAccessMixin, UserIsAdmin\n\n\nclass EventDetailView(LoginRequiredAccessMixin, TemplateView):\n template_name = 'event/detail.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n self.object = BaseEvent.get_event_by_slug(self.kwargs['event_slug'])\n context['object'] = self.object\n context['group'] = self.object.get_group\n context['is_participating'] = self.object.is_participating(\n self.request.user)\n return context\n\n\nclass 
UpdateGroupCreateEventView(UserIsAdmin, FormView):\n \"\"\"In the context of a group, create event view.\"\"\"\n template_name = 'group/event/create.html'\n form_class = EventForm\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['object'] = Group.get_group_by_slug(self.kwargs['group_slug'])\n return context\n\n def form_valid(self, form, **kwargs):\n event = form.save(commit=False)\n event.group = Group.get_group_by_slug(\n slug=self.kwargs['group_slug']).slug\n event.save()\n return redirect('group:create-event', self.kwargs['group_slug'])\n\n\nclass EventUpdateView(UserIsAdmin, UpdateView):\n template_name = 'event/update.html'\n fields = ['title', 'description', 'location',\n 'date', 'publicity', 'color', 'image']\n\n def test_func(self) -> bool:\n self.kwargs['group_slug'] = self.object.get_group.slug\n return super().test_func()\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['object'] = self.object.get_group\n context['event'] = self.object\n return context\n\n def get_object(self, **kwargs):\n return BaseEvent.get_event_by_slug(self.kwargs['event_slug'])\n\n def dispatch(self, request, *args, **kwargs):\n self.object = BaseEvent.get_event_by_slug(self.kwargs['event_slug'])\n self.kwargs['group_slug'] = self.object.get_group.slug\n if isinstance(self.object, EatingEvent):\n self.fields = ['title', 'description', 'location',\n 'date', 'publicity', 'color', 'image', 'menu']\n return super().dispatch(request, *args, **kwargs)\n\n\nclass UpdateGroupEventsView(UserIsAdmin, View):\n template_name = 'group/event/planned_edit.html'\n\n def get_context_data(self, **kwargs):\n context = {}\n context['object'] = Group.get_group_by_slug(kwargs['group_slug'])\n context['events'] = BaseEvent.objects.filter(\n group=kwargs['group_slug'], date__gte=date.today())\n context['form'] = EventFormSet(queryset=context['events'])\n return context\n\n def get(self, request, group_slug):\n return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))\n\n def post(self, request, group_slug):\n return edit_events(request, group_slug)\n\n\nclass UpdateGroupArchivedEventsView(UserIsAdmin, View):\n template_name = 'group/archived_edit.html'\n\n def get_context_data(self, **kwargs):\n context = {}\n context['object'] = Group.get_group_by_slug(kwargs['group_slug'])\n context['events'] = BaseEvent.objects.filter(\n group=kwargs['group_slug'], date__lt=date.today())\n context['form'] = EventFormSet(queryset=context['events'])\n return context\n\n def get(self, request, group_slug):\n return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))\n\n def post(self, request, group_slug):\n return edit_events(request, group_slug)\n\n\n@login_required\ndef add_participant(request, event_slug):\n \"\"\"Adds the user to the list of participants.\"\"\"\n event = BaseEvent.get_event_by_slug(event_slug)\n event.participants.add(request.user.student)\n if request.GET.get('redirect'):\n return redirect('home:home')\n return redirect(event.get_absolute_url())\n\n\n@login_required\ndef remove_participant(request, event_slug):\n \"\"\"Removes the user from the list of participants.\"\"\"\n event = BaseEvent.get_event_by_slug(event_slug)\n event.participants.remove(request.user.student)\n if request.GET.get('redirect'):\n return redirect('home:home')\n return redirect(event.get_absolute_url())\n\n\n@login_required\ndef edit_events(request, group_slug):\n group = 
Group.get_group_by_slug(group_slug)\n form = EventFormSet(request.POST)\n if form.is_valid():\n events = form.save(commit=False)\n # Link each event to the group\n for event in events:\n event.group = group.slug\n event.save()\n # Delete missing events\n for event in form.deleted_objects:\n event.delete()\n messages.success(request, 'Events modifies')\n return redirect('group:update-events', group_slug)\n else:\n messages.warning(request, form.errors)\n return redirect('group:update-events', group_slug)\n", "path": "server/apps/event/views.py"}], "after_files": [{"content": "from datetime import datetime\n\nfrom rest_framework import generics\n\nfrom .models import BaseEvent\nfrom .serializers import BaseEventSerializer\n\n\nclass ListEventsGroupAPIView(generics.ListAPIView):\n \"\"\"List events for a group depending on the chosen\n time window. By default only returns current events.\"\"\"\n serializer_class = BaseEventSerializer\n\n def get_queryset(self):\n if self.request.method == 'GET':\n if self.request.GET.get('view') == 'archives':\n return BaseEvent.objects.filter(group=self.kwargs['group'], date__lt=datetime.today())\n elif self.request.GET.get('view') == 'all':\n return BaseEvent.objects.filter(group=self.kwargs['group'])\n return BaseEvent.objects.filter(group=self.kwargs['group'], date__gte=datetime.today())\n\n\nclass UpdateEventAPIView(generics.RetrieveDestroyAPIView):\n serializer_class = BaseEventSerializer\n lookup_field = 'slug'\n lookup_url_kwarg = 'event_slug'\n\n def get_queryset(self):\n return BaseEvent.objects.filter(slug=self.kwargs['event_slug'])\n", "path": "server/apps/event/api_views.py"}, {"content": "from datetime import date\n\nfrom django.shortcuts import redirect, render\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.views.generic.base import TemplateView, View\nfrom django.views.generic import UpdateView, FormView\nfrom django.contrib.auth.decorators import login_required\n\nfrom .models import *\nfrom .forms import EventForm, EventFormSet\n\nfrom apps.group.models import Group\nfrom apps.utils.accessMixins import LoginRequiredAccessMixin, UserIsAdmin\n\n\nclass EventDetailView(LoginRequiredAccessMixin, TemplateView):\n template_name = 'event/detail.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n self.object = BaseEvent.get_event_by_slug(self.kwargs['event_slug'])\n context['object'] = self.object\n context['group'] = self.object.get_group\n context['is_participating'] = self.object.is_participating(\n self.request.user)\n return context\n\n\nclass UpdateGroupCreateEventView(UserIsAdmin, FormView):\n \"\"\"In the context of a group, create event view.\"\"\"\n template_name = 'group/event/create.html'\n form_class = EventForm\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['object'] = Group.get_group_by_slug(self.kwargs['group_slug'])\n return context\n\n def form_valid(self, form, **kwargs):\n event = form.save(commit=False)\n event.group = Group.get_group_by_slug(\n slug=self.kwargs['group_slug']).slug\n event.save()\n return redirect('group:create-event', self.kwargs['group_slug'])\n\n\nclass EventUpdateView(UserIsAdmin, UpdateView):\n template_name = 'event/update.html'\n fields = ['title', 'description', 'location',\n 'date', 'publicity', 'color', 'image']\n\n def test_func(self) -> bool:\n self.kwargs['group_slug'] = self.object.get_group.slug\n return super().test_func()\n\n def get_context_data(self, 
**kwargs):\n context = super().get_context_data(**kwargs)\n context['object'] = self.object.get_group\n context['event'] = self.object\n return context\n\n def get_object(self, **kwargs):\n return BaseEvent.get_event_by_slug(self.kwargs['event_slug'])\n\n def dispatch(self, request, *args, **kwargs):\n self.object = BaseEvent.get_event_by_slug(self.kwargs['event_slug'])\n self.kwargs['group_slug'] = self.object.get_group.slug\n if isinstance(self.object, EatingEvent):\n self.fields = ['title', 'description', 'location',\n 'date', 'publicity', 'color', 'image', 'menu']\n return super().dispatch(request, *args, **kwargs)\n\n\nclass UpdateGroupEventsView(UserIsAdmin, View):\n template_name = 'group/event/planned_edit.html'\n\n def get_context_data(self, **kwargs):\n context = {}\n context['object'] = Group.get_group_by_slug(kwargs['group_slug'])\n context['events'] = BaseEvent.objects.filter(\n group=kwargs['group_slug'], date__gte=date.today())\n context['form'] = EventFormSet(queryset=context['events'])\n return context\n\n def get(self, request, group_slug):\n return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))\n\n def post(self, request, group_slug):\n return edit_events(request, group_slug)\n\n\nclass UpdateGroupArchivedEventsView(UserIsAdmin, View):\n template_name = 'group/event/archived_edit.html'\n\n def get_context_data(self, **kwargs):\n context = {}\n context['object'] = Group.get_group_by_slug(kwargs['group_slug'])\n context['events'] = BaseEvent.objects.filter(\n group=kwargs['group_slug'], date__lte=date.today())\n context['form'] = EventFormSet(queryset=context['events'])\n return context\n\n def get(self, request, group_slug):\n return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))\n\n def post(self, request, group_slug):\n return edit_events(request, group_slug)\n\n\n@login_required\ndef add_participant(request, event_slug):\n \"\"\"Adds the user to the list of participants.\"\"\"\n event = BaseEvent.get_event_by_slug(event_slug)\n event.participants.add(request.user.student)\n if request.GET.get('redirect'):\n return redirect('home:home')\n return redirect(event.get_absolute_url())\n\n\n@login_required\ndef remove_participant(request, event_slug):\n \"\"\"Removes the user from the list of participants.\"\"\"\n event = BaseEvent.get_event_by_slug(event_slug)\n event.participants.remove(request.user.student)\n if request.GET.get('redirect'):\n return redirect('home:home')\n return redirect(event.get_absolute_url())\n\n\n@login_required\ndef edit_events(request, group_slug):\n group = Group.get_group_by_slug(group_slug)\n form = EventFormSet(request.POST)\n if form.is_valid():\n events = form.save(commit=False)\n # Link each event to the group\n for event in events:\n event.group = group.slug\n event.save()\n # Delete missing events\n for event in form.deleted_objects:\n event.delete()\n messages.success(request, 'Events modifies')\n return redirect('group:update-events', group_slug)\n else:\n messages.warning(request, form.errors)\n return redirect('group:update-events', group_slug)\n", "path": "server/apps/event/views.py"}]} | 2,108 | 322 |
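Beyond the two one-line fixes in the golden diff above (the archived-events template path and the `date__lte` filter), the API hunk is the part worth restating: `self.request.get('view')` falls through to the wrapped `HttpRequest`, which has no `get()` method, so any request reaching that `elif` raises `AttributeError` instead of returning a queryset. A compact sketch of the corrected query-parameter handling, with imports assuming the app layout shown in the row, is:

```python
from datetime import datetime

from rest_framework import generics

from .models import BaseEvent                  # app-relative imports as in the row above
from .serializers import BaseEventSerializer


class ListEventsGroupAPIView(generics.ListAPIView):
    """List a group's events; ?view=archives / ?view=all widen the time window."""

    serializer_class = BaseEventSerializer

    def get_queryset(self):
        group = self.kwargs['group']
        view = self.request.GET.get('view')    # read the query string, not request.get(...)
        if view == 'archives':
            return BaseEvent.objects.filter(group=group, date__lt=datetime.today())
        if view == 'all':
            return BaseEvent.objects.filter(group=group)
        return BaseEvent.objects.filter(group=group, date__gte=datetime.today())
```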
gh_patches_debug_4645 | rasdani/github-patches | git_diff | vispy__vispy-1676 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Window transparency issue in molecular_viewer.py
On my Windows 10 machine with a GeForce GTX 1080 Ti (Driver version 430.39) there are serious issues in the visualization of the molecular_viewer.py example code (/examples/demo/gloo/molecular_viewer.py).
The background of the image is black no matter what, but wherever there is any color, the color takes on a strange transparency: you can see through the display window if the window is superimposed over non-black backgrounds.
Additionally, if you introduce code to take screenshots, the screenshots always show a black background with pure white molecule images.

The image exhibits the undesirable behavior. You can see the shape of my (black) console window placed *behind* the display window.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/demo/gloo/molecular_viewer.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # vispy: gallery 30
3 # -----------------------------------------------------------------------------
4 # 2014, Aurore Deschildre, Gael Goret, Cyrille Rossant, Nicolas P. Rougier.
5 # Distributed under the terms of the new BSD License.
6 # -----------------------------------------------------------------------------
7 import numpy as np
8
9 from vispy import gloo
10 from vispy import app
11 from vispy.util.transforms import perspective, translate, rotate
12 from vispy.io import load_data_file
13
14 vertex = """
15 #version 120
16
17 uniform mat4 u_model;
18 uniform mat4 u_view;
19 uniform mat4 u_projection;
20 uniform vec3 u_light_position;
21 uniform vec3 u_light_spec_position;
22
23 attribute vec3 a_position;
24 attribute vec3 a_color;
25 attribute float a_radius;
26
27 varying vec3 v_color;
28 varying vec4 v_eye_position;
29 varying float v_radius;
30 varying vec3 v_light_direction;
31
32 void main (void) {
33 v_radius = a_radius;
34 v_color = a_color;
35
36 v_eye_position = u_view * u_model * vec4(a_position,1.0);
37 v_light_direction = normalize(u_light_position);
38 float dist = length(v_eye_position.xyz);
39
40 gl_Position = u_projection * v_eye_position;
41
42 // stackoverflow.com/questions/8608844/...
43 // ... resizing-point-sprites-based-on-distance-from-the-camera
44 vec4 proj_corner = u_projection * vec4(a_radius, a_radius, v_eye_position.z, v_eye_position.w); // # noqa
45 gl_PointSize = 512.0 * proj_corner.x / proj_corner.w;
46 }
47 """
48
49 fragment = """
50 #version 120
51
52 uniform mat4 u_model;
53 uniform mat4 u_view;
54 uniform mat4 u_projection;
55 uniform vec3 u_light_position;
56 uniform vec3 u_light_spec_position;
57
58 varying vec3 v_color;
59 varying vec4 v_eye_position;
60 varying float v_radius;
61 varying vec3 v_light_direction;
62 void main()
63 {
64 // r^2 = (x - x0)^2 + (y - y0)^2 + (z - z0)^2
65 vec2 texcoord = gl_PointCoord* 2.0 - vec2(1.0);
66 float x = texcoord.x;
67 float y = texcoord.y;
68 float d = 1.0 - x*x - y*y;
69 if (d <= 0.0)
70 discard;
71
72 float z = sqrt(d);
73 vec4 pos = v_eye_position;
74 pos.z += v_radius*z;
75 vec3 pos2 = pos.xyz;
76 pos = u_projection * pos;
77 // gl_FragDepth = 0.5*(pos.z / pos.w)+0.5;
78 vec3 normal = vec3(x,y,z);
79 float diffuse = clamp(dot(normal, v_light_direction), 0.0, 1.0);
80
81 // Specular lighting.
82 vec3 M = pos2.xyz;
83 vec3 O = v_eye_position.xyz;
84 vec3 L = u_light_spec_position;
85 vec3 K = normalize(normalize(L - M) + normalize(O - M));
86 // WARNING: abs() is necessary, otherwise weird bugs may appear with some
87 // GPU drivers...
88 float specular = clamp(pow(abs(dot(normal, K)), 40.), 0.0, 1.0);
89 vec3 v_light = vec3(1., 1., 1.);
90 gl_FragColor.rgb = (.15*v_color + .55*diffuse * v_color
91 + .35*specular * v_light);
92 }
93 """
94
95
96 class Canvas(app.Canvas):
97
98 def __init__(self):
99 app.Canvas.__init__(self, title='Molecular viewer',
100 keys='interactive', size=(1200, 800))
101 self.ps = self.pixel_scale
102
103 self.translate = 40
104 self.program = gloo.Program(vertex, fragment)
105 self.view = translate((0, 0, -self.translate))
106 self.model = np.eye(4, dtype=np.float32)
107 self.projection = np.eye(4, dtype=np.float32)
108
109 self.apply_zoom()
110
111 fname = load_data_file('molecular_viewer/micelle.npz')
112 self.load_molecule(fname)
113 self.load_data()
114
115 self.theta = 0
116 self.phi = 0
117
118 gloo.set_state(depth_test=True, clear_color='black')
119 self._timer = app.Timer('auto', connect=self.on_timer, start=True)
120
121 self.show()
122
123 def load_molecule(self, fname):
124 molecule = np.load(fname)['molecule']
125 self._nAtoms = molecule.shape[0]
126
127 # The x,y,z values store in one array
128 self.coords = molecule[:, :3]
129
130 # The array that will store the color and alpha scale for all the atoms
131 self.atomsColours = molecule[:, 3:6]
132
133 # The array that will store the scale for all the atoms.
134 self.atomsScales = molecule[:, 6]
135
136 def load_data(self):
137 n = self._nAtoms
138
139 data = np.zeros(n, [('a_position', np.float32, 3),
140 ('a_color', np.float32, 3),
141 ('a_radius', np.float32, 1)])
142
143 data['a_position'] = self.coords
144 data['a_color'] = self.atomsColours
145 data['a_radius'] = self.atomsScales*self.ps
146
147 self.program.bind(gloo.VertexBuffer(data))
148
149 self.program['u_model'] = self.model
150 self.program['u_view'] = self.view
151 self.program['u_light_position'] = 0., 0., 2.
152 self.program['u_light_spec_position'] = -5., 5., -5.
153
154 def on_key_press(self, event):
155 if event.text == ' ':
156 if self.timer.running:
157 self.timer.stop()
158 else:
159 self.timer.start()
160 # if event.text == 'A':
161 # self.
162
163 def on_timer(self, event):
164 self.theta += .25
165 self.phi += .25
166 self.model = np.dot(rotate(self.theta, (0, 0, 1)),
167 rotate(self.phi, (0, 1, 0)))
168 self.program['u_model'] = self.model
169 self.update()
170
171 def on_resize(self, event):
172 width, height = event.size
173
174 def apply_zoom(self):
175 width, height = self.physical_size
176 gloo.set_viewport(0, 0, width, height)
177 self.projection = perspective(25.0, width / float(height), 2.0, 100.0)
178 self.program['u_projection'] = self.projection
179
180 def on_mouse_wheel(self, event):
181 self.translate -= event.delta[1]
182 self.translate = max(-1, self.translate)
183 self.view = translate((0, 0, -self.translate))
184
185 self.program['u_view'] = self.view
186 self.update()
187
188 def on_draw(self, event):
189 gloo.clear()
190 self.program.draw('points')
191
192
193 if __name__ == '__main__':
194 mvc = Canvas()
195 app.run()
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/demo/gloo/molecular_viewer.py b/examples/demo/gloo/molecular_viewer.py
--- a/examples/demo/gloo/molecular_viewer.py
+++ b/examples/demo/gloo/molecular_viewer.py
@@ -87,8 +87,8 @@
// GPU drivers...
float specular = clamp(pow(abs(dot(normal, K)), 40.), 0.0, 1.0);
vec3 v_light = vec3(1., 1., 1.);
- gl_FragColor.rgb = (.15*v_color + .55*diffuse * v_color
- + .35*specular * v_light);
+ gl_FragColor.rgba = vec4(.15*v_color + .55*diffuse * v_color
+ + .35*specular * v_light, 1.0);
}
"""
| {"golden_diff": "diff --git a/examples/demo/gloo/molecular_viewer.py b/examples/demo/gloo/molecular_viewer.py\n--- a/examples/demo/gloo/molecular_viewer.py\n+++ b/examples/demo/gloo/molecular_viewer.py\n@@ -87,8 +87,8 @@\n // GPU drivers...\n float specular = clamp(pow(abs(dot(normal, K)), 40.), 0.0, 1.0);\n vec3 v_light = vec3(1., 1., 1.);\n- gl_FragColor.rgb = (.15*v_color + .55*diffuse * v_color\n- + .35*specular * v_light);\n+ gl_FragColor.rgba = vec4(.15*v_color + .55*diffuse * v_color\n+ + .35*specular * v_light, 1.0);\n }\n \"\"\"\n", "issue": "Window transparency issue in molecular_viewer.py\nOn my Windows 10 machine with a GeForce GTX 1080 Ti (Driver version 430.39) there are serious issues in the visualization of the molecular_viewer.py example code (/examples/demo/gloo/molecular_viewer.py).\r\n\r\nThe background of the image is black no matter what; but wherever there is any color, the color takes on a strange transparency, where you can see through the display window if the window is superimposed over non-black backgrounds.\r\n\r\nAdditionally, if you introduce code to take screenshots, the screenshots always show a black background with pure white molecule images.\r\n\r\nThe image exhibits the undesirable behavior. You can see the shape of my (black) console window placed *behind* the display window.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# vispy: gallery 30\n# -----------------------------------------------------------------------------\n# 2014, Aurore Deschildre, Gael Goret, Cyrille Rossant, Nicolas P. Rougier.\n# Distributed under the terms of the new BSD License.\n# -----------------------------------------------------------------------------\nimport numpy as np\n\nfrom vispy import gloo\nfrom vispy import app\nfrom vispy.util.transforms import perspective, translate, rotate\nfrom vispy.io import load_data_file\n\nvertex = \"\"\"\n#version 120\n\nuniform mat4 u_model;\nuniform mat4 u_view;\nuniform mat4 u_projection;\nuniform vec3 u_light_position;\nuniform vec3 u_light_spec_position;\n\nattribute vec3 a_position;\nattribute vec3 a_color;\nattribute float a_radius;\n\nvarying vec3 v_color;\nvarying vec4 v_eye_position;\nvarying float v_radius;\nvarying vec3 v_light_direction;\n\nvoid main (void) {\n v_radius = a_radius;\n v_color = a_color;\n\n v_eye_position = u_view * u_model * vec4(a_position,1.0);\n v_light_direction = normalize(u_light_position);\n float dist = length(v_eye_position.xyz);\n\n gl_Position = u_projection * v_eye_position;\n\n // stackoverflow.com/questions/8608844/...\n // ... 
resizing-point-sprites-based-on-distance-from-the-camera\n vec4 proj_corner = u_projection * vec4(a_radius, a_radius, v_eye_position.z, v_eye_position.w); // # noqa\n gl_PointSize = 512.0 * proj_corner.x / proj_corner.w;\n}\n\"\"\"\n\nfragment = \"\"\"\n#version 120\n\nuniform mat4 u_model;\nuniform mat4 u_view;\nuniform mat4 u_projection;\nuniform vec3 u_light_position;\nuniform vec3 u_light_spec_position;\n\nvarying vec3 v_color;\nvarying vec4 v_eye_position;\nvarying float v_radius;\nvarying vec3 v_light_direction;\nvoid main()\n{\n // r^2 = (x - x0)^2 + (y - y0)^2 + (z - z0)^2\n vec2 texcoord = gl_PointCoord* 2.0 - vec2(1.0);\n float x = texcoord.x;\n float y = texcoord.y;\n float d = 1.0 - x*x - y*y;\n if (d <= 0.0)\n discard;\n\n float z = sqrt(d);\n vec4 pos = v_eye_position;\n pos.z += v_radius*z;\n vec3 pos2 = pos.xyz;\n pos = u_projection * pos;\n// gl_FragDepth = 0.5*(pos.z / pos.w)+0.5;\n vec3 normal = vec3(x,y,z);\n float diffuse = clamp(dot(normal, v_light_direction), 0.0, 1.0);\n\n // Specular lighting.\n vec3 M = pos2.xyz;\n vec3 O = v_eye_position.xyz;\n vec3 L = u_light_spec_position;\n vec3 K = normalize(normalize(L - M) + normalize(O - M));\n // WARNING: abs() is necessary, otherwise weird bugs may appear with some\n // GPU drivers...\n float specular = clamp(pow(abs(dot(normal, K)), 40.), 0.0, 1.0);\n vec3 v_light = vec3(1., 1., 1.);\n gl_FragColor.rgb = (.15*v_color + .55*diffuse * v_color\n + .35*specular * v_light);\n}\n\"\"\"\n\n\nclass Canvas(app.Canvas):\n\n def __init__(self):\n app.Canvas.__init__(self, title='Molecular viewer',\n keys='interactive', size=(1200, 800))\n self.ps = self.pixel_scale\n\n self.translate = 40\n self.program = gloo.Program(vertex, fragment)\n self.view = translate((0, 0, -self.translate))\n self.model = np.eye(4, dtype=np.float32)\n self.projection = np.eye(4, dtype=np.float32)\n\n self.apply_zoom()\n\n fname = load_data_file('molecular_viewer/micelle.npz')\n self.load_molecule(fname)\n self.load_data()\n\n self.theta = 0\n self.phi = 0\n\n gloo.set_state(depth_test=True, clear_color='black')\n self._timer = app.Timer('auto', connect=self.on_timer, start=True)\n\n self.show()\n\n def load_molecule(self, fname):\n molecule = np.load(fname)['molecule']\n self._nAtoms = molecule.shape[0]\n\n # The x,y,z values store in one array\n self.coords = molecule[:, :3]\n\n # The array that will store the color and alpha scale for all the atoms\n self.atomsColours = molecule[:, 3:6]\n\n # The array that will store the scale for all the atoms.\n self.atomsScales = molecule[:, 6]\n\n def load_data(self):\n n = self._nAtoms\n\n data = np.zeros(n, [('a_position', np.float32, 3),\n ('a_color', np.float32, 3),\n ('a_radius', np.float32, 1)])\n\n data['a_position'] = self.coords\n data['a_color'] = self.atomsColours\n data['a_radius'] = self.atomsScales*self.ps\n\n self.program.bind(gloo.VertexBuffer(data))\n\n self.program['u_model'] = self.model\n self.program['u_view'] = self.view\n self.program['u_light_position'] = 0., 0., 2.\n self.program['u_light_spec_position'] = -5., 5., -5.\n\n def on_key_press(self, event):\n if event.text == ' ':\n if self.timer.running:\n self.timer.stop()\n else:\n self.timer.start()\n # if event.text == 'A':\n # self.\n\n def on_timer(self, event):\n self.theta += .25\n self.phi += .25\n self.model = np.dot(rotate(self.theta, (0, 0, 1)),\n rotate(self.phi, (0, 1, 0)))\n self.program['u_model'] = self.model\n self.update()\n\n def on_resize(self, event):\n width, height = event.size\n\n def apply_zoom(self):\n width, 
height = self.physical_size\n gloo.set_viewport(0, 0, width, height)\n self.projection = perspective(25.0, width / float(height), 2.0, 100.0)\n self.program['u_projection'] = self.projection\n\n def on_mouse_wheel(self, event):\n self.translate -= event.delta[1]\n self.translate = max(-1, self.translate)\n self.view = translate((0, 0, -self.translate))\n\n self.program['u_view'] = self.view\n self.update()\n\n def on_draw(self, event):\n gloo.clear()\n self.program.draw('points')\n\n\nif __name__ == '__main__':\n mvc = Canvas()\n app.run()\n", "path": "examples/demo/gloo/molecular_viewer.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# vispy: gallery 30\n# -----------------------------------------------------------------------------\n# 2014, Aurore Deschildre, Gael Goret, Cyrille Rossant, Nicolas P. Rougier.\n# Distributed under the terms of the new BSD License.\n# -----------------------------------------------------------------------------\nimport numpy as np\n\nfrom vispy import gloo\nfrom vispy import app\nfrom vispy.util.transforms import perspective, translate, rotate\nfrom vispy.io import load_data_file\n\nvertex = \"\"\"\n#version 120\n\nuniform mat4 u_model;\nuniform mat4 u_view;\nuniform mat4 u_projection;\nuniform vec3 u_light_position;\nuniform vec3 u_light_spec_position;\n\nattribute vec3 a_position;\nattribute vec3 a_color;\nattribute float a_radius;\n\nvarying vec3 v_color;\nvarying vec4 v_eye_position;\nvarying float v_radius;\nvarying vec3 v_light_direction;\n\nvoid main (void) {\n v_radius = a_radius;\n v_color = a_color;\n\n v_eye_position = u_view * u_model * vec4(a_position,1.0);\n v_light_direction = normalize(u_light_position);\n float dist = length(v_eye_position.xyz);\n\n gl_Position = u_projection * v_eye_position;\n\n // stackoverflow.com/questions/8608844/...\n // ... 
resizing-point-sprites-based-on-distance-from-the-camera\n vec4 proj_corner = u_projection * vec4(a_radius, a_radius, v_eye_position.z, v_eye_position.w); // # noqa\n gl_PointSize = 512.0 * proj_corner.x / proj_corner.w;\n}\n\"\"\"\n\nfragment = \"\"\"\n#version 120\n\nuniform mat4 u_model;\nuniform mat4 u_view;\nuniform mat4 u_projection;\nuniform vec3 u_light_position;\nuniform vec3 u_light_spec_position;\n\nvarying vec3 v_color;\nvarying vec4 v_eye_position;\nvarying float v_radius;\nvarying vec3 v_light_direction;\nvoid main()\n{\n // r^2 = (x - x0)^2 + (y - y0)^2 + (z - z0)^2\n vec2 texcoord = gl_PointCoord* 2.0 - vec2(1.0);\n float x = texcoord.x;\n float y = texcoord.y;\n float d = 1.0 - x*x - y*y;\n if (d <= 0.0)\n discard;\n\n float z = sqrt(d);\n vec4 pos = v_eye_position;\n pos.z += v_radius*z;\n vec3 pos2 = pos.xyz;\n pos = u_projection * pos;\n// gl_FragDepth = 0.5*(pos.z / pos.w)+0.5;\n vec3 normal = vec3(x,y,z);\n float diffuse = clamp(dot(normal, v_light_direction), 0.0, 1.0);\n\n // Specular lighting.\n vec3 M = pos2.xyz;\n vec3 O = v_eye_position.xyz;\n vec3 L = u_light_spec_position;\n vec3 K = normalize(normalize(L - M) + normalize(O - M));\n // WARNING: abs() is necessary, otherwise weird bugs may appear with some\n // GPU drivers...\n float specular = clamp(pow(abs(dot(normal, K)), 40.), 0.0, 1.0);\n vec3 v_light = vec3(1., 1., 1.);\n gl_FragColor.rgba = vec4(.15*v_color + .55*diffuse * v_color\n + .35*specular * v_light, 1.0);\n}\n\"\"\"\n\n\nclass Canvas(app.Canvas):\n\n def __init__(self):\n app.Canvas.__init__(self, title='Molecular viewer',\n keys='interactive', size=(1200, 800))\n self.ps = self.pixel_scale\n\n self.translate = 40\n self.program = gloo.Program(vertex, fragment)\n self.view = translate((0, 0, -self.translate))\n self.model = np.eye(4, dtype=np.float32)\n self.projection = np.eye(4, dtype=np.float32)\n\n self.apply_zoom()\n\n fname = load_data_file('molecular_viewer/micelle.npz')\n self.load_molecule(fname)\n self.load_data()\n\n self.theta = 0\n self.phi = 0\n\n gloo.set_state(depth_test=True, clear_color='black')\n self._timer = app.Timer('auto', connect=self.on_timer, start=True)\n\n self.show()\n\n def load_molecule(self, fname):\n molecule = np.load(fname)['molecule']\n self._nAtoms = molecule.shape[0]\n\n # The x,y,z values store in one array\n self.coords = molecule[:, :3]\n\n # The array that will store the color and alpha scale for all the atoms\n self.atomsColours = molecule[:, 3:6]\n\n # The array that will store the scale for all the atoms.\n self.atomsScales = molecule[:, 6]\n\n def load_data(self):\n n = self._nAtoms\n\n data = np.zeros(n, [('a_position', np.float32, 3),\n ('a_color', np.float32, 3),\n ('a_radius', np.float32, 1)])\n\n data['a_position'] = self.coords\n data['a_color'] = self.atomsColours\n data['a_radius'] = self.atomsScales*self.ps\n\n self.program.bind(gloo.VertexBuffer(data))\n\n self.program['u_model'] = self.model\n self.program['u_view'] = self.view\n self.program['u_light_position'] = 0., 0., 2.\n self.program['u_light_spec_position'] = -5., 5., -5.\n\n def on_key_press(self, event):\n if event.text == ' ':\n if self.timer.running:\n self.timer.stop()\n else:\n self.timer.start()\n # if event.text == 'A':\n # self.\n\n def on_timer(self, event):\n self.theta += .25\n self.phi += .25\n self.model = np.dot(rotate(self.theta, (0, 0, 1)),\n rotate(self.phi, (0, 1, 0)))\n self.program['u_model'] = self.model\n self.update()\n\n def on_resize(self, event):\n width, height = event.size\n\n def apply_zoom(self):\n 
width, height = self.physical_size\n gloo.set_viewport(0, 0, width, height)\n self.projection = perspective(25.0, width / float(height), 2.0, 100.0)\n self.program['u_projection'] = self.projection\n\n def on_mouse_wheel(self, event):\n self.translate -= event.delta[1]\n self.translate = max(-1, self.translate)\n self.view = translate((0, 0, -self.translate))\n\n self.program['u_view'] = self.view\n self.update()\n\n def on_draw(self, event):\n gloo.clear()\n self.program.draw('points')\n\n\nif __name__ == '__main__':\n mvc = Canvas()\n app.run()\n", "path": "examples/demo/gloo/molecular_viewer.py"}]} | 2,552 | 188 |
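The vispy patch above is confined to the fragment shader: the original only assigned `gl_FragColor.rgb`, leaving the alpha channel undefined, and on drivers or compositors that honour destination alpha in the default framebuffer (as on the reporter's Windows 10 / GTX 1080 Ti setup) that undefined alpha plausibly explains the see-through canvas and the odd screenshots described in the issue. The fix simply writes all four components. For reference, the corrected tail of the `fragment` GLSL string embedded in the example reads:

```python
# Closing lines of the `fragment` shader string after the patch; everything
# above this point in main() is unchanged.
fragment_fixed_tail = """
    float specular = clamp(pow(abs(dot(normal, K)), 40.), 0.0, 1.0);
    vec3 v_light = vec3(1., 1., 1.);
    // Pin alpha to 1.0 instead of leaving gl_FragColor.a undefined.
    gl_FragColor.rgba = vec4(.15*v_color + .55*diffuse * v_color
                             + .35*specular * v_light, 1.0);
}
"""
```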
gh_patches_debug_23113 | rasdani/github-patches | git_diff | sopel-irc__sopel-1848 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
xkcd outputs link even if triggered by one
The `xkcd` plugin shouldn't output a link to the comic if it was triggered by a link.
See `wikipedia`, for example. This is an anti-cycling measure in case multiple bots handling the same link(s) are present in the channel.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/modules/xkcd.py`
Content:
```
1 # coding=utf-8
2 """
3 xkcd.py - Sopel xkcd Module
4 Copyright 2010, Michael Yanovich (yanovich.net), and Morgan Goose
5 Copyright 2012, Lior Ramati
6 Copyright 2013, Elsie Powell (embolalia.com)
7 Licensed under the Eiffel Forum License 2.
8
9 https://sopel.chat
10 """
11 from __future__ import unicode_literals, absolute_import, print_function, division
12
13 import random
14 import re
15
16 import requests
17
18 from sopel.modules.search import bing_search
19 from sopel.module import commands, url
20
21
22 ignored_sites = [
23 # For searching the web
24 'almamater.xkcd.com',
25 'blog.xkcd.com',
26 'blag.xkcd.com',
27 'forums.xkcd.com',
28 'fora.xkcd.com',
29 'forums3.xkcd.com',
30 'store.xkcd.com',
31 'wiki.xkcd.com',
32 'what-if.xkcd.com',
33 ]
34 sites_query = ' site:xkcd.com -site:' + ' -site:'.join(ignored_sites)
35
36
37 def get_info(number=None):
38 if number:
39 url = 'https://xkcd.com/{}/info.0.json'.format(number)
40 else:
41 url = 'https://xkcd.com/info.0.json'
42 data = requests.get(url).json()
43 data['url'] = 'https://xkcd.com/' + str(data['num'])
44 return data
45
46
47 def web_search(query):
48 url = bing_search(query + sites_query)
49 if not url:
50 return None
51 match = re.match(r'(?:https?://)?xkcd.com/(\d+)/?', url)
52 if match:
53 return match.group(1)
54
55
56 @commands('xkcd')
57 def xkcd(bot, trigger):
58 """.xkcd - Finds an xkcd comic strip.
59
60 Takes one of 3 inputs:
61
62 * If no input is provided it will return a random comic
63 * If numeric input is provided it will return that comic, or the
64 nth-latest comic if the number is non-positive
65 * If non-numeric input is provided it will return the first search result
66 for those keywords on the xkcd.com site
67 """
68 # get latest comic for rand function and numeric input
69 latest = get_info()
70 max_int = latest['num']
71
72 # if no input is given (pre - lior's edits code)
73 if not trigger.group(2): # get rand comic
74 random.seed()
75 requested = get_info(random.randint(1, max_int + 1))
76 else:
77 query = trigger.group(2).strip()
78
79 numbered = re.match(r"^(#|\+|-)?(\d+)$", query)
80 if numbered:
81 query = int(numbered.group(2))
82 if numbered.group(1) == "-":
83 query = -query
84 return numbered_result(bot, query, latest)
85 else:
86 # Non-number: search the web.
87 if (query.lower() == "latest" or query.lower() == "newest"):
88 requested = latest
89 else:
90 number = web_search(query)
91 if not number:
92 bot.say('Could not find any comics for that query.')
93 return
94 requested = get_info(number)
95
96 say_result(bot, requested)
97
98
99 def numbered_result(bot, query, latest):
100 max_int = latest['num']
101 if query > max_int:
102 bot.say(("Sorry, comic #{} hasn't been posted yet. "
103 "The last comic was #{}").format(query, max_int))
104 return
105 elif query <= -max_int:
106 bot.say(("Sorry, but there were only {} comics "
107 "released yet so far").format(max_int))
108 return
109 elif abs(query) == 0:
110 requested = latest
111 elif query == 404 or max_int + query == 404:
112 bot.say("404 - Not Found") # don't error on that one
113 return
114 elif query > 0:
115 requested = get_info(query)
116 else:
117 # Negative: go back that many from current
118 requested = get_info(max_int + query)
119
120 say_result(bot, requested)
121
122
123 def say_result(bot, result):
124 message = '{} | {} | Alt-text: {}'.format(result['url'], result['title'],
125 result['alt'])
126 bot.say(message)
127
128
129 @url(r'xkcd.com/(\d+)')
130 def get_url(bot, trigger, match):
131 latest = get_info()
132 numbered_result(bot, int(match.group(1)), latest)
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sopel/modules/xkcd.py b/sopel/modules/xkcd.py
--- a/sopel/modules/xkcd.py
+++ b/sopel/modules/xkcd.py
@@ -96,7 +96,7 @@
say_result(bot, requested)
-def numbered_result(bot, query, latest):
+def numbered_result(bot, query, latest, commanded=True):
max_int = latest['num']
if query > max_int:
bot.say(("Sorry, comic #{} hasn't been posted yet. "
@@ -117,16 +117,18 @@
# Negative: go back that many from current
requested = get_info(max_int + query)
- say_result(bot, requested)
+ say_result(bot, requested, commanded)
-def say_result(bot, result):
- message = '{} | {} | Alt-text: {}'.format(result['url'], result['title'],
- result['alt'])
+def say_result(bot, result, commanded=True):
+ message = '{}{} | Alt-text: {}'.format(
+ result['url'] + ' | ' if commanded else '',
+ result['title'], result['alt']
+ )
bot.say(message)
@url(r'xkcd.com/(\d+)')
def get_url(bot, trigger, match):
latest = get_info()
- numbered_result(bot, int(match.group(1)), latest)
+ numbered_result(bot, int(match.group(1)), latest, commanded=False)
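For a quick sense of what the `commanded` flag above buys, here is a minimal, framework-free sketch of the same pattern. The `format_comic` helper and the sample dictionary are illustrative stand-ins, not Sopel's actual `bot.say` plumbing:

```python
# Minimal sketch of the "commanded" flag pattern from the patch above.
# A command reply repeats the comic URL; a link-triggered reply omits it,
# so two bots watching the same channel cannot keep re-triggering each other.

def format_comic(result, commanded=True):
    prefix = result["url"] + " | " if commanded else ""
    return "{}{} | Alt-text: {}".format(prefix, result["title"], result["alt"])


if __name__ == "__main__":
    comic = {
        "url": "https://xkcd.com/353",
        "title": "Python",
        "alt": "I wrote 20 short programs in Python yesterday.",
    }
    print(format_comic(comic, commanded=True))   # explicit .xkcd command
    print(format_comic(comic, commanded=False))  # triggered by a posted link
```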
| {"golden_diff": "diff --git a/sopel/modules/xkcd.py b/sopel/modules/xkcd.py\n--- a/sopel/modules/xkcd.py\n+++ b/sopel/modules/xkcd.py\n@@ -96,7 +96,7 @@\n say_result(bot, requested)\n \n \n-def numbered_result(bot, query, latest):\n+def numbered_result(bot, query, latest, commanded=True):\n max_int = latest['num']\n if query > max_int:\n bot.say((\"Sorry, comic #{} hasn't been posted yet. \"\n@@ -117,16 +117,18 @@\n # Negative: go back that many from current\n requested = get_info(max_int + query)\n \n- say_result(bot, requested)\n+ say_result(bot, requested, commanded)\n \n \n-def say_result(bot, result):\n- message = '{} | {} | Alt-text: {}'.format(result['url'], result['title'],\n- result['alt'])\n+def say_result(bot, result, commanded=True):\n+ message = '{}{} | Alt-text: {}'.format(\n+ result['url'] + ' | ' if commanded else '',\n+ result['title'], result['alt']\n+ )\n bot.say(message)\n \n \n @url(r'xkcd.com/(\\d+)')\n def get_url(bot, trigger, match):\n latest = get_info()\n- numbered_result(bot, int(match.group(1)), latest)\n+ numbered_result(bot, int(match.group(1)), latest, commanded=False)\n", "issue": "xkcd outputs link even if triggered by one\nThe `xkcd` plugin shouldn't output a link to the comic if it was triggered by a link.\r\n\r\nSee `wikipedia`, for example. This is an anti-cycling measure in case multiple bots handling the same link(s) are present in the channel.\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nxkcd.py - Sopel xkcd Module\nCopyright 2010, Michael Yanovich (yanovich.net), and Morgan Goose\nCopyright 2012, Lior Ramati\nCopyright 2013, Elsie Powell (embolalia.com)\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport random\nimport re\n\nimport requests\n\nfrom sopel.modules.search import bing_search\nfrom sopel.module import commands, url\n\n\nignored_sites = [\n # For searching the web\n 'almamater.xkcd.com',\n 'blog.xkcd.com',\n 'blag.xkcd.com',\n 'forums.xkcd.com',\n 'fora.xkcd.com',\n 'forums3.xkcd.com',\n 'store.xkcd.com',\n 'wiki.xkcd.com',\n 'what-if.xkcd.com',\n]\nsites_query = ' site:xkcd.com -site:' + ' -site:'.join(ignored_sites)\n\n\ndef get_info(number=None):\n if number:\n url = 'https://xkcd.com/{}/info.0.json'.format(number)\n else:\n url = 'https://xkcd.com/info.0.json'\n data = requests.get(url).json()\n data['url'] = 'https://xkcd.com/' + str(data['num'])\n return data\n\n\ndef web_search(query):\n url = bing_search(query + sites_query)\n if not url:\n return None\n match = re.match(r'(?:https?://)?xkcd.com/(\\d+)/?', url)\n if match:\n return match.group(1)\n\n\n@commands('xkcd')\ndef xkcd(bot, trigger):\n \"\"\".xkcd - Finds an xkcd comic strip.\n\n Takes one of 3 inputs:\n\n * If no input is provided it will return a random comic\n * If numeric input is provided it will return that comic, or the\n nth-latest comic if the number is non-positive\n * If non-numeric input is provided it will return the first search result\n for those keywords on the xkcd.com site\n \"\"\"\n # get latest comic for rand function and numeric input\n latest = get_info()\n max_int = latest['num']\n\n # if no input is given (pre - lior's edits code)\n if not trigger.group(2): # get rand comic\n random.seed()\n requested = get_info(random.randint(1, max_int + 1))\n else:\n query = trigger.group(2).strip()\n\n numbered = re.match(r\"^(#|\\+|-)?(\\d+)$\", query)\n if numbered:\n query = int(numbered.group(2))\n if numbered.group(1) == 
\"-\":\n query = -query\n return numbered_result(bot, query, latest)\n else:\n # Non-number: search the web.\n if (query.lower() == \"latest\" or query.lower() == \"newest\"):\n requested = latest\n else:\n number = web_search(query)\n if not number:\n bot.say('Could not find any comics for that query.')\n return\n requested = get_info(number)\n\n say_result(bot, requested)\n\n\ndef numbered_result(bot, query, latest):\n max_int = latest['num']\n if query > max_int:\n bot.say((\"Sorry, comic #{} hasn't been posted yet. \"\n \"The last comic was #{}\").format(query, max_int))\n return\n elif query <= -max_int:\n bot.say((\"Sorry, but there were only {} comics \"\n \"released yet so far\").format(max_int))\n return\n elif abs(query) == 0:\n requested = latest\n elif query == 404 or max_int + query == 404:\n bot.say(\"404 - Not Found\") # don't error on that one\n return\n elif query > 0:\n requested = get_info(query)\n else:\n # Negative: go back that many from current\n requested = get_info(max_int + query)\n\n say_result(bot, requested)\n\n\ndef say_result(bot, result):\n message = '{} | {} | Alt-text: {}'.format(result['url'], result['title'],\n result['alt'])\n bot.say(message)\n\n\n@url(r'xkcd.com/(\\d+)')\ndef get_url(bot, trigger, match):\n latest = get_info()\n numbered_result(bot, int(match.group(1)), latest)\n", "path": "sopel/modules/xkcd.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"\nxkcd.py - Sopel xkcd Module\nCopyright 2010, Michael Yanovich (yanovich.net), and Morgan Goose\nCopyright 2012, Lior Ramati\nCopyright 2013, Elsie Powell (embolalia.com)\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport random\nimport re\n\nimport requests\n\nfrom sopel.modules.search import bing_search\nfrom sopel.module import commands, url\n\n\nignored_sites = [\n # For searching the web\n 'almamater.xkcd.com',\n 'blog.xkcd.com',\n 'blag.xkcd.com',\n 'forums.xkcd.com',\n 'fora.xkcd.com',\n 'forums3.xkcd.com',\n 'store.xkcd.com',\n 'wiki.xkcd.com',\n 'what-if.xkcd.com',\n]\nsites_query = ' site:xkcd.com -site:' + ' -site:'.join(ignored_sites)\n\n\ndef get_info(number=None):\n if number:\n url = 'https://xkcd.com/{}/info.0.json'.format(number)\n else:\n url = 'https://xkcd.com/info.0.json'\n data = requests.get(url).json()\n data['url'] = 'https://xkcd.com/' + str(data['num'])\n return data\n\n\ndef web_search(query):\n url = bing_search(query + sites_query)\n if not url:\n return None\n match = re.match(r'(?:https?://)?xkcd.com/(\\d+)/?', url)\n if match:\n return match.group(1)\n\n\n@commands('xkcd')\ndef xkcd(bot, trigger):\n \"\"\".xkcd - Finds an xkcd comic strip.\n\n Takes one of 3 inputs:\n\n * If no input is provided it will return a random comic\n * If numeric input is provided it will return that comic, or the\n nth-latest comic if the number is non-positive\n * If non-numeric input is provided it will return the first search result\n for those keywords on the xkcd.com site\n \"\"\"\n # get latest comic for rand function and numeric input\n latest = get_info()\n max_int = latest['num']\n\n # if no input is given (pre - lior's edits code)\n if not trigger.group(2): # get rand comic\n random.seed()\n requested = get_info(random.randint(1, max_int + 1))\n else:\n query = trigger.group(2).strip()\n\n numbered = re.match(r\"^(#|\\+|-)?(\\d+)$\", query)\n if numbered:\n query = int(numbered.group(2))\n if numbered.group(1) == \"-\":\n query = -query\n return 
numbered_result(bot, query, latest)\n else:\n # Non-number: search the web.\n if (query.lower() == \"latest\" or query.lower() == \"newest\"):\n requested = latest\n else:\n number = web_search(query)\n if not number:\n bot.say('Could not find any comics for that query.')\n return\n requested = get_info(number)\n\n say_result(bot, requested)\n\n\ndef numbered_result(bot, query, latest, commanded=True):\n max_int = latest['num']\n if query > max_int:\n bot.say((\"Sorry, comic #{} hasn't been posted yet. \"\n \"The last comic was #{}\").format(query, max_int))\n return\n elif query <= -max_int:\n bot.say((\"Sorry, but there were only {} comics \"\n \"released yet so far\").format(max_int))\n return\n elif abs(query) == 0:\n requested = latest\n elif query == 404 or max_int + query == 404:\n bot.say(\"404 - Not Found\") # don't error on that one\n return\n elif query > 0:\n requested = get_info(query)\n else:\n # Negative: go back that many from current\n requested = get_info(max_int + query)\n\n say_result(bot, requested, commanded)\n\n\ndef say_result(bot, result, commanded=True):\n message = '{}{} | Alt-text: {}'.format(\n result['url'] + ' | ' if commanded else '',\n result['title'], result['alt']\n )\n bot.say(message)\n\n\n@url(r'xkcd.com/(\\d+)')\ndef get_url(bot, trigger, match):\n latest = get_info()\n numbered_result(bot, int(match.group(1)), latest, commanded=False)\n", "path": "sopel/modules/xkcd.py"}]} | 1,636 | 324 |
gh_patches_debug_1564 | rasdani/github-patches | git_diff | python-trio__trio-1041 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
trio.Cancelled missing readable str()
Trying to print a trio.Cancelled exception, e.g. with `f'{exc}'`, yields an empty string. I usually resort to `type(exc)` when I need to see what is going on.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `trio/_core/_exceptions.py`
Content:
```
1 import attr
2
3
4 class TrioInternalError(Exception):
5 """Raised by :func:`run` if we encounter a bug in trio, or (possibly) a
6 misuse of one of the low-level :mod:`trio.hazmat` APIs.
7
8 This should never happen! If you get this error, please file a bug.
9
10 Unfortunately, if you get this error it also means that all bets are off –
11 trio doesn't know what is going on and its normal invariants may be void.
12 (For example, we might have "lost track" of a task. Or lost track of all
13 tasks.) Again, though, this shouldn't happen.
14
15 """
16 pass
17
18
19 class RunFinishedError(RuntimeError):
20 """Raised by ``run_in_trio_thread`` and similar functions if the
21 corresponding call to :func:`trio.run` has already finished.
22
23 """
24 pass
25
26
27 class WouldBlock(Exception):
28 """Raised by ``X_nowait`` functions if ``X`` would block.
29
30 """
31 pass
32
33
34 class Cancelled(BaseException):
35 """Raised by blocking calls if the surrounding scope has been cancelled.
36
37 You should let this exception propagate, to be caught by the relevant
38 cancel scope. To remind you of this, it inherits from :exc:`BaseException`
39 instead of :exc:`Exception`, just like :exc:`KeyboardInterrupt` and
40 :exc:`SystemExit` do. This means that if you write something like::
41
42 try:
43 ...
44 except Exception:
45 ...
46
47 then this *won't* catch a :exc:`Cancelled` exception.
48
49 You cannot raise :exc:`Cancelled` yourself. Attempting to do so
50 will produce a :exc:`RuntimeError`. Use :meth:`cancel_scope.cancel()
51 <trio.CancelScope.cancel>` instead.
52
53 .. note::
54
55 In the US it's also common to see this word spelled "canceled", with
56 only one "l". This is a `recent
57 <https://books.google.com/ngrams/graph?content=canceled%2Ccancelled&year_start=1800&year_end=2000&corpus=5&smoothing=3&direct_url=t1%3B%2Ccanceled%3B%2Cc0%3B.t1%3B%2Ccancelled%3B%2Cc0>`__
58 and `US-specific
59 <https://books.google.com/ngrams/graph?content=canceled%2Ccancelled&year_start=1800&year_end=2000&corpus=18&smoothing=3&share=&direct_url=t1%3B%2Ccanceled%3B%2Cc0%3B.t1%3B%2Ccancelled%3B%2Cc0>`__
60 innovation, and even in the US both forms are still commonly used. So
61 for consistency with the rest of the world and with "cancellation"
62 (which always has two "l"s), trio uses the two "l" spelling
63 everywhere.
64
65 """
66 __marker = object()
67
68 def __init__(self, _marker=None):
69 if _marker is not self.__marker:
70 raise RuntimeError(
71 'Cancelled should not be raised directly. Use the cancel() '
72 'method on your cancel scope.'
73 )
74 super().__init__()
75
76 @classmethod
77 def _init(cls):
78 """A private constructor so that a user-created instance of Cancelled
79 can raise an appropriate error. see `issue #342
80 <https://github.com/python-trio/trio/issues/342>`__.
81 """
82 return cls(_marker=cls.__marker)
83
84
85 class BusyResourceError(Exception):
86 """Raised when a task attempts to use a resource that some other task is
87 already using, and this would lead to bugs and nonsense.
88
89 For example, if two tasks try to send data through the same socket at the
90 same time, trio will raise :class:`BusyResourceError` instead of letting
91 the data get scrambled.
92
93 """
94
95
96 class ClosedResourceError(Exception):
97 """Raised when attempting to use a resource after it has been closed.
98
99 Note that "closed" here means that *your* code closed the resource,
100 generally by calling a method with a name like ``close`` or ``aclose``, or
101 by exiting a context manager. If a problem arises elsewhere – for example,
102 because of a network failure, or because a remote peer closed their end of
103 a connection – then that should be indicated by a different exception
104 class, like :exc:`BrokenResourceError` or an :exc:`OSError` subclass.
105
106 """
107
108
109 class BrokenResourceError(Exception):
110 """Raised when an attempt to use a resource fails due to external
111 circumstances.
112
113 For example, you might get this if you try to send data on a stream where
114 the remote side has already closed the connection.
115
116 You *don't* get this error if *you* closed the resource – in that case you
117 get :class:`ClosedResourceError`.
118
119 This exception's ``__cause__`` attribute will often contain more
120 information about the underlying error.
121
122 """
123
124
125 class EndOfChannel(Exception):
126 """Raised when trying to receive from a :class:`trio.abc.ReceiveChannel`
127 that has no more data to receive.
128
129 This is analogous to an "end-of-file" condition, but for channels.
130
131 """
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/trio/_core/_exceptions.py b/trio/_core/_exceptions.py
--- a/trio/_core/_exceptions.py
+++ b/trio/_core/_exceptions.py
@@ -73,6 +73,9 @@
)
super().__init__()
+ def __str__(self):
+ return "Cancelled"
+
@classmethod
def _init(cls):
"""A private constructor so that a user-created instance of Cancelled
| {"golden_diff": "diff --git a/trio/_core/_exceptions.py b/trio/_core/_exceptions.py\n--- a/trio/_core/_exceptions.py\n+++ b/trio/_core/_exceptions.py\n@@ -73,6 +73,9 @@\n )\n super().__init__()\n \n+ def __str__(self):\n+ return \"Cancelled\"\n+\n @classmethod\n def _init(cls):\n \"\"\"A private constructor so that a user-created instance of Cancelled\n", "issue": "trio.Cancelled missing readable str()\nTrying to print a trio.Cancelled exception e.g. with `f'{exc}'` yields an empty string. I usually resort to `type(exc)` when I need to see what's going on.\n", "before_files": [{"content": "import attr\n\n\nclass TrioInternalError(Exception):\n \"\"\"Raised by :func:`run` if we encounter a bug in trio, or (possibly) a\n misuse of one of the low-level :mod:`trio.hazmat` APIs.\n\n This should never happen! If you get this error, please file a bug.\n\n Unfortunately, if you get this error it also means that all bets are off \u2013\n trio doesn't know what is going on and its normal invariants may be void.\n (For example, we might have \"lost track\" of a task. Or lost track of all\n tasks.) Again, though, this shouldn't happen.\n\n \"\"\"\n pass\n\n\nclass RunFinishedError(RuntimeError):\n \"\"\"Raised by ``run_in_trio_thread`` and similar functions if the\n corresponding call to :func:`trio.run` has already finished.\n\n \"\"\"\n pass\n\n\nclass WouldBlock(Exception):\n \"\"\"Raised by ``X_nowait`` functions if ``X`` would block.\n\n \"\"\"\n pass\n\n\nclass Cancelled(BaseException):\n \"\"\"Raised by blocking calls if the surrounding scope has been cancelled.\n\n You should let this exception propagate, to be caught by the relevant\n cancel scope. To remind you of this, it inherits from :exc:`BaseException`\n instead of :exc:`Exception`, just like :exc:`KeyboardInterrupt` and\n :exc:`SystemExit` do. This means that if you write something like::\n\n try:\n ...\n except Exception:\n ...\n\n then this *won't* catch a :exc:`Cancelled` exception.\n\n You cannot raise :exc:`Cancelled` yourself. Attempting to do so\n will produce a :exc:`RuntimeError`. Use :meth:`cancel_scope.cancel()\n <trio.CancelScope.cancel>` instead.\n\n .. note::\n\n In the US it's also common to see this word spelled \"canceled\", with\n only one \"l\". This is a `recent\n <https://books.google.com/ngrams/graph?content=canceled%2Ccancelled&year_start=1800&year_end=2000&corpus=5&smoothing=3&direct_url=t1%3B%2Ccanceled%3B%2Cc0%3B.t1%3B%2Ccancelled%3B%2Cc0>`__\n and `US-specific\n <https://books.google.com/ngrams/graph?content=canceled%2Ccancelled&year_start=1800&year_end=2000&corpus=18&smoothing=3&share=&direct_url=t1%3B%2Ccanceled%3B%2Cc0%3B.t1%3B%2Ccancelled%3B%2Cc0>`__\n innovation, and even in the US both forms are still commonly used. So\n for consistency with the rest of the world and with \"cancellation\"\n (which always has two \"l\"s), trio uses the two \"l\" spelling\n everywhere.\n\n \"\"\"\n __marker = object()\n\n def __init__(self, _marker=None):\n if _marker is not self.__marker:\n raise RuntimeError(\n 'Cancelled should not be raised directly. Use the cancel() '\n 'method on your cancel scope.'\n )\n super().__init__()\n\n @classmethod\n def _init(cls):\n \"\"\"A private constructor so that a user-created instance of Cancelled\n can raise an appropriate error. 
see `issue #342\n <https://github.com/python-trio/trio/issues/342>`__.\n \"\"\"\n return cls(_marker=cls.__marker)\n\n\nclass BusyResourceError(Exception):\n \"\"\"Raised when a task attempts to use a resource that some other task is\n already using, and this would lead to bugs and nonsense.\n\n For example, if two tasks try to send data through the same socket at the\n same time, trio will raise :class:`BusyResourceError` instead of letting\n the data get scrambled.\n\n \"\"\"\n\n\nclass ClosedResourceError(Exception):\n \"\"\"Raised when attempting to use a resource after it has been closed.\n\n Note that \"closed\" here means that *your* code closed the resource,\n generally by calling a method with a name like ``close`` or ``aclose``, or\n by exiting a context manager. If a problem arises elsewhere \u2013 for example,\n because of a network failure, or because a remote peer closed their end of\n a connection \u2013 then that should be indicated by a different exception\n class, like :exc:`BrokenResourceError` or an :exc:`OSError` subclass.\n\n \"\"\"\n\n\nclass BrokenResourceError(Exception):\n \"\"\"Raised when an attempt to use a resource fails due to external\n circumstances.\n\n For example, you might get this if you try to send data on a stream where\n the remote side has already closed the connection.\n\n You *don't* get this error if *you* closed the resource \u2013 in that case you\n get :class:`ClosedResourceError`.\n\n This exception's ``__cause__`` attribute will often contain more\n information about the underlying error.\n\n \"\"\"\n\n\nclass EndOfChannel(Exception):\n \"\"\"Raised when trying to receive from a :class:`trio.abc.ReceiveChannel`\n that has no more data to receive.\n\n This is analogous to an \"end-of-file\" condition, but for channels.\n\n \"\"\"\n", "path": "trio/_core/_exceptions.py"}], "after_files": [{"content": "import attr\n\n\nclass TrioInternalError(Exception):\n \"\"\"Raised by :func:`run` if we encounter a bug in trio, or (possibly) a\n misuse of one of the low-level :mod:`trio.hazmat` APIs.\n\n This should never happen! If you get this error, please file a bug.\n\n Unfortunately, if you get this error it also means that all bets are off \u2013\n trio doesn't know what is going on and its normal invariants may be void.\n (For example, we might have \"lost track\" of a task. Or lost track of all\n tasks.) Again, though, this shouldn't happen.\n\n \"\"\"\n pass\n\n\nclass RunFinishedError(RuntimeError):\n \"\"\"Raised by ``run_in_trio_thread`` and similar functions if the\n corresponding call to :func:`trio.run` has already finished.\n\n \"\"\"\n pass\n\n\nclass WouldBlock(Exception):\n \"\"\"Raised by ``X_nowait`` functions if ``X`` would block.\n\n \"\"\"\n pass\n\n\nclass Cancelled(BaseException):\n \"\"\"Raised by blocking calls if the surrounding scope has been cancelled.\n\n You should let this exception propagate, to be caught by the relevant\n cancel scope. To remind you of this, it inherits from :exc:`BaseException`\n instead of :exc:`Exception`, just like :exc:`KeyboardInterrupt` and\n :exc:`SystemExit` do. This means that if you write something like::\n\n try:\n ...\n except Exception:\n ...\n\n then this *won't* catch a :exc:`Cancelled` exception.\n\n You cannot raise :exc:`Cancelled` yourself. Attempting to do so\n will produce a :exc:`RuntimeError`. Use :meth:`cancel_scope.cancel()\n <trio.CancelScope.cancel>` instead.\n\n .. note::\n\n In the US it's also common to see this word spelled \"canceled\", with\n only one \"l\". 
This is a `recent\n <https://books.google.com/ngrams/graph?content=canceled%2Ccancelled&year_start=1800&year_end=2000&corpus=5&smoothing=3&direct_url=t1%3B%2Ccanceled%3B%2Cc0%3B.t1%3B%2Ccancelled%3B%2Cc0>`__\n and `US-specific\n <https://books.google.com/ngrams/graph?content=canceled%2Ccancelled&year_start=1800&year_end=2000&corpus=18&smoothing=3&share=&direct_url=t1%3B%2Ccanceled%3B%2Cc0%3B.t1%3B%2Ccancelled%3B%2Cc0>`__\n innovation, and even in the US both forms are still commonly used. So\n for consistency with the rest of the world and with \"cancellation\"\n (which always has two \"l\"s), trio uses the two \"l\" spelling\n everywhere.\n\n \"\"\"\n __marker = object()\n\n def __init__(self, _marker=None):\n if _marker is not self.__marker:\n raise RuntimeError(\n 'Cancelled should not be raised directly. Use the cancel() '\n 'method on your cancel scope.'\n )\n super().__init__()\n\n def __str__(self):\n return \"Cancelled\"\n\n @classmethod\n def _init(cls):\n \"\"\"A private constructor so that a user-created instance of Cancelled\n can raise an appropriate error. see `issue #342\n <https://github.com/python-trio/trio/issues/342>`__.\n \"\"\"\n return cls(_marker=cls.__marker)\n\n\nclass BusyResourceError(Exception):\n \"\"\"Raised when a task attempts to use a resource that some other task is\n already using, and this would lead to bugs and nonsense.\n\n For example, if two tasks try to send data through the same socket at the\n same time, trio will raise :class:`BusyResourceError` instead of letting\n the data get scrambled.\n\n \"\"\"\n\n\nclass ClosedResourceError(Exception):\n \"\"\"Raised when attempting to use a resource after it has been closed.\n\n Note that \"closed\" here means that *your* code closed the resource,\n generally by calling a method with a name like ``close`` or ``aclose``, or\n by exiting a context manager. If a problem arises elsewhere \u2013 for example,\n because of a network failure, or because a remote peer closed their end of\n a connection \u2013 then that should be indicated by a different exception\n class, like :exc:`BrokenResourceError` or an :exc:`OSError` subclass.\n\n \"\"\"\n\n\nclass BrokenResourceError(Exception):\n \"\"\"Raised when an attempt to use a resource fails due to external\n circumstances.\n\n For example, you might get this if you try to send data on a stream where\n the remote side has already closed the connection.\n\n You *don't* get this error if *you* closed the resource \u2013 in that case you\n get :class:`ClosedResourceError`.\n\n This exception's ``__cause__`` attribute will often contain more\n information about the underlying error.\n\n \"\"\"\n\n\nclass EndOfChannel(Exception):\n \"\"\"Raised when trying to receive from a :class:`trio.abc.ReceiveChannel`\n that has no more data to receive.\n\n This is analogous to an \"end-of-file\" condition, but for channels.\n\n \"\"\"\n", "path": "trio/_core/_exceptions.py"}]} | 1,783 | 100 |
gh_patches_debug_14423 | rasdani/github-patches | git_diff | translate__pootle-4087 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Elasticsearch 1.7.0 breaks Pootle if ES not set up
With `elasticsearch==1.7.0`, calling `es.ping()` against a server that has not been set up raises `ConnectionError`.
This breaks the default install here:
https://github.com/translate/pootle/blob/master/pootle/core/search/backends/elasticsearch.py#L29
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/core/search/backends/elasticsearch.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 from __future__ import absolute_import
11
12 __all__ = ('ElasticSearchBackend',)
13
14 try:
15 from elasticsearch import Elasticsearch
16 except:
17 Elasticsearch = None
18
19 from ..base import SearchBackend
20
21
22 class ElasticSearchBackend(SearchBackend):
23 def __init__(self, config_name):
24 super(ElasticSearchBackend, self).__init__(config_name)
25 self._es = self._get_es_server()
26 self._create_index_if_missing()
27
28 def _server_setup_and_alive(self):
29 return self._es is not None and self._es.ping()
30
31 def _get_es_server(self):
32 if self._settings is None or Elasticsearch is None:
33 return None
34 return Elasticsearch([
35 {'host': self._settings['HOST'],
36 'port': self._settings['PORT']},
37 ])
38
39 def _create_index_if_missing(self):
40 if self._server_setup_and_alive():
41 if not self._es.indices.exists(self._settings['INDEX_NAME']):
42 self._es.indices.create(self._settings['INDEX_NAME'])
43
44 def _is_valuable_hit(self, unit, hit):
45 return str(unit.id) != hit['_id']
46
47 def search(self, unit):
48 if not self._server_setup_and_alive():
49 return []
50
51 counter = {}
52 res = []
53 language = unit.store.translation_project.language.code
54 es_res = self._es.search(
55 index=self._settings['INDEX_NAME'],
56 doc_type=language,
57 body={
58 "query": {
59 "match": {
60 "source": {
61 "query": unit.source,
62 "fuzziness": self._settings['MIN_SCORE'],
63 }
64 }
65 }
66 }
67 )
68
69 for hit in es_res['hits']['hits']:
70 if self._is_valuable_hit(unit, hit):
71 translation_pair = hit['_source']['source'] + hit['_source']['target']
72 if translation_pair not in counter:
73 counter[translation_pair] = 1
74 res.append({
75 'unit_id': hit['_id'],
76 'source': hit['_source']['source'],
77 'target': hit['_source']['target'],
78 'project': hit['_source']['project'],
79 'path': hit['_source']['path'],
80 'username': hit['_source']['username'],
81 'fullname': hit['_source']['fullname'],
82 'email_md5': hit['_source']['email_md5'],
83 })
84 else:
85 counter[translation_pair] += 1
86
87 for item in res:
88 item['count'] = counter[item['source']+item['target']]
89
90 return res
91
92 def update(self, language, obj):
93 if self._server_setup_and_alive():
94 self._es.index(
95 index=self._settings['INDEX_NAME'],
96 doc_type=language,
97 body=obj,
98 id=obj['id']
99 )
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/core/search/backends/elasticsearch.py b/pootle/core/search/backends/elasticsearch.py
--- a/pootle/core/search/backends/elasticsearch.py
+++ b/pootle/core/search/backends/elasticsearch.py
@@ -13,6 +13,7 @@
try:
from elasticsearch import Elasticsearch
+ from elasticsearch.exceptions import ConnectionError
except:
Elasticsearch = None
@@ -26,7 +27,12 @@
self._create_index_if_missing()
def _server_setup_and_alive(self):
- return self._es is not None and self._es.ping()
+ if self._es is None:
+ return False
+ try:
+ return self._es.ping()
+ except ConnectionError:
+ return False
def _get_es_server(self):
if self._settings is None or Elasticsearch is None:
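The essence of the fix is to treat an unreachable server the same way as an unconfigured one instead of letting `ping()` propagate `ConnectionError`. Below is that guard in isolation; `FlakyClient` and `ConnectionErrorStandIn` are made-up stand-ins so the snippet runs without an Elasticsearch installation:

```python
# Isolated sketch of the guard added by the patch. ConnectionErrorStandIn
# plays the role of elasticsearch.exceptions.ConnectionError.

class ConnectionErrorStandIn(Exception):
    pass


class FlakyClient:
    """Pretend ES client whose ping() raises when the server is unreachable."""

    def __init__(self, reachable):
        self.reachable = reachable

    def ping(self):
        if not self.reachable:
            raise ConnectionErrorStandIn("no living connections")
        return True


def server_setup_and_alive(client):
    if client is None:          # backend not configured at all
        return False
    try:
        return client.ping()
    except ConnectionErrorStandIn:
        return False            # configured, but the server is down


print(server_setup_and_alive(None))                # False
print(server_setup_and_alive(FlakyClient(False)))  # False (no exception escapes)
print(server_setup_and_alive(FlakyClient(True)))   # True
```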
| {"golden_diff": "diff --git a/pootle/core/search/backends/elasticsearch.py b/pootle/core/search/backends/elasticsearch.py\n--- a/pootle/core/search/backends/elasticsearch.py\n+++ b/pootle/core/search/backends/elasticsearch.py\n@@ -13,6 +13,7 @@\n \n try:\n from elasticsearch import Elasticsearch\n+ from elasticsearch.exceptions import ConnectionError\n except:\n Elasticsearch = None\n \n@@ -26,7 +27,12 @@\n self._create_index_if_missing()\n \n def _server_setup_and_alive(self):\n- return self._es is not None and self._es.ping()\n+ if self._es is None:\n+ return False\n+ try:\n+ return self._es.ping()\n+ except ConnectionError:\n+ return False\n \n def _get_es_server(self):\n if self._settings is None or Elasticsearch is None:\n", "issue": "Elasticsearch 1.7.0 breaks Pootle if ES not set up\nWith `elasticsearch==1.7.0` doing `es.ping()` on a not-setup server raises `ConnectionError`.\n\nThis breaks the default install here:\n\nhttps://github.com/translate/pootle/blob/master/pootle/core/search/backends/elasticsearch.py#L29\n\nElasticsearch 1.7.0 breaks Pootle if ES not set up\nWith `elasticsearch==1.7.0` doing `es.ping()` on a not-setup server raises `ConnectionError`.\n\nThis breaks the default install here:\n\nhttps://github.com/translate/pootle/blob/master/pootle/core/search/backends/elasticsearch.py#L29\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom __future__ import absolute_import\n\n__all__ = ('ElasticSearchBackend',)\n\ntry:\n from elasticsearch import Elasticsearch\nexcept:\n Elasticsearch = None\n\nfrom ..base import SearchBackend\n\n\nclass ElasticSearchBackend(SearchBackend):\n def __init__(self, config_name):\n super(ElasticSearchBackend, self).__init__(config_name)\n self._es = self._get_es_server()\n self._create_index_if_missing()\n\n def _server_setup_and_alive(self):\n return self._es is not None and self._es.ping()\n\n def _get_es_server(self):\n if self._settings is None or Elasticsearch is None:\n return None\n return Elasticsearch([\n {'host': self._settings['HOST'],\n 'port': self._settings['PORT']},\n ])\n\n def _create_index_if_missing(self):\n if self._server_setup_and_alive():\n if not self._es.indices.exists(self._settings['INDEX_NAME']):\n self._es.indices.create(self._settings['INDEX_NAME'])\n\n def _is_valuable_hit(self, unit, hit):\n return str(unit.id) != hit['_id']\n\n def search(self, unit):\n if not self._server_setup_and_alive():\n return []\n\n counter = {}\n res = []\n language = unit.store.translation_project.language.code\n es_res = self._es.search(\n index=self._settings['INDEX_NAME'],\n doc_type=language,\n body={\n \"query\": {\n \"match\": {\n \"source\": {\n \"query\": unit.source,\n \"fuzziness\": self._settings['MIN_SCORE'],\n }\n }\n }\n }\n )\n\n for hit in es_res['hits']['hits']:\n if self._is_valuable_hit(unit, hit):\n translation_pair = hit['_source']['source'] + hit['_source']['target']\n if translation_pair not in counter:\n counter[translation_pair] = 1\n res.append({\n 'unit_id': hit['_id'],\n 'source': hit['_source']['source'],\n 'target': hit['_source']['target'],\n 'project': hit['_source']['project'],\n 'path': hit['_source']['path'],\n 'username': hit['_source']['username'],\n 'fullname': hit['_source']['fullname'],\n 'email_md5': 
hit['_source']['email_md5'],\n })\n else:\n counter[translation_pair] += 1\n\n for item in res:\n item['count'] = counter[item['source']+item['target']]\n\n return res\n\n def update(self, language, obj):\n if self._server_setup_and_alive():\n self._es.index(\n index=self._settings['INDEX_NAME'],\n doc_type=language,\n body=obj,\n id=obj['id']\n )\n", "path": "pootle/core/search/backends/elasticsearch.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom __future__ import absolute_import\n\n__all__ = ('ElasticSearchBackend',)\n\ntry:\n from elasticsearch import Elasticsearch\n from elasticsearch.exceptions import ConnectionError\nexcept:\n Elasticsearch = None\n\nfrom ..base import SearchBackend\n\n\nclass ElasticSearchBackend(SearchBackend):\n def __init__(self, config_name):\n super(ElasticSearchBackend, self).__init__(config_name)\n self._es = self._get_es_server()\n self._create_index_if_missing()\n\n def _server_setup_and_alive(self):\n if self._es is None:\n return False\n try:\n return self._es.ping()\n except ConnectionError:\n return False\n\n def _get_es_server(self):\n if self._settings is None or Elasticsearch is None:\n return None\n return Elasticsearch([\n {'host': self._settings['HOST'],\n 'port': self._settings['PORT']},\n ])\n\n def _create_index_if_missing(self):\n if self._server_setup_and_alive():\n if not self._es.indices.exists(self._settings['INDEX_NAME']):\n self._es.indices.create(self._settings['INDEX_NAME'])\n\n def _is_valuable_hit(self, unit, hit):\n return str(unit.id) != hit['_id']\n\n def search(self, unit):\n if not self._server_setup_and_alive():\n return []\n\n counter = {}\n res = []\n language = unit.store.translation_project.language.code\n es_res = self._es.search(\n index=self._settings['INDEX_NAME'],\n doc_type=language,\n body={\n \"query\": {\n \"match\": {\n \"source\": {\n \"query\": unit.source,\n \"fuzziness\": self._settings['MIN_SCORE'],\n }\n }\n }\n }\n )\n\n for hit in es_res['hits']['hits']:\n if self._is_valuable_hit(unit, hit):\n translation_pair = hit['_source']['source'] + hit['_source']['target']\n if translation_pair not in counter:\n counter[translation_pair] = 1\n res.append({\n 'unit_id': hit['_id'],\n 'source': hit['_source']['source'],\n 'target': hit['_source']['target'],\n 'project': hit['_source']['project'],\n 'path': hit['_source']['path'],\n 'username': hit['_source']['username'],\n 'fullname': hit['_source']['fullname'],\n 'email_md5': hit['_source']['email_md5'],\n })\n else:\n counter[translation_pair] += 1\n\n for item in res:\n item['count'] = counter[item['source']+item['target']]\n\n return res\n\n def update(self, language, obj):\n if self._server_setup_and_alive():\n self._es.index(\n index=self._settings['INDEX_NAME'],\n doc_type=language,\n body=obj,\n id=obj['id']\n )\n", "path": "pootle/core/search/backends/elasticsearch.py"}]} | 1,284 | 194 |
gh_patches_debug_199 | rasdani/github-patches | git_diff | twisted__twisted-1695 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 22.2.0
|[<img alt="adiroiban's avatar" src="https://avatars.githubusercontent.com/u/204609?s=50" width="50" height="50">](https://github.com/adiroiban)| @adiroiban reported|
|-|-|
|Trac ID|trac#10306|
|Type|enhancement|
|Created|2022-02-08 14:05:11Z|
<details><summary>Searchable metadata</summary>
```
trac-id__10306 10306
type__enhancement enhancement
reporter__adiroiban adiroiban
priority__normal normal
milestone__None None
branch__
branch_author__
status__closed closed
resolution__fixed fixed
component__core core
keywords__None None
time__1644329111193403 1644329111193403
changetime__1646513115841857 1646513115841857
version__None None
owner__None None
```
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/twisted/_version.py`
Content:
```
1 """
2 Provides Twisted version information.
3 """
4
5 # This file is auto-generated! Do not edit!
6 # Use `python -m incremental.update Twisted` to change this file.
7
8 from incremental import Version
9
10 __version__ = Version("Twisted", 22, 1, 0, post=0)
11 __all__ = ["__version__"]
12
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/twisted/_version.py b/src/twisted/_version.py
--- a/src/twisted/_version.py
+++ b/src/twisted/_version.py
@@ -7,5 +7,5 @@
from incremental import Version
-__version__ = Version("Twisted", 22, 1, 0, post=0)
+__version__ = Version("Twisted", 22, 2, 0, post=0)
__all__ = ["__version__"]
| {"golden_diff": "diff --git a/src/twisted/_version.py b/src/twisted/_version.py\n--- a/src/twisted/_version.py\n+++ b/src/twisted/_version.py\n@@ -7,5 +7,5 @@\n \n from incremental import Version\n \n-__version__ = Version(\"Twisted\", 22, 1, 0, post=0)\n+__version__ = Version(\"Twisted\", 22, 2, 0, post=0)\n __all__ = [\"__version__\"]\n", "issue": "Release 22.2.0\n|[<img alt=\"adiroiban's avatar\" src=\"https://avatars.githubusercontent.com/u/204609?s=50\" width=\"50\" height=\"50\">](https://github.com/adiroiban)| @adiroiban reported|\n|-|-|\n|Trac ID|trac#10306|\n|Type|enhancement|\n|Created|2022-02-08 14:05:11Z|\n\n\n\n<details><summary>Searchable metadata</summary>\n\n```\ntrac-id__10306 10306\ntype__enhancement enhancement\nreporter__adiroiban adiroiban\npriority__normal normal\nmilestone__None None\nbranch__ \nbranch_author__ \nstatus__closed closed\nresolution__fixed fixed\ncomponent__core core\nkeywords__None None\ntime__1644329111193403 1644329111193403\nchangetime__1646513115841857 1646513115841857\nversion__None None\nowner__None None\n\n```\n</details>\n\n", "before_files": [{"content": "\"\"\"\nProvides Twisted version information.\n\"\"\"\n\n# This file is auto-generated! Do not edit!\n# Use `python -m incremental.update Twisted` to change this file.\n\nfrom incremental import Version\n\n__version__ = Version(\"Twisted\", 22, 1, 0, post=0)\n__all__ = [\"__version__\"]\n", "path": "src/twisted/_version.py"}], "after_files": [{"content": "\"\"\"\nProvides Twisted version information.\n\"\"\"\n\n# This file is auto-generated! Do not edit!\n# Use `python -m incremental.update Twisted` to change this file.\n\nfrom incremental import Version\n\n__version__ = Version(\"Twisted\", 22, 2, 0, post=0)\n__all__ = [\"__version__\"]\n", "path": "src/twisted/_version.py"}]} | 637 | 113 |
gh_patches_debug_39189 | rasdani/github-patches | git_diff | cupy__cupy-2915 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error with _ufunc_config.config
When dispatching to CuPy from Dask (multithreaded Python), there is a failure due to `_ufunc_config.config`.
* Conditions (you can just paste the output of `python -c 'import cupy; cupy.show_config()'`)
```
CuPy Version : 7.0.0
CUDA Root : /usr/local/cuda-10.1/
CUDA Build Version : 10010
CUDA Driver Version : 10010
CUDA Runtime Version : 10010
cuBLAS Version : 10201
cuFFT Version : 10101
cuRAND Version : 10101
cuSOLVER Version : (10, 2, 0)
cuSPARSE Version : 10300
NVRTC Version : (10, 1)
cuDNN Build Version : None
cuDNN Version : None
NCCL Build Version : None
NCCL Runtime Version : None
```
* Code to reproduce
```
import dask, cupy
import dask.array as da
rs = dask.array.random.RandomState(RandomState=cupy.random.RandomState)
x = rs.random((100, 100))
x = x.persist()
u, s, v = da.linalg.svd(x)
u.compute()
```
* Error messages, stack traces, or logs
<details>
<summary>Traceback</summary>
```
Traceback (most recent call last):
File "cpu-svd.py", line 10, in <module>
u.compute()
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/base.py", line 165, in compute
(result,) = compute(self, traverse=False, **kwargs)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/base.py", line 436, in compute
results = schedule(dsk, keys, **kwargs)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/threaded.py", line 81, in get
**kwargs
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py", line 486, in get_async
raise_exception(exc, tb)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py", line 316, in reraise
raise exc
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py", line 222, in execute_task
result = _execute_task(task, data)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py", line 119, in _execute_task
return func(*args2)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/optimization.py", line 982, in __call__
return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py", line 149, in get
result = _execute_task(task, cache)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py", line 119, in _execute_task
return func(*args2)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/array/linalg.py", line 48, in _wrapped_qr
return np.linalg.qr(a)
File "<__array_function__ internals>", line 6, in qr
File "cupy/core/core.pyx", line 1344, in cupy.core.core.ndarray.__array_function__
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/cupy/linalg/decomposition.py", line 248, in qr
geqrf, dev_info)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/cupy/linalg/util.py", line 42, in _check_cusolver_dev_info_if_synchronization_allowed
print(cupyx._ufunc_config.config.linalg)
AttributeError: '_thread._local' object has no attribute 'linalg'
```
</details>
Rather than using `threading.local()` here
https://github.com/cupy/cupy/blob/f06f509167957240d17924965238cb0901b700ab/cupyx/_ufunc_config.py#L4-L13
We could use `types` for a two-line code change:
```python
import types
config = types.SimpleNamespace()
```
But I may be missing the reason for using thread-local storage here.
--- END ISSUE ---
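The failure described above can be reproduced without CuPy or Dask at all. The sketch below uses only the standard library and shows why attributes set on a `threading.local()` in the importing (main) thread simply do not exist in a worker thread:

```python
# Standard-library reproduction of the reported AttributeError.
import threading

config = threading.local()
config.linalg = "ignore"  # assigned in the main thread only


def read_from_worker():
    try:
        print("worker sees:", config.linalg)
    except AttributeError as exc:
        # On CPython this prints the same message as the traceback above:
        # '_thread._local' object has no attribute 'linalg'
        print("worker failed:", exc)


print("main sees:", config.linalg)
worker = threading.Thread(target=read_from_worker)
worker.start()
worker.join()
```

A `types.SimpleNamespace`, as suggested, is an ordinary object shared by all threads, so it would avoid the error at the cost of the per-thread isolation that `threading.local()` provides.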
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/linalg/util.py`
Content:
```
1 from numpy import linalg
2
3 import cupy
4 from cupy import core
5 import cupyx
6
7
8 def _assert_cupy_array(*arrays):
9 for a in arrays:
10 if not isinstance(a, cupy.core.ndarray):
11 raise linalg.LinAlgError(
12 'cupy.linalg only supports cupy.core.ndarray')
13
14
15 def _assert_rank2(*arrays):
16 for a in arrays:
17 if a.ndim != 2:
18 raise linalg.LinAlgError(
19 '{}-dimensional array given. Array must be '
20 'two-dimensional'.format(a.ndim))
21
22
23 def _assert_nd_squareness(*arrays):
24 for a in arrays:
25 if max(a.shape[-2:]) != min(a.shape[-2:]):
26 raise linalg.LinAlgError(
27 'Last 2 dimensions of the array must be square')
28
29
30 def _check_cusolver_dev_info_if_synchronization_allowed(routine, dev_info):
31 # `dev_info` contains a single integer, the status code of a cuSOLVER
32 # routine call. It is referred to as "devInfo" in the official cuSOLVER
33 # documentation.
34 assert isinstance(dev_info, core.ndarray)
35 assert dev_info.size == 1
36
37 config_linalg = cupyx._ufunc_config.config.linalg
38 # Only 'ignore' and 'raise' are currently supported.
39 if config_linalg == 'ignore':
40 return
41
42 assert config_linalg == 'raise'
43 dev_info_host = dev_info.item()
44 if dev_info_host != 0:
45 raise linalg.LinAlgError(
46 'Error reported by {} in cuSOLVER. devInfo = {}. Please refer'
47 ' to the cuSOLVER documentation.'.format(
48 routine.__name__, dev_info_host))
49
50
51 def _check_cublas_info_array_if_synchronization_allowed(routine, info_array):
52 # `info_array` contains integers, the status codes of a cuBLAS routine
53 # call. It is referrd to as "infoArray" or "devInfoArray" in the official
54 # cuBLAS documentation.
55 assert isinstance(info_array, core.ndarray)
56 assert info_array.ndim == 1
57
58 config_linalg = cupyx._ufunc_config.config.linalg
59 # Only 'ignore' and 'raise' are currently supported.
60 if config_linalg == 'ignore':
61 return
62
63 assert config_linalg == 'raise'
64 if (info_array != 0).any():
65 raise linalg.LinAlgError(
66 'Error reported by {} in cuBLAS. infoArray/devInfoArray = {}.'
67 ' Please refer to the cuBLAS documentation.'.format(
68 routine.__name__, info_array))
69
70
71 _tril_kernel = core.ElementwiseKernel(
72 'int64 k', 'S x',
73 'x = (_ind.get()[1] - _ind.get()[0] <= k) ? x : 0',
74 'tril_kernel',
75 reduce_dims=False
76 )
77
78
79 def _tril(x, k=0):
80 _tril_kernel(k, x)
81 return x
82
83
84 _triu_kernel = core.ElementwiseKernel(
85 'int64 k', 'S x',
86 'x = (_ind.get()[1] - _ind.get()[0] >= k) ? x : 0',
87 'triu_kernel',
88 reduce_dims=False
89 )
90
91
92 def _triu(x, k=0):
93 _triu_kernel(k, x)
94 return x
95
```
Path: `cupyx/_ufunc_config.py`
Content:
```
1 import contextlib
2 import threading
3
4 config = threading.local()
5 config.divide = None
6 config.over = None
7 config.under = None
8 config.invalid = None
9 # In favor of performance, the `devInfo` input/output from cuSOLVER routine
10 # calls that is necessary to check the validity of the other outputs, are
11 # ignored, as D2H copy incurring device synchronizations would otherwise be
12 # required.
13 config.linalg = 'ignore'
14
15
16 @contextlib.contextmanager
17 def errstate(*, divide=None, over=None, under=None, invalid=None, linalg=None):
18 """
19 TODO(hvy): Write docs.
20 """
21 old_state = seterr(
22 divide=divide, over=over, under=under, invalid=invalid, linalg=linalg)
23 try:
24 yield # Return `None` similar to `numpy.errstate`.
25 finally:
26 seterr(**old_state)
27
28
29 def seterr(*, divide=None, over=None, under=None, invalid=None, linalg=None):
30 """
31 TODO(hvy): Write docs.
32 """
33 if divide is not None:
34 raise NotImplementedError()
35 if over is not None:
36 raise NotImplementedError()
37 if under is not None:
38 raise NotImplementedError()
39 if invalid is not None:
40 raise NotImplementedError()
41 if linalg is not None:
42 if linalg not in ('ignore', 'raise'):
43 raise NotImplementedError()
44
45 old_state = geterr()
46
47 config.divide = divide
48 config.under = under
49 config.over = over
50 config.invalid = invalid
51 config.linalg = linalg
52
53 return old_state
54
55
56 def geterr():
57 """
58 TODO(hvy): Write docs.
59 """
60 return dict(
61 divide=config.divide,
62 over=config.over,
63 under=config.under,
64 invalid=config.invalid,
65 linalg=config.linalg,
66 )
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/linalg/util.py b/cupy/linalg/util.py
--- a/cupy/linalg/util.py
+++ b/cupy/linalg/util.py
@@ -33,8 +33,7 @@
# documentation.
assert isinstance(dev_info, core.ndarray)
assert dev_info.size == 1
-
- config_linalg = cupyx._ufunc_config.config.linalg
+ config_linalg = cupyx._ufunc_config.get_config_linalg()
# Only 'ignore' and 'raise' are currently supported.
if config_linalg == 'ignore':
return
@@ -55,7 +54,7 @@
assert isinstance(info_array, core.ndarray)
assert info_array.ndim == 1
- config_linalg = cupyx._ufunc_config.config.linalg
+ config_linalg = cupyx._ufunc_config.get_config_linalg()
# Only 'ignore' and 'raise' are currently supported.
if config_linalg == 'ignore':
return
diff --git a/cupyx/_ufunc_config.py b/cupyx/_ufunc_config.py
--- a/cupyx/_ufunc_config.py
+++ b/cupyx/_ufunc_config.py
@@ -1,16 +1,51 @@
import contextlib
import threading
-config = threading.local()
-config.divide = None
-config.over = None
-config.under = None
-config.invalid = None
-# In favor of performance, the `devInfo` input/output from cuSOLVER routine
-# calls that is necessary to check the validity of the other outputs, are
-# ignored, as D2H copy incurring device synchronizations would otherwise be
-# required.
-config.linalg = 'ignore'
+_config = threading.local()
+
+
+def get_config_divide():
+ try:
+ value = _config.divide
+ except AttributeError:
+ value = _config.divide = None
+ return value
+
+
+def get_config_over():
+ try:
+ value = _config.over
+ except AttributeError:
+ value = _config.over = None
+ return value
+
+
+def get_config_under():
+ try:
+ value = _config.under
+ except AttributeError:
+ value = _config.under = None
+ return value
+
+
+def get_config_invalid():
+ try:
+ value = _config.invalid
+ except AttributeError:
+ value = _config.invalid = None
+ return value
+
+
+def get_config_linalg():
+ # In favor of performance, the `devInfo` input/output from cuSOLVER routine
+ # calls that is necessary to check the validity of the other outputs, are
+ # ignored, as D2H copy incurring device synchronizations would otherwise be
+ # required.
+ try:
+ value = _config.linalg
+ except AttributeError:
+ value = _config.linalg = 'ignore'
+ return value
@contextlib.contextmanager
@@ -44,11 +79,11 @@
old_state = geterr()
- config.divide = divide
- config.under = under
- config.over = over
- config.invalid = invalid
- config.linalg = linalg
+ _config.divide = divide
+ _config.under = under
+ _config.over = over
+ _config.invalid = invalid
+ _config.linalg = linalg
return old_state
@@ -58,9 +93,9 @@
TODO(hvy): Write docs.
"""
return dict(
- divide=config.divide,
- over=config.over,
- under=config.under,
- invalid=config.invalid,
- linalg=config.linalg,
+ divide=get_config_divide(),
+ over=get_config_over(),
+ under=get_config_under(),
+ invalid=get_config_invalid(),
+ linalg=get_config_linalg(),
)
| {"golden_diff": "diff --git a/cupy/linalg/util.py b/cupy/linalg/util.py\n--- a/cupy/linalg/util.py\n+++ b/cupy/linalg/util.py\n@@ -33,8 +33,7 @@\n # documentation.\n assert isinstance(dev_info, core.ndarray)\n assert dev_info.size == 1\n-\n- config_linalg = cupyx._ufunc_config.config.linalg\n+ config_linalg = cupyx._ufunc_config.get_config_linalg()\n # Only 'ignore' and 'raise' are currently supported.\n if config_linalg == 'ignore':\n return\n@@ -55,7 +54,7 @@\n assert isinstance(info_array, core.ndarray)\n assert info_array.ndim == 1\n \n- config_linalg = cupyx._ufunc_config.config.linalg\n+ config_linalg = cupyx._ufunc_config.get_config_linalg()\n # Only 'ignore' and 'raise' are currently supported.\n if config_linalg == 'ignore':\n return\ndiff --git a/cupyx/_ufunc_config.py b/cupyx/_ufunc_config.py\n--- a/cupyx/_ufunc_config.py\n+++ b/cupyx/_ufunc_config.py\n@@ -1,16 +1,51 @@\n import contextlib\n import threading\n \n-config = threading.local()\n-config.divide = None\n-config.over = None\n-config.under = None\n-config.invalid = None\n-# In favor of performance, the `devInfo` input/output from cuSOLVER routine\n-# calls that is necessary to check the validity of the other outputs, are\n-# ignored, as D2H copy incurring device synchronizations would otherwise be\n-# required.\n-config.linalg = 'ignore'\n+_config = threading.local()\n+\n+\n+def get_config_divide():\n+ try:\n+ value = _config.divide\n+ except AttributeError:\n+ value = _config.divide = None\n+ return value\n+\n+\n+def get_config_over():\n+ try:\n+ value = _config.over\n+ except AttributeError:\n+ value = _config.over = None\n+ return value\n+\n+\n+def get_config_under():\n+ try:\n+ value = _config.under\n+ except AttributeError:\n+ value = _config.under = None\n+ return value\n+\n+\n+def get_config_invalid():\n+ try:\n+ value = _config.invalid\n+ except AttributeError:\n+ value = _config.invalid = None\n+ return value\n+\n+\n+def get_config_linalg():\n+ # In favor of performance, the `devInfo` input/output from cuSOLVER routine\n+ # calls that is necessary to check the validity of the other outputs, are\n+ # ignored, as D2H copy incurring device synchronizations would otherwise be\n+ # required.\n+ try:\n+ value = _config.linalg\n+ except AttributeError:\n+ value = _config.linalg = 'ignore'\n+ return value\n \n \n @contextlib.contextmanager\n@@ -44,11 +79,11 @@\n \n old_state = geterr()\n \n- config.divide = divide\n- config.under = under\n- config.over = over\n- config.invalid = invalid\n- config.linalg = linalg\n+ _config.divide = divide\n+ _config.under = under\n+ _config.over = over\n+ _config.invalid = invalid\n+ _config.linalg = linalg\n \n return old_state\n \n@@ -58,9 +93,9 @@\n TODO(hvy): Write docs.\n \"\"\"\n return dict(\n- divide=config.divide,\n- over=config.over,\n- under=config.under,\n- invalid=config.invalid,\n- linalg=config.linalg,\n+ divide=get_config_divide(),\n+ over=get_config_over(),\n+ under=get_config_under(),\n+ invalid=get_config_invalid(),\n+ linalg=get_config_linalg(),\n )\n", "issue": "Error with _ufunc_config.config\nWhen dispatching using CuPy from Dask (multithreaded python) there is a failure due to `_ufunc_config.config`\r\n* Conditions (you can just paste the output of `python -c 'import cupy; cupy.show_config()'`)\r\n\r\n```\r\nCuPy Version : 7.0.0\r\nCUDA Root : /usr/local/cuda-10.1/\r\nCUDA Build Version : 10010\r\nCUDA Driver Version : 10010\r\nCUDA Runtime Version : 10010\r\ncuBLAS Version : 10201\r\ncuFFT Version : 10101\r\ncuRAND Version : 10101\r\ncuSOLVER Version : (10, 2, 
0)\r\ncuSPARSE Version : 10300\r\nNVRTC Version : (10, 1)\r\ncuDNN Build Version : None\r\ncuDNN Version : None\r\nNCCL Build Version : None\r\nNCCL Runtime Version : None\r\n```\r\n* Code to reproduce\r\n```\r\nimport dask, cupy\r\nimport dask.array as da\r\n\r\nrs = dask.array.random.RandomState(RandomState=cupy.random.RandomState)\r\nx = rs.random((100, 100))\r\nx = x.persist()\r\nu, s, v = da.linalg.svd(x)\r\nu.compute()\r\n```\r\n* Error messages, stack traces, or logs\r\n<details>\r\n<summary>Traceback</summary>\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"cpu-svd.py\", line 10, in <module>\r\n u.compute()\r\n File \"/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/base.py\", line 165, in compute\r\n (result,) = compute(self, traverse=False, **kwargs)\r\n File \"/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/base.py\", line 436, in compute\r\n results = schedule(dsk, keys, **kwargs)\r\n File \"/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/threaded.py\", line 81, in get\r\n **kwargs\r\n File \"/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py\", line 486, in get_async\r\n raise_exception(exc, tb)\r\n File \"/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py\", line 316, in reraise\r\n raise exc\r\n File \"/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py\", line 222, in execute_task\r\n result = _execute_task(task, data)\r\n File \"/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py\", line 119, in _execute_task\r\n return func(*args2)\r\n File \"/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/optimization.py\", line 982, in __call__\r\n return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))\r\n File \"/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py\", line 149, in get\r\n result = _execute_task(task, cache)\r\n File \"/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py\", line 119, in _execute_task\r\n return func(*args2)\r\n File \"/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/array/linalg.py\", line 48, in _wrapped_qr\r\n return np.linalg.qr(a)\r\n File \"<__array_function__ internals>\", line 6, in qr\r\n File \"cupy/core/core.pyx\", line 1344, in cupy.core.core.ndarray.__array_function__\r\n File \"/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/cupy/linalg/decomposition.py\", line 248, in qr\r\n geqrf, dev_info)\r\n File \"/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/cupy/linalg/util.py\", line 42, in _check_cusolver_dev_info_if_synchronization_allowed\r\n print(cupyx._ufunc_config.config.linalg)\r\nAttributeError: '_thread._local' object has no attribute 'linalg'\r\n```\r\n</details>\r\n\r\n\r\nRather than using `threading.local()` here \r\nhttps://github.com/cupy/cupy/blob/f06f509167957240d17924965238cb0901b700ab/cupyx/_ufunc_config.py#L4-L13\r\n\r\nWe could use `types` for a two line code change:\r\n\r\n```python\r\nimport types\r\n\r\nconfig = types.SimpleNamespace()\r\n```\r\n\r\nBut I may be missing the reason for using threads here\n", "before_files": [{"content": "from numpy import linalg\n\nimport cupy\nfrom cupy import core\nimport cupyx\n\n\ndef _assert_cupy_array(*arrays):\n for a in arrays:\n if not isinstance(a, 
cupy.core.ndarray):\n raise linalg.LinAlgError(\n 'cupy.linalg only supports cupy.core.ndarray')\n\n\ndef _assert_rank2(*arrays):\n for a in arrays:\n if a.ndim != 2:\n raise linalg.LinAlgError(\n '{}-dimensional array given. Array must be '\n 'two-dimensional'.format(a.ndim))\n\n\ndef _assert_nd_squareness(*arrays):\n for a in arrays:\n if max(a.shape[-2:]) != min(a.shape[-2:]):\n raise linalg.LinAlgError(\n 'Last 2 dimensions of the array must be square')\n\n\ndef _check_cusolver_dev_info_if_synchronization_allowed(routine, dev_info):\n # `dev_info` contains a single integer, the status code of a cuSOLVER\n # routine call. It is referred to as \"devInfo\" in the official cuSOLVER\n # documentation.\n assert isinstance(dev_info, core.ndarray)\n assert dev_info.size == 1\n\n config_linalg = cupyx._ufunc_config.config.linalg\n # Only 'ignore' and 'raise' are currently supported.\n if config_linalg == 'ignore':\n return\n\n assert config_linalg == 'raise'\n dev_info_host = dev_info.item()\n if dev_info_host != 0:\n raise linalg.LinAlgError(\n 'Error reported by {} in cuSOLVER. devInfo = {}. Please refer'\n ' to the cuSOLVER documentation.'.format(\n routine.__name__, dev_info_host))\n\n\ndef _check_cublas_info_array_if_synchronization_allowed(routine, info_array):\n # `info_array` contains integers, the status codes of a cuBLAS routine\n # call. It is referrd to as \"infoArray\" or \"devInfoArray\" in the official\n # cuBLAS documentation.\n assert isinstance(info_array, core.ndarray)\n assert info_array.ndim == 1\n\n config_linalg = cupyx._ufunc_config.config.linalg\n # Only 'ignore' and 'raise' are currently supported.\n if config_linalg == 'ignore':\n return\n\n assert config_linalg == 'raise'\n if (info_array != 0).any():\n raise linalg.LinAlgError(\n 'Error reported by {} in cuBLAS. infoArray/devInfoArray = {}.'\n ' Please refer to the cuBLAS documentation.'.format(\n routine.__name__, info_array))\n\n\n_tril_kernel = core.ElementwiseKernel(\n 'int64 k', 'S x',\n 'x = (_ind.get()[1] - _ind.get()[0] <= k) ? x : 0',\n 'tril_kernel',\n reduce_dims=False\n)\n\n\ndef _tril(x, k=0):\n _tril_kernel(k, x)\n return x\n\n\n_triu_kernel = core.ElementwiseKernel(\n 'int64 k', 'S x',\n 'x = (_ind.get()[1] - _ind.get()[0] >= k) ? 
x : 0',\n 'triu_kernel',\n reduce_dims=False\n)\n\n\ndef _triu(x, k=0):\n _triu_kernel(k, x)\n return x\n", "path": "cupy/linalg/util.py"}, {"content": "import contextlib\nimport threading\n\nconfig = threading.local()\nconfig.divide = None\nconfig.over = None\nconfig.under = None\nconfig.invalid = None\n# In favor of performance, the `devInfo` input/output from cuSOLVER routine\n# calls that is necessary to check the validity of the other outputs, are\n# ignored, as D2H copy incurring device synchronizations would otherwise be\n# required.\nconfig.linalg = 'ignore'\n\n\[email protected]\ndef errstate(*, divide=None, over=None, under=None, invalid=None, linalg=None):\n \"\"\"\n TODO(hvy): Write docs.\n \"\"\"\n old_state = seterr(\n divide=divide, over=over, under=under, invalid=invalid, linalg=linalg)\n try:\n yield # Return `None` similar to `numpy.errstate`.\n finally:\n seterr(**old_state)\n\n\ndef seterr(*, divide=None, over=None, under=None, invalid=None, linalg=None):\n \"\"\"\n TODO(hvy): Write docs.\n \"\"\"\n if divide is not None:\n raise NotImplementedError()\n if over is not None:\n raise NotImplementedError()\n if under is not None:\n raise NotImplementedError()\n if invalid is not None:\n raise NotImplementedError()\n if linalg is not None:\n if linalg not in ('ignore', 'raise'):\n raise NotImplementedError()\n\n old_state = geterr()\n\n config.divide = divide\n config.under = under\n config.over = over\n config.invalid = invalid\n config.linalg = linalg\n\n return old_state\n\n\ndef geterr():\n \"\"\"\n TODO(hvy): Write docs.\n \"\"\"\n return dict(\n divide=config.divide,\n over=config.over,\n under=config.under,\n invalid=config.invalid,\n linalg=config.linalg,\n )\n", "path": "cupyx/_ufunc_config.py"}], "after_files": [{"content": "from numpy import linalg\n\nimport cupy\nfrom cupy import core\nimport cupyx\n\n\ndef _assert_cupy_array(*arrays):\n for a in arrays:\n if not isinstance(a, cupy.core.ndarray):\n raise linalg.LinAlgError(\n 'cupy.linalg only supports cupy.core.ndarray')\n\n\ndef _assert_rank2(*arrays):\n for a in arrays:\n if a.ndim != 2:\n raise linalg.LinAlgError(\n '{}-dimensional array given. Array must be '\n 'two-dimensional'.format(a.ndim))\n\n\ndef _assert_nd_squareness(*arrays):\n for a in arrays:\n if max(a.shape[-2:]) != min(a.shape[-2:]):\n raise linalg.LinAlgError(\n 'Last 2 dimensions of the array must be square')\n\n\ndef _check_cusolver_dev_info_if_synchronization_allowed(routine, dev_info):\n # `dev_info` contains a single integer, the status code of a cuSOLVER\n # routine call. It is referred to as \"devInfo\" in the official cuSOLVER\n # documentation.\n assert isinstance(dev_info, core.ndarray)\n assert dev_info.size == 1\n config_linalg = cupyx._ufunc_config.get_config_linalg()\n # Only 'ignore' and 'raise' are currently supported.\n if config_linalg == 'ignore':\n return\n\n assert config_linalg == 'raise'\n dev_info_host = dev_info.item()\n if dev_info_host != 0:\n raise linalg.LinAlgError(\n 'Error reported by {} in cuSOLVER. devInfo = {}. Please refer'\n ' to the cuSOLVER documentation.'.format(\n routine.__name__, dev_info_host))\n\n\ndef _check_cublas_info_array_if_synchronization_allowed(routine, info_array):\n # `info_array` contains integers, the status codes of a cuBLAS routine\n # call. 
It is referrd to as \"infoArray\" or \"devInfoArray\" in the official\n # cuBLAS documentation.\n assert isinstance(info_array, core.ndarray)\n assert info_array.ndim == 1\n\n config_linalg = cupyx._ufunc_config.get_config_linalg()\n # Only 'ignore' and 'raise' are currently supported.\n if config_linalg == 'ignore':\n return\n\n assert config_linalg == 'raise'\n if (info_array != 0).any():\n raise linalg.LinAlgError(\n 'Error reported by {} in cuBLAS. infoArray/devInfoArray = {}.'\n ' Please refer to the cuBLAS documentation.'.format(\n routine.__name__, info_array))\n\n\n_tril_kernel = core.ElementwiseKernel(\n 'int64 k', 'S x',\n 'x = (_ind.get()[1] - _ind.get()[0] <= k) ? x : 0',\n 'tril_kernel',\n reduce_dims=False\n)\n\n\ndef _tril(x, k=0):\n _tril_kernel(k, x)\n return x\n\n\n_triu_kernel = core.ElementwiseKernel(\n 'int64 k', 'S x',\n 'x = (_ind.get()[1] - _ind.get()[0] >= k) ? x : 0',\n 'triu_kernel',\n reduce_dims=False\n)\n\n\ndef _triu(x, k=0):\n _triu_kernel(k, x)\n return x\n", "path": "cupy/linalg/util.py"}, {"content": "import contextlib\nimport threading\n\n_config = threading.local()\n\n\ndef get_config_divide():\n try:\n value = _config.divide\n except AttributeError:\n value = _config.divide = None\n return value\n\n\ndef get_config_over():\n try:\n value = _config.over\n except AttributeError:\n value = _config.over = None\n return value\n\n\ndef get_config_under():\n try:\n value = _config.under\n except AttributeError:\n value = _config.under = None\n return value\n\n\ndef get_config_invalid():\n try:\n value = _config.invalid\n except AttributeError:\n value = _config.invalid = None\n return value\n\n\ndef get_config_linalg():\n # In favor of performance, the `devInfo` input/output from cuSOLVER routine\n # calls that is necessary to check the validity of the other outputs, are\n # ignored, as D2H copy incurring device synchronizations would otherwise be\n # required.\n try:\n value = _config.linalg\n except AttributeError:\n value = _config.linalg = 'ignore'\n return value\n\n\[email protected]\ndef errstate(*, divide=None, over=None, under=None, invalid=None, linalg=None):\n \"\"\"\n TODO(hvy): Write docs.\n \"\"\"\n old_state = seterr(\n divide=divide, over=over, under=under, invalid=invalid, linalg=linalg)\n try:\n yield # Return `None` similar to `numpy.errstate`.\n finally:\n seterr(**old_state)\n\n\ndef seterr(*, divide=None, over=None, under=None, invalid=None, linalg=None):\n \"\"\"\n TODO(hvy): Write docs.\n \"\"\"\n if divide is not None:\n raise NotImplementedError()\n if over is not None:\n raise NotImplementedError()\n if under is not None:\n raise NotImplementedError()\n if invalid is not None:\n raise NotImplementedError()\n if linalg is not None:\n if linalg not in ('ignore', 'raise'):\n raise NotImplementedError()\n\n old_state = geterr()\n\n _config.divide = divide\n _config.under = under\n _config.over = over\n _config.invalid = invalid\n _config.linalg = linalg\n\n return old_state\n\n\ndef geterr():\n \"\"\"\n TODO(hvy): Write docs.\n \"\"\"\n return dict(\n divide=get_config_divide(),\n over=get_config_over(),\n under=get_config_under(),\n invalid=get_config_invalid(),\n linalg=get_config_linalg(),\n )\n", "path": "cupyx/_ufunc_config.py"}]} | 2,960 | 859 |
gh_patches_debug_16220 | rasdani/github-patches | git_diff | pypa__pipenv-1426 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update internal dotenv (or make it a dependency)
Variables in a `.env` file whose values are single-quoted and contain spaces are not loaded into the environment. If double quotes are used instead, the variables are loaded as expected.
##### Describe your environment
1. ubuntu 17.10
1. Python version: `$ python -V` - 3.6.4
1. Pipenv version: `$ pipenv --version` - 9.0.3
##### Expected result
All variables in `.env` would be loaded
##### Actual result
Variables in `.env` that are single-quoted and contain spaces are not loaded
##### Steps to replicate
1. Given the following `.env` file:
```
FOO='Hello World'
BAR='Test'
BAZ="Hello World"
```
2. Running `pipenv shell`
3. `FOO` will not be set in the environment.
4. This happens because the `FOO` line doesn't match the regex used (`'^\s*(\w*)\s*=\s*("[^"]*"|[^\s]*)\s*$'`) in the parsing algorithm.
##### Additional information
I tracked this issue down by stepping through the dotenv loading code used by pipenv. The line below shows that only double-quoted values are allowed to contain spaces:
https://github.com/pypa/pipenv/blob/master/pipenv/patched/dotenv/main.py#L100
This parsing algorithm diverges a bit from the official dotenv logic:
https://github.com/theskumar/python-dotenv/blob/master/dotenv/main.py#L98
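
For what it's worth, here is a minimal sketch of a widened pattern that also accepts single-quoted values; this is only an illustration, not a tested patch against pipenv:

```python
import re

# Sketch only: widen the alternation so that a single-quoted value (which may
# contain spaces) matches, alongside double-quoted and bare values.
VARIABLE_RE = re.compile(
    r"""^\s*(\w*)\s*=\s*("[^"]*"|'[^']*'|[^\s]*)\s*$""",
    flags=re.MULTILINE)

sample = """FOO='Hello World'
BAR='Test'
BAZ="Hello World"
"""

print(VARIABLE_RE.findall(sample))
# -> [('FOO', "'Hello World'"), ('BAR', "'Test'"), ('BAZ', '"Hello World"')]
```

The surrounding quotes are kept by the pattern itself; the existing `decode_escaped(v[1:-1])` step in `parse_dotenv()` would still strip them afterwards.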
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pipenv/patched/dotenv/main.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import
3
4 import codecs
5 import os
6 import sys
7 import warnings
8 import re
9 from collections import OrderedDict
10
11 __escape_decoder = codecs.getdecoder('unicode_escape')
12 __posix_variable = re.compile('\$\{[^\}]*\}')
13
14
15 def decode_escaped(escaped):
16 return __escape_decoder(escaped)[0]
17
18
19 def load_dotenv(dotenv_path, verbose=False, override=False):
20 """
21 Read a .env file and load into os.environ.
22 """
23 if not os.path.exists(dotenv_path):
24 if verbose:
25 warnings.warn("Not loading %s - it doesn't exist." % dotenv_path)
26 return None
27 for k, v in dotenv_values(dotenv_path).items():
28 if override:
29 os.environ[k] = v
30 else:
31 os.environ.setdefault(k, v)
32 return True
33
34
35 def get_key(dotenv_path, key_to_get):
36 """
37 Gets the value of a given key from the given .env
38
39 If the .env path given doesn't exist, fails
40 """
41 key_to_get = str(key_to_get)
42 if not os.path.exists(dotenv_path):
43 warnings.warn("can't read %s - it doesn't exist." % dotenv_path)
44 return None
45 dotenv_as_dict = dotenv_values(dotenv_path)
46 if key_to_get in dotenv_as_dict:
47 return dotenv_as_dict[key_to_get]
48 else:
49 warnings.warn("key %s not found in %s." % (key_to_get, dotenv_path))
50 return None
51
52
53 def set_key(dotenv_path, key_to_set, value_to_set, quote_mode="always"):
54 """
55 Adds or Updates a key/value to the given .env
56
57 If the .env path given doesn't exist, fails instead of risking creating
58 an orphan .env somewhere in the filesystem
59 """
60 key_to_set = str(key_to_set)
61 value_to_set = str(value_to_set).strip("'").strip('"')
62 if not os.path.exists(dotenv_path):
63 warnings.warn("can't write to %s - it doesn't exist." % dotenv_path)
64 return None, key_to_set, value_to_set
65 dotenv_as_dict = OrderedDict(parse_dotenv(dotenv_path))
66 dotenv_as_dict[key_to_set] = value_to_set
67 success = flatten_and_write(dotenv_path, dotenv_as_dict, quote_mode)
68 return success, key_to_set, value_to_set
69
70
71 def unset_key(dotenv_path, key_to_unset, quote_mode="always"):
72 """
73 Removes a given key from the given .env
74
75 If the .env path given doesn't exist, fails
76 If the given key doesn't exist in the .env, fails
77 """
78 key_to_unset = str(key_to_unset)
79 if not os.path.exists(dotenv_path):
80 warnings.warn("can't delete from %s - it doesn't exist." % dotenv_path)
81 return None, key_to_unset
82 dotenv_as_dict = dotenv_values(dotenv_path)
83 if key_to_unset in dotenv_as_dict:
84 dotenv_as_dict.pop(key_to_unset, None)
85 else:
86 warnings.warn("key %s not removed from %s - key doesn't exist." % (key_to_unset, dotenv_path))
87 return None, key_to_unset
88 success = flatten_and_write(dotenv_path, dotenv_as_dict, quote_mode)
89 return success, key_to_unset
90
91
92 def dotenv_values(dotenv_path):
93 values = OrderedDict(parse_dotenv(dotenv_path))
94 values = resolve_nested_variables(values)
95 return values
96
97
98 def parse_dotenv(dotenv_path):
99 with open(dotenv_path) as f:
100 for k, v in re.findall('^\s*(\w*)\s*=\s*("[^"]*"|[^\s]*)\s*$', f.read(), flags=re.MULTILINE):
101 if len(v) > 0:
102 quoted = v[0] == v[len(v) - 1] in ['"', "'"]
103
104 if quoted:
105 v = decode_escaped(v[1:-1])
106
107 yield k, v
108
109
110 def resolve_nested_variables(values):
111 def _replacement(name):
112 """
113 get appropriate value for a variable name.
114 first search in environ, if not found,
115 then look into the dotenv variables
116 """
117 ret = os.getenv(name, values.get(name, ""))
118 return ret
119
120 def _re_sub_callback(match_object):
121 """
122 From a match object gets the variable name and returns
123 the correct replacement
124 """
125 return _replacement(match_object.group()[2:-1])
126
127 for k, v in values.items():
128 values[k] = __posix_variable.sub(_re_sub_callback, v)
129
130 return values
131
132
133 def flatten_and_write(dotenv_path, dotenv_as_dict, quote_mode="always"):
134 with open(dotenv_path, "w") as f:
135 for k, v in dotenv_as_dict.items():
136 _mode = quote_mode
137 if _mode == "auto" and " " in v:
138 _mode = "always"
139 str_format = '%s="%s"\n' if _mode == "always" else '%s=%s\n'
140 f.write(str_format % (k, v))
141 return True
142
143
144 def _walk_to_root(path):
145 """
146 Yield directories starting from the given directory up to the root
147 """
148 if not os.path.exists(path):
149 raise IOError('Starting path not found')
150
151 if os.path.isfile(path):
152 path = os.path.dirname(path)
153
154 last_dir = None
155 current_dir = os.path.abspath(path)
156 while last_dir != current_dir:
157 yield current_dir
158 parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
159 last_dir, current_dir = current_dir, parent_dir
160
161
162 def find_dotenv(filename='.env', raise_error_if_not_found=False, usecwd=False):
163 """
164 Search in increasingly higher folders for the given file
165
166 Returns path to the file if found, or an empty string otherwise
167 """
168 if usecwd or '__file__' not in globals():
169 # should work without __file__, e.g. in REPL or IPython notebook
170 path = os.getcwd()
171 else:
172 # will work for .py files
173 frame_filename = sys._getframe().f_back.f_code.co_filename
174 path = os.path.dirname(os.path.abspath(frame_filename))
175
176 for dirname in _walk_to_root(path):
177 check_path = os.path.join(dirname, filename)
178 if os.path.exists(check_path):
179 return check_path
180
181 if raise_error_if_not_found:
182 raise IOError('File not found')
183
184 return ''
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pipenv/patched/dotenv/main.py b/pipenv/patched/dotenv/main.py
--- a/pipenv/patched/dotenv/main.py
+++ b/pipenv/patched/dotenv/main.py
@@ -10,6 +10,8 @@
__escape_decoder = codecs.getdecoder('unicode_escape')
__posix_variable = re.compile('\$\{[^\}]*\}')
+__variable_declaration = re.compile('^\s*(\w*)\s*=\s*("[^"]*"|\'[^\']*\'|[^\s]*)\s*$',
+ flags=re.MULTILINE)
def decode_escaped(escaped):
@@ -97,7 +99,7 @@
def parse_dotenv(dotenv_path):
with open(dotenv_path) as f:
- for k, v in re.findall('^\s*(\w*)\s*=\s*("[^"]*"|[^\s]*)\s*$', f.read(), flags=re.MULTILINE):
+ for k, v in __variable_declaration.findall(f.read()):
if len(v) > 0:
quoted = v[0] == v[len(v) - 1] in ['"', "'"]
| {"golden_diff": "diff --git a/pipenv/patched/dotenv/main.py b/pipenv/patched/dotenv/main.py\n--- a/pipenv/patched/dotenv/main.py\n+++ b/pipenv/patched/dotenv/main.py\n@@ -10,6 +10,8 @@\n \n __escape_decoder = codecs.getdecoder('unicode_escape')\n __posix_variable = re.compile('\\$\\{[^\\}]*\\}')\n+__variable_declaration = re.compile('^\\s*(\\w*)\\s*=\\s*(\"[^\"]*\"|\\'[^\\']*\\'|[^\\s]*)\\s*$',\n+ flags=re.MULTILINE)\n \n \n def decode_escaped(escaped):\n@@ -97,7 +99,7 @@\n \n def parse_dotenv(dotenv_path):\n with open(dotenv_path) as f:\n- for k, v in re.findall('^\\s*(\\w*)\\s*=\\s*(\"[^\"]*\"|[^\\s]*)\\s*$', f.read(), flags=re.MULTILINE):\n+ for k, v in __variable_declaration.findall(f.read()):\n if len(v) > 0:\n quoted = v[0] == v[len(v) - 1] in ['\"', \"'\"]\n", "issue": "Update internal dotenv (or make it a dependency)\nVariables in a `.env` file that have single-quoted spaces are not loaded into the environment. If double quotes are used, then the variables are loaded into the environment.\r\n\r\n##### Describe your environment\r\n\r\n1. ubuntu 17.10\r\n1. Python version: `$ python -V` - 3.6.4\r\n1. Pipenv version: `$ pipenv --version` - 9.0.3\r\n\r\n##### Expected result\r\n\r\nAll variables in `.env` would be loaded\r\n\r\n##### Actual result\r\n\r\nVariables in `.env` that are single-quoted and contain spaces are not loaded\r\n\r\n##### Steps to replicate\r\n\r\n1. Given the following `.env` file:\r\n\r\n```\r\nFOO='Hello World'\r\nBAR='Test'\r\nBAZ=\"Hello World\"\r\n```\r\n\r\n2. Running `pipenv shell`\r\n3. `FOO` will not be set in the environment.\r\n4. This happens because the `FOO` line doesn't match the regex used (`'^\\s*(\\w*)\\s*=\\s*(\"[^\"]*\"|[^\\s]*)\\s*$'`) in the parsing algorithm. \r\n\r\n##### Additional information\r\n\r\nI tracked this issue down by stepping through the dotenv loading code used by pipenv. 
When I found this line of code, I learned that only double quotes would allow spaces:\r\n\r\nhttps://github.com/pypa/pipenv/blob/master/pipenv/patched/dotenv/main.py#L100\r\n\r\nThis parsing algorithm diverges a bit from the official dotenv logic:\r\n\r\nhttps://github.com/theskumar/python-dotenv/blob/master/dotenv/main.py#L98\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nimport codecs\nimport os\nimport sys\nimport warnings\nimport re\nfrom collections import OrderedDict\n\n__escape_decoder = codecs.getdecoder('unicode_escape')\n__posix_variable = re.compile('\\$\\{[^\\}]*\\}')\n\n\ndef decode_escaped(escaped):\n return __escape_decoder(escaped)[0]\n\n\ndef load_dotenv(dotenv_path, verbose=False, override=False):\n \"\"\"\n Read a .env file and load into os.environ.\n \"\"\"\n if not os.path.exists(dotenv_path):\n if verbose:\n warnings.warn(\"Not loading %s - it doesn't exist.\" % dotenv_path)\n return None\n for k, v in dotenv_values(dotenv_path).items():\n if override:\n os.environ[k] = v\n else:\n os.environ.setdefault(k, v)\n return True\n\n\ndef get_key(dotenv_path, key_to_get):\n \"\"\"\n Gets the value of a given key from the given .env\n\n If the .env path given doesn't exist, fails\n \"\"\"\n key_to_get = str(key_to_get)\n if not os.path.exists(dotenv_path):\n warnings.warn(\"can't read %s - it doesn't exist.\" % dotenv_path)\n return None\n dotenv_as_dict = dotenv_values(dotenv_path)\n if key_to_get in dotenv_as_dict:\n return dotenv_as_dict[key_to_get]\n else:\n warnings.warn(\"key %s not found in %s.\" % (key_to_get, dotenv_path))\n return None\n\n\ndef set_key(dotenv_path, key_to_set, value_to_set, quote_mode=\"always\"):\n \"\"\"\n Adds or Updates a key/value to the given .env\n\n If the .env path given doesn't exist, fails instead of risking creating\n an orphan .env somewhere in the filesystem\n \"\"\"\n key_to_set = str(key_to_set)\n value_to_set = str(value_to_set).strip(\"'\").strip('\"')\n if not os.path.exists(dotenv_path):\n warnings.warn(\"can't write to %s - it doesn't exist.\" % dotenv_path)\n return None, key_to_set, value_to_set\n dotenv_as_dict = OrderedDict(parse_dotenv(dotenv_path))\n dotenv_as_dict[key_to_set] = value_to_set\n success = flatten_and_write(dotenv_path, dotenv_as_dict, quote_mode)\n return success, key_to_set, value_to_set\n\n\ndef unset_key(dotenv_path, key_to_unset, quote_mode=\"always\"):\n \"\"\"\n Removes a given key from the given .env\n\n If the .env path given doesn't exist, fails\n If the given key doesn't exist in the .env, fails\n \"\"\"\n key_to_unset = str(key_to_unset)\n if not os.path.exists(dotenv_path):\n warnings.warn(\"can't delete from %s - it doesn't exist.\" % dotenv_path)\n return None, key_to_unset\n dotenv_as_dict = dotenv_values(dotenv_path)\n if key_to_unset in dotenv_as_dict:\n dotenv_as_dict.pop(key_to_unset, None)\n else:\n warnings.warn(\"key %s not removed from %s - key doesn't exist.\" % (key_to_unset, dotenv_path))\n return None, key_to_unset\n success = flatten_and_write(dotenv_path, dotenv_as_dict, quote_mode)\n return success, key_to_unset\n\n\ndef dotenv_values(dotenv_path):\n values = OrderedDict(parse_dotenv(dotenv_path))\n values = resolve_nested_variables(values)\n return values\n\n\ndef parse_dotenv(dotenv_path):\n with open(dotenv_path) as f:\n for k, v in re.findall('^\\s*(\\w*)\\s*=\\s*(\"[^\"]*\"|[^\\s]*)\\s*$', f.read(), flags=re.MULTILINE):\n if len(v) > 0:\n quoted = v[0] == v[len(v) - 1] in ['\"', \"'\"]\n\n if quoted:\n v = 
decode_escaped(v[1:-1])\n\n yield k, v\n\n\ndef resolve_nested_variables(values):\n def _replacement(name):\n \"\"\"\n get appropriate value for a variable name.\n first search in environ, if not found,\n then look into the dotenv variables\n \"\"\"\n ret = os.getenv(name, values.get(name, \"\"))\n return ret\n\n def _re_sub_callback(match_object):\n \"\"\"\n From a match object gets the variable name and returns\n the correct replacement\n \"\"\"\n return _replacement(match_object.group()[2:-1])\n\n for k, v in values.items():\n values[k] = __posix_variable.sub(_re_sub_callback, v)\n\n return values\n\n\ndef flatten_and_write(dotenv_path, dotenv_as_dict, quote_mode=\"always\"):\n with open(dotenv_path, \"w\") as f:\n for k, v in dotenv_as_dict.items():\n _mode = quote_mode\n if _mode == \"auto\" and \" \" in v:\n _mode = \"always\"\n str_format = '%s=\"%s\"\\n' if _mode == \"always\" else '%s=%s\\n'\n f.write(str_format % (k, v))\n return True\n\n\ndef _walk_to_root(path):\n \"\"\"\n Yield directories starting from the given directory up to the root\n \"\"\"\n if not os.path.exists(path):\n raise IOError('Starting path not found')\n\n if os.path.isfile(path):\n path = os.path.dirname(path)\n\n last_dir = None\n current_dir = os.path.abspath(path)\n while last_dir != current_dir:\n yield current_dir\n parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))\n last_dir, current_dir = current_dir, parent_dir\n\n\ndef find_dotenv(filename='.env', raise_error_if_not_found=False, usecwd=False):\n \"\"\"\n Search in increasingly higher folders for the given file\n\n Returns path to the file if found, or an empty string otherwise\n \"\"\"\n if usecwd or '__file__' not in globals():\n # should work without __file__, e.g. in REPL or IPython notebook\n path = os.getcwd()\n else:\n # will work for .py files\n frame_filename = sys._getframe().f_back.f_code.co_filename\n path = os.path.dirname(os.path.abspath(frame_filename))\n\n for dirname in _walk_to_root(path):\n check_path = os.path.join(dirname, filename)\n if os.path.exists(check_path):\n return check_path\n\n if raise_error_if_not_found:\n raise IOError('File not found')\n\n return ''\n", "path": "pipenv/patched/dotenv/main.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nimport codecs\nimport os\nimport sys\nimport warnings\nimport re\nfrom collections import OrderedDict\n\n__escape_decoder = codecs.getdecoder('unicode_escape')\n__posix_variable = re.compile('\\$\\{[^\\}]*\\}')\n__variable_declaration = re.compile('^\\s*(\\w*)\\s*=\\s*(\"[^\"]*\"|\\'[^\\']*\\'|[^\\s]*)\\s*$',\n flags=re.MULTILINE)\n\n\ndef decode_escaped(escaped):\n return __escape_decoder(escaped)[0]\n\n\ndef load_dotenv(dotenv_path, verbose=False, override=False):\n \"\"\"\n Read a .env file and load into os.environ.\n \"\"\"\n if not os.path.exists(dotenv_path):\n if verbose:\n warnings.warn(\"Not loading %s - it doesn't exist.\" % dotenv_path)\n return None\n for k, v in dotenv_values(dotenv_path).items():\n if override:\n os.environ[k] = v\n else:\n os.environ.setdefault(k, v)\n return True\n\n\ndef get_key(dotenv_path, key_to_get):\n \"\"\"\n Gets the value of a given key from the given .env\n\n If the .env path given doesn't exist, fails\n \"\"\"\n key_to_get = str(key_to_get)\n if not os.path.exists(dotenv_path):\n warnings.warn(\"can't read %s - it doesn't exist.\" % dotenv_path)\n return None\n dotenv_as_dict = dotenv_values(dotenv_path)\n if key_to_get in dotenv_as_dict:\n return 
dotenv_as_dict[key_to_get]\n else:\n warnings.warn(\"key %s not found in %s.\" % (key_to_get, dotenv_path))\n return None\n\n\ndef set_key(dotenv_path, key_to_set, value_to_set, quote_mode=\"always\"):\n \"\"\"\n Adds or Updates a key/value to the given .env\n\n If the .env path given doesn't exist, fails instead of risking creating\n an orphan .env somewhere in the filesystem\n \"\"\"\n key_to_set = str(key_to_set)\n value_to_set = str(value_to_set).strip(\"'\").strip('\"')\n if not os.path.exists(dotenv_path):\n warnings.warn(\"can't write to %s - it doesn't exist.\" % dotenv_path)\n return None, key_to_set, value_to_set\n dotenv_as_dict = OrderedDict(parse_dotenv(dotenv_path))\n dotenv_as_dict[key_to_set] = value_to_set\n success = flatten_and_write(dotenv_path, dotenv_as_dict, quote_mode)\n return success, key_to_set, value_to_set\n\n\ndef unset_key(dotenv_path, key_to_unset, quote_mode=\"always\"):\n \"\"\"\n Removes a given key from the given .env\n\n If the .env path given doesn't exist, fails\n If the given key doesn't exist in the .env, fails\n \"\"\"\n key_to_unset = str(key_to_unset)\n if not os.path.exists(dotenv_path):\n warnings.warn(\"can't delete from %s - it doesn't exist.\" % dotenv_path)\n return None, key_to_unset\n dotenv_as_dict = dotenv_values(dotenv_path)\n if key_to_unset in dotenv_as_dict:\n dotenv_as_dict.pop(key_to_unset, None)\n else:\n warnings.warn(\"key %s not removed from %s - key doesn't exist.\" % (key_to_unset, dotenv_path))\n return None, key_to_unset\n success = flatten_and_write(dotenv_path, dotenv_as_dict, quote_mode)\n return success, key_to_unset\n\n\ndef dotenv_values(dotenv_path):\n values = OrderedDict(parse_dotenv(dotenv_path))\n values = resolve_nested_variables(values)\n return values\n\n\ndef parse_dotenv(dotenv_path):\n with open(dotenv_path) as f:\n for k, v in __variable_declaration.findall(f.read()):\n if len(v) > 0:\n quoted = v[0] == v[len(v) - 1] in ['\"', \"'\"]\n\n if quoted:\n v = decode_escaped(v[1:-1])\n\n yield k, v\n\n\ndef resolve_nested_variables(values):\n def _replacement(name):\n \"\"\"\n get appropriate value for a variable name.\n first search in environ, if not found,\n then look into the dotenv variables\n \"\"\"\n ret = os.getenv(name, values.get(name, \"\"))\n return ret\n\n def _re_sub_callback(match_object):\n \"\"\"\n From a match object gets the variable name and returns\n the correct replacement\n \"\"\"\n return _replacement(match_object.group()[2:-1])\n\n for k, v in values.items():\n values[k] = __posix_variable.sub(_re_sub_callback, v)\n\n return values\n\n\ndef flatten_and_write(dotenv_path, dotenv_as_dict, quote_mode=\"always\"):\n with open(dotenv_path, \"w\") as f:\n for k, v in dotenv_as_dict.items():\n _mode = quote_mode\n if _mode == \"auto\" and \" \" in v:\n _mode = \"always\"\n str_format = '%s=\"%s\"\\n' if _mode == \"always\" else '%s=%s\\n'\n f.write(str_format % (k, v))\n return True\n\n\ndef _walk_to_root(path):\n \"\"\"\n Yield directories starting from the given directory up to the root\n \"\"\"\n if not os.path.exists(path):\n raise IOError('Starting path not found')\n\n if os.path.isfile(path):\n path = os.path.dirname(path)\n\n last_dir = None\n current_dir = os.path.abspath(path)\n while last_dir != current_dir:\n yield current_dir\n parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))\n last_dir, current_dir = current_dir, parent_dir\n\n\ndef find_dotenv(filename='.env', raise_error_if_not_found=False, usecwd=False):\n \"\"\"\n Search in increasingly higher folders 
for the given file\n\n Returns path to the file if found, or an empty string otherwise\n \"\"\"\n if usecwd or '__file__' not in globals():\n # should work without __file__, e.g. in REPL or IPython notebook\n path = os.getcwd()\n else:\n # will work for .py files\n frame_filename = sys._getframe().f_back.f_code.co_filename\n path = os.path.dirname(os.path.abspath(frame_filename))\n\n for dirname in _walk_to_root(path):\n check_path = os.path.join(dirname, filename)\n if os.path.exists(check_path):\n return check_path\n\n if raise_error_if_not_found:\n raise IOError('File not found')\n\n return ''\n", "path": "pipenv/patched/dotenv/main.py"}]} | 2,487 | 265 |
gh_patches_debug_40634 | rasdani/github-patches | git_diff | obspy__obspy-1903 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
get_datalogger_resp() and get_sensor_resp()
I appreciate the new NRL class on the ObsPy master branch. Great job!
It would be valuable to be able to pull the separate sensor and datalogger response stages into an ObsPy Response object, just as get_response() does for the combined case.
In some of my use cases the sensor is home-made and not part of the NRL library, while the datalogger is. With separate access to the datalogger stages, I could then append my own sensor stage using response.response_stages.append()
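To sketch the workflow I have in mind (the accessor name is only a suggestion, and the custom sensor stage is a placeholder I would build myself):

```python
from obspy.clients.nrl import NRL

nrl = NRL()  # remote NRL copy at http://ds.iris.edu/NRL by default

# Hypothetical accessor that returns only the datalogger side as a Response:
dl_resp = nrl.get_datalogger_response(
    ['REF TEK', 'RT 130 & 130-SMA', '1', '200'])

# `my_sensor_stage` stands for a stage I would build by hand for the
# home-made sensor (e.g. a PolesZerosResponseStage); construction omitted.
# dl_resp.response_stages.insert(0, my_sensor_stage)
# dl_resp.recalculate_overall_sensitivity()
```

This would mirror what get_response() already does internally, just with the sensor half supplied by the user instead of the NRL tree.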
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `obspy/clients/nrl/client.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 Client for accessing the Nominal Response Library (http://ds.iris.edu/NRL/).
5
6 :copyright:
7 Lloyd Carothers IRIS/PASSCAL, 2016
8 The ObsPy Development Team ([email protected])
9 :license:
10 GNU Lesser General Public License, Version 3
11 (http://www.gnu.org/copyleft/lesser.html)
12 """
13 from __future__ import (absolute_import, division, print_function,
14 unicode_literals)
15 from future.builtins import * # NOQA
16
17 import codecs
18 import io
19 import os
20 import sys
21 import warnings
22
23 import requests
24
25 import obspy
26 from obspy.core.compatibility import configparser
27 from obspy.core.inventory.util import _textwrap
28
29
30 # Simple cache for remote NRL access. The total data amount will always be
31 # fairly small so I don't think it needs any cache eviction for now.
32 _remote_nrl_cache = {}
33
34
35 class NRL(object):
36 """
37 NRL client base class for accessing the Nominal Response Library.
38
39 http://ds.iris.edu/NRL/
40
41 Created with a URL for remote access or filesystem accessing a local copy.
42 """
43 _index = 'index.txt'
44
45 def __new__(cls, root=None):
46 # Check if its a folder on the file-system.
47 if root and os.path.isdir(root):
48 return super(NRL, cls).__new__(LocalNRL)
49 # Otherwise delegate to the remote NRL client to deal with all kinds
50 # of remote resources (currently only HTTP).
51 return super(NRL, cls).__new__(RemoteNRL)
52
53 def __init__(self):
54 sensor_index = self._join(self.root, 'sensors', self._index)
55 self.sensors = self._parse_ini(sensor_index)
56
57 datalogger_index = self._join(self.root, 'dataloggers', self._index)
58 self.dataloggers = self._parse_ini(datalogger_index)
59
60 def __str__(self):
61 info = ['NRL library at ' + self.root]
62 if self.sensors is None:
63 info.append(' Sensors not parsed yet.')
64 else:
65 info.append(
66 ' Sensors: {} manufacturers'.format(len(self.sensors)))
67 if len(self.sensors):
68 keys = [key for key in sorted(self.sensors)]
69 lines = _textwrap("'" + "', '".join(keys) + "'",
70 initial_indent=' ',
71 subsequent_indent=' ')
72 info.extend(lines)
73 if self.dataloggers is None:
74 info.append(' Dataloggers not parsed yet.')
75 else:
76 info.append(' Dataloggers: {} manufacturers'.format(
77 len(self.dataloggers)))
78 if len(self.dataloggers):
79 keys = [key for key in sorted(self.dataloggers)]
80 lines = _textwrap("'" + "', '".join(keys) + "'",
81 initial_indent=' ',
82 subsequent_indent=' ')
83 info.extend(lines)
84 return '\n'.join(_i.rstrip() for _i in info)
85
86 def _repr_pretty_(self, p, cycle): # pragma: no cover
87 p.text(str(self))
88
89 def _choose(self, choice, path):
90 # Should return either a path or a resp
91 cp = self._get_cp_from_ini(path)
92 options = cp.options(choice)
93 if 'path' in options:
94 newpath = cp.get(choice, 'path')
95 elif 'resp' in options:
96 newpath = cp.get(choice, 'resp')
97 # Strip quotes of new path
98 newpath = self._clean_str(newpath)
99 path = os.path.dirname(path)
100 return self._join(path, newpath)
101
102 def _parse_ini(self, path):
103 nrl_dict = NRLDict(self)
104 cp = self._get_cp_from_ini(path)
105 for section in cp.sections():
106 options = sorted(cp.options(section))
107 if section.lower() == 'main':
108 if options not in (['question'],
109 ['detail', 'question']): # pragma: no cover
110 msg = "Unexpected structure of NRL file '{}'".format(path)
111 raise NotImplementedError(msg)
112 nrl_dict._question = self._clean_str(cp.get(section,
113 'question'))
114 continue
115 else:
116 if options == ['path']:
117 nrl_dict[section] = NRLPath(self._choose(section, path))
118 continue
119 # sometimes the description field is named 'description', but
120 # sometimes also 'descr'
121 elif options in (['description', 'resp'], ['descr', 'resp'],
122 ['resp']):
123 if 'descr' in options:
124 descr = cp.get(section, 'descr')
125 elif 'description' in options:
126 descr = cp.get(section, 'description')
127 else:
128 descr = '<no description>'
129 descr = self._clean_str(descr)
130 resp_path = self._choose(section, path)
131 nrl_dict[section] = (descr, resp_path)
132 continue
133 else: # pragma: no cover
134 msg = "Unexpected structure of NRL file '{}'".format(path)
135 raise NotImplementedError(msg)
136 return nrl_dict
137
138 def _clean_str(self, string):
139 return string.strip('\'"')
140
141 def get_datalogger_resp(self, datalogger_keys):
142 """
143 Get the RESP string of a datalogger by keys.
144
145 :type datalogger_keys: list of str
146 :rtype: str
147 """
148 datalogger = self.dataloggers
149 for key in datalogger_keys:
150 datalogger = datalogger[key]
151 return self._read_resp(datalogger[1])
152
153 def get_sensor_resp(self, sensor_keys):
154 """
155 Get the RESP string of a sensor by keys.
156
157 :type sensor_keys: list of str
158 :rtype: str
159 """
160 sensor = self.sensors
161 for key in sensor_keys:
162 sensor = sensor[key]
163 return self._read_resp(sensor[1])
164
165 def get_response(self, datalogger_keys, sensor_keys):
166 """
167 Get Response from NRL tree structure
168
169 :param datalogger_keys: List of data-loggers.
170 :type datalogger_keys: list[str]
171 :param sensor_keys: List of sensors.
172 :type sensor_keys: list[str]
173 :rtype: :class:`~obspy.core.inventory.response.Response`
174
175 >>> nrl = NRL()
176 >>> response = nrl.get_response(
177 ... sensor_keys=['Nanometrics', 'Trillium Compact', '120 s'],
178 ... datalogger_keys=['REF TEK', 'RT 130 & 130-SMA', '1', '200'])
179 >>> print(response) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
180 Channel Response
181 From M/S (Velocity in Meters per Second) to COUNTS (Digital Counts)
182 Overall Sensitivity: 4.74576e+08 defined at 1.000 Hz
183 10 stages:
184 Stage 1: PolesZerosResponseStage from M/S to V, gain: 754.3
185 Stage 2: ResponseStage from V to V, gain: 1
186 Stage 3: Coefficients... from V to COUNTS, gain: 629129
187 Stage 4: Coefficients... from COUNTS to COUNTS, gain: 1
188 Stage 5: Coefficients... from COUNTS to COUNTS, gain: 1
189 Stage 6: Coefficients... from COUNTS to COUNTS, gain: 1
190 Stage 7: Coefficients... from COUNTS to COUNTS, gain: 1
191 Stage 8: Coefficients... from COUNTS to COUNTS, gain: 1
192 Stage 9: Coefficients... from COUNTS to COUNTS, gain: 1
193 Stage 10: Coefficients... from COUNTS to COUNTS, gain: 1
194 """
195 # Parse both to inventory objects.
196 with io.BytesIO(
197 self.get_datalogger_resp(datalogger_keys).encode()) as buf:
198 buf.seek(0, 0)
199 dl_resp = obspy.read_inventory(buf, format="RESP")
200 with io.BytesIO(
201 self.get_sensor_resp(sensor_keys).encode()) as buf:
202 buf.seek(0, 0)
203 sensor_resp = obspy.read_inventory(buf, format="RESP")
204
205 # Both can by construction only contain a single channel with a
206 # response object.
207 dl_resp = dl_resp[0][0][0].response
208 sensor_resp = sensor_resp[0][0][0].response
209
210 # Combine both by replace stage one in the data logger with stage
211 # one of the sensor.
212 dl_resp.response_stages.pop(0)
213 dl_resp.response_stages.insert(0, sensor_resp.response_stages[0])
214 try:
215 dl_resp.recalculate_overall_sensitivity()
216 except ValueError:
217 msg = "Failed to recalculate overall sensitivity."
218 warnings.warn(msg)
219
220 return dl_resp
221
222
223 class NRLDict(dict):
224 def __init__(self, nrl):
225 self._nrl = nrl
226
227 def __str__(self):
228 if len(self):
229 if self._question:
230 info = ['{} ({} items):'.format(self._question, len(self))]
231 else:
232 info = ['{} items:'.format(len(self))]
233 texts = ["'{}'".format(k) for k in sorted(self.keys())]
234 info.extend(_textwrap(", ".join(texts), initial_indent=' ',
235 subsequent_indent=' '))
236 return '\n'.join(_i.rstrip() for _i in info)
237 else:
238 return '0 items.'
239
240 def _repr_pretty_(self, p, cycle): # pragma: no cover
241 p.text(str(self))
242
243 def __getitem__(self, name):
244 value = super(NRLDict, self).__getitem__(name)
245 # if encountering a not yet parsed NRL Path, expand it now
246 if isinstance(value, NRLPath):
247 value = self._nrl._parse_ini(value)
248 self[name] = value
249 return value
250
251
252 class NRLPath(str):
253 pass
254
255
256 class LocalNRL(NRL):
257 """
258 Subclass of NRL for accessing local copy NRL.
259 """
260 def __init__(self, root):
261 self.root = root
262 self._join = os.path.join
263 super(self.__class__, self).__init__()
264
265 def _get_cp_from_ini(self, path):
266 """
267 Returns a configparser from a path to an index.txt
268 """
269 cp = configparser.SafeConfigParser()
270 with codecs.open(path, mode='r', encoding='UTF-8') as f:
271 if sys.version_info.major == 2: # pragma: no cover
272 cp.readfp(f)
273 else: # pragma: no cover
274 cp.read_file(f)
275 return cp
276
277 def _read_resp(self, path):
278 # Returns Unicode string of RESP
279 with open(path, 'r') as f:
280 return f.read()
281
282
283 class RemoteNRL(NRL):
284 """
285 Subclass of NRL for accessing remote copy of NRL.
286 """
287 def __init__(self, root='http://ds.iris.edu/NRL'):
288 self.root = root
289 super(self.__class__, self).__init__()
290
291 def _download(self, url):
292 """
293 Download service with basic cache.
294 """
295 if url not in _remote_nrl_cache:
296 response = requests.get(url)
297 _remote_nrl_cache[url] = response.text
298 return _remote_nrl_cache[url]
299
300 def _join(self, *paths):
301 url = paths[0]
302 for path in paths[1:]:
303 url = requests.compat.urljoin(url + '/', path)
304 return url
305
306 def _get_cp_from_ini(self, path):
307 '''
308 Returns a configparser from a path to an index.txt
309 '''
310 cp = configparser.SafeConfigParser()
311 with io.StringIO(self._download(path)) as buf:
312 if sys.version_info.major == 2: # pragma: no cover
313 cp.readfp(buf)
314 else: # pragma: no cover
315 cp.read_file(buf)
316 return cp
317
318 def _read_resp(self, path):
319 return self._download(path)
320
321
322 if __name__ == "__main__": # pragma: no cover
323 import doctest
324 doctest.testmod(exclude_empty=True)
325
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/obspy/clients/nrl/client.py b/obspy/clients/nrl/client.py
--- a/obspy/clients/nrl/client.py
+++ b/obspy/clients/nrl/client.py
@@ -138,29 +138,37 @@
def _clean_str(self, string):
return string.strip('\'"')
- def get_datalogger_resp(self, datalogger_keys):
+ def get_datalogger_response(self, datalogger_keys):
"""
- Get the RESP string of a datalogger by keys.
+ Get the datalogger response.
:type datalogger_keys: list of str
- :rtype: str
+ :rtype: :class:`~obspy.core.inventory.response.Response`
"""
datalogger = self.dataloggers
for key in datalogger_keys:
datalogger = datalogger[key]
- return self._read_resp(datalogger[1])
- def get_sensor_resp(self, sensor_keys):
+ # Parse to an inventory object and return a response object.
+ with io.BytesIO(self._read_resp(datalogger[1]).encode()) as buf:
+ buf.seek(0, 0)
+ return obspy.read_inventory(buf, format="RESP")[0][0][0].response
+
+ def get_sensor_response(self, sensor_keys):
"""
- Get the RESP string of a sensor by keys.
+ Get the sensor response.
:type sensor_keys: list of str
- :rtype: str
+ :rtype: :class:`~obspy.core.inventory.response.Response`
"""
sensor = self.sensors
for key in sensor_keys:
sensor = sensor[key]
- return self._read_resp(sensor[1])
+
+ # Parse to an inventory object and return a response object.
+ with io.BytesIO(self._read_resp(sensor[1]).encode()) as buf:
+ buf.seek(0, 0)
+ return obspy.read_inventory(buf, format="RESP")[0][0][0].response
def get_response(self, datalogger_keys, sensor_keys):
"""
@@ -192,20 +200,8 @@
Stage 9: Coefficients... from COUNTS to COUNTS, gain: 1
Stage 10: Coefficients... from COUNTS to COUNTS, gain: 1
"""
- # Parse both to inventory objects.
- with io.BytesIO(
- self.get_datalogger_resp(datalogger_keys).encode()) as buf:
- buf.seek(0, 0)
- dl_resp = obspy.read_inventory(buf, format="RESP")
- with io.BytesIO(
- self.get_sensor_resp(sensor_keys).encode()) as buf:
- buf.seek(0, 0)
- sensor_resp = obspy.read_inventory(buf, format="RESP")
-
- # Both can by construction only contain a single channel with a
- # response object.
- dl_resp = dl_resp[0][0][0].response
- sensor_resp = sensor_resp[0][0][0].response
+ dl_resp = self.get_datalogger_response(datalogger_keys)
+ sensor_resp = self.get_sensor_response(sensor_keys)
# Combine both by replace stage one in the data logger with stage
# one of the sensor.
@@ -266,7 +262,7 @@
"""
Returns a configparser from a path to an index.txt
"""
- cp = configparser.SafeConfigParser()
+ cp = configparser.ConfigParser()
with codecs.open(path, mode='r', encoding='UTF-8') as f:
if sys.version_info.major == 2: # pragma: no cover
cp.readfp(f)
| {"golden_diff": "diff --git a/obspy/clients/nrl/client.py b/obspy/clients/nrl/client.py\n--- a/obspy/clients/nrl/client.py\n+++ b/obspy/clients/nrl/client.py\n@@ -138,29 +138,37 @@\n def _clean_str(self, string):\n return string.strip('\\'\"')\n \n- def get_datalogger_resp(self, datalogger_keys):\n+ def get_datalogger_response(self, datalogger_keys):\n \"\"\"\n- Get the RESP string of a datalogger by keys.\n+ Get the datalogger response.\n \n :type datalogger_keys: list of str\n- :rtype: str\n+ :rtype: :class:`~obspy.core.inventory.response.Response`\n \"\"\"\n datalogger = self.dataloggers\n for key in datalogger_keys:\n datalogger = datalogger[key]\n- return self._read_resp(datalogger[1])\n \n- def get_sensor_resp(self, sensor_keys):\n+ # Parse to an inventory object and return a response object.\n+ with io.BytesIO(self._read_resp(datalogger[1]).encode()) as buf:\n+ buf.seek(0, 0)\n+ return obspy.read_inventory(buf, format=\"RESP\")[0][0][0].response\n+\n+ def get_sensor_response(self, sensor_keys):\n \"\"\"\n- Get the RESP string of a sensor by keys.\n+ Get the sensor response.\n \n :type sensor_keys: list of str\n- :rtype: str\n+ :rtype: :class:`~obspy.core.inventory.response.Response`\n \"\"\"\n sensor = self.sensors\n for key in sensor_keys:\n sensor = sensor[key]\n- return self._read_resp(sensor[1])\n+\n+ # Parse to an inventory object and return a response object.\n+ with io.BytesIO(self._read_resp(sensor[1]).encode()) as buf:\n+ buf.seek(0, 0)\n+ return obspy.read_inventory(buf, format=\"RESP\")[0][0][0].response\n \n def get_response(self, datalogger_keys, sensor_keys):\n \"\"\"\n@@ -192,20 +200,8 @@\n Stage 9: Coefficients... from COUNTS to COUNTS, gain: 1\n Stage 10: Coefficients... from COUNTS to COUNTS, gain: 1\n \"\"\"\n- # Parse both to inventory objects.\n- with io.BytesIO(\n- self.get_datalogger_resp(datalogger_keys).encode()) as buf:\n- buf.seek(0, 0)\n- dl_resp = obspy.read_inventory(buf, format=\"RESP\")\n- with io.BytesIO(\n- self.get_sensor_resp(sensor_keys).encode()) as buf:\n- buf.seek(0, 0)\n- sensor_resp = obspy.read_inventory(buf, format=\"RESP\")\n-\n- # Both can by construction only contain a single channel with a\n- # response object.\n- dl_resp = dl_resp[0][0][0].response\n- sensor_resp = sensor_resp[0][0][0].response\n+ dl_resp = self.get_datalogger_response(datalogger_keys)\n+ sensor_resp = self.get_sensor_response(sensor_keys)\n \n # Combine both by replace stage one in the data logger with stage\n # one of the sensor.\n@@ -266,7 +262,7 @@\n \"\"\"\n Returns a configparser from a path to an index.txt\n \"\"\"\n- cp = configparser.SafeConfigParser()\n+ cp = configparser.ConfigParser()\n with codecs.open(path, mode='r', encoding='UTF-8') as f:\n if sys.version_info.major == 2: # pragma: no cover\n cp.readfp(f)\n", "issue": "get_datalogger_resp() and get_sensor_resp()\nI appreciate the new NRL class in the Obspy master.. great job!\r\n\r\nIt would be valuable to be able to pull separate sensor and datalogger response stages into a Obspy Response object, as get_response() does.\r\n\r\nIn some of my use cases, my sensor is home-made not part of the NRL library while the datalogger is. 
After this, I could append the stages using response.response_stages.append()\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nClient for accessing the Nominal Response Library (http://ds.iris.edu/NRL/).\n\n:copyright:\n Lloyd Carothers IRIS/PASSCAL, 2016\n The ObsPy Development Team ([email protected])\n:license:\n GNU Lesser General Public License, Version 3\n (http://www.gnu.org/copyleft/lesser.html)\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\n\nimport codecs\nimport io\nimport os\nimport sys\nimport warnings\n\nimport requests\n\nimport obspy\nfrom obspy.core.compatibility import configparser\nfrom obspy.core.inventory.util import _textwrap\n\n\n# Simple cache for remote NRL access. The total data amount will always be\n# fairly small so I don't think it needs any cache eviction for now.\n_remote_nrl_cache = {}\n\n\nclass NRL(object):\n \"\"\"\n NRL client base class for accessing the Nominal Response Library.\n\n http://ds.iris.edu/NRL/\n\n Created with a URL for remote access or filesystem accessing a local copy.\n \"\"\"\n _index = 'index.txt'\n\n def __new__(cls, root=None):\n # Check if its a folder on the file-system.\n if root and os.path.isdir(root):\n return super(NRL, cls).__new__(LocalNRL)\n # Otherwise delegate to the remote NRL client to deal with all kinds\n # of remote resources (currently only HTTP).\n return super(NRL, cls).__new__(RemoteNRL)\n\n def __init__(self):\n sensor_index = self._join(self.root, 'sensors', self._index)\n self.sensors = self._parse_ini(sensor_index)\n\n datalogger_index = self._join(self.root, 'dataloggers', self._index)\n self.dataloggers = self._parse_ini(datalogger_index)\n\n def __str__(self):\n info = ['NRL library at ' + self.root]\n if self.sensors is None:\n info.append(' Sensors not parsed yet.')\n else:\n info.append(\n ' Sensors: {} manufacturers'.format(len(self.sensors)))\n if len(self.sensors):\n keys = [key for key in sorted(self.sensors)]\n lines = _textwrap(\"'\" + \"', '\".join(keys) + \"'\",\n initial_indent=' ',\n subsequent_indent=' ')\n info.extend(lines)\n if self.dataloggers is None:\n info.append(' Dataloggers not parsed yet.')\n else:\n info.append(' Dataloggers: {} manufacturers'.format(\n len(self.dataloggers)))\n if len(self.dataloggers):\n keys = [key for key in sorted(self.dataloggers)]\n lines = _textwrap(\"'\" + \"', '\".join(keys) + \"'\",\n initial_indent=' ',\n subsequent_indent=' ')\n info.extend(lines)\n return '\\n'.join(_i.rstrip() for _i in info)\n\n def _repr_pretty_(self, p, cycle): # pragma: no cover\n p.text(str(self))\n\n def _choose(self, choice, path):\n # Should return either a path or a resp\n cp = self._get_cp_from_ini(path)\n options = cp.options(choice)\n if 'path' in options:\n newpath = cp.get(choice, 'path')\n elif 'resp' in options:\n newpath = cp.get(choice, 'resp')\n # Strip quotes of new path\n newpath = self._clean_str(newpath)\n path = os.path.dirname(path)\n return self._join(path, newpath)\n\n def _parse_ini(self, path):\n nrl_dict = NRLDict(self)\n cp = self._get_cp_from_ini(path)\n for section in cp.sections():\n options = sorted(cp.options(section))\n if section.lower() == 'main':\n if options not in (['question'],\n ['detail', 'question']): # pragma: no cover\n msg = \"Unexpected structure of NRL file '{}'\".format(path)\n raise NotImplementedError(msg)\n nrl_dict._question = self._clean_str(cp.get(section,\n 'question'))\n continue\n else:\n if 
options == ['path']:\n nrl_dict[section] = NRLPath(self._choose(section, path))\n continue\n # sometimes the description field is named 'description', but\n # sometimes also 'descr'\n elif options in (['description', 'resp'], ['descr', 'resp'],\n ['resp']):\n if 'descr' in options:\n descr = cp.get(section, 'descr')\n elif 'description' in options:\n descr = cp.get(section, 'description')\n else:\n descr = '<no description>'\n descr = self._clean_str(descr)\n resp_path = self._choose(section, path)\n nrl_dict[section] = (descr, resp_path)\n continue\n else: # pragma: no cover\n msg = \"Unexpected structure of NRL file '{}'\".format(path)\n raise NotImplementedError(msg)\n return nrl_dict\n\n def _clean_str(self, string):\n return string.strip('\\'\"')\n\n def get_datalogger_resp(self, datalogger_keys):\n \"\"\"\n Get the RESP string of a datalogger by keys.\n\n :type datalogger_keys: list of str\n :rtype: str\n \"\"\"\n datalogger = self.dataloggers\n for key in datalogger_keys:\n datalogger = datalogger[key]\n return self._read_resp(datalogger[1])\n\n def get_sensor_resp(self, sensor_keys):\n \"\"\"\n Get the RESP string of a sensor by keys.\n\n :type sensor_keys: list of str\n :rtype: str\n \"\"\"\n sensor = self.sensors\n for key in sensor_keys:\n sensor = sensor[key]\n return self._read_resp(sensor[1])\n\n def get_response(self, datalogger_keys, sensor_keys):\n \"\"\"\n Get Response from NRL tree structure\n\n :param datalogger_keys: List of data-loggers.\n :type datalogger_keys: list[str]\n :param sensor_keys: List of sensors.\n :type sensor_keys: list[str]\n :rtype: :class:`~obspy.core.inventory.response.Response`\n\n >>> nrl = NRL()\n >>> response = nrl.get_response(\n ... sensor_keys=['Nanometrics', 'Trillium Compact', '120 s'],\n ... datalogger_keys=['REF TEK', 'RT 130 & 130-SMA', '1', '200'])\n >>> print(response) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS\n Channel Response\n From M/S (Velocity in Meters per Second) to COUNTS (Digital Counts)\n Overall Sensitivity: 4.74576e+08 defined at 1.000 Hz\n 10 stages:\n Stage 1: PolesZerosResponseStage from M/S to V, gain: 754.3\n Stage 2: ResponseStage from V to V, gain: 1\n Stage 3: Coefficients... from V to COUNTS, gain: 629129\n Stage 4: Coefficients... from COUNTS to COUNTS, gain: 1\n Stage 5: Coefficients... from COUNTS to COUNTS, gain: 1\n Stage 6: Coefficients... from COUNTS to COUNTS, gain: 1\n Stage 7: Coefficients... from COUNTS to COUNTS, gain: 1\n Stage 8: Coefficients... from COUNTS to COUNTS, gain: 1\n Stage 9: Coefficients... from COUNTS to COUNTS, gain: 1\n Stage 10: Coefficients... 
from COUNTS to COUNTS, gain: 1\n \"\"\"\n # Parse both to inventory objects.\n with io.BytesIO(\n self.get_datalogger_resp(datalogger_keys).encode()) as buf:\n buf.seek(0, 0)\n dl_resp = obspy.read_inventory(buf, format=\"RESP\")\n with io.BytesIO(\n self.get_sensor_resp(sensor_keys).encode()) as buf:\n buf.seek(0, 0)\n sensor_resp = obspy.read_inventory(buf, format=\"RESP\")\n\n # Both can by construction only contain a single channel with a\n # response object.\n dl_resp = dl_resp[0][0][0].response\n sensor_resp = sensor_resp[0][0][0].response\n\n # Combine both by replace stage one in the data logger with stage\n # one of the sensor.\n dl_resp.response_stages.pop(0)\n dl_resp.response_stages.insert(0, sensor_resp.response_stages[0])\n try:\n dl_resp.recalculate_overall_sensitivity()\n except ValueError:\n msg = \"Failed to recalculate overall sensitivity.\"\n warnings.warn(msg)\n\n return dl_resp\n\n\nclass NRLDict(dict):\n def __init__(self, nrl):\n self._nrl = nrl\n\n def __str__(self):\n if len(self):\n if self._question:\n info = ['{} ({} items):'.format(self._question, len(self))]\n else:\n info = ['{} items:'.format(len(self))]\n texts = [\"'{}'\".format(k) for k in sorted(self.keys())]\n info.extend(_textwrap(\", \".join(texts), initial_indent=' ',\n subsequent_indent=' '))\n return '\\n'.join(_i.rstrip() for _i in info)\n else:\n return '0 items.'\n\n def _repr_pretty_(self, p, cycle): # pragma: no cover\n p.text(str(self))\n\n def __getitem__(self, name):\n value = super(NRLDict, self).__getitem__(name)\n # if encountering a not yet parsed NRL Path, expand it now\n if isinstance(value, NRLPath):\n value = self._nrl._parse_ini(value)\n self[name] = value\n return value\n\n\nclass NRLPath(str):\n pass\n\n\nclass LocalNRL(NRL):\n \"\"\"\n Subclass of NRL for accessing local copy NRL.\n \"\"\"\n def __init__(self, root):\n self.root = root\n self._join = os.path.join\n super(self.__class__, self).__init__()\n\n def _get_cp_from_ini(self, path):\n \"\"\"\n Returns a configparser from a path to an index.txt\n \"\"\"\n cp = configparser.SafeConfigParser()\n with codecs.open(path, mode='r', encoding='UTF-8') as f:\n if sys.version_info.major == 2: # pragma: no cover\n cp.readfp(f)\n else: # pragma: no cover\n cp.read_file(f)\n return cp\n\n def _read_resp(self, path):\n # Returns Unicode string of RESP\n with open(path, 'r') as f:\n return f.read()\n\n\nclass RemoteNRL(NRL):\n \"\"\"\n Subclass of NRL for accessing remote copy of NRL.\n \"\"\"\n def __init__(self, root='http://ds.iris.edu/NRL'):\n self.root = root\n super(self.__class__, self).__init__()\n\n def _download(self, url):\n \"\"\"\n Download service with basic cache.\n \"\"\"\n if url not in _remote_nrl_cache:\n response = requests.get(url)\n _remote_nrl_cache[url] = response.text\n return _remote_nrl_cache[url]\n\n def _join(self, *paths):\n url = paths[0]\n for path in paths[1:]:\n url = requests.compat.urljoin(url + '/', path)\n return url\n\n def _get_cp_from_ini(self, path):\n '''\n Returns a configparser from a path to an index.txt\n '''\n cp = configparser.SafeConfigParser()\n with io.StringIO(self._download(path)) as buf:\n if sys.version_info.major == 2: # pragma: no cover\n cp.readfp(buf)\n else: # pragma: no cover\n cp.read_file(buf)\n return cp\n\n def _read_resp(self, path):\n return self._download(path)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n import doctest\n doctest.testmod(exclude_empty=True)\n", "path": "obspy/clients/nrl/client.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- 
coding: utf-8 -*-\n\"\"\"\nClient for accessing the Nominal Response Library (http://ds.iris.edu/NRL/).\n\n:copyright:\n Lloyd Carothers IRIS/PASSCAL, 2016\n The ObsPy Development Team ([email protected])\n:license:\n GNU Lesser General Public License, Version 3\n (http://www.gnu.org/copyleft/lesser.html)\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\n\nimport codecs\nimport io\nimport os\nimport sys\nimport warnings\n\nimport requests\n\nimport obspy\nfrom obspy.core.compatibility import configparser\nfrom obspy.core.inventory.util import _textwrap\n\n\n# Simple cache for remote NRL access. The total data amount will always be\n# fairly small so I don't think it needs any cache eviction for now.\n_remote_nrl_cache = {}\n\n\nclass NRL(object):\n \"\"\"\n NRL client base class for accessing the Nominal Response Library.\n\n http://ds.iris.edu/NRL/\n\n Created with a URL for remote access or filesystem accessing a local copy.\n \"\"\"\n _index = 'index.txt'\n\n def __new__(cls, root=None):\n # Check if its a folder on the file-system.\n if root and os.path.isdir(root):\n return super(NRL, cls).__new__(LocalNRL)\n # Otherwise delegate to the remote NRL client to deal with all kinds\n # of remote resources (currently only HTTP).\n return super(NRL, cls).__new__(RemoteNRL)\n\n def __init__(self):\n sensor_index = self._join(self.root, 'sensors', self._index)\n self.sensors = self._parse_ini(sensor_index)\n\n datalogger_index = self._join(self.root, 'dataloggers', self._index)\n self.dataloggers = self._parse_ini(datalogger_index)\n\n def __str__(self):\n info = ['NRL library at ' + self.root]\n if self.sensors is None:\n info.append(' Sensors not parsed yet.')\n else:\n info.append(\n ' Sensors: {} manufacturers'.format(len(self.sensors)))\n if len(self.sensors):\n keys = [key for key in sorted(self.sensors)]\n lines = _textwrap(\"'\" + \"', '\".join(keys) + \"'\",\n initial_indent=' ',\n subsequent_indent=' ')\n info.extend(lines)\n if self.dataloggers is None:\n info.append(' Dataloggers not parsed yet.')\n else:\n info.append(' Dataloggers: {} manufacturers'.format(\n len(self.dataloggers)))\n if len(self.dataloggers):\n keys = [key for key in sorted(self.dataloggers)]\n lines = _textwrap(\"'\" + \"', '\".join(keys) + \"'\",\n initial_indent=' ',\n subsequent_indent=' ')\n info.extend(lines)\n return '\\n'.join(_i.rstrip() for _i in info)\n\n def _repr_pretty_(self, p, cycle): # pragma: no cover\n p.text(str(self))\n\n def _choose(self, choice, path):\n # Should return either a path or a resp\n cp = self._get_cp_from_ini(path)\n options = cp.options(choice)\n if 'path' in options:\n newpath = cp.get(choice, 'path')\n elif 'resp' in options:\n newpath = cp.get(choice, 'resp')\n # Strip quotes of new path\n newpath = self._clean_str(newpath)\n path = os.path.dirname(path)\n return self._join(path, newpath)\n\n def _parse_ini(self, path):\n nrl_dict = NRLDict(self)\n cp = self._get_cp_from_ini(path)\n for section in cp.sections():\n options = sorted(cp.options(section))\n if section.lower() == 'main':\n if options not in (['question'],\n ['detail', 'question']): # pragma: no cover\n msg = \"Unexpected structure of NRL file '{}'\".format(path)\n raise NotImplementedError(msg)\n nrl_dict._question = self._clean_str(cp.get(section,\n 'question'))\n continue\n else:\n if options == ['path']:\n nrl_dict[section] = NRLPath(self._choose(section, path))\n continue\n # sometimes the description field is named 
'description', but\n # sometimes also 'descr'\n elif options in (['description', 'resp'], ['descr', 'resp'],\n ['resp']):\n if 'descr' in options:\n descr = cp.get(section, 'descr')\n elif 'description' in options:\n descr = cp.get(section, 'description')\n else:\n descr = '<no description>'\n descr = self._clean_str(descr)\n resp_path = self._choose(section, path)\n nrl_dict[section] = (descr, resp_path)\n continue\n else: # pragma: no cover\n msg = \"Unexpected structure of NRL file '{}'\".format(path)\n raise NotImplementedError(msg)\n return nrl_dict\n\n def _clean_str(self, string):\n return string.strip('\\'\"')\n\n def get_datalogger_response(self, datalogger_keys):\n \"\"\"\n Get the datalogger response.\n\n :type datalogger_keys: list of str\n :rtype: :class:`~obspy.core.inventory.response.Response`\n \"\"\"\n datalogger = self.dataloggers\n for key in datalogger_keys:\n datalogger = datalogger[key]\n\n # Parse to an inventory object and return a response object.\n with io.BytesIO(self._read_resp(datalogger[1]).encode()) as buf:\n buf.seek(0, 0)\n return obspy.read_inventory(buf, format=\"RESP\")[0][0][0].response\n\n def get_sensor_response(self, sensor_keys):\n \"\"\"\n Get the sensor response.\n\n :type sensor_keys: list of str\n :rtype: :class:`~obspy.core.inventory.response.Response`\n \"\"\"\n sensor = self.sensors\n for key in sensor_keys:\n sensor = sensor[key]\n\n # Parse to an inventory object and return a response object.\n with io.BytesIO(self._read_resp(sensor[1]).encode()) as buf:\n buf.seek(0, 0)\n return obspy.read_inventory(buf, format=\"RESP\")[0][0][0].response\n\n def get_response(self, datalogger_keys, sensor_keys):\n \"\"\"\n Get Response from NRL tree structure\n\n :param datalogger_keys: List of data-loggers.\n :type datalogger_keys: list[str]\n :param sensor_keys: List of sensors.\n :type sensor_keys: list[str]\n :rtype: :class:`~obspy.core.inventory.response.Response`\n\n >>> nrl = NRL()\n >>> response = nrl.get_response(\n ... sensor_keys=['Nanometrics', 'Trillium Compact', '120 s'],\n ... datalogger_keys=['REF TEK', 'RT 130 & 130-SMA', '1', '200'])\n >>> print(response) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS\n Channel Response\n From M/S (Velocity in Meters per Second) to COUNTS (Digital Counts)\n Overall Sensitivity: 4.74576e+08 defined at 1.000 Hz\n 10 stages:\n Stage 1: PolesZerosResponseStage from M/S to V, gain: 754.3\n Stage 2: ResponseStage from V to V, gain: 1\n Stage 3: Coefficients... from V to COUNTS, gain: 629129\n Stage 4: Coefficients... from COUNTS to COUNTS, gain: 1\n Stage 5: Coefficients... from COUNTS to COUNTS, gain: 1\n Stage 6: Coefficients... from COUNTS to COUNTS, gain: 1\n Stage 7: Coefficients... from COUNTS to COUNTS, gain: 1\n Stage 8: Coefficients... from COUNTS to COUNTS, gain: 1\n Stage 9: Coefficients... from COUNTS to COUNTS, gain: 1\n Stage 10: Coefficients... 
from COUNTS to COUNTS, gain: 1\n \"\"\"\n dl_resp = self.get_datalogger_response(datalogger_keys)\n sensor_resp = self.get_sensor_response(sensor_keys)\n\n # Combine both by replace stage one in the data logger with stage\n # one of the sensor.\n dl_resp.response_stages.pop(0)\n dl_resp.response_stages.insert(0, sensor_resp.response_stages[0])\n try:\n dl_resp.recalculate_overall_sensitivity()\n except ValueError:\n msg = \"Failed to recalculate overall sensitivity.\"\n warnings.warn(msg)\n\n return dl_resp\n\n\nclass NRLDict(dict):\n def __init__(self, nrl):\n self._nrl = nrl\n\n def __str__(self):\n if len(self):\n if self._question:\n info = ['{} ({} items):'.format(self._question, len(self))]\n else:\n info = ['{} items:'.format(len(self))]\n texts = [\"'{}'\".format(k) for k in sorted(self.keys())]\n info.extend(_textwrap(\", \".join(texts), initial_indent=' ',\n subsequent_indent=' '))\n return '\\n'.join(_i.rstrip() for _i in info)\n else:\n return '0 items.'\n\n def _repr_pretty_(self, p, cycle): # pragma: no cover\n p.text(str(self))\n\n def __getitem__(self, name):\n value = super(NRLDict, self).__getitem__(name)\n # if encountering a not yet parsed NRL Path, expand it now\n if isinstance(value, NRLPath):\n value = self._nrl._parse_ini(value)\n self[name] = value\n return value\n\n\nclass NRLPath(str):\n pass\n\n\nclass LocalNRL(NRL):\n \"\"\"\n Subclass of NRL for accessing local copy NRL.\n \"\"\"\n def __init__(self, root):\n self.root = root\n self._join = os.path.join\n super(self.__class__, self).__init__()\n\n def _get_cp_from_ini(self, path):\n \"\"\"\n Returns a configparser from a path to an index.txt\n \"\"\"\n cp = configparser.ConfigParser()\n with codecs.open(path, mode='r', encoding='UTF-8') as f:\n if sys.version_info.major == 2: # pragma: no cover\n cp.readfp(f)\n else: # pragma: no cover\n cp.read_file(f)\n return cp\n\n def _read_resp(self, path):\n # Returns Unicode string of RESP\n with open(path, 'r') as f:\n return f.read()\n\n\nclass RemoteNRL(NRL):\n \"\"\"\n Subclass of NRL for accessing remote copy of NRL.\n \"\"\"\n def __init__(self, root='http://ds.iris.edu/NRL'):\n self.root = root\n super(self.__class__, self).__init__()\n\n def _download(self, url):\n \"\"\"\n Download service with basic cache.\n \"\"\"\n if url not in _remote_nrl_cache:\n response = requests.get(url)\n _remote_nrl_cache[url] = response.text\n return _remote_nrl_cache[url]\n\n def _join(self, *paths):\n url = paths[0]\n for path in paths[1:]:\n url = requests.compat.urljoin(url + '/', path)\n return url\n\n def _get_cp_from_ini(self, path):\n '''\n Returns a configparser from a path to an index.txt\n '''\n cp = configparser.SafeConfigParser()\n with io.StringIO(self._download(path)) as buf:\n if sys.version_info.major == 2: # pragma: no cover\n cp.readfp(buf)\n else: # pragma: no cover\n cp.read_file(buf)\n return cp\n\n def _read_resp(self, path):\n return self._download(path)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n import doctest\n doctest.testmod(exclude_empty=True)\n", "path": "obspy/clients/nrl/client.py"}]} | 3,948 | 833 |
gh_patches_debug_4921 | rasdani/github-patches | git_diff | ocadotechnology__aimmo-123 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Users should see which avatar is theirs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `players/views.py`
Content:
```
1 import logging
2
3 from django.http import JsonResponse
4 from django.contrib.auth.decorators import login_required
5 from django.http import HttpResponse
6 from django.views.generic import TemplateView
7
8 import os
9
10 from models import Player
11 from . import app_settings
12
13
14 def _post_code_success_response(message):
15 return create_response("SUCCESS", message)
16
17
18 def create_response(status, message):
19 response = {
20 "status": status,
21 "message": message
22 }
23 return JsonResponse(response)
24
25
26 @login_required
27 def code(request):
28 try:
29 player = request.user.player
30 except Player.DoesNotExist:
31 initial_code_file_name = os.path.join(
32 os.path.abspath(os.path.dirname(__file__)),
33 'avatar_examples/dumb_avatar.py',
34 )
35 with open(initial_code_file_name) as initial_code_file:
36 initial_code = initial_code_file.read()
37 player = Player.objects.create(user=request.user, code=initial_code)
38 if request.method == 'POST':
39 player.code = request.POST['code']
40 player.save()
41
42 return _post_code_success_response("Your code was saved!")
43 else:
44 return HttpResponse(player.code)
45
46
47 def games(request):
48 response = {
49 'main': {
50 'parameters': [],
51 'users': [
52 {
53 'id': player.user.pk,
54 'code': player.code,
55 } for player in Player.objects.all()
56 ]
57 }
58 }
59 return JsonResponse(response)
60
61
62 class WatchView(TemplateView):
63 template_name = 'players/watch.html'
64
65 def get_context_data(self, **kwargs):
66 context = super(WatchView, self).get_context_data(**kwargs)
67 context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION('main')
68 return context
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/players/views.py b/players/views.py
--- a/players/views.py
+++ b/players/views.py
@@ -65,4 +65,5 @@
def get_context_data(self, **kwargs):
context = super(WatchView, self).get_context_data(**kwargs)
context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION('main')
+ context['current_user_player_key'] = self.request.user.pk
return context
| {"golden_diff": "diff --git a/players/views.py b/players/views.py\n--- a/players/views.py\n+++ b/players/views.py\n@@ -65,4 +65,5 @@\n def get_context_data(self, **kwargs):\n context = super(WatchView, self).get_context_data(**kwargs)\n context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION('main')\n+ context['current_user_player_key'] = self.request.user.pk\n return context\n", "issue": "Users should see which avatar is theirs\n\n", "before_files": [{"content": "import logging\n\nfrom django.http import JsonResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom django.views.generic import TemplateView\n\nimport os\n\nfrom models import Player\nfrom . import app_settings\n\n\ndef _post_code_success_response(message):\n return create_response(\"SUCCESS\", message)\n\n\ndef create_response(status, message):\n response = {\n \"status\": status,\n \"message\": message\n }\n return JsonResponse(response)\n\n\n@login_required\ndef code(request):\n try:\n player = request.user.player\n except Player.DoesNotExist:\n initial_code_file_name = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n 'avatar_examples/dumb_avatar.py',\n )\n with open(initial_code_file_name) as initial_code_file:\n initial_code = initial_code_file.read()\n player = Player.objects.create(user=request.user, code=initial_code)\n if request.method == 'POST':\n player.code = request.POST['code']\n player.save()\n\n return _post_code_success_response(\"Your code was saved!\")\n else:\n return HttpResponse(player.code)\n\n\ndef games(request):\n response = {\n 'main': {\n 'parameters': [],\n 'users': [\n {\n 'id': player.user.pk,\n 'code': player.code,\n } for player in Player.objects.all()\n ]\n }\n }\n return JsonResponse(response)\n\n\nclass WatchView(TemplateView):\n template_name = 'players/watch.html'\n\n def get_context_data(self, **kwargs):\n context = super(WatchView, self).get_context_data(**kwargs)\n context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION('main')\n return context\n", "path": "players/views.py"}], "after_files": [{"content": "import logging\n\nfrom django.http import JsonResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom django.views.generic import TemplateView\n\nimport os\n\nfrom models import Player\nfrom . 
import app_settings\n\n\ndef _post_code_success_response(message):\n return create_response(\"SUCCESS\", message)\n\n\ndef create_response(status, message):\n response = {\n \"status\": status,\n \"message\": message\n }\n return JsonResponse(response)\n\n\n@login_required\ndef code(request):\n try:\n player = request.user.player\n except Player.DoesNotExist:\n initial_code_file_name = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n 'avatar_examples/dumb_avatar.py',\n )\n with open(initial_code_file_name) as initial_code_file:\n initial_code = initial_code_file.read()\n player = Player.objects.create(user=request.user, code=initial_code)\n if request.method == 'POST':\n player.code = request.POST['code']\n player.save()\n\n return _post_code_success_response(\"Your code was saved!\")\n else:\n return HttpResponse(player.code)\n\n\ndef games(request):\n response = {\n 'main': {\n 'parameters': [],\n 'users': [\n {\n 'id': player.user.pk,\n 'code': player.code,\n } for player in Player.objects.all()\n ]\n }\n }\n return JsonResponse(response)\n\n\nclass WatchView(TemplateView):\n template_name = 'players/watch.html'\n\n def get_context_data(self, **kwargs):\n context = super(WatchView, self).get_context_data(**kwargs)\n context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION('main')\n context['current_user_player_key'] = self.request.user.pk\n return context\n", "path": "players/views.py"}]} | 767 | 109 |
gh_patches_debug_65129 | rasdani/github-patches | git_diff | zulip__zulip-29386 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add clarification tooltip when settings can't be saved due to invalid Jitsi URL
In SETTINGS / ORGANIZATION SETTINGS > Other settings, we disable the "Save changes" button when the custom Jitsi URL is invalid. We should add a tooltip to the disabled button to explain why it is disabled: "Cannot save invalid Jitsi server URL."
<img width="809" alt="Screenshot 2023-11-02 at 10 31 14 PM" src="https://github.com/zulip/zulip/assets/2090066/b6bbb302-8b01-41ae-be98-1181497ecbf5">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/lib/capitalization.py`
Content:
```
1 import re
2 from typing import List, Match, Tuple
3
4 from bs4 import BeautifulSoup
5
6 # The phrases in this list will be ignored. The longest phrase is
7 # tried first; this removes the chance of smaller phrases changing
8 # the text before longer phrases are tried.
9 # The errors shown by `tools/check-capitalization` can be added to
10 # this list without any modification.
11 IGNORED_PHRASES = [
12 # Proper nouns and acronyms
13 r"API",
14 r"APNS",
15 r"Botserver",
16 r"Cookie Bot",
17 r"DevAuthBackend",
18 r"DSN",
19 r"Esc",
20 r"GCM",
21 r"GitHub",
22 r"Gravatar",
23 r"Help Center",
24 r"HTTP",
25 r"ID",
26 r"IDs",
27 r"Inbox",
28 r"IP",
29 r"JSON",
30 r"Kerberos",
31 r"LinkedIn",
32 r"LDAP",
33 r"Markdown",
34 r"OTP",
35 r"Pivotal",
36 r"Recent conversations",
37 r"DM",
38 r"DMs",
39 r"Slack",
40 r"Google",
41 r"Terms of Service",
42 r"Tuesday",
43 r"URL",
44 r"UUID",
45 r"Webathena",
46 r"WordPress",
47 r"Zephyr",
48 r"Zoom",
49 r"Zulip",
50 r"Zulip Server",
51 r"Zulip Account Security",
52 r"Zulip Security",
53 r"Zulip Cloud",
54 r"Zulip Cloud Standard",
55 r"Zulip Cloud Plus",
56 r"BigBlueButton",
57 # Code things
58 r"\.zuliprc",
59 # BeautifulSoup will remove <z-user> which is horribly confusing,
60 # so we need more of the sentence.
61 r"<z-user></z-user> will have the same role",
62 r"<z-user></z-user> will have the same properties",
63 # Things using "I"
64 r"I understand",
65 r"I'm",
66 r"I've",
67 r"Topics I participate in",
68 r"Topics I send a message to",
69 r"Topics I start",
70 # Specific short words
71 r"beta",
72 r"and",
73 r"bot",
74 r"e\.g\.",
75 r"enabled",
76 r"signups",
77 # Placeholders
78 r"keyword",
79 r"streamname",
80 r"user@example\.com",
81 r"example\.com",
82 r"acme",
83 # Fragments of larger strings
84 r"is …",
85 r"your subscriptions on your Streams page",
86 r"Add global time<br />Everyone sees global times in their own time zone\.",
87 r"user",
88 r"an unknown operating system",
89 r"Go to Settings",
90 r"find accounts for another email address",
91 # SPECIAL CASES
92 # Because topics usually are lower-case, this would look weird if it were capitalized
93 r"more topics",
94 # Used alone in a parenthetical where capitalized looks worse.
95 r"^deprecated$",
96 # We want the similar text in the Private Messages section to have the same capitalization.
97 r"more conversations",
98 r"back to streams",
99 # Capital 'i' looks weird in reminders popover
100 r"in 1 hour",
101 r"in 20 minutes",
102 r"in 3 hours",
103 # these are used as topics
104 r"^new streams$",
105 r"^stream events$",
106 # These are used as example short names (e.g. an uncapitalized context):
107 r"^marketing$",
108 r"^cookie$",
109 # Used to refer custom time limits
110 r"\bN\b",
111 # Capital c feels obtrusive in clear status option
112 r"clear",
113 r"group direct messages with \{recipient\}",
114 r"direct messages with \{recipient\}",
115 r"direct messages with yourself",
116 r"GIF",
117 # Emoji name placeholder
118 r"leafy green vegetable",
119 # Subdomain placeholder
120 r"your-organization-url",
121 # Used in invite modal
122 r"or",
123 # Used in GIPHY integration setting. GIFs Rating.
124 r"rated Y",
125 r"rated G",
126 r"rated PG",
127 r"rated PG13",
128 r"rated R",
129 # Used in GIPHY popover.
130 r"GIFs",
131 r"GIPHY",
132 # Used in our case studies
133 r"Technical University of Munich",
134 r"University of California San Diego",
135 # Used in stream creation form
136 r"email hidden",
137 # Use in compose box.
138 r"to send",
139 r"to add a new line",
140 # Used in showing Notification Bot read receipts message
141 "Notification Bot",
142 # Used in presence_enabled setting label
143 r"invisible mode off",
144 # Typeahead suggestions for "Pronouns" custom field type.
145 r"he/him",
146 r"she/her",
147 r"they/them",
148 # Used in message-move-time-limit setting label
149 r"does not apply to moderators and administrators",
150 # Used in message-delete-time-limit setting label
151 r"does not apply to administrators",
152 # Used as indicator with names for guest users.
153 r"guest",
154 # Used in pills for deactivated users.
155 r"deactivated",
156 # This is a reference to a setting/secret and should be lowercase.
157 r"zulip_org_id",
158 ]
159
160 # Sort regexes in descending order of their lengths. As a result, the
161 # longer phrases will be ignored first.
162 IGNORED_PHRASES.sort(key=len, reverse=True)
163
164 # Compile regexes to improve performance. This also extracts the
165 # text using BeautifulSoup and then removes extra whitespaces from
166 # it. This step enables us to add HTML in our regexes directly.
167 COMPILED_IGNORED_PHRASES = [
168 re.compile(" ".join(BeautifulSoup(regex, "lxml").text.split())) for regex in IGNORED_PHRASES
169 ]
170
171 SPLIT_BOUNDARY = "?.!" # Used to split string into sentences.
172 SPLIT_BOUNDARY_REGEX = re.compile(rf"[{SPLIT_BOUNDARY}]")
173
174 # Regexes which check capitalization in sentences.
175 DISALLOWED = [
176 r"^[a-z](?!\})", # Checks if the sentence starts with a lower case character.
177 r"^[A-Z][a-z]+[\sa-z0-9]+[A-Z]", # Checks if an upper case character exists
178 # after a lower case character when the first character is in upper case.
179 ]
180 DISALLOWED_REGEX = re.compile(r"|".join(DISALLOWED))
181
182 BANNED_WORDS = {
183 "realm": "The term realm should not appear in user-facing strings. Use organization instead.",
184 }
185
186
187 def get_safe_phrase(phrase: str) -> str:
188 """
189 Safe phrase is in lower case and doesn't contain characters which can
190 conflict with split boundaries. All conflicting characters are replaced
191 with low dash (_).
192 """
193 phrase = SPLIT_BOUNDARY_REGEX.sub("_", phrase)
194 return phrase.lower()
195
196
197 def replace_with_safe_phrase(matchobj: Match[str]) -> str:
198 """
199 The idea is to convert IGNORED_PHRASES into safe phrases, see
200 `get_safe_phrase()` function. The only exception is when the
201 IGNORED_PHRASE is at the start of the text or after a split
202 boundary; in this case, we change the first letter of the phrase
203 to upper case.
204 """
205 ignored_phrase = matchobj.group(0)
206 safe_string = get_safe_phrase(ignored_phrase)
207
208 start_index = matchobj.start()
209 complete_string = matchobj.string
210
211 is_string_start = start_index == 0
212 # We expect that there will be one space between split boundary
213 # and the next word.
214 punctuation = complete_string[max(start_index - 2, 0)]
215 is_after_split_boundary = punctuation in SPLIT_BOUNDARY
216 if is_string_start or is_after_split_boundary:
217 return safe_string.capitalize()
218
219 return safe_string
220
221
222 def get_safe_text(text: str) -> str:
223 """
224 This returns text which is rendered by BeautifulSoup and is in the
225 form that can be split easily and has all IGNORED_PHRASES processed.
226 """
227 soup = BeautifulSoup(text, "lxml")
228 text = " ".join(soup.text.split()) # Remove extra whitespaces.
229 for phrase_regex in COMPILED_IGNORED_PHRASES:
230 text = phrase_regex.sub(replace_with_safe_phrase, text)
231
232 return text
233
234
235 def is_capitalized(safe_text: str) -> bool:
236 sentences = SPLIT_BOUNDARY_REGEX.split(safe_text)
237 return not any(DISALLOWED_REGEX.search(sentence.strip()) for sentence in sentences)
238
239
240 def check_banned_words(text: str) -> List[str]:
241 lower_cased_text = text.lower()
242 errors = []
243 for word, reason in BANNED_WORDS.items():
244 if word in lower_cased_text:
245 # Hack: Should move this into BANNED_WORDS framework; for
246 # now, just hand-code the skips:
247 if (
248 "realm_name" in lower_cased_text
249 or "realm_uri" in lower_cased_text
250 or "remote_realm_host" in lower_cased_text
251 ):
252 continue
253 kwargs = dict(word=word, text=text, reason=reason)
254 msg = "{word} found in '{text}'. {reason}".format(**kwargs)
255 errors.append(msg)
256
257 return errors
258
259
260 def check_capitalization(strings: List[str]) -> Tuple[List[str], List[str], List[str]]:
261 errors = []
262 ignored = []
263 banned_word_errors = []
264 for text in strings:
265 text = " ".join(text.split()) # Remove extra whitespaces.
266 safe_text = get_safe_text(text)
267 has_ignored_phrase = text != safe_text
268 capitalized = is_capitalized(safe_text)
269 if not capitalized:
270 errors.append(text)
271 elif has_ignored_phrase:
272 ignored.append(text)
273
274 banned_word_errors.extend(check_banned_words(text))
275
276 return sorted(errors), sorted(ignored), sorted(banned_word_errors)
277
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py
--- a/tools/lib/capitalization.py
+++ b/tools/lib/capitalization.py
@@ -27,6 +27,7 @@
r"Inbox",
r"IP",
r"JSON",
+ r"Jitsi",
r"Kerberos",
r"LinkedIn",
r"LDAP",
| {"golden_diff": "diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py\n--- a/tools/lib/capitalization.py\n+++ b/tools/lib/capitalization.py\n@@ -27,6 +27,7 @@\n r\"Inbox\",\n r\"IP\",\n r\"JSON\",\n+ r\"Jitsi\",\n r\"Kerberos\",\n r\"LinkedIn\",\n r\"LDAP\",\n", "issue": "Add clarification tooltip when settings can't be saved due to invalid Jitsi URL\nIn SETTINGS / ORGANIZATION SETTINGS > Other settings, we disable the \"Save changes\" button when the custom Jitsi URL is invalid. We should add a tooltip do the disabled button to explain why it is disabled: \"Cannot save invalid Jitsi server URL.\"\r\n\r\n<img width=\"809\" alt=\"Screenshot 2023-11-02 at 10 31 14\u202fPM\" src=\"https://github.com/zulip/zulip/assets/2090066/b6bbb302-8b01-41ae-be98-1181497ecbf5\">\r\n\n", "before_files": [{"content": "import re\nfrom typing import List, Match, Tuple\n\nfrom bs4 import BeautifulSoup\n\n# The phrases in this list will be ignored. The longest phrase is\n# tried first; this removes the chance of smaller phrases changing\n# the text before longer phrases are tried.\n# The errors shown by `tools/check-capitalization` can be added to\n# this list without any modification.\nIGNORED_PHRASES = [\n # Proper nouns and acronyms\n r\"API\",\n r\"APNS\",\n r\"Botserver\",\n r\"Cookie Bot\",\n r\"DevAuthBackend\",\n r\"DSN\",\n r\"Esc\",\n r\"GCM\",\n r\"GitHub\",\n r\"Gravatar\",\n r\"Help Center\",\n r\"HTTP\",\n r\"ID\",\n r\"IDs\",\n r\"Inbox\",\n r\"IP\",\n r\"JSON\",\n r\"Kerberos\",\n r\"LinkedIn\",\n r\"LDAP\",\n r\"Markdown\",\n r\"OTP\",\n r\"Pivotal\",\n r\"Recent conversations\",\n r\"DM\",\n r\"DMs\",\n r\"Slack\",\n r\"Google\",\n r\"Terms of Service\",\n r\"Tuesday\",\n r\"URL\",\n r\"UUID\",\n r\"Webathena\",\n r\"WordPress\",\n r\"Zephyr\",\n r\"Zoom\",\n r\"Zulip\",\n r\"Zulip Server\",\n r\"Zulip Account Security\",\n r\"Zulip Security\",\n r\"Zulip Cloud\",\n r\"Zulip Cloud Standard\",\n r\"Zulip Cloud Plus\",\n r\"BigBlueButton\",\n # Code things\n r\"\\.zuliprc\",\n # BeautifulSoup will remove <z-user> which is horribly confusing,\n # so we need more of the sentence.\n r\"<z-user></z-user> will have the same role\",\n r\"<z-user></z-user> will have the same properties\",\n # Things using \"I\"\n r\"I understand\",\n r\"I'm\",\n r\"I've\",\n r\"Topics I participate in\",\n r\"Topics I send a message to\",\n r\"Topics I start\",\n # Specific short words\n r\"beta\",\n r\"and\",\n r\"bot\",\n r\"e\\.g\\.\",\n r\"enabled\",\n r\"signups\",\n # Placeholders\n r\"keyword\",\n r\"streamname\",\n r\"user@example\\.com\",\n r\"example\\.com\",\n r\"acme\",\n # Fragments of larger strings\n r\"is \u2026\",\n r\"your subscriptions on your Streams page\",\n r\"Add global time<br />Everyone sees global times in their own time zone\\.\",\n r\"user\",\n r\"an unknown operating system\",\n r\"Go to Settings\",\n r\"find accounts for another email address\",\n # SPECIAL CASES\n # Because topics usually are lower-case, this would look weird if it were capitalized\n r\"more topics\",\n # Used alone in a parenthetical where capitalized looks worse.\n r\"^deprecated$\",\n # We want the similar text in the Private Messages section to have the same capitalization.\n r\"more conversations\",\n r\"back to streams\",\n # Capital 'i' looks weird in reminders popover\n r\"in 1 hour\",\n r\"in 20 minutes\",\n r\"in 3 hours\",\n # these are used as topics\n r\"^new streams$\",\n r\"^stream events$\",\n # These are used as example short names (e.g. 
an uncapitalized context):\n r\"^marketing$\",\n r\"^cookie$\",\n # Used to refer custom time limits\n r\"\\bN\\b\",\n # Capital c feels obtrusive in clear status option\n r\"clear\",\n r\"group direct messages with \\{recipient\\}\",\n r\"direct messages with \\{recipient\\}\",\n r\"direct messages with yourself\",\n r\"GIF\",\n # Emoji name placeholder\n r\"leafy green vegetable\",\n # Subdomain placeholder\n r\"your-organization-url\",\n # Used in invite modal\n r\"or\",\n # Used in GIPHY integration setting. GIFs Rating.\n r\"rated Y\",\n r\"rated G\",\n r\"rated PG\",\n r\"rated PG13\",\n r\"rated R\",\n # Used in GIPHY popover.\n r\"GIFs\",\n r\"GIPHY\",\n # Used in our case studies\n r\"Technical University of Munich\",\n r\"University of California San Diego\",\n # Used in stream creation form\n r\"email hidden\",\n # Use in compose box.\n r\"to send\",\n r\"to add a new line\",\n # Used in showing Notification Bot read receipts message\n \"Notification Bot\",\n # Used in presence_enabled setting label\n r\"invisible mode off\",\n # Typeahead suggestions for \"Pronouns\" custom field type.\n r\"he/him\",\n r\"she/her\",\n r\"they/them\",\n # Used in message-move-time-limit setting label\n r\"does not apply to moderators and administrators\",\n # Used in message-delete-time-limit setting label\n r\"does not apply to administrators\",\n # Used as indicator with names for guest users.\n r\"guest\",\n # Used in pills for deactivated users.\n r\"deactivated\",\n # This is a reference to a setting/secret and should be lowercase.\n r\"zulip_org_id\",\n]\n\n# Sort regexes in descending order of their lengths. As a result, the\n# longer phrases will be ignored first.\nIGNORED_PHRASES.sort(key=len, reverse=True)\n\n# Compile regexes to improve performance. This also extracts the\n# text using BeautifulSoup and then removes extra whitespaces from\n# it. This step enables us to add HTML in our regexes directly.\nCOMPILED_IGNORED_PHRASES = [\n re.compile(\" \".join(BeautifulSoup(regex, \"lxml\").text.split())) for regex in IGNORED_PHRASES\n]\n\nSPLIT_BOUNDARY = \"?.!\" # Used to split string into sentences.\nSPLIT_BOUNDARY_REGEX = re.compile(rf\"[{SPLIT_BOUNDARY}]\")\n\n# Regexes which check capitalization in sentences.\nDISALLOWED = [\n r\"^[a-z](?!\\})\", # Checks if the sentence starts with a lower case character.\n r\"^[A-Z][a-z]+[\\sa-z0-9]+[A-Z]\", # Checks if an upper case character exists\n # after a lower case character when the first character is in upper case.\n]\nDISALLOWED_REGEX = re.compile(r\"|\".join(DISALLOWED))\n\nBANNED_WORDS = {\n \"realm\": \"The term realm should not appear in user-facing strings. Use organization instead.\",\n}\n\n\ndef get_safe_phrase(phrase: str) -> str:\n \"\"\"\n Safe phrase is in lower case and doesn't contain characters which can\n conflict with split boundaries. All conflicting characters are replaced\n with low dash (_).\n \"\"\"\n phrase = SPLIT_BOUNDARY_REGEX.sub(\"_\", phrase)\n return phrase.lower()\n\n\ndef replace_with_safe_phrase(matchobj: Match[str]) -> str:\n \"\"\"\n The idea is to convert IGNORED_PHRASES into safe phrases, see\n `get_safe_phrase()` function. 
The only exception is when the\n IGNORED_PHRASE is at the start of the text or after a split\n boundary; in this case, we change the first letter of the phrase\n to upper case.\n \"\"\"\n ignored_phrase = matchobj.group(0)\n safe_string = get_safe_phrase(ignored_phrase)\n\n start_index = matchobj.start()\n complete_string = matchobj.string\n\n is_string_start = start_index == 0\n # We expect that there will be one space between split boundary\n # and the next word.\n punctuation = complete_string[max(start_index - 2, 0)]\n is_after_split_boundary = punctuation in SPLIT_BOUNDARY\n if is_string_start or is_after_split_boundary:\n return safe_string.capitalize()\n\n return safe_string\n\n\ndef get_safe_text(text: str) -> str:\n \"\"\"\n This returns text which is rendered by BeautifulSoup and is in the\n form that can be split easily and has all IGNORED_PHRASES processed.\n \"\"\"\n soup = BeautifulSoup(text, \"lxml\")\n text = \" \".join(soup.text.split()) # Remove extra whitespaces.\n for phrase_regex in COMPILED_IGNORED_PHRASES:\n text = phrase_regex.sub(replace_with_safe_phrase, text)\n\n return text\n\n\ndef is_capitalized(safe_text: str) -> bool:\n sentences = SPLIT_BOUNDARY_REGEX.split(safe_text)\n return not any(DISALLOWED_REGEX.search(sentence.strip()) for sentence in sentences)\n\n\ndef check_banned_words(text: str) -> List[str]:\n lower_cased_text = text.lower()\n errors = []\n for word, reason in BANNED_WORDS.items():\n if word in lower_cased_text:\n # Hack: Should move this into BANNED_WORDS framework; for\n # now, just hand-code the skips:\n if (\n \"realm_name\" in lower_cased_text\n or \"realm_uri\" in lower_cased_text\n or \"remote_realm_host\" in lower_cased_text\n ):\n continue\n kwargs = dict(word=word, text=text, reason=reason)\n msg = \"{word} found in '{text}'. {reason}\".format(**kwargs)\n errors.append(msg)\n\n return errors\n\n\ndef check_capitalization(strings: List[str]) -> Tuple[List[str], List[str], List[str]]:\n errors = []\n ignored = []\n banned_word_errors = []\n for text in strings:\n text = \" \".join(text.split()) # Remove extra whitespaces.\n safe_text = get_safe_text(text)\n has_ignored_phrase = text != safe_text\n capitalized = is_capitalized(safe_text)\n if not capitalized:\n errors.append(text)\n elif has_ignored_phrase:\n ignored.append(text)\n\n banned_word_errors.extend(check_banned_words(text))\n\n return sorted(errors), sorted(ignored), sorted(banned_word_errors)\n", "path": "tools/lib/capitalization.py"}], "after_files": [{"content": "import re\nfrom typing import List, Match, Tuple\n\nfrom bs4 import BeautifulSoup\n\n# The phrases in this list will be ignored. 
The longest phrase is\n# tried first; this removes the chance of smaller phrases changing\n# the text before longer phrases are tried.\n# The errors shown by `tools/check-capitalization` can be added to\n# this list without any modification.\nIGNORED_PHRASES = [\n # Proper nouns and acronyms\n r\"API\",\n r\"APNS\",\n r\"Botserver\",\n r\"Cookie Bot\",\n r\"DevAuthBackend\",\n r\"DSN\",\n r\"Esc\",\n r\"GCM\",\n r\"GitHub\",\n r\"Gravatar\",\n r\"Help Center\",\n r\"HTTP\",\n r\"ID\",\n r\"IDs\",\n r\"Inbox\",\n r\"IP\",\n r\"JSON\",\n r\"Jitsi\",\n r\"Kerberos\",\n r\"LinkedIn\",\n r\"LDAP\",\n r\"Markdown\",\n r\"OTP\",\n r\"Pivotal\",\n r\"Recent conversations\",\n r\"DM\",\n r\"DMs\",\n r\"Slack\",\n r\"Google\",\n r\"Terms of Service\",\n r\"Tuesday\",\n r\"URL\",\n r\"UUID\",\n r\"Webathena\",\n r\"WordPress\",\n r\"Zephyr\",\n r\"Zoom\",\n r\"Zulip\",\n r\"Zulip Server\",\n r\"Zulip Account Security\",\n r\"Zulip Security\",\n r\"Zulip Cloud\",\n r\"Zulip Cloud Standard\",\n r\"Zulip Cloud Plus\",\n r\"BigBlueButton\",\n # Code things\n r\"\\.zuliprc\",\n # BeautifulSoup will remove <z-user> which is horribly confusing,\n # so we need more of the sentence.\n r\"<z-user></z-user> will have the same role\",\n r\"<z-user></z-user> will have the same properties\",\n # Things using \"I\"\n r\"I understand\",\n r\"I'm\",\n r\"I've\",\n r\"Topics I participate in\",\n r\"Topics I send a message to\",\n r\"Topics I start\",\n # Specific short words\n r\"beta\",\n r\"and\",\n r\"bot\",\n r\"e\\.g\\.\",\n r\"enabled\",\n r\"signups\",\n # Placeholders\n r\"keyword\",\n r\"streamname\",\n r\"user@example\\.com\",\n r\"example\\.com\",\n r\"acme\",\n # Fragments of larger strings\n r\"is \u2026\",\n r\"your subscriptions on your Streams page\",\n r\"Add global time<br />Everyone sees global times in their own time zone\\.\",\n r\"user\",\n r\"an unknown operating system\",\n r\"Go to Settings\",\n r\"find accounts for another email address\",\n # SPECIAL CASES\n # Because topics usually are lower-case, this would look weird if it were capitalized\n r\"more topics\",\n # Used alone in a parenthetical where capitalized looks worse.\n r\"^deprecated$\",\n # We want the similar text in the Private Messages section to have the same capitalization.\n r\"more conversations\",\n r\"back to streams\",\n # Capital 'i' looks weird in reminders popover\n r\"in 1 hour\",\n r\"in 20 minutes\",\n r\"in 3 hours\",\n # these are used as topics\n r\"^new streams$\",\n r\"^stream events$\",\n # These are used as example short names (e.g. an uncapitalized context):\n r\"^marketing$\",\n r\"^cookie$\",\n # Used to refer custom time limits\n r\"\\bN\\b\",\n # Capital c feels obtrusive in clear status option\n r\"clear\",\n r\"group direct messages with \\{recipient\\}\",\n r\"direct messages with \\{recipient\\}\",\n r\"direct messages with yourself\",\n r\"GIF\",\n # Emoji name placeholder\n r\"leafy green vegetable\",\n # Subdomain placeholder\n r\"your-organization-url\",\n # Used in invite modal\n r\"or\",\n # Used in GIPHY integration setting. 
GIFs Rating.\n r\"rated Y\",\n r\"rated G\",\n r\"rated PG\",\n r\"rated PG13\",\n r\"rated R\",\n # Used in GIPHY popover.\n r\"GIFs\",\n r\"GIPHY\",\n # Used in our case studies\n r\"Technical University of Munich\",\n r\"University of California San Diego\",\n # Used in stream creation form\n r\"email hidden\",\n # Use in compose box.\n r\"to send\",\n r\"to add a new line\",\n # Used in showing Notification Bot read receipts message\n \"Notification Bot\",\n # Used in presence_enabled setting label\n r\"invisible mode off\",\n # Typeahead suggestions for \"Pronouns\" custom field type.\n r\"he/him\",\n r\"she/her\",\n r\"they/them\",\n # Used in message-move-time-limit setting label\n r\"does not apply to moderators and administrators\",\n # Used in message-delete-time-limit setting label\n r\"does not apply to administrators\",\n # Used as indicator with names for guest users.\n r\"guest\",\n # Used in pills for deactivated users.\n r\"deactivated\",\n # This is a reference to a setting/secret and should be lowercase.\n r\"zulip_org_id\",\n]\n\n# Sort regexes in descending order of their lengths. As a result, the\n# longer phrases will be ignored first.\nIGNORED_PHRASES.sort(key=len, reverse=True)\n\n# Compile regexes to improve performance. This also extracts the\n# text using BeautifulSoup and then removes extra whitespaces from\n# it. This step enables us to add HTML in our regexes directly.\nCOMPILED_IGNORED_PHRASES = [\n re.compile(\" \".join(BeautifulSoup(regex, \"lxml\").text.split())) for regex in IGNORED_PHRASES\n]\n\nSPLIT_BOUNDARY = \"?.!\" # Used to split string into sentences.\nSPLIT_BOUNDARY_REGEX = re.compile(rf\"[{SPLIT_BOUNDARY}]\")\n\n# Regexes which check capitalization in sentences.\nDISALLOWED = [\n r\"^[a-z](?!\\})\", # Checks if the sentence starts with a lower case character.\n r\"^[A-Z][a-z]+[\\sa-z0-9]+[A-Z]\", # Checks if an upper case character exists\n # after a lower case character when the first character is in upper case.\n]\nDISALLOWED_REGEX = re.compile(r\"|\".join(DISALLOWED))\n\nBANNED_WORDS = {\n \"realm\": \"The term realm should not appear in user-facing strings. Use organization instead.\",\n}\n\n\ndef get_safe_phrase(phrase: str) -> str:\n \"\"\"\n Safe phrase is in lower case and doesn't contain characters which can\n conflict with split boundaries. All conflicting characters are replaced\n with low dash (_).\n \"\"\"\n phrase = SPLIT_BOUNDARY_REGEX.sub(\"_\", phrase)\n return phrase.lower()\n\n\ndef replace_with_safe_phrase(matchobj: Match[str]) -> str:\n \"\"\"\n The idea is to convert IGNORED_PHRASES into safe phrases, see\n `get_safe_phrase()` function. 
The only exception is when the\n IGNORED_PHRASE is at the start of the text or after a split\n boundary; in this case, we change the first letter of the phrase\n to upper case.\n \"\"\"\n ignored_phrase = matchobj.group(0)\n safe_string = get_safe_phrase(ignored_phrase)\n\n start_index = matchobj.start()\n complete_string = matchobj.string\n\n is_string_start = start_index == 0\n # We expect that there will be one space between split boundary\n # and the next word.\n punctuation = complete_string[max(start_index - 2, 0)]\n is_after_split_boundary = punctuation in SPLIT_BOUNDARY\n if is_string_start or is_after_split_boundary:\n return safe_string.capitalize()\n\n return safe_string\n\n\ndef get_safe_text(text: str) -> str:\n \"\"\"\n This returns text which is rendered by BeautifulSoup and is in the\n form that can be split easily and has all IGNORED_PHRASES processed.\n \"\"\"\n soup = BeautifulSoup(text, \"lxml\")\n text = \" \".join(soup.text.split()) # Remove extra whitespaces.\n for phrase_regex in COMPILED_IGNORED_PHRASES:\n text = phrase_regex.sub(replace_with_safe_phrase, text)\n\n return text\n\n\ndef is_capitalized(safe_text: str) -> bool:\n sentences = SPLIT_BOUNDARY_REGEX.split(safe_text)\n return not any(DISALLOWED_REGEX.search(sentence.strip()) for sentence in sentences)\n\n\ndef check_banned_words(text: str) -> List[str]:\n lower_cased_text = text.lower()\n errors = []\n for word, reason in BANNED_WORDS.items():\n if word in lower_cased_text:\n # Hack: Should move this into BANNED_WORDS framework; for\n # now, just hand-code the skips:\n if (\n \"realm_name\" in lower_cased_text\n or \"realm_uri\" in lower_cased_text\n or \"remote_realm_host\" in lower_cased_text\n ):\n continue\n kwargs = dict(word=word, text=text, reason=reason)\n msg = \"{word} found in '{text}'. {reason}\".format(**kwargs)\n errors.append(msg)\n\n return errors\n\n\ndef check_capitalization(strings: List[str]) -> Tuple[List[str], List[str], List[str]]:\n errors = []\n ignored = []\n banned_word_errors = []\n for text in strings:\n text = \" \".join(text.split()) # Remove extra whitespaces.\n safe_text = get_safe_text(text)\n has_ignored_phrase = text != safe_text\n capitalized = is_capitalized(safe_text)\n if not capitalized:\n errors.append(text)\n elif has_ignored_phrase:\n ignored.append(text)\n\n banned_word_errors.extend(check_banned_words(text))\n\n return sorted(errors), sorted(ignored), sorted(banned_word_errors)\n", "path": "tools/lib/capitalization.py"}]} | 3,371 | 92 |
gh_patches_debug_40748 | rasdani/github-patches | git_diff | vacanza__python-holidays-639 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ireland considering UK as base class and hence not being a country itself
Issue also opened here:
home-assistant/core#67542
Looks like Ireland is being treated as part of the UK, which is wrong, as the holidays observed in the UK do not all necessarily exist in Ireland.
For reference, see this comment: https://github.com/home-assistant/core/issues/67542#issuecomment-1058079650
--- END ISSUE ---
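As a brief illustration of the reported behaviour (this reproduction sketch is an editorial addition, not part of the original issue; the exact entries that leak through are an assumption), instantiating the Irish calendar currently pulls in whatever the UK base class populates:

```python
# Hypothetical reproduction sketch: because Ireland subclasses UnitedKingdom,
# holidays populated by the UK base class can appear in the Irish calendar.
import holidays

ie = holidays.Ireland(years=2022)  # Ireland is currently defined as Ireland(UnitedKingdom)
for day, name in sorted(ie.items()):
    print(day, name)  # expected: only Irish public holidays; reported: UK-derived entries appear as well
```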
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `holidays/countries/ireland.py`
Content:
```
1 # python-holidays
2 # ---------------
3 # A fast, efficient Python library for generating country, province and state
4 # specific sets of holidays on the fly. It aims to make determining whether a
5 # specific date is a holiday as fast and flexible as possible.
6 #
7 # Authors: dr-prodigy <[email protected]> (c) 2017-2022
8 # ryanss <[email protected]> (c) 2014-2017
9 # Website: https://github.com/dr-prodigy/python-holidays
10 # License: MIT (see LICENSE file)
11
12 from datetime import date
13
14 from dateutil.easter import easter
15 from dateutil.relativedelta import relativedelta as rd, MO
16
17 from holidays.constants import MAR, MAY, JUN, AUG, OCT, DEC
18 from holidays.constants import MON, TUE, WED, THU, FRI, SAT, SUN, WEEKEND
19 from holidays.holiday_base import HolidayBase
20 from .united_kingdom import UnitedKingdom
21
22
23 class Ireland(UnitedKingdom):
24 country = "IE"
25
26 def __init__(self, **kwargs):
27 HolidayBase.__init__(self, **kwargs)
28
29 def _country_specific(self, year):
30 # Ireland exclusive holidays
31
32 # St. Patrick's Day
33 name = "St. Patrick's Day"
34 self[date(year, MAR, 17)] = name
35 if self.observed and date(year, MAR, 17).weekday() in WEEKEND:
36 self[date(year, MAR, 17) + rd(weekday=MO)] = name + " (Observed)"
37
38 # Easter Monday
39 self[easter(year) + rd(weekday=MO)] = "Easter Monday"
40
41 # May Day bank holiday (first Monday in May)
42 if year >= 1978:
43 name = "May Day"
44 if year == 1995:
45 dt = date(year, MAY, 8)
46 else:
47 dt = date(year, MAY, 1)
48 if dt.weekday() == MON:
49 self[dt] = name
50 elif dt.weekday() == TUE:
51 self[dt + rd(days=+6)] = name
52 elif dt.weekday() == WED:
53 self[dt + rd(days=+5)] = name
54 elif dt.weekday() == THU:
55 self[dt + rd(days=+4)] = name
56 elif dt.weekday() == FRI:
57 self[dt + rd(days=+3)] = name
58 elif dt.weekday() == SAT:
59 self[dt + rd(days=+2)] = name
60 elif dt.weekday() == SUN:
61 self[dt + rd(days=+1)] = name
62
63 # June bank holiday (first Monday in June)
64 self[date(year, JUN, 1) + rd(weekday=MO)] = "June Bank Holiday"
65
66 # Summer bank holiday (first Monday in August)
67 self[date(year, AUG, 1) + rd(weekday=MO)] = "Summer Bank Holiday"
68
69 # October Bank Holiday (last Monday in October)
70 self[date(year, OCT, 31) + rd(weekday=MO(-1))] = "October Bank Holiday"
71
72 # St. Stephen's Day
73 name = "St. Stephen's Day"
74 self[date(year, DEC, 26)] = name
75 if self.observed and date(year, DEC, 26).weekday() == SAT:
76 self[date(year, DEC, 28)] = name + " (Observed)"
77 elif self.observed and date(year, DEC, 26).weekday() == SUN:
78 self[date(year, DEC, 28)] = name + " (Observed)"
79
80
81 class IE(Ireland):
82 pass
83
84
85 class IRL(Ireland):
86 pass
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/holidays/countries/ireland.py b/holidays/countries/ireland.py
--- a/holidays/countries/ireland.py
+++ b/holidays/countries/ireland.py
@@ -16,20 +16,37 @@
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, MO
-from holidays.constants import MAR, MAY, JUN, AUG, OCT, DEC
+from holidays.constants import FEB, MAR, MAY, JUN, AUG, OCT, DEC
from holidays.constants import MON, TUE, WED, THU, FRI, SAT, SUN, WEEKEND
from holidays.holiday_base import HolidayBase
-from .united_kingdom import UnitedKingdom
+class Ireland(HolidayBase):
+ """
+ Official holidays in Ireland, as declared in the Citizen's Information
+ bulletin:
+ https://www.citizensinformation.ie/en/employment/employment_rights_and_conditions/leave_and_holidays/public_holidays_in_ireland.html
+ """
-class Ireland(UnitedKingdom):
country = "IE"
+ subdivisions = []
def __init__(self, **kwargs):
HolidayBase.__init__(self, **kwargs)
- def _country_specific(self, year):
- # Ireland exclusive holidays
+ def _populate(self, year):
+ self[date(year, JAN, 1)] = "New Year's Day"
+
+ # St. Brigid's Day
+ if year >= 2023:
+ dt = date(year, FEB, 1)
+ self[dt] = "St. Brigid's Day"
+
+ if self.observed and dt.weekday() != FRI:
+ self[date(year, FEB, 1) + rd(weekday=MO)] = "St. Brigid's Day (Observed)"
+
+ # One-off day of rememberance and recognition
+ if year == 2022:
+ self[date(year, MAR, 18)] = "Day of Rememberance and Recognition"
# St. Patrick's Day
name = "St. Patrick's Day"
@@ -40,7 +57,7 @@
# Easter Monday
self[easter(year) + rd(weekday=MO)] = "Easter Monday"
- # May Day bank holiday (first Monday in May)
+ # May bank holiday (first Monday in May)
if year >= 1978:
name = "May Day"
if year == 1995:
@@ -66,18 +83,24 @@
self[date(year, JUN, 1) + rd(weekday=MO)] = "June Bank Holiday"
# Summer bank holiday (first Monday in August)
- self[date(year, AUG, 1) + rd(weekday=MO)] = "Summer Bank Holiday"
+ self[date(year, AUG, 1) + rd(weekday=MO)] = "August Bank Holiday"
# October Bank Holiday (last Monday in October)
self[date(year, OCT, 31) + rd(weekday=MO(-1))] = "October Bank Holiday"
+ # Christmas Day
+ name = "Christmas Day"
+ self[date(year, DEC, 25)] = "Christmas Day"
+ if self.observed and date(year, DEC, 25).weekday() in WEEKEND:
+ self[date(year, DEC, 25) + rd(weekday=MON)] = name + " (Observed)"
+
# St. Stephen's Day
name = "St. Stephen's Day"
self[date(year, DEC, 26)] = name
if self.observed and date(year, DEC, 26).weekday() == SAT:
- self[date(year, DEC, 28)] = name + " (Observed)"
+ self[date(year, DEC, 26) + rd(weekday=MON)] = name + " (Observed)"
elif self.observed and date(year, DEC, 26).weekday() == SUN:
- self[date(year, DEC, 28)] = name + " (Observed)"
+ self[date(year, DEC, 26) + rd(weekday=TUE)] = name + " (Observed)"
class IE(Ireland):
| {"golden_diff": "diff --git a/holidays/countries/ireland.py b/holidays/countries/ireland.py\n--- a/holidays/countries/ireland.py\n+++ b/holidays/countries/ireland.py\n@@ -16,20 +16,37 @@\n from dateutil.easter import easter\n from dateutil.relativedelta import relativedelta as rd, MO\n \n-from holidays.constants import MAR, MAY, JUN, AUG, OCT, DEC\n+from holidays.constants import FEB, MAR, MAY, JUN, AUG, OCT, DEC\n from holidays.constants import MON, TUE, WED, THU, FRI, SAT, SUN, WEEKEND\n from holidays.holiday_base import HolidayBase\n-from .united_kingdom import UnitedKingdom\n \n+class Ireland(HolidayBase):\n+ \"\"\"\n+ Official holidays in Ireland, as declared in the Citizen's Information\n+ bulletin:\n+ https://www.citizensinformation.ie/en/employment/employment_rights_and_conditions/leave_and_holidays/public_holidays_in_ireland.html\n+ \"\"\"\n \n-class Ireland(UnitedKingdom):\n country = \"IE\"\n+ subdivisions = []\n \n def __init__(self, **kwargs):\n HolidayBase.__init__(self, **kwargs)\n \n- def _country_specific(self, year):\n- # Ireland exclusive holidays\n+ def _populate(self, year):\n+ self[date(year, JAN, 1)] = \"New Year's Day\"\n+\n+ # St. Brigid's Day\n+ if year >= 2023:\n+ dt = date(year, FEB, 1)\n+ self[dt] = \"St. Brigid's Day\"\n+\n+ if self.observed and dt.weekday() != FRI:\n+ self[date(year, FEB, 1) + rd(weekday=MO)] = \"St. Brigid's Day (Observed)\"\n+\n+ # One-off day of rememberance and recognition\n+ if year == 2022:\n+ self[date(year, MAR, 18)] = \"Day of Rememberance and Recognition\"\n \n # St. Patrick's Day\n name = \"St. Patrick's Day\"\n@@ -40,7 +57,7 @@\n # Easter Monday\n self[easter(year) + rd(weekday=MO)] = \"Easter Monday\"\n \n- # May Day bank holiday (first Monday in May)\n+ # May bank holiday (first Monday in May)\n if year >= 1978:\n name = \"May Day\"\n if year == 1995:\n@@ -66,18 +83,24 @@\n self[date(year, JUN, 1) + rd(weekday=MO)] = \"June Bank Holiday\"\n \n # Summer bank holiday (first Monday in August)\n- self[date(year, AUG, 1) + rd(weekday=MO)] = \"Summer Bank Holiday\"\n+ self[date(year, AUG, 1) + rd(weekday=MO)] = \"August Bank Holiday\"\n \n # October Bank Holiday (last Monday in October)\n self[date(year, OCT, 31) + rd(weekday=MO(-1))] = \"October Bank Holiday\"\n \n+ # Christmas Day\n+ name = \"Christmas Day\"\n+ self[date(year, DEC, 25)] = \"Christmas Day\"\n+ if self.observed and date(year, DEC, 25).weekday() in WEEKEND:\n+ self[date(year, DEC, 25) + rd(weekday=MON)] = name + \" (Observed)\"\n+\n # St. Stephen's Day\n name = \"St. 
Stephen's Day\"\n self[date(year, DEC, 26)] = name\n if self.observed and date(year, DEC, 26).weekday() == SAT:\n- self[date(year, DEC, 28)] = name + \" (Observed)\"\n+ self[date(year, DEC, 26) + rd(weekday=MON)] = name + \" (Observed)\"\n elif self.observed and date(year, DEC, 26).weekday() == SUN:\n- self[date(year, DEC, 28)] = name + \" (Observed)\"\n+ self[date(year, DEC, 26) + rd(weekday=TUE)] = name + \" (Observed)\"\n \n \n class IE(Ireland):\n", "issue": "Ireland considering UK as base class and hence not being a country itself\nIssue also opened here:\r\nhome-assistant/core#67542\r\n\r\nLooks like Ireland is being considered as being part of the UK which is wrong as not all the holidays in the UK exist, or necessarily exist in Ireland.\r\n\r\nTake a reference on this comment: https://github.com/home-assistant/core/issues/67542#issuecomment-1058079650\n", "before_files": [{"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2022\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd, MO\n\nfrom holidays.constants import MAR, MAY, JUN, AUG, OCT, DEC\nfrom holidays.constants import MON, TUE, WED, THU, FRI, SAT, SUN, WEEKEND\nfrom holidays.holiday_base import HolidayBase\nfrom .united_kingdom import UnitedKingdom\n\n\nclass Ireland(UnitedKingdom):\n country = \"IE\"\n\n def __init__(self, **kwargs):\n HolidayBase.__init__(self, **kwargs)\n\n def _country_specific(self, year):\n # Ireland exclusive holidays\n\n # St. Patrick's Day\n name = \"St. Patrick's Day\"\n self[date(year, MAR, 17)] = name\n if self.observed and date(year, MAR, 17).weekday() in WEEKEND:\n self[date(year, MAR, 17) + rd(weekday=MO)] = name + \" (Observed)\"\n\n # Easter Monday\n self[easter(year) + rd(weekday=MO)] = \"Easter Monday\"\n\n # May Day bank holiday (first Monday in May)\n if year >= 1978:\n name = \"May Day\"\n if year == 1995:\n dt = date(year, MAY, 8)\n else:\n dt = date(year, MAY, 1)\n if dt.weekday() == MON:\n self[dt] = name\n elif dt.weekday() == TUE:\n self[dt + rd(days=+6)] = name\n elif dt.weekday() == WED:\n self[dt + rd(days=+5)] = name\n elif dt.weekday() == THU:\n self[dt + rd(days=+4)] = name\n elif dt.weekday() == FRI:\n self[dt + rd(days=+3)] = name\n elif dt.weekday() == SAT:\n self[dt + rd(days=+2)] = name\n elif dt.weekday() == SUN:\n self[dt + rd(days=+1)] = name\n\n # June bank holiday (first Monday in June)\n self[date(year, JUN, 1) + rd(weekday=MO)] = \"June Bank Holiday\"\n\n # Summer bank holiday (first Monday in August)\n self[date(year, AUG, 1) + rd(weekday=MO)] = \"Summer Bank Holiday\"\n\n # October Bank Holiday (last Monday in October)\n self[date(year, OCT, 31) + rd(weekday=MO(-1))] = \"October Bank Holiday\"\n\n # St. Stephen's Day\n name = \"St. 
Stephen's Day\"\n self[date(year, DEC, 26)] = name\n if self.observed and date(year, DEC, 26).weekday() == SAT:\n self[date(year, DEC, 28)] = name + \" (Observed)\"\n elif self.observed and date(year, DEC, 26).weekday() == SUN:\n self[date(year, DEC, 28)] = name + \" (Observed)\"\n\n\nclass IE(Ireland):\n pass\n\n\nclass IRL(Ireland):\n pass\n", "path": "holidays/countries/ireland.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2022\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd, MO\n\nfrom holidays.constants import FEB, MAR, MAY, JUN, AUG, OCT, DEC\nfrom holidays.constants import MON, TUE, WED, THU, FRI, SAT, SUN, WEEKEND\nfrom holidays.holiday_base import HolidayBase\n\nclass Ireland(HolidayBase):\n \"\"\"\n Official holidays in Ireland, as declared in the Citizen's Information\n bulletin:\n https://www.citizensinformation.ie/en/employment/employment_rights_and_conditions/leave_and_holidays/public_holidays_in_ireland.html\n \"\"\"\n\n country = \"IE\"\n subdivisions = []\n\n def __init__(self, **kwargs):\n HolidayBase.__init__(self, **kwargs)\n\n def _populate(self, year):\n self[date(year, JAN, 1)] = \"New Year's Day\"\n\n # St. Brigid's Day\n if year >= 2023:\n dt = date(year, FEB, 1)\n self[dt] = \"St. Brigid's Day\"\n\n if self.observed and dt.weekday() != FRI:\n self[date(year, FEB, 1) + rd(weekday=MO)] = \"St. Brigid's Day (Observed)\"\n\n # One-off day of rememberance and recognition\n if year == 2022:\n self[date(year, MAR, 18)] = \"Day of Rememberance and Recognition\"\n\n # St. Patrick's Day\n name = \"St. Patrick's Day\"\n self[date(year, MAR, 17)] = name\n if self.observed and date(year, MAR, 17).weekday() in WEEKEND:\n self[date(year, MAR, 17) + rd(weekday=MO)] = name + \" (Observed)\"\n\n # Easter Monday\n self[easter(year) + rd(weekday=MO)] = \"Easter Monday\"\n\n # May bank holiday (first Monday in May)\n if year >= 1978:\n name = \"May Day\"\n if year == 1995:\n dt = date(year, MAY, 8)\n else:\n dt = date(year, MAY, 1)\n if dt.weekday() == MON:\n self[dt] = name\n elif dt.weekday() == TUE:\n self[dt + rd(days=+6)] = name\n elif dt.weekday() == WED:\n self[dt + rd(days=+5)] = name\n elif dt.weekday() == THU:\n self[dt + rd(days=+4)] = name\n elif dt.weekday() == FRI:\n self[dt + rd(days=+3)] = name\n elif dt.weekday() == SAT:\n self[dt + rd(days=+2)] = name\n elif dt.weekday() == SUN:\n self[dt + rd(days=+1)] = name\n\n # June bank holiday (first Monday in June)\n self[date(year, JUN, 1) + rd(weekday=MO)] = \"June Bank Holiday\"\n\n # Summer bank holiday (first Monday in August)\n self[date(year, AUG, 1) + rd(weekday=MO)] = \"August Bank Holiday\"\n\n # October Bank Holiday (last Monday in October)\n self[date(year, OCT, 31) + rd(weekday=MO(-1))] = \"October Bank Holiday\"\n\n # Christmas Day\n name = \"Christmas Day\"\n self[date(year, DEC, 25)] = \"Christmas Day\"\n if self.observed and date(year, DEC, 25).weekday() in WEEKEND:\n self[date(year, DEC, 25) + rd(weekday=MON)] = name + \" (Observed)\"\n\n # St. Stephen's Day\n name = \"St. 
Stephen's Day\"\n self[date(year, DEC, 26)] = name\n if self.observed and date(year, DEC, 26).weekday() == SAT:\n self[date(year, DEC, 26) + rd(weekday=MON)] = name + \" (Observed)\"\n elif self.observed and date(year, DEC, 26).weekday() == SUN:\n self[date(year, DEC, 26) + rd(weekday=TUE)] = name + \" (Observed)\"\n\n\nclass IE(Ireland):\n pass\n\n\nclass IRL(Ireland):\n pass\n", "path": "holidays/countries/ireland.py"}]} | 1,391 | 957 |
gh_patches_debug_17361 | rasdani/github-patches | git_diff | certbot__certbot-8895 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nginx parser cannot handle empty file
We have some simple tooling that empties a vhost's config file to disable it. However, certbot throws errors in the debug log about it.
Latest python-certbot-nginx from the Ubuntu 18.04 PPA:
```
2019-05-25 16:18:38,333:DEBUG:certbot_nginx.parser:Could not parse file: /etc/nginx/sites-generated/xdomain due to Expected {Group:({[<SPC><TAB><CR><LF>] "#" rest of line}) | Group:(Forward: ...) | Group:({[<SPC><TAB><CR><LF>] {Combine:({{quoted string, starting with " ending with " | quoted string, starting with ' ending with '} ")" [Re:('(\\$\\{)|[^{;\\s]')]...}) | Combine:({Re:('(\\$\\{)|[^{};\\s\'\\"]') [Re:('(\\$\\{)|[^{;\\s]')]...}) | quoted string, starting with " ending with " | quoted string, starting with ' ending with '} [{<SPC><TAB><CR><LF> {Combine:({{quoted string, starting with " ending with " | quoted string, starting with ' ending with '} ")" [Re:('(\\$\\{)|[^{;\\s]')]...}) | Combine:({Re:('(\\$\\{)|[^{};\\s\'\\"]') [Re:('(\\$\\{)|[^{;\\s]')]...}) | quoted string, starting with " ending with " | quoted string, starting with ' ending with '}}]... [<SPC><TAB><CR><LF>] Suppress:(";")})} (at char 1), (line:2, col:1)
```
In this case `/etc/nginx/sites-generated/xdomain` points to an empty file; nginx is fine with that, but the certbot parser isn't.
--- END ISSUE ---
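The failure above can be reproduced without running certbot at all by handing an empty string to the low-level parser. This is an illustrative sketch; the module path follows the file listing below, while the 2019 release in the report likely exposed the same functions as `certbot_nginx.nginxparser`:

```python
# Minimal reproduction sketch: an empty vhost file makes the grammar fail,
# because the script rule requires at least one statement or comment.
from pyparsing import ParseException

from certbot_nginx._internal.nginxparser import loads

try:
    loads("")  # contents of an emptied sites-generated/xdomain file
except ParseException as err:
    print("parse failed:", err)
```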
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `certbot-nginx/certbot_nginx/_internal/nginxparser.py`
Content:
```
1 """Very low-level nginx config parser based on pyparsing."""
2 # Forked from https://github.com/fatiherikli/nginxparser (MIT Licensed)
3 import copy
4 import logging
5 from typing import Any
6 from typing import IO
7
8 from pyparsing import Combine
9 from pyparsing import Forward
10 from pyparsing import Group
11 from pyparsing import Literal
12 from pyparsing import OneOrMore
13 from pyparsing import Optional
14 from pyparsing import QuotedString
15 from pyparsing import Regex
16 from pyparsing import restOfLine
17 from pyparsing import stringEnd
18 from pyparsing import White
19 from pyparsing import ZeroOrMore
20
21 logger = logging.getLogger(__name__)
22
23
24 class RawNginxParser:
25 # pylint: disable=pointless-statement
26 """A class that parses nginx configuration with pyparsing."""
27
28 # constants
29 space = Optional(White()).leaveWhitespace()
30 required_space = White().leaveWhitespace()
31
32 left_bracket = Literal("{").suppress()
33 right_bracket = space + Literal("}").suppress()
34 semicolon = Literal(";").suppress()
35 dquoted = QuotedString('"', multiline=True, unquoteResults=False, escChar='\\')
36 squoted = QuotedString("'", multiline=True, unquoteResults=False, escChar='\\')
37 quoted = dquoted | squoted
38 head_tokenchars = Regex(r"(\$\{)|[^{};\s'\"]") # if (last_space)
39 tail_tokenchars = Regex(r"(\$\{)|[^{;\s]") # else
40 tokenchars = Combine(head_tokenchars + ZeroOrMore(tail_tokenchars))
41 paren_quote_extend = Combine(quoted + Literal(')') + ZeroOrMore(tail_tokenchars))
42 # note: ')' allows extension, but then we fall into else, not last_space.
43
44 token = paren_quote_extend | tokenchars | quoted
45
46 whitespace_token_group = space + token + ZeroOrMore(required_space + token) + space
47 assignment = whitespace_token_group + semicolon
48
49 comment = space + Literal('#') + restOfLine
50
51 block = Forward()
52
53 # order matters! see issue 518, and also http { # server { \n}
54 contents = Group(comment) | Group(block) | Group(assignment)
55
56 block_begin = Group(whitespace_token_group)
57 block_innards = Group(ZeroOrMore(contents) + space).leaveWhitespace()
58 block << block_begin + left_bracket + block_innards + right_bracket
59
60 script = OneOrMore(contents) + space + stringEnd
61 script.parseWithTabs().leaveWhitespace()
62
63 def __init__(self, source):
64 self.source = source
65
66 def parse(self):
67 """Returns the parsed tree."""
68 return self.script.parseString(self.source)
69
70 def as_list(self):
71 """Returns the parsed tree as a list."""
72 return self.parse().asList()
73
74 class RawNginxDumper:
75 """A class that dumps nginx configuration from the provided tree."""
76 def __init__(self, blocks):
77 self.blocks = blocks
78
79 def __iter__(self, blocks=None):
80 """Iterates the dumped nginx content."""
81 blocks = blocks or self.blocks
82 for b0 in blocks:
83 if isinstance(b0, str):
84 yield b0
85 continue
86 item = copy.deepcopy(b0)
87 if spacey(item[0]):
88 yield item.pop(0) # indentation
89 if not item:
90 continue
91
92 if isinstance(item[0], list): # block
93 yield "".join(item.pop(0)) + '{'
94 for parameter in item.pop(0):
95 for line in self.__iter__([parameter]): # negate "for b0 in blocks"
96 yield line
97 yield '}'
98 else: # not a block - list of strings
99 semicolon = ";"
100 if isinstance(item[0], str) and item[0].strip() == '#': # comment
101 semicolon = ""
102 yield "".join(item) + semicolon
103
104 def __str__(self):
105 """Return the parsed block as a string."""
106 return ''.join(self)
107
108
109 spacey = lambda x: (isinstance(x, str) and x.isspace()) or x == ''
110
111
112 class UnspacedList(list):
113 """Wrap a list [of lists], making any whitespace entries magically invisible"""
114
115 def __init__(self, list_source):
116 # ensure our argument is not a generator, and duplicate any sublists
117 self.spaced = copy.deepcopy(list(list_source))
118 self.dirty = False
119
120 # Turn self into a version of the source list that has spaces removed
121 # and all sub-lists also UnspacedList()ed
122 list.__init__(self, list_source)
123 for i, entry in reversed(list(enumerate(self))):
124 if isinstance(entry, list):
125 sublist = UnspacedList(entry)
126 list.__setitem__(self, i, sublist)
127 self.spaced[i] = sublist.spaced
128 elif spacey(entry):
129 # don't delete comments
130 if "#" not in self[:i]:
131 list.__delitem__(self, i)
132
133 def _coerce(self, inbound):
134 """
135 Coerce some inbound object to be appropriately usable in this object
136
137 :param inbound: string or None or list or UnspacedList
138 :returns: (coerced UnspacedList or string or None, spaced equivalent)
139 :rtype: tuple
140
141 """
142 if not isinstance(inbound, list): # str or None
143 return inbound, inbound
144 else:
145 if not hasattr(inbound, "spaced"):
146 inbound = UnspacedList(inbound)
147 return inbound, inbound.spaced
148
149 def insert(self, i, x):
150 item, spaced_item = self._coerce(x)
151 slicepos = self._spaced_position(i) if i < len(self) else len(self.spaced)
152 self.spaced.insert(slicepos, spaced_item)
153 if not spacey(item):
154 list.insert(self, i, item)
155 self.dirty = True
156
157 def append(self, x):
158 item, spaced_item = self._coerce(x)
159 self.spaced.append(spaced_item)
160 if not spacey(item):
161 list.append(self, item)
162 self.dirty = True
163
164 def extend(self, x):
165 item, spaced_item = self._coerce(x)
166 self.spaced.extend(spaced_item)
167 list.extend(self, item)
168 self.dirty = True
169
170 def __add__(self, other):
171 l = copy.deepcopy(self)
172 l.extend(other)
173 l.dirty = True
174 return l
175
176 def pop(self, _i=None):
177 raise NotImplementedError("UnspacedList.pop() not yet implemented")
178 def remove(self, _):
179 raise NotImplementedError("UnspacedList.remove() not yet implemented")
180 def reverse(self):
181 raise NotImplementedError("UnspacedList.reverse() not yet implemented")
182 def sort(self, _cmp=None, _key=None, _Rev=None):
183 raise NotImplementedError("UnspacedList.sort() not yet implemented")
184 def __setslice__(self, _i, _j, _newslice):
185 raise NotImplementedError("Slice operations on UnspacedLists not yet implemented")
186
187 def __setitem__(self, i, value):
188 if isinstance(i, slice):
189 raise NotImplementedError("Slice operations on UnspacedLists not yet implemented")
190 item, spaced_item = self._coerce(value)
191 self.spaced.__setitem__(self._spaced_position(i), spaced_item)
192 if not spacey(item):
193 list.__setitem__(self, i, item)
194 self.dirty = True
195
196 def __delitem__(self, i):
197 self.spaced.__delitem__(self._spaced_position(i))
198 list.__delitem__(self, i)
199 self.dirty = True
200
201 def __deepcopy__(self, memo):
202 new_spaced = copy.deepcopy(self.spaced, memo=memo)
203 l = UnspacedList(new_spaced)
204 l.dirty = self.dirty
205 return l
206
207 def is_dirty(self):
208 """Recurse through the parse tree to figure out if any sublists are dirty"""
209 if self.dirty:
210 return True
211 return any((isinstance(x, UnspacedList) and x.is_dirty() for x in self))
212
213 def _spaced_position(self, idx):
214 "Convert from indexes in the unspaced list to positions in the spaced one"
215 pos = spaces = 0
216 # Normalize indexes like list[-1] etc, and save the result
217 if idx < 0:
218 idx = len(self) + idx
219 if not 0 <= idx < len(self):
220 raise IndexError("list index out of range")
221 idx0 = idx
222 # Count the number of spaces in the spaced list before idx in the unspaced one
223 while idx != -1:
224 if spacey(self.spaced[pos]):
225 spaces += 1
226 else:
227 idx -= 1
228 pos += 1
229 return idx0 + spaces
230
231
232 # Shortcut functions to respect Python's serialization interface
233 # (like pyyaml, picker or json)
234
235 def loads(source):
236 """Parses from a string.
237
238 :param str source: The string to parse
239 :returns: The parsed tree
240 :rtype: list
241
242 """
243 return UnspacedList(RawNginxParser(source).as_list())
244
245
246 def load(_file):
247 """Parses from a file.
248
249 :param file _file: The file to parse
250 :returns: The parsed tree
251 :rtype: list
252
253 """
254 return loads(_file.read())
255
256
257 def dumps(blocks: UnspacedList) -> str:
258 """Dump to a Unicode string.
259
260 :param UnspacedList block: The parsed tree
261 :rtype: six.text_type
262
263 """
264 return str(RawNginxDumper(blocks.spaced))
265
266
267 def dump(blocks: UnspacedList, _file: IO[Any]) -> None:
268 """Dump to a file.
269
270 :param UnspacedList block: The parsed tree
271 :param IO[Any] _file: The file stream to dump to. It must be opened with
272 Unicode encoding.
273 :rtype: None
274
275 """
276 _file.write(dumps(blocks))
277
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/certbot-nginx/certbot_nginx/_internal/nginxparser.py b/certbot-nginx/certbot_nginx/_internal/nginxparser.py
--- a/certbot-nginx/certbot_nginx/_internal/nginxparser.py
+++ b/certbot-nginx/certbot_nginx/_internal/nginxparser.py
@@ -9,7 +9,6 @@
from pyparsing import Forward
from pyparsing import Group
from pyparsing import Literal
-from pyparsing import OneOrMore
from pyparsing import Optional
from pyparsing import QuotedString
from pyparsing import Regex
@@ -57,7 +56,7 @@
block_innards = Group(ZeroOrMore(contents) + space).leaveWhitespace()
block << block_begin + left_bracket + block_innards + right_bracket
- script = OneOrMore(contents) + space + stringEnd
+ script = ZeroOrMore(contents) + space + stringEnd
script.parseWithTabs().leaveWhitespace()
def __init__(self, source):
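A quick way to see the effect of the `OneOrMore` to `ZeroOrMore` swap, included here only as an illustrative check rather than as part of the committed patch:

```python
# With ZeroOrMore the empty source is accepted: it parses to an empty tree
# and dumps back to an empty string, so emptied vhost files round-trip cleanly.
from certbot_nginx._internal.nginxparser import dumps, loads

tree = loads("")
assert list(tree) == []
assert dumps(tree) == ""
```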
| {"golden_diff": "diff --git a/certbot-nginx/certbot_nginx/_internal/nginxparser.py b/certbot-nginx/certbot_nginx/_internal/nginxparser.py\n--- a/certbot-nginx/certbot_nginx/_internal/nginxparser.py\n+++ b/certbot-nginx/certbot_nginx/_internal/nginxparser.py\n@@ -9,7 +9,6 @@\n from pyparsing import Forward\n from pyparsing import Group\n from pyparsing import Literal\n-from pyparsing import OneOrMore\n from pyparsing import Optional\n from pyparsing import QuotedString\n from pyparsing import Regex\n@@ -57,7 +56,7 @@\n block_innards = Group(ZeroOrMore(contents) + space).leaveWhitespace()\n block << block_begin + left_bracket + block_innards + right_bracket\n \n- script = OneOrMore(contents) + space + stringEnd\n+ script = ZeroOrMore(contents) + space + stringEnd\n script.parseWithTabs().leaveWhitespace()\n \n def __init__(self, source):\n", "issue": "nginx parser cannot handle empty file\nWe have some simple tooling that empties a vhosts config file to disable it. However certbot throws errors in the debug log about it.\r\n\r\nlatest python-certbot-nginx on ubuntu 18.04 PPA\r\n\r\n```\r\n2019-05-25 16:18:38,333:DEBUG:certbot_nginx.parser:Could not parse file: /etc/nginx/sites-generated/xdomain due to Expected {Group:({[<SPC><TAB><CR><LF>] \"#\" rest of line}) | Group:(Forward: ...) | Group:({[<SPC><TAB><CR><LF>] {Combine:({{quoted string, starting with \" ending with \" | quoted string, starting with ' ending with '} \")\" [Re:('(\\\\$\\\\{)|[^{;\\\\s]')]...}) | Combine:({Re:('(\\\\$\\\\{)|[^{};\\\\s\\'\\\\\"]') [Re:('(\\\\$\\\\{)|[^{;\\\\s]')]...}) | quoted string, starting with \" ending with \" | quoted string, starting with ' ending with '} [{<SPC><TAB><CR><LF> {Combine:({{quoted string, starting with \" ending with \" | quoted string, starting with ' ending with '} \")\" [Re:('(\\\\$\\\\{)|[^{;\\\\s]')]...}) | Combine:({Re:('(\\\\$\\\\{)|[^{};\\\\s\\'\\\\\"]') [Re:('(\\\\$\\\\{)|[^{;\\\\s]')]...}) | quoted string, starting with \" ending with \" | quoted string, starting with ' ending with '}}]... 
[<SPC><TAB><CR><LF>] Suppress:(\";\")})} (at char 1), (line:2, col:1)\r\n```\r\n\r\nIn this case `/etc/nginx/sites-generated/xdomain` would be pointing to an empty file, nginx is ok with that, the certbot parser isn't.\n", "before_files": [{"content": "\"\"\"Very low-level nginx config parser based on pyparsing.\"\"\"\n# Forked from https://github.com/fatiherikli/nginxparser (MIT Licensed)\nimport copy\nimport logging\nfrom typing import Any\nfrom typing import IO\n\nfrom pyparsing import Combine\nfrom pyparsing import Forward\nfrom pyparsing import Group\nfrom pyparsing import Literal\nfrom pyparsing import OneOrMore\nfrom pyparsing import Optional\nfrom pyparsing import QuotedString\nfrom pyparsing import Regex\nfrom pyparsing import restOfLine\nfrom pyparsing import stringEnd\nfrom pyparsing import White\nfrom pyparsing import ZeroOrMore\n\nlogger = logging.getLogger(__name__)\n\n\nclass RawNginxParser:\n # pylint: disable=pointless-statement\n \"\"\"A class that parses nginx configuration with pyparsing.\"\"\"\n\n # constants\n space = Optional(White()).leaveWhitespace()\n required_space = White().leaveWhitespace()\n\n left_bracket = Literal(\"{\").suppress()\n right_bracket = space + Literal(\"}\").suppress()\n semicolon = Literal(\";\").suppress()\n dquoted = QuotedString('\"', multiline=True, unquoteResults=False, escChar='\\\\')\n squoted = QuotedString(\"'\", multiline=True, unquoteResults=False, escChar='\\\\')\n quoted = dquoted | squoted\n head_tokenchars = Regex(r\"(\\$\\{)|[^{};\\s'\\\"]\") # if (last_space)\n tail_tokenchars = Regex(r\"(\\$\\{)|[^{;\\s]\") # else\n tokenchars = Combine(head_tokenchars + ZeroOrMore(tail_tokenchars))\n paren_quote_extend = Combine(quoted + Literal(')') + ZeroOrMore(tail_tokenchars))\n # note: ')' allows extension, but then we fall into else, not last_space.\n\n token = paren_quote_extend | tokenchars | quoted\n\n whitespace_token_group = space + token + ZeroOrMore(required_space + token) + space\n assignment = whitespace_token_group + semicolon\n\n comment = space + Literal('#') + restOfLine\n\n block = Forward()\n\n # order matters! 
see issue 518, and also http { # server { \\n}\n contents = Group(comment) | Group(block) | Group(assignment)\n\n block_begin = Group(whitespace_token_group)\n block_innards = Group(ZeroOrMore(contents) + space).leaveWhitespace()\n block << block_begin + left_bracket + block_innards + right_bracket\n\n script = OneOrMore(contents) + space + stringEnd\n script.parseWithTabs().leaveWhitespace()\n\n def __init__(self, source):\n self.source = source\n\n def parse(self):\n \"\"\"Returns the parsed tree.\"\"\"\n return self.script.parseString(self.source)\n\n def as_list(self):\n \"\"\"Returns the parsed tree as a list.\"\"\"\n return self.parse().asList()\n\nclass RawNginxDumper:\n \"\"\"A class that dumps nginx configuration from the provided tree.\"\"\"\n def __init__(self, blocks):\n self.blocks = blocks\n\n def __iter__(self, blocks=None):\n \"\"\"Iterates the dumped nginx content.\"\"\"\n blocks = blocks or self.blocks\n for b0 in blocks:\n if isinstance(b0, str):\n yield b0\n continue\n item = copy.deepcopy(b0)\n if spacey(item[0]):\n yield item.pop(0) # indentation\n if not item:\n continue\n\n if isinstance(item[0], list): # block\n yield \"\".join(item.pop(0)) + '{'\n for parameter in item.pop(0):\n for line in self.__iter__([parameter]): # negate \"for b0 in blocks\"\n yield line\n yield '}'\n else: # not a block - list of strings\n semicolon = \";\"\n if isinstance(item[0], str) and item[0].strip() == '#': # comment\n semicolon = \"\"\n yield \"\".join(item) + semicolon\n\n def __str__(self):\n \"\"\"Return the parsed block as a string.\"\"\"\n return ''.join(self)\n\n\nspacey = lambda x: (isinstance(x, str) and x.isspace()) or x == ''\n\n\nclass UnspacedList(list):\n \"\"\"Wrap a list [of lists], making any whitespace entries magically invisible\"\"\"\n\n def __init__(self, list_source):\n # ensure our argument is not a generator, and duplicate any sublists\n self.spaced = copy.deepcopy(list(list_source))\n self.dirty = False\n\n # Turn self into a version of the source list that has spaces removed\n # and all sub-lists also UnspacedList()ed\n list.__init__(self, list_source)\n for i, entry in reversed(list(enumerate(self))):\n if isinstance(entry, list):\n sublist = UnspacedList(entry)\n list.__setitem__(self, i, sublist)\n self.spaced[i] = sublist.spaced\n elif spacey(entry):\n # don't delete comments\n if \"#\" not in self[:i]:\n list.__delitem__(self, i)\n\n def _coerce(self, inbound):\n \"\"\"\n Coerce some inbound object to be appropriately usable in this object\n\n :param inbound: string or None or list or UnspacedList\n :returns: (coerced UnspacedList or string or None, spaced equivalent)\n :rtype: tuple\n\n \"\"\"\n if not isinstance(inbound, list): # str or None\n return inbound, inbound\n else:\n if not hasattr(inbound, \"spaced\"):\n inbound = UnspacedList(inbound)\n return inbound, inbound.spaced\n\n def insert(self, i, x):\n item, spaced_item = self._coerce(x)\n slicepos = self._spaced_position(i) if i < len(self) else len(self.spaced)\n self.spaced.insert(slicepos, spaced_item)\n if not spacey(item):\n list.insert(self, i, item)\n self.dirty = True\n\n def append(self, x):\n item, spaced_item = self._coerce(x)\n self.spaced.append(spaced_item)\n if not spacey(item):\n list.append(self, item)\n self.dirty = True\n\n def extend(self, x):\n item, spaced_item = self._coerce(x)\n self.spaced.extend(spaced_item)\n list.extend(self, item)\n self.dirty = True\n\n def __add__(self, other):\n l = copy.deepcopy(self)\n l.extend(other)\n l.dirty = True\n return l\n\n def 
pop(self, _i=None):\n raise NotImplementedError(\"UnspacedList.pop() not yet implemented\")\n def remove(self, _):\n raise NotImplementedError(\"UnspacedList.remove() not yet implemented\")\n def reverse(self):\n raise NotImplementedError(\"UnspacedList.reverse() not yet implemented\")\n def sort(self, _cmp=None, _key=None, _Rev=None):\n raise NotImplementedError(\"UnspacedList.sort() not yet implemented\")\n def __setslice__(self, _i, _j, _newslice):\n raise NotImplementedError(\"Slice operations on UnspacedLists not yet implemented\")\n\n def __setitem__(self, i, value):\n if isinstance(i, slice):\n raise NotImplementedError(\"Slice operations on UnspacedLists not yet implemented\")\n item, spaced_item = self._coerce(value)\n self.spaced.__setitem__(self._spaced_position(i), spaced_item)\n if not spacey(item):\n list.__setitem__(self, i, item)\n self.dirty = True\n\n def __delitem__(self, i):\n self.spaced.__delitem__(self._spaced_position(i))\n list.__delitem__(self, i)\n self.dirty = True\n\n def __deepcopy__(self, memo):\n new_spaced = copy.deepcopy(self.spaced, memo=memo)\n l = UnspacedList(new_spaced)\n l.dirty = self.dirty\n return l\n\n def is_dirty(self):\n \"\"\"Recurse through the parse tree to figure out if any sublists are dirty\"\"\"\n if self.dirty:\n return True\n return any((isinstance(x, UnspacedList) and x.is_dirty() for x in self))\n\n def _spaced_position(self, idx):\n \"Convert from indexes in the unspaced list to positions in the spaced one\"\n pos = spaces = 0\n # Normalize indexes like list[-1] etc, and save the result\n if idx < 0:\n idx = len(self) + idx\n if not 0 <= idx < len(self):\n raise IndexError(\"list index out of range\")\n idx0 = idx\n # Count the number of spaces in the spaced list before idx in the unspaced one\n while idx != -1:\n if spacey(self.spaced[pos]):\n spaces += 1\n else:\n idx -= 1\n pos += 1\n return idx0 + spaces\n\n\n# Shortcut functions to respect Python's serialization interface\n# (like pyyaml, picker or json)\n\ndef loads(source):\n \"\"\"Parses from a string.\n\n :param str source: The string to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return UnspacedList(RawNginxParser(source).as_list())\n\n\ndef load(_file):\n \"\"\"Parses from a file.\n\n :param file _file: The file to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return loads(_file.read())\n\n\ndef dumps(blocks: UnspacedList) -> str:\n \"\"\"Dump to a Unicode string.\n\n :param UnspacedList block: The parsed tree\n :rtype: six.text_type\n\n \"\"\"\n return str(RawNginxDumper(blocks.spaced))\n\n\ndef dump(blocks: UnspacedList, _file: IO[Any]) -> None:\n \"\"\"Dump to a file.\n\n :param UnspacedList block: The parsed tree\n :param IO[Any] _file: The file stream to dump to. 
It must be opened with\n Unicode encoding.\n :rtype: None\n\n \"\"\"\n _file.write(dumps(blocks))\n", "path": "certbot-nginx/certbot_nginx/_internal/nginxparser.py"}], "after_files": [{"content": "\"\"\"Very low-level nginx config parser based on pyparsing.\"\"\"\n# Forked from https://github.com/fatiherikli/nginxparser (MIT Licensed)\nimport copy\nimport logging\nfrom typing import Any\nfrom typing import IO\n\nfrom pyparsing import Combine\nfrom pyparsing import Forward\nfrom pyparsing import Group\nfrom pyparsing import Literal\nfrom pyparsing import Optional\nfrom pyparsing import QuotedString\nfrom pyparsing import Regex\nfrom pyparsing import restOfLine\nfrom pyparsing import stringEnd\nfrom pyparsing import White\nfrom pyparsing import ZeroOrMore\n\nlogger = logging.getLogger(__name__)\n\n\nclass RawNginxParser:\n # pylint: disable=pointless-statement\n \"\"\"A class that parses nginx configuration with pyparsing.\"\"\"\n\n # constants\n space = Optional(White()).leaveWhitespace()\n required_space = White().leaveWhitespace()\n\n left_bracket = Literal(\"{\").suppress()\n right_bracket = space + Literal(\"}\").suppress()\n semicolon = Literal(\";\").suppress()\n dquoted = QuotedString('\"', multiline=True, unquoteResults=False, escChar='\\\\')\n squoted = QuotedString(\"'\", multiline=True, unquoteResults=False, escChar='\\\\')\n quoted = dquoted | squoted\n head_tokenchars = Regex(r\"(\\$\\{)|[^{};\\s'\\\"]\") # if (last_space)\n tail_tokenchars = Regex(r\"(\\$\\{)|[^{;\\s]\") # else\n tokenchars = Combine(head_tokenchars + ZeroOrMore(tail_tokenchars))\n paren_quote_extend = Combine(quoted + Literal(')') + ZeroOrMore(tail_tokenchars))\n # note: ')' allows extension, but then we fall into else, not last_space.\n\n token = paren_quote_extend | tokenchars | quoted\n\n whitespace_token_group = space + token + ZeroOrMore(required_space + token) + space\n assignment = whitespace_token_group + semicolon\n\n comment = space + Literal('#') + restOfLine\n\n block = Forward()\n\n # order matters! 
see issue 518, and also http { # server { \\n}\n contents = Group(comment) | Group(block) | Group(assignment)\n\n block_begin = Group(whitespace_token_group)\n block_innards = Group(ZeroOrMore(contents) + space).leaveWhitespace()\n block << block_begin + left_bracket + block_innards + right_bracket\n\n script = ZeroOrMore(contents) + space + stringEnd\n script.parseWithTabs().leaveWhitespace()\n\n def __init__(self, source):\n self.source = source\n\n def parse(self):\n \"\"\"Returns the parsed tree.\"\"\"\n return self.script.parseString(self.source)\n\n def as_list(self):\n \"\"\"Returns the parsed tree as a list.\"\"\"\n return self.parse().asList()\n\nclass RawNginxDumper:\n \"\"\"A class that dumps nginx configuration from the provided tree.\"\"\"\n def __init__(self, blocks):\n self.blocks = blocks\n\n def __iter__(self, blocks=None):\n \"\"\"Iterates the dumped nginx content.\"\"\"\n blocks = blocks or self.blocks\n for b0 in blocks:\n if isinstance(b0, str):\n yield b0\n continue\n item = copy.deepcopy(b0)\n if spacey(item[0]):\n yield item.pop(0) # indentation\n if not item:\n continue\n\n if isinstance(item[0], list): # block\n yield \"\".join(item.pop(0)) + '{'\n for parameter in item.pop(0):\n for line in self.__iter__([parameter]): # negate \"for b0 in blocks\"\n yield line\n yield '}'\n else: # not a block - list of strings\n semicolon = \";\"\n if isinstance(item[0], str) and item[0].strip() == '#': # comment\n semicolon = \"\"\n yield \"\".join(item) + semicolon\n\n def __str__(self):\n \"\"\"Return the parsed block as a string.\"\"\"\n return ''.join(self)\n\n\nspacey = lambda x: (isinstance(x, str) and x.isspace()) or x == ''\n\n\nclass UnspacedList(list):\n \"\"\"Wrap a list [of lists], making any whitespace entries magically invisible\"\"\"\n\n def __init__(self, list_source):\n # ensure our argument is not a generator, and duplicate any sublists\n self.spaced = copy.deepcopy(list(list_source))\n self.dirty = False\n\n # Turn self into a version of the source list that has spaces removed\n # and all sub-lists also UnspacedList()ed\n list.__init__(self, list_source)\n for i, entry in reversed(list(enumerate(self))):\n if isinstance(entry, list):\n sublist = UnspacedList(entry)\n list.__setitem__(self, i, sublist)\n self.spaced[i] = sublist.spaced\n elif spacey(entry):\n # don't delete comments\n if \"#\" not in self[:i]:\n list.__delitem__(self, i)\n\n def _coerce(self, inbound):\n \"\"\"\n Coerce some inbound object to be appropriately usable in this object\n\n :param inbound: string or None or list or UnspacedList\n :returns: (coerced UnspacedList or string or None, spaced equivalent)\n :rtype: tuple\n\n \"\"\"\n if not isinstance(inbound, list): # str or None\n return inbound, inbound\n else:\n if not hasattr(inbound, \"spaced\"):\n inbound = UnspacedList(inbound)\n return inbound, inbound.spaced\n\n def insert(self, i, x):\n item, spaced_item = self._coerce(x)\n slicepos = self._spaced_position(i) if i < len(self) else len(self.spaced)\n self.spaced.insert(slicepos, spaced_item)\n if not spacey(item):\n list.insert(self, i, item)\n self.dirty = True\n\n def append(self, x):\n item, spaced_item = self._coerce(x)\n self.spaced.append(spaced_item)\n if not spacey(item):\n list.append(self, item)\n self.dirty = True\n\n def extend(self, x):\n item, spaced_item = self._coerce(x)\n self.spaced.extend(spaced_item)\n list.extend(self, item)\n self.dirty = True\n\n def __add__(self, other):\n l = copy.deepcopy(self)\n l.extend(other)\n l.dirty = True\n return l\n\n def 
pop(self, _i=None):\n raise NotImplementedError(\"UnspacedList.pop() not yet implemented\")\n def remove(self, _):\n raise NotImplementedError(\"UnspacedList.remove() not yet implemented\")\n def reverse(self):\n raise NotImplementedError(\"UnspacedList.reverse() not yet implemented\")\n def sort(self, _cmp=None, _key=None, _Rev=None):\n raise NotImplementedError(\"UnspacedList.sort() not yet implemented\")\n def __setslice__(self, _i, _j, _newslice):\n raise NotImplementedError(\"Slice operations on UnspacedLists not yet implemented\")\n\n def __setitem__(self, i, value):\n if isinstance(i, slice):\n raise NotImplementedError(\"Slice operations on UnspacedLists not yet implemented\")\n item, spaced_item = self._coerce(value)\n self.spaced.__setitem__(self._spaced_position(i), spaced_item)\n if not spacey(item):\n list.__setitem__(self, i, item)\n self.dirty = True\n\n def __delitem__(self, i):\n self.spaced.__delitem__(self._spaced_position(i))\n list.__delitem__(self, i)\n self.dirty = True\n\n def __deepcopy__(self, memo):\n new_spaced = copy.deepcopy(self.spaced, memo=memo)\n l = UnspacedList(new_spaced)\n l.dirty = self.dirty\n return l\n\n def is_dirty(self):\n \"\"\"Recurse through the parse tree to figure out if any sublists are dirty\"\"\"\n if self.dirty:\n return True\n return any((isinstance(x, UnspacedList) and x.is_dirty() for x in self))\n\n def _spaced_position(self, idx):\n \"Convert from indexes in the unspaced list to positions in the spaced one\"\n pos = spaces = 0\n # Normalize indexes like list[-1] etc, and save the result\n if idx < 0:\n idx = len(self) + idx\n if not 0 <= idx < len(self):\n raise IndexError(\"list index out of range\")\n idx0 = idx\n # Count the number of spaces in the spaced list before idx in the unspaced one\n while idx != -1:\n if spacey(self.spaced[pos]):\n spaces += 1\n else:\n idx -= 1\n pos += 1\n return idx0 + spaces\n\n\n# Shortcut functions to respect Python's serialization interface\n# (like pyyaml, picker or json)\n\ndef loads(source):\n \"\"\"Parses from a string.\n\n :param str source: The string to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return UnspacedList(RawNginxParser(source).as_list())\n\n\ndef load(_file):\n \"\"\"Parses from a file.\n\n :param file _file: The file to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return loads(_file.read())\n\n\ndef dumps(blocks: UnspacedList) -> str:\n \"\"\"Dump to a Unicode string.\n\n :param UnspacedList block: The parsed tree\n :rtype: six.text_type\n\n \"\"\"\n return str(RawNginxDumper(blocks.spaced))\n\n\ndef dump(blocks: UnspacedList, _file: IO[Any]) -> None:\n \"\"\"Dump to a file.\n\n :param UnspacedList block: The parsed tree\n :param IO[Any] _file: The file stream to dump to. It must be opened with\n Unicode encoding.\n :rtype: None\n\n \"\"\"\n _file.write(dumps(blocks))\n", "path": "certbot-nginx/certbot_nginx/_internal/nginxparser.py"}]} | 3,668 | 234 |
gh_patches_debug_6921 | rasdani/github-patches | git_diff | plotly__dash-2513 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Exception when property of patched_fig is viewed
I know that it is currently not supported to view properties of `patched_fig = Patch()`, but when, for example, iterating over trace names like so:
```
for trace in patched_fig['data']:
print(trace['name'])
```
no exception or error message is thrown; instead there is an endless stream of
```
...
<dash._patch.Patch object at 0x7f3b89a89b80>
<dash._patch.Patch object at 0x7f3b8305c0a0>
<dash._patch.Patch object at 0x7f3b89a89b80>
<dash._patch.Patch object at 0x7f3b8305c0a0>
<dash._patch.Patch object at 0x7f3b89a89b80>
<dash._patch.Patch object at 0x7f3b8305c0a0>
...
```
This is not exactly intended, right?
I got there by trying to delete a trace of patched_fig by its name, which otherwise appears not to be possible (or is it?)
--- END ISSUE ---
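For context on why the loop above never terminates: `Patch` defines `__getitem__` but (before this fix) no `__iter__`, so Python falls back to the legacy sequence protocol and calls `__getitem__(0)`, `__getitem__(1)`, and so on. Every call returns a fresh write-only `Patch` proxy and never raises `IndexError`, so the `for` loop has no stopping condition. A short sketch, assuming a Dash version that exports `Patch` from the top-level package:

```python
# Each index lookup just builds a new Patch proxy pointing one level deeper;
# nothing ever signals "end of sequence" to the iteration machinery.
from dash import Patch

patched_fig = Patch()
data = patched_fig["data"]
print(data[0])  # <dash._patch.Patch object ...> - a proxy, not a real trace
print(data[1])  # another proxy; a for-loop over `data` would never stop
```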
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dash/_patch.py`
Content:
```
1 def _operation(name, location, **kwargs):
2 return {"operation": name, "location": location, "params": dict(**kwargs)}
3
4
5 _noop = object()
6
7
8 def validate_slice(obj):
9 if isinstance(obj, slice):
10 raise TypeError("a slice is not a valid index for patch")
11
12
13 class Patch:
14 """
15 Patch a callback output value
16
17 Act like a proxy of the output prop value on the frontend.
18
19 Supported prop types: Dictionaries and lists.
20 """
21
22 def __init__(self, location=None, parent=None):
23 if location is not None:
24 self._location = location
25 else:
26 # pylint: disable=consider-using-ternary
27 self._location = (parent and parent._location) or []
28 if parent is not None:
29 self._operations = parent._operations
30 else:
31 self._operations = []
32
33 def __getstate__(self):
34 return vars(self)
35
36 def __setstate__(self, state):
37 vars(self).update(state)
38
39 def __getitem__(self, item):
40 validate_slice(item)
41 return Patch(location=self._location + [item], parent=self)
42
43 def __getattr__(self, item):
44 if item == "tolist":
45 # to_json fix
46 raise AttributeError
47 if item == "_location":
48 return self._location
49 if item == "_operations":
50 return self._operations
51 return self.__getitem__(item)
52
53 def __setattr__(self, key, value):
54 if key in ("_location", "_operations"):
55 self.__dict__[key] = value
56 else:
57 self.__setitem__(key, value)
58
59 def __delattr__(self, item):
60 self.__delitem__(item)
61
62 def __setitem__(self, key, value):
63 validate_slice(key)
64 if value is _noop:
65 # The += set themselves.
66 return
67 self._operations.append(
68 _operation(
69 "Assign",
70 self._location + [key],
71 value=value,
72 )
73 )
74
75 def __delitem__(self, key):
76 validate_slice(key)
77 self._operations.append(_operation("Delete", self._location + [key]))
78
79 def __iadd__(self, other):
80 if isinstance(other, (list, tuple)):
81 self.extend(other)
82 else:
83 self._operations.append(_operation("Add", self._location, value=other))
84 return _noop
85
86 def __isub__(self, other):
87 self._operations.append(_operation("Sub", self._location, value=other))
88 return _noop
89
90 def __imul__(self, other):
91 self._operations.append(_operation("Mul", self._location, value=other))
92 return _noop
93
94 def __itruediv__(self, other):
95 self._operations.append(_operation("Div", self._location, value=other))
96 return _noop
97
98 def __ior__(self, other):
99 self.update(E=other)
100 return _noop
101
102 def append(self, item):
103 """Add the item to the end of a list"""
104 self._operations.append(_operation("Append", self._location, value=item))
105
106 def prepend(self, item):
107 """Add the item to the start of a list"""
108 self._operations.append(_operation("Prepend", self._location, value=item))
109
110 def insert(self, index, item):
111 """Add the item at the index of a list"""
112 self._operations.append(
113 _operation("Insert", self._location, value=item, index=index)
114 )
115
116 def clear(self):
117 """Remove all items in a list"""
118 self._operations.append(_operation("Clear", self._location))
119
120 def reverse(self):
121 """Reversal of the order of items in a list"""
122 self._operations.append(_operation("Reverse", self._location))
123
124 def extend(self, item):
125 """Add all the items to the end of a list"""
126 if not isinstance(item, (list, tuple)):
127 raise TypeError(f"{item} should be a list or tuple")
128 self._operations.append(_operation("Extend", self._location, value=item))
129
130 def remove(self, item):
131 """filter the item out of a list on the frontend"""
132 self._operations.append(_operation("Remove", self._location, value=item))
133
134 def update(self, E=None, **F):
135 """Merge a dict or keyword arguments with another dictionary"""
136 value = E or {}
137 value.update(F)
138 self._operations.append(_operation("Merge", self._location, value=value))
139
140 # pylint: disable=no-self-use
141 def sort(self):
142 raise KeyError(
143 "sort is reserved for future use, use brackets to access this key on your object"
144 )
145
146 def to_plotly_json(self):
147 return {
148 "__dash_patch_update": "__dash_patch_update",
149 "operations": self._operations,
150 }
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dash/_patch.py b/dash/_patch.py
--- a/dash/_patch.py
+++ b/dash/_patch.py
@@ -99,6 +99,12 @@
self.update(E=other)
return _noop
+ def __iter__(self):
+ raise TypeError("Patch objects are write-only, you cannot iterate them.")
+
+ def __repr__(self):
+ return f"<write-only dash.Patch object at {self._location}>"
+
def append(self, item):
"""Add the item to the end of a list"""
self._operations.append(_operation("Append", self._location, value=item))
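With the two methods added above, the silent infinite loop turns into an immediate, descriptive error, and printing a proxy now shows its location instead of a bare object address. An illustrative check of the new behaviour:

```python
# Iteration now fails fast with a clear message, and repr() is readable.
from dash import Patch

patched_fig = Patch()
try:
    for trace in patched_fig["data"]:
        pass
except TypeError as err:
    print(err)  # Patch objects are write-only, you cannot iterate them.

print(patched_fig["data"])  # <write-only dash.Patch object at ['data']>
```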
| {"golden_diff": "diff --git a/dash/_patch.py b/dash/_patch.py\n--- a/dash/_patch.py\n+++ b/dash/_patch.py\n@@ -99,6 +99,12 @@\n self.update(E=other)\n return _noop\n \n+ def __iter__(self):\n+ raise TypeError(\"Patch objects are write-only, you cannot iterate them.\")\n+\n+ def __repr__(self):\n+ return f\"<write-only dash.Patch object at {self._location}>\"\n+\n def append(self, item):\n \"\"\"Add the item to the end of a list\"\"\"\n self._operations.append(_operation(\"Append\", self._location, value=item))\n", "issue": "[BUG] Exception when property of patched_fig is viewed\nI know that it is currently not supported to view properties of `patch_fig=Patch()` but when e.g. iterating over trace names like so:\r\n```\r\n for trace in patched_fig['data']:\r\n print(trace['name'])\r\n```\r\nno exception or error message is thrown but an endless stream of \r\n\r\n```\r\n...\r\n<dash._patch.Patch object at 0x7f3b89a89b80>\r\n<dash._patch.Patch object at 0x7f3b8305c0a0>\r\n<dash._patch.Patch object at 0x7f3b89a89b80>\r\n<dash._patch.Patch object at 0x7f3b8305c0a0>\r\n<dash._patch.Patch object at 0x7f3b89a89b80>\r\n<dash._patch.Patch object at 0x7f3b8305c0a0>\r\n...\r\n```\r\nThis is not exactly intended right?\r\n\r\nI got there by trying to delete a trace of patched_fig by its name which otherwise appears not be possible (or is it?)\r\n\n", "before_files": [{"content": "def _operation(name, location, **kwargs):\n return {\"operation\": name, \"location\": location, \"params\": dict(**kwargs)}\n\n\n_noop = object()\n\n\ndef validate_slice(obj):\n if isinstance(obj, slice):\n raise TypeError(\"a slice is not a valid index for patch\")\n\n\nclass Patch:\n \"\"\"\n Patch a callback output value\n\n Act like a proxy of the output prop value on the frontend.\n\n Supported prop types: Dictionaries and lists.\n \"\"\"\n\n def __init__(self, location=None, parent=None):\n if location is not None:\n self._location = location\n else:\n # pylint: disable=consider-using-ternary\n self._location = (parent and parent._location) or []\n if parent is not None:\n self._operations = parent._operations\n else:\n self._operations = []\n\n def __getstate__(self):\n return vars(self)\n\n def __setstate__(self, state):\n vars(self).update(state)\n\n def __getitem__(self, item):\n validate_slice(item)\n return Patch(location=self._location + [item], parent=self)\n\n def __getattr__(self, item):\n if item == \"tolist\":\n # to_json fix\n raise AttributeError\n if item == \"_location\":\n return self._location\n if item == \"_operations\":\n return self._operations\n return self.__getitem__(item)\n\n def __setattr__(self, key, value):\n if key in (\"_location\", \"_operations\"):\n self.__dict__[key] = value\n else:\n self.__setitem__(key, value)\n\n def __delattr__(self, item):\n self.__delitem__(item)\n\n def __setitem__(self, key, value):\n validate_slice(key)\n if value is _noop:\n # The += set themselves.\n return\n self._operations.append(\n _operation(\n \"Assign\",\n self._location + [key],\n value=value,\n )\n )\n\n def __delitem__(self, key):\n validate_slice(key)\n self._operations.append(_operation(\"Delete\", self._location + [key]))\n\n def __iadd__(self, other):\n if isinstance(other, (list, tuple)):\n self.extend(other)\n else:\n self._operations.append(_operation(\"Add\", self._location, value=other))\n return _noop\n\n def __isub__(self, other):\n self._operations.append(_operation(\"Sub\", self._location, value=other))\n return _noop\n\n def __imul__(self, other):\n 
self._operations.append(_operation(\"Mul\", self._location, value=other))\n return _noop\n\n def __itruediv__(self, other):\n self._operations.append(_operation(\"Div\", self._location, value=other))\n return _noop\n\n def __ior__(self, other):\n self.update(E=other)\n return _noop\n\n def append(self, item):\n \"\"\"Add the item to the end of a list\"\"\"\n self._operations.append(_operation(\"Append\", self._location, value=item))\n\n def prepend(self, item):\n \"\"\"Add the item to the start of a list\"\"\"\n self._operations.append(_operation(\"Prepend\", self._location, value=item))\n\n def insert(self, index, item):\n \"\"\"Add the item at the index of a list\"\"\"\n self._operations.append(\n _operation(\"Insert\", self._location, value=item, index=index)\n )\n\n def clear(self):\n \"\"\"Remove all items in a list\"\"\"\n self._operations.append(_operation(\"Clear\", self._location))\n\n def reverse(self):\n \"\"\"Reversal of the order of items in a list\"\"\"\n self._operations.append(_operation(\"Reverse\", self._location))\n\n def extend(self, item):\n \"\"\"Add all the items to the end of a list\"\"\"\n if not isinstance(item, (list, tuple)):\n raise TypeError(f\"{item} should be a list or tuple\")\n self._operations.append(_operation(\"Extend\", self._location, value=item))\n\n def remove(self, item):\n \"\"\"filter the item out of a list on the frontend\"\"\"\n self._operations.append(_operation(\"Remove\", self._location, value=item))\n\n def update(self, E=None, **F):\n \"\"\"Merge a dict or keyword arguments with another dictionary\"\"\"\n value = E or {}\n value.update(F)\n self._operations.append(_operation(\"Merge\", self._location, value=value))\n\n # pylint: disable=no-self-use\n def sort(self):\n raise KeyError(\n \"sort is reserved for future use, use brackets to access this key on your object\"\n )\n\n def to_plotly_json(self):\n return {\n \"__dash_patch_update\": \"__dash_patch_update\",\n \"operations\": self._operations,\n }\n", "path": "dash/_patch.py"}], "after_files": [{"content": "def _operation(name, location, **kwargs):\n return {\"operation\": name, \"location\": location, \"params\": dict(**kwargs)}\n\n\n_noop = object()\n\n\ndef validate_slice(obj):\n if isinstance(obj, slice):\n raise TypeError(\"a slice is not a valid index for patch\")\n\n\nclass Patch:\n \"\"\"\n Patch a callback output value\n\n Act like a proxy of the output prop value on the frontend.\n\n Supported prop types: Dictionaries and lists.\n \"\"\"\n\n def __init__(self, location=None, parent=None):\n if location is not None:\n self._location = location\n else:\n # pylint: disable=consider-using-ternary\n self._location = (parent and parent._location) or []\n if parent is not None:\n self._operations = parent._operations\n else:\n self._operations = []\n\n def __getstate__(self):\n return vars(self)\n\n def __setstate__(self, state):\n vars(self).update(state)\n\n def __getitem__(self, item):\n validate_slice(item)\n return Patch(location=self._location + [item], parent=self)\n\n def __getattr__(self, item):\n if item == \"tolist\":\n # to_json fix\n raise AttributeError\n if item == \"_location\":\n return self._location\n if item == \"_operations\":\n return self._operations\n return self.__getitem__(item)\n\n def __setattr__(self, key, value):\n if key in (\"_location\", \"_operations\"):\n self.__dict__[key] = value\n else:\n self.__setitem__(key, value)\n\n def __delattr__(self, item):\n self.__delitem__(item)\n\n def __setitem__(self, key, value):\n validate_slice(key)\n if value 
is _noop:\n # The += set themselves.\n return\n self._operations.append(\n _operation(\n \"Assign\",\n self._location + [key],\n value=value,\n )\n )\n\n def __delitem__(self, key):\n validate_slice(key)\n self._operations.append(_operation(\"Delete\", self._location + [key]))\n\n def __iadd__(self, other):\n if isinstance(other, (list, tuple)):\n self.extend(other)\n else:\n self._operations.append(_operation(\"Add\", self._location, value=other))\n return _noop\n\n def __isub__(self, other):\n self._operations.append(_operation(\"Sub\", self._location, value=other))\n return _noop\n\n def __imul__(self, other):\n self._operations.append(_operation(\"Mul\", self._location, value=other))\n return _noop\n\n def __itruediv__(self, other):\n self._operations.append(_operation(\"Div\", self._location, value=other))\n return _noop\n\n def __ior__(self, other):\n self.update(E=other)\n return _noop\n\n def __iter__(self):\n raise TypeError(\"Patch objects are write-only, you cannot iterate them.\")\n\n def __repr__(self):\n return f\"<write-only dash.Patch object at {self._location}>\"\n\n def append(self, item):\n \"\"\"Add the item to the end of a list\"\"\"\n self._operations.append(_operation(\"Append\", self._location, value=item))\n\n def prepend(self, item):\n \"\"\"Add the item to the start of a list\"\"\"\n self._operations.append(_operation(\"Prepend\", self._location, value=item))\n\n def insert(self, index, item):\n \"\"\"Add the item at the index of a list\"\"\"\n self._operations.append(\n _operation(\"Insert\", self._location, value=item, index=index)\n )\n\n def clear(self):\n \"\"\"Remove all items in a list\"\"\"\n self._operations.append(_operation(\"Clear\", self._location))\n\n def reverse(self):\n \"\"\"Reversal of the order of items in a list\"\"\"\n self._operations.append(_operation(\"Reverse\", self._location))\n\n def extend(self, item):\n \"\"\"Add all the items to the end of a list\"\"\"\n if not isinstance(item, (list, tuple)):\n raise TypeError(f\"{item} should be a list or tuple\")\n self._operations.append(_operation(\"Extend\", self._location, value=item))\n\n def remove(self, item):\n \"\"\"filter the item out of a list on the frontend\"\"\"\n self._operations.append(_operation(\"Remove\", self._location, value=item))\n\n def update(self, E=None, **F):\n \"\"\"Merge a dict or keyword arguments with another dictionary\"\"\"\n value = E or {}\n value.update(F)\n self._operations.append(_operation(\"Merge\", self._location, value=value))\n\n # pylint: disable=no-self-use\n def sort(self):\n raise KeyError(\n \"sort is reserved for future use, use brackets to access this key on your object\"\n )\n\n def to_plotly_json(self):\n return {\n \"__dash_patch_update\": \"__dash_patch_update\",\n \"operations\": self._operations,\n }\n", "path": "dash/_patch.py"}]} | 1,924 | 145 |
gh_patches_debug_5255 | rasdani/github-patches | git_diff | wagtail__wagtail-1666 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError when passwords do not match on user creation form
Wagtail will crash with the following error if the two passwords don't match on the user creation form:
```
AttributeError at /admin/users/add/
'UserCreationForm' object has no attribute 'error_messages'
```
On this line: https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailusers/forms.py#L92
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailusers/forms.py`
Content:
```
1 from django import forms
2 from django.contrib.auth import get_user_model
3 from django.utils.translation import ugettext_lazy as _
4 from django.contrib.auth.models import Group, Permission
5 from django.forms.models import inlineformset_factory
6
7 from wagtail.wagtailcore import hooks
8 from wagtail.wagtailadmin.widgets import AdminPageChooser
9 from wagtail.wagtailusers.models import UserProfile
10 from wagtail.wagtailcore.models import Page, UserPagePermissionsProxy, GroupPagePermission
11
12
13 User = get_user_model()
14
15 # The standard fields each user model is expected to have, as a minimum.
16 standard_fields = set(['email', 'first_name', 'last_name', 'is_superuser', 'groups'])
17
18
19 class UsernameForm(forms.ModelForm):
20 """
21 Intelligently sets up the username field if it is infact a username. If the
22 User model has been swapped out, and the username field is an email or
23 something else, dont touch it.
24 """
25 def __init__(self, *args, **kwargs):
26 super(UsernameForm, self).__init__(*args, **kwargs)
27 if User.USERNAME_FIELD == 'username':
28 field = self.fields['username']
29 field.regex = r"^[\w.@+-]+$"
30 field.help_text = _("Required. 30 characters or fewer. Letters, "
31 "digits and @/./+/-/_ only.")
32 field.error_messages = field.error_messages.copy()
33 field.error_messages.update({
34 'invalid': _("This value may contain only letters, numbers "
35 "and @/./+/-/_ characters.")})
36
37 @property
38 def username_field(self):
39 return self[User.USERNAME_FIELD]
40
41 def separate_username_field(self):
42 return User.USERNAME_FIELD not in standard_fields
43
44
45 class UserCreationForm(UsernameForm):
46
47 required_css_class = "required"
48 is_superuser = forms.BooleanField(
49 label=_("Administrator"),
50 required=False,
51 help_text=_("If ticked, this user has the ability to manage user accounts.")
52 )
53
54 password1 = forms.CharField(
55 label=_("Password"),
56 required=False,
57 widget=forms.PasswordInput,
58 help_text=_("Leave blank if not changing."))
59 password2 = forms.CharField(
60 label=_("Password confirmation"), required=False,
61 widget=forms.PasswordInput,
62 help_text=_("Enter the same password as above, for verification."))
63
64 email = forms.EmailField(required=True, label=_("Email"))
65 first_name = forms.CharField(required=True, label=_("First Name"))
66 last_name = forms.CharField(required=True, label=_("Last Name"))
67
68 class Meta:
69 model = User
70 fields = set([User.USERNAME_FIELD]) | standard_fields
71 widgets = {
72 'groups': forms.CheckboxSelectMultiple
73 }
74
75 def clean_username(self):
76 username_field = User.USERNAME_FIELD
77 username = self.cleaned_data[username_field]
78 try:
79 User._default_manager.get(**{username_field: username})
80 except User.DoesNotExist:
81 return username
82 raise forms.ValidationError(
83 self.error_messages['duplicate_username'],
84 code='duplicate_username',
85 )
86
87 def clean_password2(self):
88 password1 = self.cleaned_data.get("password1")
89 password2 = self.cleaned_data.get("password2")
90 if password1 and password2 and password1 != password2:
91 raise forms.ValidationError(
92 self.error_messages['password_mismatch'],
93 code='password_mismatch',
94 )
95 return password2
96
97 def save(self, commit=True):
98 user = super(UserCreationForm, self).save(commit=False)
99 user.set_password(self.cleaned_data["password1"])
100
101 # users can access django-admin iff they are a superuser
102 user.is_staff = user.is_superuser
103
104 if commit:
105 user.save()
106 self.save_m2m()
107 return user
108
109
110 # Largely the same as django.contrib.auth.forms.UserCreationForm, but with enough subtle changes
111 # (to make password non-required) that it isn't worth inheriting...
112 class UserEditForm(UsernameForm):
113 required_css_class = "required"
114
115 error_messages = {
116 'duplicate_username': _("A user with that username already exists."),
117 'password_mismatch': _("The two password fields didn't match."),
118 }
119
120 email = forms.EmailField(required=True, label=_("Email"))
121 first_name = forms.CharField(required=True, label=_("First Name"))
122 last_name = forms.CharField(required=True, label=_("Last Name"))
123
124 password1 = forms.CharField(
125 label=_("Password"),
126 required=False,
127 widget=forms.PasswordInput,
128 help_text=_("Leave blank if not changing."))
129 password2 = forms.CharField(
130 label=_("Password confirmation"), required=False,
131 widget=forms.PasswordInput,
132 help_text=_("Enter the same password as above, for verification."))
133
134 is_superuser = forms.BooleanField(
135 label=_("Administrator"),
136 required=False,
137 help_text=_("Administrators have the ability to manage user accounts.")
138 )
139
140 class Meta:
141 model = User
142 fields = set([User.USERNAME_FIELD, "is_active"]) | standard_fields
143 widgets = {
144 'groups': forms.CheckboxSelectMultiple
145 }
146
147 def clean_username(self):
148 # Since User.username is unique, this check is redundant,
149 # but it sets a nicer error message than the ORM. See #13147.
150 username = self.cleaned_data["username"]
151 username_field = User.USERNAME_FIELD
152 try:
153 User._default_manager.exclude(id=self.instance.id).get(**{
154 username_field: username})
155 except User.DoesNotExist:
156 return username
157 raise forms.ValidationError(self.error_messages['duplicate_username'])
158
159 def clean_password2(self):
160 password1 = self.cleaned_data.get("password1")
161 password2 = self.cleaned_data.get("password2")
162 if password1 != password2:
163 raise forms.ValidationError(
164 self.error_messages['password_mismatch'])
165 return password2
166
167 def save(self, commit=True):
168 user = super(UserEditForm, self).save(commit=False)
169
170 # users can access django-admin iff they are a superuser
171 user.is_staff = user.is_superuser
172
173 if self.cleaned_data["password1"]:
174 user.set_password(self.cleaned_data["password1"])
175 if commit:
176 user.save()
177 self.save_m2m()
178 return user
179
180
181 class GroupForm(forms.ModelForm):
182 def __init__(self, *args, **kwargs):
183 super(GroupForm, self).__init__(*args, **kwargs)
184 self.registered_permissions = Permission.objects.none()
185 for fn in hooks.get_hooks('register_permissions'):
186 self.registered_permissions = self.registered_permissions | fn()
187 self.fields['permissions'].queryset = self.registered_permissions
188
189 required_css_class = "required"
190
191 error_messages = {
192 'duplicate_name': _("A group with that name already exists."),
193 }
194
195 is_superuser = forms.BooleanField(
196 label=_("Administrator"),
197 required=False,
198 help_text=_("Administrators have the ability to manage user accounts.")
199 )
200
201 class Meta:
202 model = Group
203 fields = ("name", "permissions", )
204
205 def clean_name(self):
206 # Since Group.name is unique, this check is redundant,
207 # but it sets a nicer error message than the ORM. See #13147.
208 name = self.cleaned_data["name"]
209 try:
210 Group._default_manager.exclude(id=self.instance.id).get(name=name)
211 except Group.DoesNotExist:
212 return name
213 raise forms.ValidationError(self.error_messages['duplicate_name'])
214
215 def save(self):
216 # We go back to the object to read (in order to reapply) the
217 # permissions which were set on this group, but which are not
218 # accessible in the wagtail admin interface, as otherwise these would
219 # be clobbered by this form.
220 try:
221 untouchable_permissions = self.instance.permissions.exclude(pk__in=self.registered_permissions)
222 bool(untouchable_permissions) # force this to be evaluated, as it's about to change
223 except ValueError:
224 # this form is not bound; we're probably creating a new group
225 untouchable_permissions = []
226 group = super(GroupForm, self).save()
227 group.permissions.add(*untouchable_permissions)
228 return group
229
230
231 class GroupPagePermissionForm(forms.ModelForm):
232 page = forms.ModelChoiceField(queryset=Page.objects.all(),
233 widget=AdminPageChooser(show_edit_link=False))
234
235 class Meta:
236 model = GroupPagePermission
237 fields = ('page', 'permission_type')
238
239
240 class BaseGroupPagePermissionFormSet(forms.models.BaseInlineFormSet):
241 def __init__(self, *args, **kwargs):
242 super(BaseGroupPagePermissionFormSet, self).__init__(*args, **kwargs)
243 self.form = GroupPagePermissionForm
244 for form in self.forms:
245 form.fields['DELETE'].widget = forms.HiddenInput()
246
247 @property
248 def empty_form(self):
249 empty_form = super(BaseGroupPagePermissionFormSet, self).empty_form
250 empty_form.fields['DELETE'].widget = forms.HiddenInput()
251 return empty_form
252
253
254 GroupPagePermissionFormSet = inlineformset_factory(
255 Group,
256 GroupPagePermission,
257 formset=BaseGroupPagePermissionFormSet,
258 extra=0,
259 fields=('page', 'permission_type'),
260 )
261
262
263 class NotificationPreferencesForm(forms.ModelForm):
264 def __init__(self, *args, **kwargs):
265 super(NotificationPreferencesForm, self).__init__(*args, **kwargs)
266 user_perms = UserPagePermissionsProxy(self.instance.user)
267 if not user_perms.can_publish_pages():
268 del self.fields['submitted_notifications']
269 if not user_perms.can_edit_pages():
270 del self.fields['approved_notifications']
271 del self.fields['rejected_notifications']
272
273 class Meta:
274 model = UserProfile
275 fields = ("submitted_notifications", "approved_notifications", "rejected_notifications")
276
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/wagtailusers/forms.py b/wagtail/wagtailusers/forms.py
--- a/wagtail/wagtailusers/forms.py
+++ b/wagtail/wagtailusers/forms.py
@@ -43,8 +43,13 @@
class UserCreationForm(UsernameForm):
-
required_css_class = "required"
+
+ error_messages = {
+ 'duplicate_username': _("A user with that username already exists."),
+ 'password_mismatch': _("The two password fields didn't match."),
+ }
+
is_superuser = forms.BooleanField(
label=_("Administrator"),
required=False,
| {"golden_diff": "diff --git a/wagtail/wagtailusers/forms.py b/wagtail/wagtailusers/forms.py\n--- a/wagtail/wagtailusers/forms.py\n+++ b/wagtail/wagtailusers/forms.py\n@@ -43,8 +43,13 @@\n \n \n class UserCreationForm(UsernameForm):\n-\n required_css_class = \"required\"\n+\n+ error_messages = {\n+ 'duplicate_username': _(\"A user with that username already exists.\"),\n+ 'password_mismatch': _(\"The two password fields didn't match.\"),\n+ }\n+\n is_superuser = forms.BooleanField(\n label=_(\"Administrator\"),\n required=False,\n", "issue": "AttributeError when passwords do not match on user creation form\nWagtail will crash with the following error if the two passwords don't match on the user creation form:\n\n```\nAttributeError at /admin/users/add/\n\n'UserCreationForm' object has no attribute 'error_messages'\n```\n\nOn this line: https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailusers/forms.py#L92\n\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.models import Group, Permission\nfrom django.forms.models import inlineformset_factory\n\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailadmin.widgets import AdminPageChooser\nfrom wagtail.wagtailusers.models import UserProfile\nfrom wagtail.wagtailcore.models import Page, UserPagePermissionsProxy, GroupPagePermission\n\n\nUser = get_user_model()\n\n# The standard fields each user model is expected to have, as a minimum.\nstandard_fields = set(['email', 'first_name', 'last_name', 'is_superuser', 'groups'])\n\n\nclass UsernameForm(forms.ModelForm):\n \"\"\"\n Intelligently sets up the username field if it is infact a username. If the\n User model has been swapped out, and the username field is an email or\n something else, dont touch it.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(UsernameForm, self).__init__(*args, **kwargs)\n if User.USERNAME_FIELD == 'username':\n field = self.fields['username']\n field.regex = r\"^[\\w.@+-]+$\"\n field.help_text = _(\"Required. 30 characters or fewer. 
Letters, \"\n \"digits and @/./+/-/_ only.\")\n field.error_messages = field.error_messages.copy()\n field.error_messages.update({\n 'invalid': _(\"This value may contain only letters, numbers \"\n \"and @/./+/-/_ characters.\")})\n\n @property\n def username_field(self):\n return self[User.USERNAME_FIELD]\n\n def separate_username_field(self):\n return User.USERNAME_FIELD not in standard_fields\n\n\nclass UserCreationForm(UsernameForm):\n\n required_css_class = \"required\"\n is_superuser = forms.BooleanField(\n label=_(\"Administrator\"),\n required=False,\n help_text=_(\"If ticked, this user has the ability to manage user accounts.\")\n )\n\n password1 = forms.CharField(\n label=_(\"Password\"),\n required=False,\n widget=forms.PasswordInput,\n help_text=_(\"Leave blank if not changing.\"))\n password2 = forms.CharField(\n label=_(\"Password confirmation\"), required=False,\n widget=forms.PasswordInput,\n help_text=_(\"Enter the same password as above, for verification.\"))\n\n email = forms.EmailField(required=True, label=_(\"Email\"))\n first_name = forms.CharField(required=True, label=_(\"First Name\"))\n last_name = forms.CharField(required=True, label=_(\"Last Name\"))\n\n class Meta:\n model = User\n fields = set([User.USERNAME_FIELD]) | standard_fields\n widgets = {\n 'groups': forms.CheckboxSelectMultiple\n }\n\n def clean_username(self):\n username_field = User.USERNAME_FIELD\n username = self.cleaned_data[username_field]\n try:\n User._default_manager.get(**{username_field: username})\n except User.DoesNotExist:\n return username\n raise forms.ValidationError(\n self.error_messages['duplicate_username'],\n code='duplicate_username',\n )\n\n def clean_password2(self):\n password1 = self.cleaned_data.get(\"password1\")\n password2 = self.cleaned_data.get(\"password2\")\n if password1 and password2 and password1 != password2:\n raise forms.ValidationError(\n self.error_messages['password_mismatch'],\n code='password_mismatch',\n )\n return password2\n\n def save(self, commit=True):\n user = super(UserCreationForm, self).save(commit=False)\n user.set_password(self.cleaned_data[\"password1\"])\n\n # users can access django-admin iff they are a superuser\n user.is_staff = user.is_superuser\n\n if commit:\n user.save()\n self.save_m2m()\n return user\n\n\n# Largely the same as django.contrib.auth.forms.UserCreationForm, but with enough subtle changes\n# (to make password non-required) that it isn't worth inheriting...\nclass UserEditForm(UsernameForm):\n required_css_class = \"required\"\n\n error_messages = {\n 'duplicate_username': _(\"A user with that username already exists.\"),\n 'password_mismatch': _(\"The two password fields didn't match.\"),\n }\n\n email = forms.EmailField(required=True, label=_(\"Email\"))\n first_name = forms.CharField(required=True, label=_(\"First Name\"))\n last_name = forms.CharField(required=True, label=_(\"Last Name\"))\n\n password1 = forms.CharField(\n label=_(\"Password\"),\n required=False,\n widget=forms.PasswordInput,\n help_text=_(\"Leave blank if not changing.\"))\n password2 = forms.CharField(\n label=_(\"Password confirmation\"), required=False,\n widget=forms.PasswordInput,\n help_text=_(\"Enter the same password as above, for verification.\"))\n\n is_superuser = forms.BooleanField(\n label=_(\"Administrator\"),\n required=False,\n help_text=_(\"Administrators have the ability to manage user accounts.\")\n )\n\n class Meta:\n model = User\n fields = set([User.USERNAME_FIELD, \"is_active\"]) | standard_fields\n widgets = {\n 'groups': 
forms.CheckboxSelectMultiple\n }\n\n def clean_username(self):\n # Since User.username is unique, this check is redundant,\n # but it sets a nicer error message than the ORM. See #13147.\n username = self.cleaned_data[\"username\"]\n username_field = User.USERNAME_FIELD\n try:\n User._default_manager.exclude(id=self.instance.id).get(**{\n username_field: username})\n except User.DoesNotExist:\n return username\n raise forms.ValidationError(self.error_messages['duplicate_username'])\n\n def clean_password2(self):\n password1 = self.cleaned_data.get(\"password1\")\n password2 = self.cleaned_data.get(\"password2\")\n if password1 != password2:\n raise forms.ValidationError(\n self.error_messages['password_mismatch'])\n return password2\n\n def save(self, commit=True):\n user = super(UserEditForm, self).save(commit=False)\n\n # users can access django-admin iff they are a superuser\n user.is_staff = user.is_superuser\n\n if self.cleaned_data[\"password1\"]:\n user.set_password(self.cleaned_data[\"password1\"])\n if commit:\n user.save()\n self.save_m2m()\n return user\n\n\nclass GroupForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(GroupForm, self).__init__(*args, **kwargs)\n self.registered_permissions = Permission.objects.none()\n for fn in hooks.get_hooks('register_permissions'):\n self.registered_permissions = self.registered_permissions | fn()\n self.fields['permissions'].queryset = self.registered_permissions\n\n required_css_class = \"required\"\n\n error_messages = {\n 'duplicate_name': _(\"A group with that name already exists.\"),\n }\n\n is_superuser = forms.BooleanField(\n label=_(\"Administrator\"),\n required=False,\n help_text=_(\"Administrators have the ability to manage user accounts.\")\n )\n\n class Meta:\n model = Group\n fields = (\"name\", \"permissions\", )\n\n def clean_name(self):\n # Since Group.name is unique, this check is redundant,\n # but it sets a nicer error message than the ORM. 
See #13147.\n name = self.cleaned_data[\"name\"]\n try:\n Group._default_manager.exclude(id=self.instance.id).get(name=name)\n except Group.DoesNotExist:\n return name\n raise forms.ValidationError(self.error_messages['duplicate_name'])\n\n def save(self):\n # We go back to the object to read (in order to reapply) the\n # permissions which were set on this group, but which are not\n # accessible in the wagtail admin interface, as otherwise these would\n # be clobbered by this form.\n try:\n untouchable_permissions = self.instance.permissions.exclude(pk__in=self.registered_permissions)\n bool(untouchable_permissions) # force this to be evaluated, as it's about to change\n except ValueError:\n # this form is not bound; we're probably creating a new group\n untouchable_permissions = []\n group = super(GroupForm, self).save()\n group.permissions.add(*untouchable_permissions)\n return group\n\n\nclass GroupPagePermissionForm(forms.ModelForm):\n page = forms.ModelChoiceField(queryset=Page.objects.all(),\n widget=AdminPageChooser(show_edit_link=False))\n\n class Meta:\n model = GroupPagePermission\n fields = ('page', 'permission_type')\n\n\nclass BaseGroupPagePermissionFormSet(forms.models.BaseInlineFormSet):\n def __init__(self, *args, **kwargs):\n super(BaseGroupPagePermissionFormSet, self).__init__(*args, **kwargs)\n self.form = GroupPagePermissionForm\n for form in self.forms:\n form.fields['DELETE'].widget = forms.HiddenInput()\n\n @property\n def empty_form(self):\n empty_form = super(BaseGroupPagePermissionFormSet, self).empty_form\n empty_form.fields['DELETE'].widget = forms.HiddenInput()\n return empty_form\n\n\nGroupPagePermissionFormSet = inlineformset_factory(\n Group,\n GroupPagePermission,\n formset=BaseGroupPagePermissionFormSet,\n extra=0,\n fields=('page', 'permission_type'),\n)\n\n\nclass NotificationPreferencesForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(NotificationPreferencesForm, self).__init__(*args, **kwargs)\n user_perms = UserPagePermissionsProxy(self.instance.user)\n if not user_perms.can_publish_pages():\n del self.fields['submitted_notifications']\n if not user_perms.can_edit_pages():\n del self.fields['approved_notifications']\n del self.fields['rejected_notifications']\n\n class Meta:\n model = UserProfile\n fields = (\"submitted_notifications\", \"approved_notifications\", \"rejected_notifications\")\n", "path": "wagtail/wagtailusers/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.models import Group, Permission\nfrom django.forms.models import inlineformset_factory\n\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailadmin.widgets import AdminPageChooser\nfrom wagtail.wagtailusers.models import UserProfile\nfrom wagtail.wagtailcore.models import Page, UserPagePermissionsProxy, GroupPagePermission\n\n\nUser = get_user_model()\n\n# The standard fields each user model is expected to have, as a minimum.\nstandard_fields = set(['email', 'first_name', 'last_name', 'is_superuser', 'groups'])\n\n\nclass UsernameForm(forms.ModelForm):\n \"\"\"\n Intelligently sets up the username field if it is infact a username. 
If the\n User model has been swapped out, and the username field is an email or\n something else, dont touch it.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(UsernameForm, self).__init__(*args, **kwargs)\n if User.USERNAME_FIELD == 'username':\n field = self.fields['username']\n field.regex = r\"^[\\w.@+-]+$\"\n field.help_text = _(\"Required. 30 characters or fewer. Letters, \"\n \"digits and @/./+/-/_ only.\")\n field.error_messages = field.error_messages.copy()\n field.error_messages.update({\n 'invalid': _(\"This value may contain only letters, numbers \"\n \"and @/./+/-/_ characters.\")})\n\n @property\n def username_field(self):\n return self[User.USERNAME_FIELD]\n\n def separate_username_field(self):\n return User.USERNAME_FIELD not in standard_fields\n\n\nclass UserCreationForm(UsernameForm):\n required_css_class = \"required\"\n\n error_messages = {\n 'duplicate_username': _(\"A user with that username already exists.\"),\n 'password_mismatch': _(\"The two password fields didn't match.\"),\n }\n\n is_superuser = forms.BooleanField(\n label=_(\"Administrator\"),\n required=False,\n help_text=_(\"If ticked, this user has the ability to manage user accounts.\")\n )\n\n password1 = forms.CharField(\n label=_(\"Password\"),\n required=False,\n widget=forms.PasswordInput,\n help_text=_(\"Leave blank if not changing.\"))\n password2 = forms.CharField(\n label=_(\"Password confirmation\"), required=False,\n widget=forms.PasswordInput,\n help_text=_(\"Enter the same password as above, for verification.\"))\n\n email = forms.EmailField(required=True, label=_(\"Email\"))\n first_name = forms.CharField(required=True, label=_(\"First Name\"))\n last_name = forms.CharField(required=True, label=_(\"Last Name\"))\n\n class Meta:\n model = User\n fields = set([User.USERNAME_FIELD]) | standard_fields\n widgets = {\n 'groups': forms.CheckboxSelectMultiple\n }\n\n def clean_username(self):\n username_field = User.USERNAME_FIELD\n username = self.cleaned_data[username_field]\n try:\n User._default_manager.get(**{username_field: username})\n except User.DoesNotExist:\n return username\n raise forms.ValidationError(\n self.error_messages['duplicate_username'],\n code='duplicate_username',\n )\n\n def clean_password2(self):\n password1 = self.cleaned_data.get(\"password1\")\n password2 = self.cleaned_data.get(\"password2\")\n if password1 and password2 and password1 != password2:\n raise forms.ValidationError(\n self.error_messages['password_mismatch'],\n code='password_mismatch',\n )\n return password2\n\n def save(self, commit=True):\n user = super(UserCreationForm, self).save(commit=False)\n user.set_password(self.cleaned_data[\"password1\"])\n\n # users can access django-admin iff they are a superuser\n user.is_staff = user.is_superuser\n\n if commit:\n user.save()\n self.save_m2m()\n return user\n\n\n# Largely the same as django.contrib.auth.forms.UserCreationForm, but with enough subtle changes\n# (to make password non-required) that it isn't worth inheriting...\nclass UserEditForm(UsernameForm):\n required_css_class = \"required\"\n\n error_messages = {\n 'duplicate_username': _(\"A user with that username already exists.\"),\n 'password_mismatch': _(\"The two password fields didn't match.\"),\n }\n\n email = forms.EmailField(required=True, label=_(\"Email\"))\n first_name = forms.CharField(required=True, label=_(\"First Name\"))\n last_name = forms.CharField(required=True, label=_(\"Last Name\"))\n\n password1 = forms.CharField(\n label=_(\"Password\"),\n required=False,\n 
widget=forms.PasswordInput,\n help_text=_(\"Leave blank if not changing.\"))\n password2 = forms.CharField(\n label=_(\"Password confirmation\"), required=False,\n widget=forms.PasswordInput,\n help_text=_(\"Enter the same password as above, for verification.\"))\n\n is_superuser = forms.BooleanField(\n label=_(\"Administrator\"),\n required=False,\n help_text=_(\"Administrators have the ability to manage user accounts.\")\n )\n\n class Meta:\n model = User\n fields = set([User.USERNAME_FIELD, \"is_active\"]) | standard_fields\n widgets = {\n 'groups': forms.CheckboxSelectMultiple\n }\n\n def clean_username(self):\n # Since User.username is unique, this check is redundant,\n # but it sets a nicer error message than the ORM. See #13147.\n username = self.cleaned_data[\"username\"]\n username_field = User.USERNAME_FIELD\n try:\n User._default_manager.exclude(id=self.instance.id).get(**{\n username_field: username})\n except User.DoesNotExist:\n return username\n raise forms.ValidationError(self.error_messages['duplicate_username'])\n\n def clean_password2(self):\n password1 = self.cleaned_data.get(\"password1\")\n password2 = self.cleaned_data.get(\"password2\")\n if password1 != password2:\n raise forms.ValidationError(\n self.error_messages['password_mismatch'])\n return password2\n\n def save(self, commit=True):\n user = super(UserEditForm, self).save(commit=False)\n\n # users can access django-admin iff they are a superuser\n user.is_staff = user.is_superuser\n\n if self.cleaned_data[\"password1\"]:\n user.set_password(self.cleaned_data[\"password1\"])\n if commit:\n user.save()\n self.save_m2m()\n return user\n\n\nclass GroupForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(GroupForm, self).__init__(*args, **kwargs)\n self.registered_permissions = Permission.objects.none()\n for fn in hooks.get_hooks('register_permissions'):\n self.registered_permissions = self.registered_permissions | fn()\n self.fields['permissions'].queryset = self.registered_permissions\n\n required_css_class = \"required\"\n\n error_messages = {\n 'duplicate_name': _(\"A group with that name already exists.\"),\n }\n\n is_superuser = forms.BooleanField(\n label=_(\"Administrator\"),\n required=False,\n help_text=_(\"Administrators have the ability to manage user accounts.\")\n )\n\n class Meta:\n model = Group\n fields = (\"name\", \"permissions\", )\n\n def clean_name(self):\n # Since Group.name is unique, this check is redundant,\n # but it sets a nicer error message than the ORM. 
See #13147.\n name = self.cleaned_data[\"name\"]\n try:\n Group._default_manager.exclude(id=self.instance.id).get(name=name)\n except Group.DoesNotExist:\n return name\n raise forms.ValidationError(self.error_messages['duplicate_name'])\n\n def save(self):\n # We go back to the object to read (in order to reapply) the\n # permissions which were set on this group, but which are not\n # accessible in the wagtail admin interface, as otherwise these would\n # be clobbered by this form.\n try:\n untouchable_permissions = self.instance.permissions.exclude(pk__in=self.registered_permissions)\n bool(untouchable_permissions) # force this to be evaluated, as it's about to change\n except ValueError:\n # this form is not bound; we're probably creating a new group\n untouchable_permissions = []\n group = super(GroupForm, self).save()\n group.permissions.add(*untouchable_permissions)\n return group\n\n\nclass GroupPagePermissionForm(forms.ModelForm):\n page = forms.ModelChoiceField(queryset=Page.objects.all(),\n widget=AdminPageChooser(show_edit_link=False))\n\n class Meta:\n model = GroupPagePermission\n fields = ('page', 'permission_type')\n\n\nclass BaseGroupPagePermissionFormSet(forms.models.BaseInlineFormSet):\n def __init__(self, *args, **kwargs):\n super(BaseGroupPagePermissionFormSet, self).__init__(*args, **kwargs)\n self.form = GroupPagePermissionForm\n for form in self.forms:\n form.fields['DELETE'].widget = forms.HiddenInput()\n\n @property\n def empty_form(self):\n empty_form = super(BaseGroupPagePermissionFormSet, self).empty_form\n empty_form.fields['DELETE'].widget = forms.HiddenInput()\n return empty_form\n\n\nGroupPagePermissionFormSet = inlineformset_factory(\n Group,\n GroupPagePermission,\n formset=BaseGroupPagePermissionFormSet,\n extra=0,\n fields=('page', 'permission_type'),\n)\n\n\nclass NotificationPreferencesForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(NotificationPreferencesForm, self).__init__(*args, **kwargs)\n user_perms = UserPagePermissionsProxy(self.instance.user)\n if not user_perms.can_publish_pages():\n del self.fields['submitted_notifications']\n if not user_perms.can_edit_pages():\n del self.fields['approved_notifications']\n del self.fields['rejected_notifications']\n\n class Meta:\n model = UserProfile\n fields = (\"submitted_notifications\", \"approved_notifications\", \"rejected_notifications\")\n", "path": "wagtail/wagtailusers/forms.py"}]} | 3,163 | 137 |
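The diff above resolves the crash by declaring the `error_messages` dict that `clean_password2()` and `clean_username()` already look up on `UserCreationForm`. The following is a minimal sketch of that pattern only — it swaps in a plain `forms.Form`, literal strings instead of `ugettext_lazy`, and a standalone `settings.configure()` call, so it is an illustration of the fix, not Wagtail's actual class:

```python
# Simplified sketch of the pattern from the patch above: declare the messages that
# clean_password2() looks up, so self.error_messages['password_mismatch'] exists
# instead of raising AttributeError when the two passwords differ.
import django
from django.conf import settings

if not settings.configured:  # allow running the sketch standalone
    settings.configure()
    django.setup()

from django import forms


class UserCreationForm(forms.Form):
    error_messages = {
        'duplicate_username': "A user with that username already exists.",
        'password_mismatch': "The two password fields didn't match.",
    }

    password1 = forms.CharField(widget=forms.PasswordInput, required=False)
    password2 = forms.CharField(widget=forms.PasswordInput, required=False)

    def clean_password2(self):
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'],
                code='password_mismatch',
            )
        return password2
```

With the dict in place, `UserCreationForm({'password1': 'a', 'password2': 'b'}).is_valid()` returns `False` with a mismatch error rather than crashing with `AttributeError`.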
gh_patches_debug_28460 | rasdani/github-patches | git_diff | mindsdb__mindsdb-2678 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Fix scylladb error when connecting with secure bundle
When connecting with `secure_connect_bundle`, users got an unknown `secure_connect_bundle` path error.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/integrations/handlers/scylla_handler/scylla_handler.py`
Content:
```
1 import os
2 from mindsdb.integrations.libs.base_handler import DatabaseHandler
3 from mindsdb_sql import parse_sql
4 from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
5 from cassandra.cluster import Cluster
6 from cassandra.auth import PlainTextAuthProvider
7 from mindsdb.integrations.libs.response import (
8 HandlerStatusResponse as StatusResponse,
9 HandlerResponse as Response,
10 RESPONSE_TYPE
11 )
12 from mindsdb.utilities.log import log
13 import pandas as pd
14 from mindsdb_sql.parser.ast.base import ASTNode
15
16
17 class ScyllaHandler(DatabaseHandler):
18 """
19 This handler handles connection and execution of the Scylla statements.
20 """
21 name = 'scylla'
22
23 def __init__(self, name=None, **kwargs):
24 super().__init__(name)
25 self.parser = parse_sql
26 self.connection_args = kwargs.get('connection_data')
27 self.session = None
28 self.is_connected = False
29
30 def connect(self):
31 """
32 Handles the connection to a Scylla keystore.
33 """
34 if self.is_connected is True:
35 return self.session
36
37 auth_provider = PlainTextAuthProvider(
38 username=self.connection_args['user'], password=self.connection_args['password']
39 )
40
41 connection_props = {
42 'auth_provider': auth_provider
43 }
44
45 if self.connection_args['protocol_version'] is not None:
46 connection_props['protocol_version'] = self.connection_args['protocol_version']
47
48 secure_connect_bundle = self.connection_args.get('secure_connect_bundle')
49
50 if secure_connect_bundle is not None:
51 if os.path.isfile(self.secure_connect_bundle) is False:
52 raise Exception("Secure_connect_bundle' must be path to the file")
53 connection_props['cloud'] = {
54 'secure_connect_bundle': self.secure_connect_bundle
55 }
56 else:
57 connection_props['contact_points'] = [self.connection_args['host']]
58 connection_props['port'] = int(self.connection_args['port'])
59
60 cluster = Cluster(**connection_props)
61 session = cluster.connect(self.connection_args['keyspace'])
62
63 self.is_connected = True
64 self.session = session
65 return self.session
66
67 def check_connection(self) -> StatusResponse:
68 """
69 Check the connection of the Scylla database
70 :return: success status and error message if error occurs
71 """
72 response = StatusResponse(False)
73
74 try:
75 session = self.connect()
76 # TODO: change the healthcheck
77 session.execute('SELECT release_version FROM system.local').one()
78 response.success = True
79 except Exception as e:
80 log.error(f'Error connecting to Scylla {self.connection_args["keyspace"]}, {e}!')
81 response.error_message = e
82
83 if response.success is False and self.is_connected is True:
84 self.is_connected = False
85
86 return response
87
88 def native_query(self, query: str) -> Response:
89 """
90 Receive SQL query and runs it
91 :param query: The SQL query to run in MySQL
92 :return: returns the records from the current recordset
93 """
94 session = self.connect()
95 try:
96 resp = session.execute(query).all()
97 if resp:
98 response = Response(
99 RESPONSE_TYPE.TABLE,
100 pd.DataFrame(
101 resp
102 )
103 )
104 else:
105 response = Response(RESPONSE_TYPE.OK)
106 except Exception as e:
107 log.error(f'Error running query: {query} on {self.connection_args["keyspace"]}!')
108 response = Response(
109 RESPONSE_TYPE.ERROR,
110 error_message=str(e)
111 )
112 return response
113
114 def query(self, query: ASTNode) -> Response:
115 """
116 Retrieve the data from the SQL statement.
117 """
118 renderer = SqlalchemyRender('mysql')
119 query_str = renderer.get_string(query, with_failback=True)
120 return self.native_query(query_str)
121
122 def get_tables(self) -> Response:
123 """
124 Get a list with all of the tabels in MySQL
125 """
126 q = "DESCRIBE TABLES;"
127 result = self.native_query(q)
128 df = result.data_frame
129 result.data_frame = df.rename(columns={df.columns[0]: 'table_name'})
130 return result
131
132 def get_columns(self, table_name) -> Response:
133 """
134 Show details about the table
135 """
136 q = f"DESCRIBE {table_name};"
137 result = self.native_query(q)
138 return result
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py b/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py
--- a/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py
+++ b/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py
@@ -41,24 +41,21 @@
connection_props = {
'auth_provider': auth_provider
}
-
- if self.connection_args['protocol_version'] is not None:
- connection_props['protocol_version'] = self.connection_args['protocol_version']
-
+ connection_props['protocol_version'] = self.connection_args.get('protocol_version', 4)
secure_connect_bundle = self.connection_args.get('secure_connect_bundle')
if secure_connect_bundle is not None:
- if os.path.isfile(self.secure_connect_bundle) is False:
+ if os.path.isfile(secure_connect_bundle) is False:
raise Exception("Secure_connect_bundle' must be path to the file")
connection_props['cloud'] = {
- 'secure_connect_bundle': self.secure_connect_bundle
+ 'secure_connect_bundle': secure_connect_bundle
}
else:
connection_props['contact_points'] = [self.connection_args['host']]
connection_props['port'] = int(self.connection_args['port'])
cluster = Cluster(**connection_props)
- session = cluster.connect(self.connection_args['keyspace'])
+ session = cluster.connect(self.connection_args.get('keyspace'))
self.is_connected = True
self.session = session
| {"golden_diff": "diff --git a/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py b/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py\n--- a/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py\n+++ b/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py\n@@ -41,24 +41,21 @@\n connection_props = {\n 'auth_provider': auth_provider\n }\n-\n- if self.connection_args['protocol_version'] is not None:\n- connection_props['protocol_version'] = self.connection_args['protocol_version']\n- \n+ connection_props['protocol_version'] = self.connection_args.get('protocol_version', 4)\n secure_connect_bundle = self.connection_args.get('secure_connect_bundle')\n \n if secure_connect_bundle is not None:\n- if os.path.isfile(self.secure_connect_bundle) is False:\n+ if os.path.isfile(secure_connect_bundle) is False:\n raise Exception(\"Secure_connect_bundle' must be path to the file\")\n connection_props['cloud'] = {\n- 'secure_connect_bundle': self.secure_connect_bundle\n+ 'secure_connect_bundle': secure_connect_bundle\n }\n else:\n connection_props['contact_points'] = [self.connection_args['host']]\n connection_props['port'] = int(self.connection_args['port'])\n \n cluster = Cluster(**connection_props)\n- session = cluster.connect(self.connection_args['keyspace'])\n+ session = cluster.connect(self.connection_args.get('keyspace'))\n \n self.is_connected = True\n self.session = session\n", "issue": "[BUG] Fix scylladb error when connecting with secure bundle\nWhen connecting with `secure_connect_bundle` users got unknown secure_connect_bundle path error.\n", "before_files": [{"content": "import os\nfrom mindsdb.integrations.libs.base_handler import DatabaseHandler\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\nfrom cassandra.cluster import Cluster\nfrom cassandra.auth import PlainTextAuthProvider\nfrom mindsdb.integrations.libs.response import (\n HandlerStatusResponse as StatusResponse,\n HandlerResponse as Response,\n RESPONSE_TYPE\n)\nfrom mindsdb.utilities.log import log\nimport pandas as pd\nfrom mindsdb_sql.parser.ast.base import ASTNode\n\n\nclass ScyllaHandler(DatabaseHandler):\n \"\"\"\n This handler handles connection and execution of the Scylla statements.\n \"\"\"\n name = 'scylla'\n\n def __init__(self, name=None, **kwargs):\n super().__init__(name)\n self.parser = parse_sql\n self.connection_args = kwargs.get('connection_data')\n self.session = None\n self.is_connected = False\n\n def connect(self):\n \"\"\"\n Handles the connection to a Scylla keystore.\n \"\"\"\n if self.is_connected is True:\n return self.session\n\n auth_provider = PlainTextAuthProvider(\n username=self.connection_args['user'], password=self.connection_args['password']\n )\n\n connection_props = {\n 'auth_provider': auth_provider\n }\n\n if self.connection_args['protocol_version'] is not None:\n connection_props['protocol_version'] = self.connection_args['protocol_version']\n \n secure_connect_bundle = self.connection_args.get('secure_connect_bundle')\n\n if secure_connect_bundle is not None:\n if os.path.isfile(self.secure_connect_bundle) is False:\n raise Exception(\"Secure_connect_bundle' must be path to the file\")\n connection_props['cloud'] = {\n 'secure_connect_bundle': self.secure_connect_bundle\n }\n else:\n connection_props['contact_points'] = [self.connection_args['host']]\n connection_props['port'] = int(self.connection_args['port'])\n\n cluster = Cluster(**connection_props)\n session = 
cluster.connect(self.connection_args['keyspace'])\n\n self.is_connected = True\n self.session = session\n return self.session\n\n def check_connection(self) -> StatusResponse:\n \"\"\"\n Check the connection of the Scylla database\n :return: success status and error message if error occurs\n \"\"\"\n response = StatusResponse(False)\n\n try:\n session = self.connect()\n # TODO: change the healthcheck\n session.execute('SELECT release_version FROM system.local').one()\n response.success = True\n except Exception as e:\n log.error(f'Error connecting to Scylla {self.connection_args[\"keyspace\"]}, {e}!')\n response.error_message = e\n\n if response.success is False and self.is_connected is True:\n self.is_connected = False\n\n return response\n\n def native_query(self, query: str) -> Response:\n \"\"\"\n Receive SQL query and runs it\n :param query: The SQL query to run in MySQL\n :return: returns the records from the current recordset\n \"\"\"\n session = self.connect()\n try:\n resp = session.execute(query).all()\n if resp:\n response = Response(\n RESPONSE_TYPE.TABLE,\n pd.DataFrame(\n resp\n )\n )\n else:\n response = Response(RESPONSE_TYPE.OK)\n except Exception as e:\n log.error(f'Error running query: {query} on {self.connection_args[\"keyspace\"]}!')\n response = Response(\n RESPONSE_TYPE.ERROR,\n error_message=str(e)\n )\n return response\n\n def query(self, query: ASTNode) -> Response:\n \"\"\"\n Retrieve the data from the SQL statement.\n \"\"\"\n renderer = SqlalchemyRender('mysql')\n query_str = renderer.get_string(query, with_failback=True)\n return self.native_query(query_str)\n\n def get_tables(self) -> Response:\n \"\"\"\n Get a list with all of the tabels in MySQL\n \"\"\"\n q = \"DESCRIBE TABLES;\"\n result = self.native_query(q)\n df = result.data_frame\n result.data_frame = df.rename(columns={df.columns[0]: 'table_name'})\n return result\n\n def get_columns(self, table_name) -> Response:\n \"\"\"\n Show details about the table\n \"\"\"\n q = f\"DESCRIBE {table_name};\"\n result = self.native_query(q)\n return result\n", "path": "mindsdb/integrations/handlers/scylla_handler/scylla_handler.py"}], "after_files": [{"content": "import os\nfrom mindsdb.integrations.libs.base_handler import DatabaseHandler\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\nfrom cassandra.cluster import Cluster\nfrom cassandra.auth import PlainTextAuthProvider\nfrom mindsdb.integrations.libs.response import (\n HandlerStatusResponse as StatusResponse,\n HandlerResponse as Response,\n RESPONSE_TYPE\n)\nfrom mindsdb.utilities.log import log\nimport pandas as pd\nfrom mindsdb_sql.parser.ast.base import ASTNode\n\n\nclass ScyllaHandler(DatabaseHandler):\n \"\"\"\n This handler handles connection and execution of the Scylla statements.\n \"\"\"\n name = 'scylla'\n\n def __init__(self, name=None, **kwargs):\n super().__init__(name)\n self.parser = parse_sql\n self.connection_args = kwargs.get('connection_data')\n self.session = None\n self.is_connected = False\n\n def connect(self):\n \"\"\"\n Handles the connection to a Scylla keystore.\n \"\"\"\n if self.is_connected is True:\n return self.session\n\n auth_provider = PlainTextAuthProvider(\n username=self.connection_args['user'], password=self.connection_args['password']\n )\n\n connection_props = {\n 'auth_provider': auth_provider\n }\n connection_props['protocol_version'] = self.connection_args.get('protocol_version', 4)\n secure_connect_bundle = 
self.connection_args.get('secure_connect_bundle')\n\n if secure_connect_bundle is not None:\n if os.path.isfile(secure_connect_bundle) is False:\n raise Exception(\"Secure_connect_bundle' must be path to the file\")\n connection_props['cloud'] = {\n 'secure_connect_bundle': secure_connect_bundle\n }\n else:\n connection_props['contact_points'] = [self.connection_args['host']]\n connection_props['port'] = int(self.connection_args['port'])\n\n cluster = Cluster(**connection_props)\n session = cluster.connect(self.connection_args.get('keyspace'))\n\n self.is_connected = True\n self.session = session\n return self.session\n\n def check_connection(self) -> StatusResponse:\n \"\"\"\n Check the connection of the Scylla database\n :return: success status and error message if error occurs\n \"\"\"\n response = StatusResponse(False)\n\n try:\n session = self.connect()\n # TODO: change the healthcheck\n session.execute('SELECT release_version FROM system.local').one()\n response.success = True\n except Exception as e:\n log.error(f'Error connecting to Scylla {self.connection_args[\"keyspace\"]}, {e}!')\n response.error_message = e\n\n if response.success is False and self.is_connected is True:\n self.is_connected = False\n\n return response\n\n def native_query(self, query: str) -> Response:\n \"\"\"\n Receive SQL query and runs it\n :param query: The SQL query to run in MySQL\n :return: returns the records from the current recordset\n \"\"\"\n session = self.connect()\n try:\n resp = session.execute(query).all()\n if resp:\n response = Response(\n RESPONSE_TYPE.TABLE,\n pd.DataFrame(\n resp\n )\n )\n else:\n response = Response(RESPONSE_TYPE.OK)\n except Exception as e:\n log.error(f'Error running query: {query} on {self.connection_args[\"keyspace\"]}!')\n response = Response(\n RESPONSE_TYPE.ERROR,\n error_message=str(e)\n )\n return response\n\n def query(self, query: ASTNode) -> Response:\n \"\"\"\n Retrieve the data from the SQL statement.\n \"\"\"\n renderer = SqlalchemyRender('mysql')\n query_str = renderer.get_string(query, with_failback=True)\n return self.native_query(query_str)\n\n def get_tables(self) -> Response:\n \"\"\"\n Get a list with all of the tabels in MySQL\n \"\"\"\n q = \"DESCRIBE TABLES;\"\n result = self.native_query(q)\n df = result.data_frame\n result.data_frame = df.rename(columns={df.columns[0]: 'table_name'})\n return result\n\n def get_columns(self, table_name) -> Response:\n \"\"\"\n Show details about the table\n \"\"\"\n q = f\"DESCRIBE {table_name};\"\n result = self.native_query(q)\n return result\n", "path": "mindsdb/integrations/handlers/scylla_handler/scylla_handler.py"}]} | 1,546 | 350 |
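The patch above makes two corrections in `connect()`: optional settings are read with `dict.get()` (defaulting `protocol_version` to 4 and tolerating a missing `keyspace`), and the validated path is the local `secure_connect_bundle` variable rather than the nonexistent `self.secure_connect_bundle` attribute. A hedged, standalone sketch of just that argument handling — the helper name is invented for illustration and this is not the full MindsDB handler:

```python
# Sketch of the corrected connection handling from the diff above.
# Requires the cassandra-driver package; building props does not open a connection.
import os

from cassandra.auth import PlainTextAuthProvider


def build_connection_props(connection_args: dict) -> dict:
    props = {
        "auth_provider": PlainTextAuthProvider(
            username=connection_args["user"],
            password=connection_args["password"],
        ),
        # .get() with a default instead of indexing, as in the patch
        "protocol_version": connection_args.get("protocol_version", 4),
    }

    secure_connect_bundle = connection_args.get("secure_connect_bundle")
    if secure_connect_bundle is not None:
        # The original bug: the check referenced self.secure_connect_bundle, which
        # does not exist; the local variable is what must be validated and passed on.
        if not os.path.isfile(secure_connect_bundle):
            raise Exception("'secure_connect_bundle' must be a path to the file")
        props["cloud"] = {"secure_connect_bundle": secure_connect_bundle}
    else:
        props["contact_points"] = [connection_args["host"]]
        props["port"] = int(connection_args["port"])

    return props
```

A `Cluster(**props).connect(connection_args.get('keyspace'))` call would then mirror the patched handler's behaviour.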
gh_patches_debug_31481 | rasdani/github-patches | git_diff | bids-standard__pybids-1023 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BIDSValidation error message doesn't provide path of erroneous `dataset_description.json`
When creating a layout object or adding derivative directories, the [`BIDSDerivativesValidationError`](https://github.com/bids-standard/pybids/blob/f0d198da950371e64c6b2911627a549d78f62df3/bids/layout/validation.py#L130-L132) does not provide the file on which it errored out. This makes it hard for the user to debug the issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bids/layout/validation.py`
Content:
```
1 """Functionality related to validation of BIDSLayouts and BIDS projects."""
2
3 from pathlib import Path
4 import json
5 import re
6 import warnings
7
8 from ..utils import listify
9 from ..exceptions import BIDSValidationError, BIDSDerivativesValidationError
10
11
12 MANDATORY_BIDS_FIELDS = {
13 "Name": {"Name": "Example dataset"},
14 "BIDSVersion": {"BIDSVersion": "1.0.2"},
15 }
16
17
18 MANDATORY_DERIVATIVES_FIELDS = {
19 **MANDATORY_BIDS_FIELDS,
20 "GeneratedBy": {
21 "GeneratedBy": [{"Name": "Example pipeline"}]
22 },
23 }
24
25 EXAMPLE_BIDS_DESCRIPTION = {
26 k: val[k] for val in MANDATORY_BIDS_FIELDS.values() for k in val}
27
28
29 EXAMPLE_DERIVATIVES_DESCRIPTION = {
30 k: val[k] for val in MANDATORY_DERIVATIVES_FIELDS.values() for k in val}
31
32
33 DEFAULT_LOCATIONS_TO_IGNORE = {
34 re.compile(r"^/(code|models|sourcedata|stimuli)"),
35 re.compile(r'/\.'),
36 }
37
38 def absolute_path_deprecation_warning():
39 warnings.warn("The absolute_paths argument will be removed from PyBIDS "
40 "in 0.14. You can easily access the relative path of "
41 "BIDSFile objects via the .relpath attribute (instead of "
42 ".path). Switching to this pattern is strongly encouraged, "
43 "as the current implementation of relative path handling "
44 "is known to produce query failures in certain edge cases.")
45
46
47 def indexer_arg_deprecation_warning():
48 warnings.warn("The ability to pass arguments to BIDSLayout that control "
49 "indexing is likely to be removed in future; possibly as "
50 "early as PyBIDS 0.14. This includes the `config_filename`, "
51 "`ignore`, `force_index`, and `index_metadata` arguments. "
52 "The recommended usage pattern is to initialize a new "
53 "BIDSLayoutIndexer with these arguments, and pass it to "
54 "the BIDSLayout via the `indexer` argument.")
55
56
57 def validate_root(root, validate):
58 # Validate root argument and make sure it contains mandatory info
59 try:
60 root = Path(root)
61 except TypeError:
62 raise TypeError("root argument must be a pathlib.Path (or a type that "
63 "supports casting to pathlib.Path, such as "
64 "string) specifying the directory "
65 "containing the BIDS dataset.")
66
67 root = root.absolute()
68
69 if not root.exists():
70 raise ValueError("BIDS root does not exist: %s" % root)
71
72 target = root / 'dataset_description.json'
73 if not target.exists():
74 if validate:
75 raise BIDSValidationError(
76 "'dataset_description.json' is missing from project root."
77 " Every valid BIDS dataset must have this file."
78 "\nExample contents of 'dataset_description.json': \n%s" %
79 json.dumps(EXAMPLE_BIDS_DESCRIPTION)
80 )
81 else:
82 description = None
83 else:
84 err = None
85 try:
86 with open(target, 'r', encoding='utf-8') as desc_fd:
87 description = json.load(desc_fd)
88 except (UnicodeDecodeError, json.JSONDecodeError) as e:
89 description = None
90 err = e
91 if validate:
92
93 if description is None:
94 raise BIDSValidationError(
95 "'dataset_description.json' is not a valid json file."
96 " There is likely a typo in your 'dataset_description.json'."
97 "\nExample contents of 'dataset_description.json': \n%s" %
98 json.dumps(EXAMPLE_BIDS_DESCRIPTION)
99 ) from err
100
101 for k in MANDATORY_BIDS_FIELDS:
102 if k not in description:
103 raise BIDSValidationError(
104 "Mandatory %r field missing from "
105 "'dataset_description.json'."
106 "\nExample: %s" % (k, MANDATORY_BIDS_FIELDS[k])
107 )
108
109 return root, description
110
111
112 def validate_derivative_path(path, **kwargs):
113 # Collect all paths that contain a dataset_description.json
114 dd = Path(path) / 'dataset_description.json'
115 description = json.loads(dd.read_text(encoding='utf-8'))
116 pipeline_names = [pipeline["Name"]
117 for pipeline in description.get("GeneratedBy", [])
118 if "Name" in pipeline]
119 if pipeline_names:
120 pipeline_name = pipeline_names[0]
121 elif "PipelineDescription" in description:
122 warnings.warn("The PipelineDescription field was superseded "
123 "by GeneratedBy in BIDS 1.4.0. You can use "
124 "``pybids upgrade`` to update your derivative "
125 "dataset.")
126 pipeline_name = description["PipelineDescription"].get("Name")
127 else:
128 pipeline_name = None
129 if pipeline_name is None:
130 raise BIDSDerivativesValidationError(
131 "Every valid BIDS-derivatives dataset must "
132 "have a GeneratedBy.Name field set "
133 "inside 'dataset_description.json'. "
134 f"\nExample: {MANDATORY_DERIVATIVES_FIELDS['GeneratedBy']}"
135 )
136 return pipeline_name
137
138
139 def _sort_patterns(patterns, root):
140 """Return sorted patterns, from more specific to more general."""
141 regexes = [patt for patt in patterns if hasattr(patt, "search")]
142
143 paths = [
144 str((root / patt).absolute())
145 for patt in listify(patterns)
146 if not hasattr(patt, "search")
147 ]
148 # Sort patterns from general to specific
149 paths.sort(key=len)
150
151 # Combine and return (note path patterns are reversed, specific first)
152 return [Path(p) for p in reversed(paths)] + regexes
153
154
155 def validate_indexing_args(ignore, force_index, root):
156 if ignore is None:
157 ignore = list(
158 DEFAULT_LOCATIONS_TO_IGNORE - set(force_index or [])
159 )
160
161 # root has already been validated to be a directory
162 ignore = _sort_patterns(ignore, root)
163 force_index = _sort_patterns(force_index or [], root)
164
165 # Derivatives get special handling; they shouldn't be indexed normally
166 for entry in force_index:
167 condi = (isinstance(entry, str) and
168 str(entry.resolve()).startswith('derivatives'))
169 if condi:
170 msg = ("Do not pass 'derivatives' in the force_index "
171 "list. To index derivatives, either set "
172 "derivatives=True, or use add_derivatives().")
173 raise ValueError(msg)
174
175 return ignore, force_index
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bids/layout/validation.py b/bids/layout/validation.py
--- a/bids/layout/validation.py
+++ b/bids/layout/validation.py
@@ -93,7 +93,8 @@
if description is None:
raise BIDSValidationError(
"'dataset_description.json' is not a valid json file."
- " There is likely a typo in your 'dataset_description.json'."
+ " There is likely a typo in your 'dataset_description.json' at "
+ f"{target.resolve()}. "
"\nExample contents of 'dataset_description.json': \n%s" %
json.dumps(EXAMPLE_BIDS_DESCRIPTION)
) from err
@@ -102,7 +103,8 @@
if k not in description:
raise BIDSValidationError(
"Mandatory %r field missing from "
- "'dataset_description.json'."
+ "'dataset_description.json' at "
+ f"{target.resolve()}. "
"\nExample: %s" % (k, MANDATORY_BIDS_FIELDS[k])
)
@@ -130,7 +132,8 @@
raise BIDSDerivativesValidationError(
"Every valid BIDS-derivatives dataset must "
"have a GeneratedBy.Name field set "
- "inside 'dataset_description.json'. "
+ "inside 'dataset_description.json', "
+ f"here {dd.resolve()}. "
f"\nExample: {MANDATORY_DERIVATIVES_FIELDS['GeneratedBy']}"
)
return pipeline_name
| {"golden_diff": "diff --git a/bids/layout/validation.py b/bids/layout/validation.py\n--- a/bids/layout/validation.py\n+++ b/bids/layout/validation.py\n@@ -93,7 +93,8 @@\n if description is None:\n raise BIDSValidationError(\n \"'dataset_description.json' is not a valid json file.\"\n- \" There is likely a typo in your 'dataset_description.json'.\"\n+ \" There is likely a typo in your 'dataset_description.json' at \"\n+ f\"{target.resolve()}. \"\n \"\\nExample contents of 'dataset_description.json': \\n%s\" %\n json.dumps(EXAMPLE_BIDS_DESCRIPTION)\n ) from err\n@@ -102,7 +103,8 @@\n if k not in description:\n raise BIDSValidationError(\n \"Mandatory %r field missing from \"\n- \"'dataset_description.json'.\"\n+ \"'dataset_description.json' at \"\n+ f\"{target.resolve()}. \"\n \"\\nExample: %s\" % (k, MANDATORY_BIDS_FIELDS[k])\n )\n \n@@ -130,7 +132,8 @@\n raise BIDSDerivativesValidationError(\n \"Every valid BIDS-derivatives dataset must \"\n \"have a GeneratedBy.Name field set \"\n- \"inside 'dataset_description.json'. \"\n+ \"inside 'dataset_description.json', \"\n+ f\"here {dd.resolve()}. \"\n f\"\\nExample: {MANDATORY_DERIVATIVES_FIELDS['GeneratedBy']}\"\n )\n return pipeline_name\n", "issue": "BIDSValidation error message doesn't provide path of erroneous `dataset_description.json` \nWhen creating a layout object or adding derivative directories, the [`BIDSDerivativesValidationError`](https://github.com/bids-standard/pybids/blob/f0d198da950371e64c6b2911627a549d78f62df3/bids/layout/validation.py#L130-L132) does not provide the file on which it errored out. This makes it hard for the user to debug the issue.\n", "before_files": [{"content": "\"\"\"Functionality related to validation of BIDSLayouts and BIDS projects.\"\"\"\n\nfrom pathlib import Path\nimport json\nimport re\nimport warnings\n\nfrom ..utils import listify\nfrom ..exceptions import BIDSValidationError, BIDSDerivativesValidationError\n\n\nMANDATORY_BIDS_FIELDS = {\n \"Name\": {\"Name\": \"Example dataset\"},\n \"BIDSVersion\": {\"BIDSVersion\": \"1.0.2\"},\n}\n\n\nMANDATORY_DERIVATIVES_FIELDS = {\n **MANDATORY_BIDS_FIELDS,\n \"GeneratedBy\": {\n \"GeneratedBy\": [{\"Name\": \"Example pipeline\"}]\n },\n}\n\nEXAMPLE_BIDS_DESCRIPTION = {\n k: val[k] for val in MANDATORY_BIDS_FIELDS.values() for k in val}\n\n\nEXAMPLE_DERIVATIVES_DESCRIPTION = {\n k: val[k] for val in MANDATORY_DERIVATIVES_FIELDS.values() for k in val}\n\n\nDEFAULT_LOCATIONS_TO_IGNORE = {\n re.compile(r\"^/(code|models|sourcedata|stimuli)\"),\n re.compile(r'/\\.'),\n}\n\ndef absolute_path_deprecation_warning():\n warnings.warn(\"The absolute_paths argument will be removed from PyBIDS \"\n \"in 0.14. You can easily access the relative path of \"\n \"BIDSFile objects via the .relpath attribute (instead of \"\n \".path). Switching to this pattern is strongly encouraged, \"\n \"as the current implementation of relative path handling \"\n \"is known to produce query failures in certain edge cases.\")\n\n\ndef indexer_arg_deprecation_warning():\n warnings.warn(\"The ability to pass arguments to BIDSLayout that control \"\n \"indexing is likely to be removed in future; possibly as \"\n \"early as PyBIDS 0.14. This includes the `config_filename`, \"\n \"`ignore`, `force_index`, and `index_metadata` arguments. 
\"\n \"The recommended usage pattern is to initialize a new \"\n \"BIDSLayoutIndexer with these arguments, and pass it to \"\n \"the BIDSLayout via the `indexer` argument.\")\n\n\ndef validate_root(root, validate):\n # Validate root argument and make sure it contains mandatory info\n try:\n root = Path(root)\n except TypeError:\n raise TypeError(\"root argument must be a pathlib.Path (or a type that \"\n \"supports casting to pathlib.Path, such as \"\n \"string) specifying the directory \"\n \"containing the BIDS dataset.\")\n\n root = root.absolute()\n\n if not root.exists():\n raise ValueError(\"BIDS root does not exist: %s\" % root)\n\n target = root / 'dataset_description.json'\n if not target.exists():\n if validate:\n raise BIDSValidationError(\n \"'dataset_description.json' is missing from project root.\"\n \" Every valid BIDS dataset must have this file.\"\n \"\\nExample contents of 'dataset_description.json': \\n%s\" %\n json.dumps(EXAMPLE_BIDS_DESCRIPTION)\n )\n else:\n description = None\n else:\n err = None\n try:\n with open(target, 'r', encoding='utf-8') as desc_fd:\n description = json.load(desc_fd)\n except (UnicodeDecodeError, json.JSONDecodeError) as e:\n description = None\n err = e\n if validate:\n\n if description is None:\n raise BIDSValidationError(\n \"'dataset_description.json' is not a valid json file.\"\n \" There is likely a typo in your 'dataset_description.json'.\"\n \"\\nExample contents of 'dataset_description.json': \\n%s\" %\n json.dumps(EXAMPLE_BIDS_DESCRIPTION)\n ) from err\n\n for k in MANDATORY_BIDS_FIELDS:\n if k not in description:\n raise BIDSValidationError(\n \"Mandatory %r field missing from \"\n \"'dataset_description.json'.\"\n \"\\nExample: %s\" % (k, MANDATORY_BIDS_FIELDS[k])\n )\n\n return root, description\n\n\ndef validate_derivative_path(path, **kwargs):\n # Collect all paths that contain a dataset_description.json\n dd = Path(path) / 'dataset_description.json'\n description = json.loads(dd.read_text(encoding='utf-8'))\n pipeline_names = [pipeline[\"Name\"]\n for pipeline in description.get(\"GeneratedBy\", [])\n if \"Name\" in pipeline]\n if pipeline_names:\n pipeline_name = pipeline_names[0]\n elif \"PipelineDescription\" in description:\n warnings.warn(\"The PipelineDescription field was superseded \"\n \"by GeneratedBy in BIDS 1.4.0. You can use \"\n \"``pybids upgrade`` to update your derivative \"\n \"dataset.\")\n pipeline_name = description[\"PipelineDescription\"].get(\"Name\")\n else:\n pipeline_name = None\n if pipeline_name is None:\n raise BIDSDerivativesValidationError(\n \"Every valid BIDS-derivatives dataset must \"\n \"have a GeneratedBy.Name field set \"\n \"inside 'dataset_description.json'. 
\"\n f\"\\nExample: {MANDATORY_DERIVATIVES_FIELDS['GeneratedBy']}\"\n )\n return pipeline_name\n\n\ndef _sort_patterns(patterns, root):\n \"\"\"Return sorted patterns, from more specific to more general.\"\"\"\n regexes = [patt for patt in patterns if hasattr(patt, \"search\")]\n\n paths = [\n str((root / patt).absolute())\n for patt in listify(patterns)\n if not hasattr(patt, \"search\")\n ]\n # Sort patterns from general to specific\n paths.sort(key=len)\n\n # Combine and return (note path patterns are reversed, specific first)\n return [Path(p) for p in reversed(paths)] + regexes\n\n\ndef validate_indexing_args(ignore, force_index, root):\n if ignore is None:\n ignore = list(\n DEFAULT_LOCATIONS_TO_IGNORE - set(force_index or [])\n )\n\n # root has already been validated to be a directory\n ignore = _sort_patterns(ignore, root)\n force_index = _sort_patterns(force_index or [], root)\n\n # Derivatives get special handling; they shouldn't be indexed normally\n for entry in force_index:\n condi = (isinstance(entry, str) and\n str(entry.resolve()).startswith('derivatives'))\n if condi:\n msg = (\"Do not pass 'derivatives' in the force_index \"\n \"list. To index derivatives, either set \"\n \"derivatives=True, or use add_derivatives().\")\n raise ValueError(msg)\n\n return ignore, force_index\n", "path": "bids/layout/validation.py"}], "after_files": [{"content": "\"\"\"Functionality related to validation of BIDSLayouts and BIDS projects.\"\"\"\n\nfrom pathlib import Path\nimport json\nimport re\nimport warnings\n\nfrom ..utils import listify\nfrom ..exceptions import BIDSValidationError, BIDSDerivativesValidationError\n\n\nMANDATORY_BIDS_FIELDS = {\n \"Name\": {\"Name\": \"Example dataset\"},\n \"BIDSVersion\": {\"BIDSVersion\": \"1.0.2\"},\n}\n\n\nMANDATORY_DERIVATIVES_FIELDS = {\n **MANDATORY_BIDS_FIELDS,\n \"GeneratedBy\": {\n \"GeneratedBy\": [{\"Name\": \"Example pipeline\"}]\n },\n}\n\nEXAMPLE_BIDS_DESCRIPTION = {\n k: val[k] for val in MANDATORY_BIDS_FIELDS.values() for k in val}\n\n\nEXAMPLE_DERIVATIVES_DESCRIPTION = {\n k: val[k] for val in MANDATORY_DERIVATIVES_FIELDS.values() for k in val}\n\n\nDEFAULT_LOCATIONS_TO_IGNORE = {\n re.compile(r\"^/(code|models|sourcedata|stimuli)\"),\n re.compile(r'/\\.'),\n}\n\ndef absolute_path_deprecation_warning():\n warnings.warn(\"The absolute_paths argument will be removed from PyBIDS \"\n \"in 0.14. You can easily access the relative path of \"\n \"BIDSFile objects via the .relpath attribute (instead of \"\n \".path). Switching to this pattern is strongly encouraged, \"\n \"as the current implementation of relative path handling \"\n \"is known to produce query failures in certain edge cases.\")\n\n\ndef indexer_arg_deprecation_warning():\n warnings.warn(\"The ability to pass arguments to BIDSLayout that control \"\n \"indexing is likely to be removed in future; possibly as \"\n \"early as PyBIDS 0.14. This includes the `config_filename`, \"\n \"`ignore`, `force_index`, and `index_metadata` arguments. 
\"\n \"The recommended usage pattern is to initialize a new \"\n \"BIDSLayoutIndexer with these arguments, and pass it to \"\n \"the BIDSLayout via the `indexer` argument.\")\n\n\ndef validate_root(root, validate):\n # Validate root argument and make sure it contains mandatory info\n try:\n root = Path(root)\n except TypeError:\n raise TypeError(\"root argument must be a pathlib.Path (or a type that \"\n \"supports casting to pathlib.Path, such as \"\n \"string) specifying the directory \"\n \"containing the BIDS dataset.\")\n\n root = root.absolute()\n\n if not root.exists():\n raise ValueError(\"BIDS root does not exist: %s\" % root)\n\n target = root / 'dataset_description.json'\n if not target.exists():\n if validate:\n raise BIDSValidationError(\n \"'dataset_description.json' is missing from project root.\"\n \" Every valid BIDS dataset must have this file.\"\n \"\\nExample contents of 'dataset_description.json': \\n%s\" %\n json.dumps(EXAMPLE_BIDS_DESCRIPTION)\n )\n else:\n description = None\n else:\n err = None\n try:\n with open(target, 'r', encoding='utf-8') as desc_fd:\n description = json.load(desc_fd)\n except (UnicodeDecodeError, json.JSONDecodeError) as e:\n description = None\n err = e\n if validate:\n\n if description is None:\n raise BIDSValidationError(\n \"'dataset_description.json' is not a valid json file.\"\n \" There is likely a typo in your 'dataset_description.json' at \"\n f\"{target.resolve()}. \"\n \"\\nExample contents of 'dataset_description.json': \\n%s\" %\n json.dumps(EXAMPLE_BIDS_DESCRIPTION)\n ) from err\n\n for k in MANDATORY_BIDS_FIELDS:\n if k not in description:\n raise BIDSValidationError(\n \"Mandatory %r field missing from \"\n \"'dataset_description.json' at \"\n f\"{target.resolve()}. \"\n \"\\nExample: %s\" % (k, MANDATORY_BIDS_FIELDS[k])\n )\n\n return root, description\n\n\ndef validate_derivative_path(path, **kwargs):\n # Collect all paths that contain a dataset_description.json\n dd = Path(path) / 'dataset_description.json'\n description = json.loads(dd.read_text(encoding='utf-8'))\n pipeline_names = [pipeline[\"Name\"]\n for pipeline in description.get(\"GeneratedBy\", [])\n if \"Name\" in pipeline]\n if pipeline_names:\n pipeline_name = pipeline_names[0]\n elif \"PipelineDescription\" in description:\n warnings.warn(\"The PipelineDescription field was superseded \"\n \"by GeneratedBy in BIDS 1.4.0. You can use \"\n \"``pybids upgrade`` to update your derivative \"\n \"dataset.\")\n pipeline_name = description[\"PipelineDescription\"].get(\"Name\")\n else:\n pipeline_name = None\n if pipeline_name is None:\n raise BIDSDerivativesValidationError(\n \"Every valid BIDS-derivatives dataset must \"\n \"have a GeneratedBy.Name field set \"\n \"inside 'dataset_description.json', \"\n f\"here {dd.resolve()}. 
\"\n f\"\\nExample: {MANDATORY_DERIVATIVES_FIELDS['GeneratedBy']}\"\n )\n return pipeline_name\n\n\ndef _sort_patterns(patterns, root):\n \"\"\"Return sorted patterns, from more specific to more general.\"\"\"\n regexes = [patt for patt in patterns if hasattr(patt, \"search\")]\n\n paths = [\n str((root / patt).absolute())\n for patt in listify(patterns)\n if not hasattr(patt, \"search\")\n ]\n # Sort patterns from general to specific\n paths.sort(key=len)\n\n # Combine and return (note path patterns are reversed, specific first)\n return [Path(p) for p in reversed(paths)] + regexes\n\n\ndef validate_indexing_args(ignore, force_index, root):\n if ignore is None:\n ignore = list(\n DEFAULT_LOCATIONS_TO_IGNORE - set(force_index or [])\n )\n\n # root has already been validated to be a directory\n ignore = _sort_patterns(ignore, root)\n force_index = _sort_patterns(force_index or [], root)\n\n # Derivatives get special handling; they shouldn't be indexed normally\n for entry in force_index:\n condi = (isinstance(entry, str) and\n str(entry.resolve()).startswith('derivatives'))\n if condi:\n msg = (\"Do not pass 'derivatives' in the force_index \"\n \"list. To index derivatives, either set \"\n \"derivatives=True, or use add_derivatives().\")\n raise ValueError(msg)\n\n return ignore, force_index\n", "path": "bids/layout/validation.py"}]} | 2,206 | 330 |
gh_patches_debug_11687 | rasdani/github-patches | git_diff | pypa__setuptools-2907 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`distutils` submodules being loaded from the stdlib
It seems the issue is that `distutils.sysconfig` is being loaded from the stdlib, even though [the distutils hack has an explicit check that submodules are loaded from the locally-bundled copy](https://github.com/pypa/setuptools/blob/dd5a2cec373ffe7eefc087c1cd06fb4e491a7e88/_distutils_hack/__init__.py#L55-L57).
_Originally posted by @jaraco in https://github.com/pypa/distutils/issues/16#issuecomment-980043534_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `_distutils_hack/__init__.py`
Content:
```
1 import sys
2 import os
3 import re
4 import importlib
5 import warnings
6
7
8 is_pypy = '__pypy__' in sys.builtin_module_names
9
10
11 warnings.filterwarnings('ignore',
12 r'.+ distutils\b.+ deprecated',
13 DeprecationWarning)
14
15
16 def warn_distutils_present():
17 if 'distutils' not in sys.modules:
18 return
19 if is_pypy and sys.version_info < (3, 7):
20 # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
21 # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
22 return
23 warnings.warn(
24 "Distutils was imported before Setuptools, but importing Setuptools "
25 "also replaces the `distutils` module in `sys.modules`. This may lead "
26 "to undesirable behaviors or errors. To avoid these issues, avoid "
27 "using distutils directly, ensure that setuptools is installed in the "
28 "traditional way (e.g. not an editable install), and/or make sure "
29 "that setuptools is always imported before distutils.")
30
31
32 def clear_distutils():
33 if 'distutils' not in sys.modules:
34 return
35 warnings.warn("Setuptools is replacing distutils.")
36 mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
37 for name in mods:
38 del sys.modules[name]
39
40
41 def enabled():
42 """
43 Allow selection of distutils by environment variable.
44 """
45 which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
46 return which == 'local'
47
48
49 def ensure_local_distutils():
50 clear_distutils()
51 distutils = importlib.import_module('setuptools._distutils')
52 distutils.__name__ = 'distutils'
53 sys.modules['distutils'] = distutils
54
55 # sanity check that submodules load as expected
56 core = importlib.import_module('distutils.core')
57 assert '_distutils' in core.__file__, core.__file__
58
59
60 def do_override():
61 """
62 Ensure that the local copy of distutils is preferred over stdlib.
63
64 See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
65 for more motivation.
66 """
67 if enabled():
68 warn_distutils_present()
69 ensure_local_distutils()
70
71
72 class DistutilsMetaFinder:
73 def find_spec(self, fullname, path, target=None):
74 if path is not None:
75 return
76
77 method_name = 'spec_for_{fullname}'.format(**locals())
78 method = getattr(self, method_name, lambda: None)
79 return method()
80
81 def spec_for_distutils(self):
82 import importlib.abc
83 import importlib.util
84
85 class DistutilsLoader(importlib.abc.Loader):
86
87 def create_module(self, spec):
88 return importlib.import_module('setuptools._distutils')
89
90 def exec_module(self, module):
91 pass
92
93 return importlib.util.spec_from_loader('distutils', DistutilsLoader())
94
95 def spec_for_pip(self):
96 """
97 Ensure stdlib distutils when running under pip.
98 See pypa/pip#8761 for rationale.
99 """
100 if self.pip_imported_during_build():
101 return
102 clear_distutils()
103 self.spec_for_distutils = lambda: None
104
105 @staticmethod
106 def pip_imported_during_build():
107 """
108 Detect if pip is being imported in a build script. Ref #2355.
109 """
110 import traceback
111 return any(
112 frame.f_globals['__file__'].endswith('setup.py')
113 for frame, line in traceback.walk_stack(None)
114 )
115
116
117 DISTUTILS_FINDER = DistutilsMetaFinder()
118
119
120 def add_shim():
121 sys.meta_path.insert(0, DISTUTILS_FINDER)
122
123
124 def remove_shim():
125 try:
126 sys.meta_path.remove(DISTUTILS_FINDER)
127 except ValueError:
128 pass
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py
--- a/_distutils_hack/__init__.py
+++ b/_distutils_hack/__init__.py
@@ -48,11 +48,15 @@
def ensure_local_distutils():
clear_distutils()
- distutils = importlib.import_module('setuptools._distutils')
- distutils.__name__ = 'distutils'
- sys.modules['distutils'] = distutils
- # sanity check that submodules load as expected
+ # With the DistutilsMetaFinder in place,
+ # perform an import to cause distutils to be
+ # loaded from setuptools._distutils. Ref #2906.
+ add_shim()
+ importlib.import_module('distutils')
+ remove_shim()
+
+ # check that submodules load as expected
core = importlib.import_module('distutils.core')
assert '_distutils' in core.__file__, core.__file__
| {"golden_diff": "diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py\n--- a/_distutils_hack/__init__.py\n+++ b/_distutils_hack/__init__.py\n@@ -48,11 +48,15 @@\n \n def ensure_local_distutils():\n clear_distutils()\n- distutils = importlib.import_module('setuptools._distutils')\n- distutils.__name__ = 'distutils'\n- sys.modules['distutils'] = distutils\n \n- # sanity check that submodules load as expected\n+ # With the DistutilsMetaFinder in place,\n+ # perform an import to cause distutils to be\n+ # loaded from setuptools._distutils. Ref #2906.\n+ add_shim()\n+ importlib.import_module('distutils')\n+ remove_shim()\n+\n+ # check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n", "issue": "`distutils` submodules being loaded from the stdlib\nIt seems the issue is that `distutils.sysconfig` is being loaded from the stdlib, even though [the distutils hack has an explicit check that submodules are loaded from the locally-bundled copy](https://github.com/pypa/setuptools/blob/dd5a2cec373ffe7eefc087c1cd06fb4e491a7e88/_distutils_hack/__init__.py#L55-L57).\r\n\r\n_Originally posted by @jaraco in https://github.com/pypa/distutils/issues/16#issuecomment-980043534_\n`distutils` submodules being loaded from the stdlib\nIt seems the issue is that `distutils.sysconfig` is being loaded from the stdlib, even though [the distutils hack has an explicit check that submodules are loaded from the locally-bundled copy](https://github.com/pypa/setuptools/blob/dd5a2cec373ffe7eefc087c1cd06fb4e491a7e88/_distutils_hack/__init__.py#L55-L57).\r\n\r\n_Originally posted by @jaraco in https://github.com/pypa/distutils/issues/16#issuecomment-980043534_\n", "before_files": [{"content": "import sys\nimport os\nimport re\nimport importlib\nimport warnings\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\nwarnings.filterwarnings('ignore',\n r'.+ distutils\\b.+ deprecated',\n DeprecationWarning)\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. 
not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n distutils = importlib.import_module('setuptools._distutils')\n distutils.__name__ = 'distutils'\n sys.modules['distutils'] = distutils\n\n # sanity check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib.abc\n import importlib.util\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return importlib.import_module('setuptools._distutils')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n if self.pip_imported_during_build():\n return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n @staticmethod\n def pip_imported_during_build():\n \"\"\"\n Detect if pip is being imported in a build script. Ref #2355.\n \"\"\"\n import traceback\n return any(\n frame.f_globals['__file__'].endswith('setup.py')\n for frame, line in traceback.walk_stack(None)\n )\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}], "after_files": [{"content": "import sys\nimport os\nimport re\nimport importlib\nimport warnings\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\nwarnings.filterwarnings('ignore',\n r'.+ distutils\\b.+ deprecated',\n DeprecationWarning)\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. 
not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n\n # With the DistutilsMetaFinder in place,\n # perform an import to cause distutils to be\n # loaded from setuptools._distutils. Ref #2906.\n add_shim()\n importlib.import_module('distutils')\n remove_shim()\n\n # check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib.abc\n import importlib.util\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return importlib.import_module('setuptools._distutils')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n if self.pip_imported_during_build():\n return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n @staticmethod\n def pip_imported_during_build():\n \"\"\"\n Detect if pip is being imported in a build script. Ref #2355.\n \"\"\"\n import traceback\n return any(\n frame.f_globals['__file__'].endswith('setup.py')\n for frame, line in traceback.walk_stack(None)\n )\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}]} | 1,757 | 229 |
gh_patches_debug_17193 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-2195 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Negative Hydro values (in EIA, US) cause parser to fail
When scraping historic data, I've noticed that negative hydro values are sometimes returned by EIA. For example, for US-TN 2018-09-23 6am local time, there was hydro production reported of -144.
I am not sure if this means we should just always move negative hydro production values over to storage, or if for each BA we should decide if the hydro production should be 'storage' hydro or run-of-river. What do you think? @systemcatch I guess you are closest to the EIA data.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/EIA.py`
Content:
```
1 #!/usr/bin/env python3
2 """Parser for U.S. Energy Information Administration, https://www.eia.gov/ .
3
4 Aggregates and standardizes data from most of the US ISOs,
5 and exposes them via a unified API.
6
7 Requires an API key, set in the EIA_KEY environment variable. Get one here:
8 https://www.eia.gov/opendata/register.php
9 """
10 import datetime
11 import os
12
13 import arrow
14 from dateutil import parser, tz
15 os.environ.setdefault('EIA_KEY', 'eia_key')
16 from eiapy import Series
17 import requests
18
19 from .lib.validation import validate
20 from .ENTSOE import merge_production_outputs
21
22 EXCHANGES = {
23 'MX-BC->US-CA': 'EBA.CISO-CFE.ID.H',
24 'US-BPA->US-IPC': 'EBA.BPAT-IPCO.ID.H',
25 'US-SPP->US-TX': 'SWPP.ID.H-EBA.ERCO',
26 'US-MISO->US-PJM': 'EBA.MISO-PJM.ID.H',
27 'US-MISO->US-SPP': 'EBA.MISO-SWPP.ID.H',
28 'US-NEISO->US-NY': 'EBA.ISNE-NYIS.ID.H',
29 'US-NY->US-PJM': 'EBA.NYIS-PJM.ID.H'
30 }
31 # based on https://www.eia.gov/beta/electricity/gridmonitor/dashboard/electric_overview/US48/US48
32 REGIONS = {
33 'US-CA': 'CAL',
34 'US-CAR': 'CAR',
35 'US-SPP': 'CENT',
36 'US-FL': 'FLA',
37 'US-PJM': 'MIDA',
38 'US-MISO': 'MIDW',
39 'US-NEISO': 'NE',
40 'US-NY': 'NY',
41 'US-NW': 'NW',
42 'US-SE': 'SE',
43 'US-SEC': 'SEC',
44 'US-SVERI': 'SW',
45 'US-TN': 'TEN',
46 'US-TX': 'TEX',
47 }
48 TYPES = {
49 # 'biomass': 'BM', # not currently supported
50 'coal': 'COL',
51 'gas': 'NG',
52 'hydro': 'WAT',
53 'nuclear': 'NUC',
54 'oil': 'OIL',
55 'unknown': 'OTH',
56 'solar': 'SUN',
57 'wind': 'WND',
58 }
59 PRODUCTION_SERIES = 'EBA.%s-ALL.NG.H'
60 PRODUCTION_MIX_SERIES = 'EBA.%s-ALL.NG.%s.H'
61 DEMAND_SERIES = 'EBA.%s-ALL.D.H'
62 FORECAST_SERIES = 'EBA.%s-ALL.DF.H'
63
64
65 def fetch_consumption_forecast(zone_key, session=None, target_datetime=None, logger=None):
66 return _fetch_series(zone_key, FORECAST_SERIES % REGIONS[zone_key],
67 session=session, target_datetime=target_datetime,
68 logger=logger)
69
70
71 def fetch_production(zone_key, session=None, target_datetime=None, logger=None):
72 return _fetch_series(zone_key, PRODUCTION_SERIES % REGIONS[zone_key],
73 session=session, target_datetime=target_datetime,
74 logger=logger)
75
76
77 def fetch_consumption(zone_key, session=None, target_datetime=None, logger=None):
78 consumption = _fetch_series(zone_key, DEMAND_SERIES % REGIONS[zone_key],
79 session=session, target_datetime=target_datetime,
80 logger=logger)
81 for point in consumption:
82 point['consumption'] = point.pop('value')
83
84 return consumption
85
86
87 def fetch_production_mix(zone_key, session=None, target_datetime=None, logger=None):
88 mixes = []
89 for type, code in TYPES.items():
90 series = PRODUCTION_MIX_SERIES % (REGIONS[zone_key], code)
91 mix = _fetch_series(zone_key, series, session=session,
92 target_datetime=target_datetime, logger=logger)
93 if not mix:
94 continue
95 for point in mix:
96 point.update({
97 'production': {type: point.pop('value')},
98 'storage': {}, # required by merge_production_outputs()
99 })
100
101 #replace small negative solar values (>-5) with 0s
102 point = validate(point, logger=logger, remove_negative=True)
103 mixes.append(mix)
104
105 return merge_production_outputs(mixes, zone_key, merge_source='eia.gov')
106
107
108 def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):
109 sortedcodes = '->'.join(sorted([zone_key1, zone_key2]))
110 exchange = _fetch_series(sortedcodes, EXCHANGES[sortedcodes], session=session,
111 target_datetime=target_datetime, logger=logger)
112 for point in exchange:
113 point.update({
114 'sortedZoneKeys': point.pop('zoneKey'),
115 'netFlow': point.pop('value'),
116 })
117 if sortedcodes == 'MX-BC->US-CA':
118 point['netFlow'] = -point['netFlow']
119
120 return exchange
121
122
123 def _fetch_series(zone_key, series_id, session=None, target_datetime=None,
124 logger=None):
125 """Fetches and converts a data series."""
126 key = os.environ['EIA_KEY']
127 assert key and key != 'eia_key', key
128
129 s = session or requests.Session()
130 series = Series(series_id=series_id, session=s)
131
132 if target_datetime:
133 utc = tz.gettz('UTC')
134 #eia currently only accepts utc timestamps in the form YYYYMMDDTHHZ
135 dt = target_datetime.astimezone(utc).strftime('%Y%m%dT%HZ')
136 raw_data = series.last_from(24, end=dt)
137 else:
138 # Get the last 24 hours available.
139 raw_data = series.last(24)
140
141 # UTC timestamp with no offset returned.
142 if not raw_data.get('series'):
143 # Series doesn't exist. Probably requesting a fuel from a region that
144 # doesn't have any capacity for that fuel type.
145 return []
146
147 return [{
148 'zoneKey': zone_key,
149 'datetime': parser.parse(datapoint[0]),
150 'value': datapoint[1],
151 'source': 'eia.gov',
152 } for datapoint in raw_data['series'][0]['data']]
153
154
155 def main():
156 "Main method, never used by the Electricity Map backend, but handy for testing."
157 from pprint import pprint
158 pprint(fetch_consumption_forecast('US-NY'))
159 pprint(fetch_production('US-SEC'))
160 pprint(fetch_production_mix('US-TN'))
161 pprint(fetch_consumption('US-CAR'))
162 pprint(fetch_exchange('MX-BC', 'US-CA'))
163
164
165 if __name__ == '__main__':
166 main()
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/EIA.py b/parsers/EIA.py
--- a/parsers/EIA.py
+++ b/parsers/EIA.py
@@ -93,12 +93,18 @@
if not mix:
continue
for point in mix:
- point.update({
- 'production': {type: point.pop('value')},
- 'storage': {}, # required by merge_production_outputs()
- })
-
- #replace small negative solar values (>-5) with 0s
+ if type == 'hydro' and point['value'] < 0:
+ point.update({
+ 'production': {},# required by merge_production_outputs()
+ 'storage': {type: point.pop('value')},
+ })
+ else:
+ point.update({
+ 'production': {type: point.pop('value')},
+ 'storage': {}, # required by merge_production_outputs()
+ })
+
+ #replace small negative values (>-5) with 0s This is necessary for solar
point = validate(point, logger=logger, remove_negative=True)
mixes.append(mix)
| {"golden_diff": "diff --git a/parsers/EIA.py b/parsers/EIA.py\n--- a/parsers/EIA.py\n+++ b/parsers/EIA.py\n@@ -93,12 +93,18 @@\n if not mix:\n continue\n for point in mix:\n- point.update({\n- 'production': {type: point.pop('value')},\n- 'storage': {}, # required by merge_production_outputs()\n- })\n-\n- #replace small negative solar values (>-5) with 0s\n+ if type == 'hydro' and point['value'] < 0:\n+ point.update({\n+ 'production': {},# required by merge_production_outputs()\n+ 'storage': {type: point.pop('value')},\n+ })\n+ else:\n+ point.update({\n+ 'production': {type: point.pop('value')},\n+ 'storage': {}, # required by merge_production_outputs()\n+ })\n+\n+ #replace small negative values (>-5) with 0s This is necessary for solar\n point = validate(point, logger=logger, remove_negative=True)\n mixes.append(mix)\n", "issue": "Negative Hydro values (in EIA, US) cause parser to fail\nWhen scraping historic data, I've noticed that negative hydro values are sometimes returned by EIA. For example, for US-TN 2018-09-23 6am local time, there was hydro production reported of -144.\r\n\r\nI am not sure if this means we should just always move negative hydro production values over to storage, or if for each BA we should decide if the hydro production should be 'storage' hydro or run-of-river. What you think? @systemcatch I guess you are closest to the EIA data.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Parser for U.S. Energy Information Administration, https://www.eia.gov/ .\n\nAggregates and standardizes data from most of the US ISOs,\nand exposes them via a unified API.\n\nRequires an API key, set in the EIA_KEY environment variable. Get one here:\nhttps://www.eia.gov/opendata/register.php\n\"\"\"\nimport datetime\nimport os\n\nimport arrow\nfrom dateutil import parser, tz\nos.environ.setdefault('EIA_KEY', 'eia_key')\nfrom eiapy import Series\nimport requests\n\nfrom .lib.validation import validate\nfrom .ENTSOE import merge_production_outputs\n\nEXCHANGES = {\n 'MX-BC->US-CA': 'EBA.CISO-CFE.ID.H',\n 'US-BPA->US-IPC': 'EBA.BPAT-IPCO.ID.H',\n 'US-SPP->US-TX': 'SWPP.ID.H-EBA.ERCO',\n 'US-MISO->US-PJM': 'EBA.MISO-PJM.ID.H',\n 'US-MISO->US-SPP': 'EBA.MISO-SWPP.ID.H',\n 'US-NEISO->US-NY': 'EBA.ISNE-NYIS.ID.H',\n 'US-NY->US-PJM': 'EBA.NYIS-PJM.ID.H'\n}\n# based on https://www.eia.gov/beta/electricity/gridmonitor/dashboard/electric_overview/US48/US48\nREGIONS = {\n 'US-CA': 'CAL',\n 'US-CAR': 'CAR',\n 'US-SPP': 'CENT',\n 'US-FL': 'FLA',\n 'US-PJM': 'MIDA',\n 'US-MISO': 'MIDW',\n 'US-NEISO': 'NE',\n 'US-NY': 'NY',\n 'US-NW': 'NW',\n 'US-SE': 'SE',\n 'US-SEC': 'SEC',\n 'US-SVERI': 'SW',\n 'US-TN': 'TEN',\n 'US-TX': 'TEX',\n}\nTYPES = {\n # 'biomass': 'BM', # not currently supported\n 'coal': 'COL',\n 'gas': 'NG',\n 'hydro': 'WAT',\n 'nuclear': 'NUC',\n 'oil': 'OIL',\n 'unknown': 'OTH',\n 'solar': 'SUN',\n 'wind': 'WND',\n}\nPRODUCTION_SERIES = 'EBA.%s-ALL.NG.H'\nPRODUCTION_MIX_SERIES = 'EBA.%s-ALL.NG.%s.H'\nDEMAND_SERIES = 'EBA.%s-ALL.D.H'\nFORECAST_SERIES = 'EBA.%s-ALL.DF.H'\n\n\ndef fetch_consumption_forecast(zone_key, session=None, target_datetime=None, logger=None):\n return _fetch_series(zone_key, FORECAST_SERIES % REGIONS[zone_key],\n session=session, target_datetime=target_datetime,\n logger=logger)\n\n\ndef fetch_production(zone_key, session=None, target_datetime=None, logger=None):\n return _fetch_series(zone_key, PRODUCTION_SERIES % REGIONS[zone_key],\n session=session, target_datetime=target_datetime,\n logger=logger)\n\n\ndef fetch_consumption(zone_key, 
session=None, target_datetime=None, logger=None):\n consumption = _fetch_series(zone_key, DEMAND_SERIES % REGIONS[zone_key],\n session=session, target_datetime=target_datetime,\n logger=logger)\n for point in consumption:\n point['consumption'] = point.pop('value')\n\n return consumption\n\n\ndef fetch_production_mix(zone_key, session=None, target_datetime=None, logger=None):\n mixes = []\n for type, code in TYPES.items():\n series = PRODUCTION_MIX_SERIES % (REGIONS[zone_key], code)\n mix = _fetch_series(zone_key, series, session=session,\n target_datetime=target_datetime, logger=logger)\n if not mix:\n continue\n for point in mix:\n point.update({\n 'production': {type: point.pop('value')},\n 'storage': {}, # required by merge_production_outputs()\n })\n\n #replace small negative solar values (>-5) with 0s\n point = validate(point, logger=logger, remove_negative=True)\n mixes.append(mix)\n\n return merge_production_outputs(mixes, zone_key, merge_source='eia.gov')\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n sortedcodes = '->'.join(sorted([zone_key1, zone_key2]))\n exchange = _fetch_series(sortedcodes, EXCHANGES[sortedcodes], session=session,\n target_datetime=target_datetime, logger=logger)\n for point in exchange:\n point.update({\n 'sortedZoneKeys': point.pop('zoneKey'),\n 'netFlow': point.pop('value'),\n })\n if sortedcodes == 'MX-BC->US-CA':\n point['netFlow'] = -point['netFlow']\n\n return exchange\n\n\ndef _fetch_series(zone_key, series_id, session=None, target_datetime=None,\n logger=None):\n \"\"\"Fetches and converts a data series.\"\"\"\n key = os.environ['EIA_KEY']\n assert key and key != 'eia_key', key\n\n s = session or requests.Session()\n series = Series(series_id=series_id, session=s)\n\n if target_datetime:\n utc = tz.gettz('UTC')\n #eia currently only accepts utc timestamps in the form YYYYMMDDTHHZ\n dt = target_datetime.astimezone(utc).strftime('%Y%m%dT%HZ')\n raw_data = series.last_from(24, end=dt)\n else:\n # Get the last 24 hours available.\n raw_data = series.last(24)\n\n # UTC timestamp with no offset returned.\n if not raw_data.get('series'):\n # Series doesn't exist. Probably requesting a fuel from a region that\n # doesn't have any capacity for that fuel type.\n return []\n\n return [{\n 'zoneKey': zone_key,\n 'datetime': parser.parse(datapoint[0]),\n 'value': datapoint[1],\n 'source': 'eia.gov',\n } for datapoint in raw_data['series'][0]['data']]\n\n\ndef main():\n \"Main method, never used by the Electricity Map backend, but handy for testing.\"\n from pprint import pprint\n pprint(fetch_consumption_forecast('US-NY'))\n pprint(fetch_production('US-SEC'))\n pprint(fetch_production_mix('US-TN'))\n pprint(fetch_consumption('US-CAR'))\n pprint(fetch_exchange('MX-BC', 'US-CA'))\n\n\nif __name__ == '__main__':\n main()\n", "path": "parsers/EIA.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Parser for U.S. Energy Information Administration, https://www.eia.gov/ .\n\nAggregates and standardizes data from most of the US ISOs,\nand exposes them via a unified API.\n\nRequires an API key, set in the EIA_KEY environment variable. 
Get one here:\nhttps://www.eia.gov/opendata/register.php\n\"\"\"\nimport datetime\nimport os\n\nimport arrow\nfrom dateutil import parser, tz\nos.environ.setdefault('EIA_KEY', 'eia_key')\nfrom eiapy import Series\nimport requests\n\nfrom .lib.validation import validate\nfrom .ENTSOE import merge_production_outputs\n\nEXCHANGES = {\n 'MX-BC->US-CA': 'EBA.CISO-CFE.ID.H',\n 'US-BPA->US-IPC': 'EBA.BPAT-IPCO.ID.H',\n 'US-SPP->US-TX': 'SWPP.ID.H-EBA.ERCO',\n 'US-MISO->US-PJM': 'EBA.MISO-PJM.ID.H',\n 'US-MISO->US-SPP': 'EBA.MISO-SWPP.ID.H',\n 'US-NEISO->US-NY': 'EBA.ISNE-NYIS.ID.H',\n 'US-NY->US-PJM': 'EBA.NYIS-PJM.ID.H'\n}\n# based on https://www.eia.gov/beta/electricity/gridmonitor/dashboard/electric_overview/US48/US48\nREGIONS = {\n 'US-CA': 'CAL',\n 'US-CAR': 'CAR',\n 'US-SPP': 'CENT',\n 'US-FL': 'FLA',\n 'US-PJM': 'MIDA',\n 'US-MISO': 'MIDW',\n 'US-NEISO': 'NE',\n 'US-NY': 'NY',\n 'US-NW': 'NW',\n 'US-SE': 'SE',\n 'US-SEC': 'SEC',\n 'US-SVERI': 'SW',\n 'US-TN': 'TEN',\n 'US-TX': 'TEX',\n}\nTYPES = {\n # 'biomass': 'BM', # not currently supported\n 'coal': 'COL',\n 'gas': 'NG',\n 'hydro': 'WAT',\n 'nuclear': 'NUC',\n 'oil': 'OIL',\n 'unknown': 'OTH',\n 'solar': 'SUN',\n 'wind': 'WND',\n}\nPRODUCTION_SERIES = 'EBA.%s-ALL.NG.H'\nPRODUCTION_MIX_SERIES = 'EBA.%s-ALL.NG.%s.H'\nDEMAND_SERIES = 'EBA.%s-ALL.D.H'\nFORECAST_SERIES = 'EBA.%s-ALL.DF.H'\n\n\ndef fetch_consumption_forecast(zone_key, session=None, target_datetime=None, logger=None):\n return _fetch_series(zone_key, FORECAST_SERIES % REGIONS[zone_key],\n session=session, target_datetime=target_datetime,\n logger=logger)\n\n\ndef fetch_production(zone_key, session=None, target_datetime=None, logger=None):\n return _fetch_series(zone_key, PRODUCTION_SERIES % REGIONS[zone_key],\n session=session, target_datetime=target_datetime,\n logger=logger)\n\n\ndef fetch_consumption(zone_key, session=None, target_datetime=None, logger=None):\n consumption = _fetch_series(zone_key, DEMAND_SERIES % REGIONS[zone_key],\n session=session, target_datetime=target_datetime,\n logger=logger)\n for point in consumption:\n point['consumption'] = point.pop('value')\n\n return consumption\n\n\ndef fetch_production_mix(zone_key, session=None, target_datetime=None, logger=None):\n mixes = []\n for type, code in TYPES.items():\n series = PRODUCTION_MIX_SERIES % (REGIONS[zone_key], code)\n mix = _fetch_series(zone_key, series, session=session,\n target_datetime=target_datetime, logger=logger)\n if not mix:\n continue\n for point in mix:\n if type == 'hydro' and point['value'] < 0:\n point.update({\n 'production': {},# required by merge_production_outputs()\n 'storage': {type: point.pop('value')},\n })\n else:\n point.update({\n 'production': {type: point.pop('value')},\n 'storage': {}, # required by merge_production_outputs()\n })\n\n #replace small negative values (>-5) with 0s This is necessary for solar\n point = validate(point, logger=logger, remove_negative=True)\n mixes.append(mix)\n\n return merge_production_outputs(mixes, zone_key, merge_source='eia.gov')\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n sortedcodes = '->'.join(sorted([zone_key1, zone_key2]))\n exchange = _fetch_series(sortedcodes, EXCHANGES[sortedcodes], session=session,\n target_datetime=target_datetime, logger=logger)\n for point in exchange:\n point.update({\n 'sortedZoneKeys': point.pop('zoneKey'),\n 'netFlow': point.pop('value'),\n })\n if sortedcodes == 'MX-BC->US-CA':\n point['netFlow'] = -point['netFlow']\n\n return exchange\n\n\ndef 
_fetch_series(zone_key, series_id, session=None, target_datetime=None,\n logger=None):\n \"\"\"Fetches and converts a data series.\"\"\"\n key = os.environ['EIA_KEY']\n assert key and key != 'eia_key', key\n\n s = session or requests.Session()\n series = Series(series_id=series_id, session=s)\n\n if target_datetime:\n utc = tz.gettz('UTC')\n #eia currently only accepts utc timestamps in the form YYYYMMDDTHHZ\n dt = target_datetime.astimezone(utc).strftime('%Y%m%dT%HZ')\n raw_data = series.last_from(24, end=dt)\n else:\n # Get the last 24 hours available.\n raw_data = series.last(24)\n\n # UTC timestamp with no offset returned.\n if not raw_data.get('series'):\n # Series doesn't exist. Probably requesting a fuel from a region that\n # doesn't have any capacity for that fuel type.\n return []\n\n return [{\n 'zoneKey': zone_key,\n 'datetime': parser.parse(datapoint[0]),\n 'value': datapoint[1],\n 'source': 'eia.gov',\n } for datapoint in raw_data['series'][0]['data']]\n\n\ndef main():\n \"Main method, never used by the Electricity Map backend, but handy for testing.\"\n from pprint import pprint\n pprint(fetch_consumption_forecast('US-NY'))\n pprint(fetch_production('US-SEC'))\n pprint(fetch_production_mix('US-TN'))\n pprint(fetch_consumption('US-CAR'))\n pprint(fetch_exchange('MX-BC', 'US-CA'))\n\n\nif __name__ == '__main__':\n main()\n", "path": "parsers/EIA.py"}]} | 2,259 | 248 |
gh_patches_debug_64121 | rasdani/github-patches | git_diff | plotly__dash-333 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The README is in markdown and doesn't render properly on pypi.io
See: https://pypi.org/project/dash/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import io
2 from setuptools import setup, find_packages
3
4 main_ns = {}
5 exec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used
6
7 setup(
8 name='dash',
9 version=main_ns['__version__'],
10 author='chris p',
11 author_email='[email protected]',
12 packages=find_packages(exclude=['tests*']),
13 license='MIT',
14 description=('A Python framework for building reactive web-apps. '
15 'Developed by Plotly.'),
16 long_description=io.open('README.md', encoding='utf-8').read(),
17 install_requires=[
18 'Flask>=0.12',
19 'flask-compress',
20 'plotly',
21 'dash_renderer',
22 ],
23 url='https://plot.ly/dash',
24 classifiers=[
25 'Development Status :: 5 - Production/Stable',
26 'Environment :: Web Environment',
27 'Framework :: Flask',
28 'Intended Audience :: Developers',
29 'Intended Audience :: Education',
30 'Intended Audience :: Financial and Insurance Industry',
31 'Intended Audience :: Healthcare Industry',
32 'Intended Audience :: Manufacturing',
33 'Intended Audience :: Science/Research',
34 'License :: OSI Approved :: MIT License',
35 'Programming Language :: Python :: 2.7',
36 'Programming Language :: Python :: 3.3',
37 'Programming Language :: Python :: 3.4',
38 'Programming Language :: Python :: 3.5',
39 'Programming Language :: Python :: 3.6',
40 'Topic :: Database :: Front-Ends',
41 'Topic :: Office/Business :: Financial :: Spreadsheet',
42 'Topic :: Scientific/Engineering :: Visualization',
43 'Topic :: Software Development :: Libraries :: Application Frameworks',
44 'Topic :: Software Development :: Widget Sets'
45 ]
46 )
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,6 +14,7 @@
description=('A Python framework for building reactive web-apps. '
'Developed by Plotly.'),
long_description=io.open('README.md', encoding='utf-8').read(),
+ long_description_content_type='text/markdown',
install_requires=[
'Flask>=0.12',
'flask-compress',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,6 +14,7 @@\n description=('A Python framework for building reactive web-apps. '\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n+ long_description_content_type='text/markdown',\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n", "issue": "The README is in markdown and doesn't render properly on pypi.io\nSee: https://pypi.org/project/dash/\r\n\n", "before_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used\n\nsetup(\n name='dash',\n version=main_ns['__version__'],\n author='chris p',\n author_email='[email protected]',\n packages=find_packages(exclude=['tests*']),\n license='MIT',\n description=('A Python framework for building reactive web-apps. '\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n 'dash_renderer',\n ],\n url='https://plot.ly/dash',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Flask',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Financial and Insurance Industry',\n 'Intended Audience :: Healthcare Industry',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Widget Sets'\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used\n\nsetup(\n name='dash',\n version=main_ns['__version__'],\n author='chris p',\n author_email='[email protected]',\n packages=find_packages(exclude=['tests*']),\n license='MIT',\n description=('A Python framework for building reactive web-apps. 
'\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n long_description_content_type='text/markdown',\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n 'dash_renderer',\n ],\n url='https://plot.ly/dash',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Flask',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Financial and Insurance Industry',\n 'Intended Audience :: Healthcare Industry',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Widget Sets'\n ]\n)\n", "path": "setup.py"}]} | 749 | 104 |
gh_patches_debug_41469 | rasdani/github-patches | git_diff | encode__uvicorn-1600 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
uvicorn eats SIGINTs, does not propagate exceptions
The following snippet cannot be killed with a SIGINT (ctrl+c):
```python
import asyncio

from starlette.applications import Starlette
from uvicorn import Config, Server

async def web_ui():
    await Server(Config(Starlette())).serve()

async def task():
    await asyncio.sleep(100000000000000)

async def main():
    await asyncio.gather(web_ui(), task())

if __name__ == "__main__":
    asyncio.run(main())
```
It appears that uvicorn is eating SIGINTs and does not propagate the KeyboardInterrupt and/or Cancelled exceptions. Thanks for having a look.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/server.py`
Content:
```
1 from __future__ import annotations
2
3 import asyncio
4 import logging
5 import os
6 import platform
7 import signal
8 import socket
9 import sys
10 import threading
11 import time
12 from email.utils import formatdate
13 from types import FrameType
14 from typing import TYPE_CHECKING, Sequence, Union
15
16 import click
17
18 from uvicorn.config import Config
19
20 if TYPE_CHECKING:
21 from uvicorn.protocols.http.h11_impl import H11Protocol
22 from uvicorn.protocols.http.httptools_impl import HttpToolsProtocol
23 from uvicorn.protocols.websockets.websockets_impl import WebSocketProtocol
24 from uvicorn.protocols.websockets.wsproto_impl import WSProtocol
25
26 Protocols = Union[H11Protocol, HttpToolsProtocol, WSProtocol, WebSocketProtocol]
27
28 HANDLED_SIGNALS = (
29 signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.
30 signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.
31 )
32 if sys.platform == "win32": # pragma: py-not-win32
33 HANDLED_SIGNALS += (signal.SIGBREAK,) # Windows signal 21. Sent by Ctrl+Break.
34
35 logger = logging.getLogger("uvicorn.error")
36
37
38 class ServerState:
39 """
40 Shared servers state that is available between all protocol instances.
41 """
42
43 def __init__(self) -> None:
44 self.total_requests = 0
45 self.connections: set[Protocols] = set()
46 self.tasks: set[asyncio.Task[None]] = set()
47 self.default_headers: list[tuple[bytes, bytes]] = []
48
49
50 class Server:
51 def __init__(self, config: Config) -> None:
52 self.config = config
53 self.server_state = ServerState()
54
55 self.started = False
56 self.should_exit = False
57 self.force_exit = False
58 self.last_notified = 0.0
59
60 def run(self, sockets: list[socket.socket] | None = None) -> None:
61 self.config.setup_event_loop()
62 return asyncio.run(self.serve(sockets=sockets))
63
64 async def serve(self, sockets: list[socket.socket] | None = None) -> None:
65 process_id = os.getpid()
66
67 config = self.config
68 if not config.loaded:
69 config.load()
70
71 self.lifespan = config.lifespan_class(config)
72
73 self.install_signal_handlers()
74
75 message = "Started server process [%d]"
76 color_message = "Started server process [" + click.style("%d", fg="cyan") + "]"
77 logger.info(message, process_id, extra={"color_message": color_message})
78
79 await self.startup(sockets=sockets)
80 if self.should_exit:
81 return
82 await self.main_loop()
83 await self.shutdown(sockets=sockets)
84
85 message = "Finished server process [%d]"
86 color_message = "Finished server process [" + click.style("%d", fg="cyan") + "]"
87 logger.info(message, process_id, extra={"color_message": color_message})
88
89 async def startup(self, sockets: list[socket.socket] | None = None) -> None:
90 await self.lifespan.startup()
91 if self.lifespan.should_exit:
92 self.should_exit = True
93 return
94
95 config = self.config
96
97 def create_protocol(
98 _loop: asyncio.AbstractEventLoop | None = None,
99 ) -> asyncio.Protocol:
100 return config.http_protocol_class( # type: ignore[call-arg]
101 config=config,
102 server_state=self.server_state,
103 app_state=self.lifespan.state,
104 _loop=_loop,
105 )
106
107 loop = asyncio.get_running_loop()
108
109 listeners: Sequence[socket.SocketType]
110 if sockets is not None:
111 # Explicitly passed a list of open sockets.
112 # We use this when the server is run from a Gunicorn worker.
113
114 def _share_socket(
115 sock: socket.SocketType,
116 ) -> socket.SocketType: # pragma py-linux pragma: py-darwin
117 # Windows requires the socket be explicitly shared across
118 # multiple workers (processes).
119 from socket import fromshare # type: ignore[attr-defined]
120
121 sock_data = sock.share(os.getpid()) # type: ignore[attr-defined]
122 return fromshare(sock_data)
123
124 self.servers: list[asyncio.base_events.Server] = []
125 for sock in sockets:
126 is_windows = platform.system() == "Windows"
127 if config.workers > 1 and is_windows: # pragma: py-not-win32
128 sock = _share_socket(sock) # type: ignore[assignment]
129 server = await loop.create_server(create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog)
130 self.servers.append(server)
131 listeners = sockets
132
133 elif config.fd is not None: # pragma: py-win32
134 # Use an existing socket, from a file descriptor.
135 sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM)
136 server = await loop.create_server(create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog)
137 assert server.sockets is not None # mypy
138 listeners = server.sockets
139 self.servers = [server]
140
141 elif config.uds is not None: # pragma: py-win32
142 # Create a socket using UNIX domain socket.
143 uds_perms = 0o666
144 if os.path.exists(config.uds):
145 uds_perms = os.stat(config.uds).st_mode
146 server = await loop.create_unix_server(
147 create_protocol, path=config.uds, ssl=config.ssl, backlog=config.backlog
148 )
149 os.chmod(config.uds, uds_perms)
150 assert server.sockets is not None # mypy
151 listeners = server.sockets
152 self.servers = [server]
153
154 else:
155 # Standard case. Create a socket from a host/port pair.
156 try:
157 server = await loop.create_server(
158 create_protocol,
159 host=config.host,
160 port=config.port,
161 ssl=config.ssl,
162 backlog=config.backlog,
163 )
164 except OSError as exc:
165 logger.error(exc)
166 await self.lifespan.shutdown()
167 sys.exit(1)
168
169 assert server.sockets is not None
170 listeners = server.sockets
171 self.servers = [server]
172
173 if sockets is None:
174 self._log_started_message(listeners)
175 else:
176 # We're most likely running multiple workers, so a message has already been
177 # logged by `config.bind_socket()`.
178 pass
179
180 self.started = True
181
182 def _log_started_message(self, listeners: Sequence[socket.SocketType]) -> None:
183 config = self.config
184
185 if config.fd is not None: # pragma: py-win32
186 sock = listeners[0]
187 logger.info(
188 "Uvicorn running on socket %s (Press CTRL+C to quit)",
189 sock.getsockname(),
190 )
191
192 elif config.uds is not None: # pragma: py-win32
193 logger.info("Uvicorn running on unix socket %s (Press CTRL+C to quit)", config.uds)
194
195 else:
196 addr_format = "%s://%s:%d"
197 host = "0.0.0.0" if config.host is None else config.host
198 if ":" in host:
199 # It's an IPv6 address.
200 addr_format = "%s://[%s]:%d"
201
202 port = config.port
203 if port == 0:
204 port = listeners[0].getsockname()[1]
205
206 protocol_name = "https" if config.ssl else "http"
207 message = f"Uvicorn running on {addr_format} (Press CTRL+C to quit)"
208 color_message = "Uvicorn running on " + click.style(addr_format, bold=True) + " (Press CTRL+C to quit)"
209 logger.info(
210 message,
211 protocol_name,
212 host,
213 port,
214 extra={"color_message": color_message},
215 )
216
217 async def main_loop(self) -> None:
218 counter = 0
219 should_exit = await self.on_tick(counter)
220 while not should_exit:
221 counter += 1
222 counter = counter % 864000
223 await asyncio.sleep(0.1)
224 should_exit = await self.on_tick(counter)
225
226 async def on_tick(self, counter: int) -> bool:
227 # Update the default headers, once per second.
228 if counter % 10 == 0:
229 current_time = time.time()
230 current_date = formatdate(current_time, usegmt=True).encode()
231
232 if self.config.date_header:
233 date_header = [(b"date", current_date)]
234 else:
235 date_header = []
236
237 self.server_state.default_headers = date_header + self.config.encoded_headers
238
239 # Callback to `callback_notify` once every `timeout_notify` seconds.
240 if self.config.callback_notify is not None:
241 if current_time - self.last_notified > self.config.timeout_notify:
242 self.last_notified = current_time
243 await self.config.callback_notify()
244
245 # Determine if we should exit.
246 if self.should_exit:
247 return True
248 if self.config.limit_max_requests is not None:
249 return self.server_state.total_requests >= self.config.limit_max_requests
250 return False
251
252 async def shutdown(self, sockets: list[socket.socket] | None = None) -> None:
253 logger.info("Shutting down")
254
255 # Stop accepting new connections.
256 for server in self.servers:
257 server.close()
258 for sock in sockets or []:
259 sock.close()
260
261 # Request shutdown on all existing connections.
262 for connection in list(self.server_state.connections):
263 connection.shutdown()
264 await asyncio.sleep(0.1)
265
266 # When 3.10 is not supported anymore, use `async with asyncio.timeout(...):`.
267 try:
268 await asyncio.wait_for(
269 self._wait_tasks_to_complete(),
270 timeout=self.config.timeout_graceful_shutdown,
271 )
272 except asyncio.TimeoutError:
273 logger.error(
274 "Cancel %s running task(s), timeout graceful shutdown exceeded",
275 len(self.server_state.tasks),
276 )
277 for t in self.server_state.tasks:
278 if sys.version_info < (3, 9): # pragma: py-gte-39
279 t.cancel()
280 else: # pragma: py-lt-39
281 t.cancel(msg="Task cancelled, timeout graceful shutdown exceeded")
282
283 # Send the lifespan shutdown event, and wait for application shutdown.
284 if not self.force_exit:
285 await self.lifespan.shutdown()
286
287 async def _wait_tasks_to_complete(self) -> None:
288 # Wait for existing connections to finish sending responses.
289 if self.server_state.connections and not self.force_exit:
290 msg = "Waiting for connections to close. (CTRL+C to force quit)"
291 logger.info(msg)
292 while self.server_state.connections and not self.force_exit:
293 await asyncio.sleep(0.1)
294
295 # Wait for existing tasks to complete.
296 if self.server_state.tasks and not self.force_exit:
297 msg = "Waiting for background tasks to complete. (CTRL+C to force quit)"
298 logger.info(msg)
299 while self.server_state.tasks and not self.force_exit:
300 await asyncio.sleep(0.1)
301
302 for server in self.servers:
303 await server.wait_closed()
304
305 def install_signal_handlers(self) -> None:
306 if threading.current_thread() is not threading.main_thread():
307 # Signals can only be listened to from the main thread.
308 return
309
310 loop = asyncio.get_event_loop()
311
312 try:
313 for sig in HANDLED_SIGNALS:
314 loop.add_signal_handler(sig, self.handle_exit, sig, None)
315 except NotImplementedError: # pragma: no cover
316 # Windows
317 for sig in HANDLED_SIGNALS:
318 signal.signal(sig, self.handle_exit)
319
320 def handle_exit(self, sig: int, frame: FrameType | None) -> None:
321 if self.should_exit and sig == signal.SIGINT:
322 self.force_exit = True
323 else:
324 self.should_exit = True
325
```
--- END FILES ---
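A side note on the signal handling shown above: `install_signal_handlers` registers loop-level handlers for SIGINT/SIGTERM, and such handlers replace Python's default Ctrl+C behaviour for the whole process. A minimal standalone illustration of that effect (not uvicorn code; Unix-only, since `loop.add_signal_handler` raises `NotImplementedError` on Windows) might look like:

```python
import asyncio
import signal


async def main() -> None:
    loop = asyncio.get_running_loop()
    # Once a SIGINT handler is installed on the loop, Ctrl+C no longer raises
    # KeyboardInterrupt; it only invokes the callback, so the sleep below keeps running.
    loop.add_signal_handler(signal.SIGINT, lambda: print("SIGINT observed, nothing cancelled"))
    await asyncio.sleep(3600)


# asyncio.run(main())  # uncomment to try: press Ctrl+C and note the sleep is not interrupted
```

Handlers installed this way are never removed again in the file above, which is why the patch that follows makes them restorable.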
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/uvicorn/server.py b/uvicorn/server.py
--- a/uvicorn/server.py
+++ b/uvicorn/server.py
@@ -1,6 +1,7 @@
from __future__ import annotations
import asyncio
+import contextlib
import logging
import os
import platform
@@ -11,7 +12,7 @@
import time
from email.utils import formatdate
from types import FrameType
-from typing import TYPE_CHECKING, Sequence, Union
+from typing import TYPE_CHECKING, Generator, Sequence, Union
import click
@@ -57,11 +58,17 @@
self.force_exit = False
self.last_notified = 0.0
+ self._captured_signals: list[int] = []
+
def run(self, sockets: list[socket.socket] | None = None) -> None:
self.config.setup_event_loop()
return asyncio.run(self.serve(sockets=sockets))
async def serve(self, sockets: list[socket.socket] | None = None) -> None:
+ with self.capture_signals():
+ await self._serve(sockets)
+
+ async def _serve(self, sockets: list[socket.socket] | None = None) -> None:
process_id = os.getpid()
config = self.config
@@ -70,8 +77,6 @@
self.lifespan = config.lifespan_class(config)
- self.install_signal_handlers()
-
message = "Started server process [%d]"
color_message = "Started server process [" + click.style("%d", fg="cyan") + "]"
logger.info(message, process_id, extra={"color_message": color_message})
@@ -302,22 +307,28 @@
for server in self.servers:
await server.wait_closed()
- def install_signal_handlers(self) -> None:
+ @contextlib.contextmanager
+ def capture_signals(self) -> Generator[None, None, None]:
+ # Signals can only be listened to from the main thread.
if threading.current_thread() is not threading.main_thread():
- # Signals can only be listened to from the main thread.
+ yield
return
-
- loop = asyncio.get_event_loop()
-
+ # always use signal.signal, even if loop.add_signal_handler is available
+ # this allows to restore previous signal handlers later on
+ original_handlers = {sig: signal.signal(sig, self.handle_exit) for sig in HANDLED_SIGNALS}
try:
- for sig in HANDLED_SIGNALS:
- loop.add_signal_handler(sig, self.handle_exit, sig, None)
- except NotImplementedError: # pragma: no cover
- # Windows
- for sig in HANDLED_SIGNALS:
- signal.signal(sig, self.handle_exit)
+ yield
+ finally:
+ for sig, handler in original_handlers.items():
+ signal.signal(sig, handler)
+ # If we did gracefully shut down due to a signal, try to
+ # trigger the expected behaviour now; multiple signals would be
+ # done LIFO, see https://stackoverflow.com/questions/48434964
+ for captured_signal in reversed(self._captured_signals):
+ signal.raise_signal(captured_signal)
def handle_exit(self, sig: int, frame: FrameType | None) -> None:
+ self._captured_signals.append(sig)
if self.should_exit and sig == signal.SIGINT:
self.force_exit = True
else:
| {"golden_diff": "diff --git a/uvicorn/server.py b/uvicorn/server.py\n--- a/uvicorn/server.py\n+++ b/uvicorn/server.py\n@@ -1,6 +1,7 @@\n from __future__ import annotations\n \n import asyncio\n+import contextlib\n import logging\n import os\n import platform\n@@ -11,7 +12,7 @@\n import time\n from email.utils import formatdate\n from types import FrameType\n-from typing import TYPE_CHECKING, Sequence, Union\n+from typing import TYPE_CHECKING, Generator, Sequence, Union\n \n import click\n \n@@ -57,11 +58,17 @@\n self.force_exit = False\n self.last_notified = 0.0\n \n+ self._captured_signals: list[int] = []\n+\n def run(self, sockets: list[socket.socket] | None = None) -> None:\n self.config.setup_event_loop()\n return asyncio.run(self.serve(sockets=sockets))\n \n async def serve(self, sockets: list[socket.socket] | None = None) -> None:\n+ with self.capture_signals():\n+ await self._serve(sockets)\n+\n+ async def _serve(self, sockets: list[socket.socket] | None = None) -> None:\n process_id = os.getpid()\n \n config = self.config\n@@ -70,8 +77,6 @@\n \n self.lifespan = config.lifespan_class(config)\n \n- self.install_signal_handlers()\n-\n message = \"Started server process [%d]\"\n color_message = \"Started server process [\" + click.style(\"%d\", fg=\"cyan\") + \"]\"\n logger.info(message, process_id, extra={\"color_message\": color_message})\n@@ -302,22 +307,28 @@\n for server in self.servers:\n await server.wait_closed()\n \n- def install_signal_handlers(self) -> None:\n+ @contextlib.contextmanager\n+ def capture_signals(self) -> Generator[None, None, None]:\n+ # Signals can only be listened to from the main thread.\n if threading.current_thread() is not threading.main_thread():\n- # Signals can only be listened to from the main thread.\n+ yield\n return\n-\n- loop = asyncio.get_event_loop()\n-\n+ # always use signal.signal, even if loop.add_signal_handler is available\n+ # this allows to restore previous signal handlers later on\n+ original_handlers = {sig: signal.signal(sig, self.handle_exit) for sig in HANDLED_SIGNALS}\n try:\n- for sig in HANDLED_SIGNALS:\n- loop.add_signal_handler(sig, self.handle_exit, sig, None)\n- except NotImplementedError: # pragma: no cover\n- # Windows\n- for sig in HANDLED_SIGNALS:\n- signal.signal(sig, self.handle_exit)\n+ yield\n+ finally:\n+ for sig, handler in original_handlers.items():\n+ signal.signal(sig, handler)\n+ # If we did gracefully shut down due to a signal, try to\n+ # trigger the expected behaviour now; multiple signals would be\n+ # done LIFO, see https://stackoverflow.com/questions/48434964\n+ for captured_signal in reversed(self._captured_signals):\n+ signal.raise_signal(captured_signal)\n \n def handle_exit(self, sig: int, frame: FrameType | None) -> None:\n+ self._captured_signals.append(sig)\n if self.should_exit and sig == signal.SIGINT:\n self.force_exit = True\n else:\n", "issue": "uvicorn eats SIGINTs, does not propagate exceptions\nThe following snippet cannot be killed with a SIGINT (ctrl+c):\r\n\r\n```python\r\nimport asyncio\r\n\r\nfrom starlette.applications import Starlette\r\nfrom uvicorn import Config, Server\r\n\r\nasync def web_ui():\r\n await Server(Config(Starlette())).serve()\r\n\r\nasync def task():\r\n await asyncio.sleep(100000000000000)\r\n\r\nasync def main():\r\n await asyncio.gather(web_ui(), task())\r\n\r\nif __name__ == \"__main__\":\r\n asyncio.run(main())\r\n```\r\n\r\nIt appears that uvicorn is eating SIGINTs and does not propagate the KeyboardInterrupt and/or Cancelled exceptions. 
Thanks for having a look.\n\n<!-- POLAR PLEDGE BADGE START -->\n> [!IMPORTANT]\n> - We're using [Polar.sh](https://polar.sh/encode) so you can upvote and help fund this issue.\n> - We receive the funding once the issue is completed & confirmed by you.\n> - Thank you in advance for helping prioritize & fund our backlog.\n\n<a href=\"https://polar.sh/encode/uvicorn/issues/1579\">\n<picture>\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/encode/uvicorn/issues/1579/pledge.svg?darkmode=1\">\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/encode/uvicorn/issues/1579/pledge.svg\">\n</picture>\n</a>\n<!-- POLAR PLEDGE BADGE END -->\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport asyncio\nimport logging\nimport os\nimport platform\nimport signal\nimport socket\nimport sys\nimport threading\nimport time\nfrom email.utils import formatdate\nfrom types import FrameType\nfrom typing import TYPE_CHECKING, Sequence, Union\n\nimport click\n\nfrom uvicorn.config import Config\n\nif TYPE_CHECKING:\n from uvicorn.protocols.http.h11_impl import H11Protocol\n from uvicorn.protocols.http.httptools_impl import HttpToolsProtocol\n from uvicorn.protocols.websockets.websockets_impl import WebSocketProtocol\n from uvicorn.protocols.websockets.wsproto_impl import WSProtocol\n\n Protocols = Union[H11Protocol, HttpToolsProtocol, WSProtocol, WebSocketProtocol]\n\nHANDLED_SIGNALS = (\n signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.\n signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.\n)\nif sys.platform == \"win32\": # pragma: py-not-win32\n HANDLED_SIGNALS += (signal.SIGBREAK,) # Windows signal 21. Sent by Ctrl+Break.\n\nlogger = logging.getLogger(\"uvicorn.error\")\n\n\nclass ServerState:\n \"\"\"\n Shared servers state that is available between all protocol instances.\n \"\"\"\n\n def __init__(self) -> None:\n self.total_requests = 0\n self.connections: set[Protocols] = set()\n self.tasks: set[asyncio.Task[None]] = set()\n self.default_headers: list[tuple[bytes, bytes]] = []\n\n\nclass Server:\n def __init__(self, config: Config) -> None:\n self.config = config\n self.server_state = ServerState()\n\n self.started = False\n self.should_exit = False\n self.force_exit = False\n self.last_notified = 0.0\n\n def run(self, sockets: list[socket.socket] | None = None) -> None:\n self.config.setup_event_loop()\n return asyncio.run(self.serve(sockets=sockets))\n\n async def serve(self, sockets: list[socket.socket] | None = None) -> None:\n process_id = os.getpid()\n\n config = self.config\n if not config.loaded:\n config.load()\n\n self.lifespan = config.lifespan_class(config)\n\n self.install_signal_handlers()\n\n message = \"Started server process [%d]\"\n color_message = \"Started server process [\" + click.style(\"%d\", fg=\"cyan\") + \"]\"\n logger.info(message, process_id, extra={\"color_message\": color_message})\n\n await self.startup(sockets=sockets)\n if self.should_exit:\n return\n await self.main_loop()\n await self.shutdown(sockets=sockets)\n\n message = \"Finished server process [%d]\"\n color_message = \"Finished server process [\" + click.style(\"%d\", fg=\"cyan\") + \"]\"\n logger.info(message, process_id, extra={\"color_message\": color_message})\n\n async def startup(self, sockets: list[socket.socket] | None = None) -> None:\n await self.lifespan.startup()\n if self.lifespan.should_exit:\n self.should_exit = True\n return\n\n config = self.config\n\n def create_protocol(\n _loop: asyncio.AbstractEventLoop | None = 
None,\n ) -> asyncio.Protocol:\n return config.http_protocol_class( # type: ignore[call-arg]\n config=config,\n server_state=self.server_state,\n app_state=self.lifespan.state,\n _loop=_loop,\n )\n\n loop = asyncio.get_running_loop()\n\n listeners: Sequence[socket.SocketType]\n if sockets is not None:\n # Explicitly passed a list of open sockets.\n # We use this when the server is run from a Gunicorn worker.\n\n def _share_socket(\n sock: socket.SocketType,\n ) -> socket.SocketType: # pragma py-linux pragma: py-darwin\n # Windows requires the socket be explicitly shared across\n # multiple workers (processes).\n from socket import fromshare # type: ignore[attr-defined]\n\n sock_data = sock.share(os.getpid()) # type: ignore[attr-defined]\n return fromshare(sock_data)\n\n self.servers: list[asyncio.base_events.Server] = []\n for sock in sockets:\n is_windows = platform.system() == \"Windows\"\n if config.workers > 1 and is_windows: # pragma: py-not-win32\n sock = _share_socket(sock) # type: ignore[assignment]\n server = await loop.create_server(create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog)\n self.servers.append(server)\n listeners = sockets\n\n elif config.fd is not None: # pragma: py-win32\n # Use an existing socket, from a file descriptor.\n sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM)\n server = await loop.create_server(create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog)\n assert server.sockets is not None # mypy\n listeners = server.sockets\n self.servers = [server]\n\n elif config.uds is not None: # pragma: py-win32\n # Create a socket using UNIX domain socket.\n uds_perms = 0o666\n if os.path.exists(config.uds):\n uds_perms = os.stat(config.uds).st_mode\n server = await loop.create_unix_server(\n create_protocol, path=config.uds, ssl=config.ssl, backlog=config.backlog\n )\n os.chmod(config.uds, uds_perms)\n assert server.sockets is not None # mypy\n listeners = server.sockets\n self.servers = [server]\n\n else:\n # Standard case. 
Create a socket from a host/port pair.\n try:\n server = await loop.create_server(\n create_protocol,\n host=config.host,\n port=config.port,\n ssl=config.ssl,\n backlog=config.backlog,\n )\n except OSError as exc:\n logger.error(exc)\n await self.lifespan.shutdown()\n sys.exit(1)\n\n assert server.sockets is not None\n listeners = server.sockets\n self.servers = [server]\n\n if sockets is None:\n self._log_started_message(listeners)\n else:\n # We're most likely running multiple workers, so a message has already been\n # logged by `config.bind_socket()`.\n pass\n\n self.started = True\n\n def _log_started_message(self, listeners: Sequence[socket.SocketType]) -> None:\n config = self.config\n\n if config.fd is not None: # pragma: py-win32\n sock = listeners[0]\n logger.info(\n \"Uvicorn running on socket %s (Press CTRL+C to quit)\",\n sock.getsockname(),\n )\n\n elif config.uds is not None: # pragma: py-win32\n logger.info(\"Uvicorn running on unix socket %s (Press CTRL+C to quit)\", config.uds)\n\n else:\n addr_format = \"%s://%s:%d\"\n host = \"0.0.0.0\" if config.host is None else config.host\n if \":\" in host:\n # It's an IPv6 address.\n addr_format = \"%s://[%s]:%d\"\n\n port = config.port\n if port == 0:\n port = listeners[0].getsockname()[1]\n\n protocol_name = \"https\" if config.ssl else \"http\"\n message = f\"Uvicorn running on {addr_format} (Press CTRL+C to quit)\"\n color_message = \"Uvicorn running on \" + click.style(addr_format, bold=True) + \" (Press CTRL+C to quit)\"\n logger.info(\n message,\n protocol_name,\n host,\n port,\n extra={\"color_message\": color_message},\n )\n\n async def main_loop(self) -> None:\n counter = 0\n should_exit = await self.on_tick(counter)\n while not should_exit:\n counter += 1\n counter = counter % 864000\n await asyncio.sleep(0.1)\n should_exit = await self.on_tick(counter)\n\n async def on_tick(self, counter: int) -> bool:\n # Update the default headers, once per second.\n if counter % 10 == 0:\n current_time = time.time()\n current_date = formatdate(current_time, usegmt=True).encode()\n\n if self.config.date_header:\n date_header = [(b\"date\", current_date)]\n else:\n date_header = []\n\n self.server_state.default_headers = date_header + self.config.encoded_headers\n\n # Callback to `callback_notify` once every `timeout_notify` seconds.\n if self.config.callback_notify is not None:\n if current_time - self.last_notified > self.config.timeout_notify:\n self.last_notified = current_time\n await self.config.callback_notify()\n\n # Determine if we should exit.\n if self.should_exit:\n return True\n if self.config.limit_max_requests is not None:\n return self.server_state.total_requests >= self.config.limit_max_requests\n return False\n\n async def shutdown(self, sockets: list[socket.socket] | None = None) -> None:\n logger.info(\"Shutting down\")\n\n # Stop accepting new connections.\n for server in self.servers:\n server.close()\n for sock in sockets or []:\n sock.close()\n\n # Request shutdown on all existing connections.\n for connection in list(self.server_state.connections):\n connection.shutdown()\n await asyncio.sleep(0.1)\n\n # When 3.10 is not supported anymore, use `async with asyncio.timeout(...):`.\n try:\n await asyncio.wait_for(\n self._wait_tasks_to_complete(),\n timeout=self.config.timeout_graceful_shutdown,\n )\n except asyncio.TimeoutError:\n logger.error(\n \"Cancel %s running task(s), timeout graceful shutdown exceeded\",\n len(self.server_state.tasks),\n )\n for t in self.server_state.tasks:\n if sys.version_info < (3, 
9): # pragma: py-gte-39\n t.cancel()\n else: # pragma: py-lt-39\n t.cancel(msg=\"Task cancelled, timeout graceful shutdown exceeded\")\n\n # Send the lifespan shutdown event, and wait for application shutdown.\n if not self.force_exit:\n await self.lifespan.shutdown()\n\n async def _wait_tasks_to_complete(self) -> None:\n # Wait for existing connections to finish sending responses.\n if self.server_state.connections and not self.force_exit:\n msg = \"Waiting for connections to close. (CTRL+C to force quit)\"\n logger.info(msg)\n while self.server_state.connections and not self.force_exit:\n await asyncio.sleep(0.1)\n\n # Wait for existing tasks to complete.\n if self.server_state.tasks and not self.force_exit:\n msg = \"Waiting for background tasks to complete. (CTRL+C to force quit)\"\n logger.info(msg)\n while self.server_state.tasks and not self.force_exit:\n await asyncio.sleep(0.1)\n\n for server in self.servers:\n await server.wait_closed()\n\n def install_signal_handlers(self) -> None:\n if threading.current_thread() is not threading.main_thread():\n # Signals can only be listened to from the main thread.\n return\n\n loop = asyncio.get_event_loop()\n\n try:\n for sig in HANDLED_SIGNALS:\n loop.add_signal_handler(sig, self.handle_exit, sig, None)\n except NotImplementedError: # pragma: no cover\n # Windows\n for sig in HANDLED_SIGNALS:\n signal.signal(sig, self.handle_exit)\n\n def handle_exit(self, sig: int, frame: FrameType | None) -> None:\n if self.should_exit and sig == signal.SIGINT:\n self.force_exit = True\n else:\n self.should_exit = True\n", "path": "uvicorn/server.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport asyncio\nimport contextlib\nimport logging\nimport os\nimport platform\nimport signal\nimport socket\nimport sys\nimport threading\nimport time\nfrom email.utils import formatdate\nfrom types import FrameType\nfrom typing import TYPE_CHECKING, Generator, Sequence, Union\n\nimport click\n\nfrom uvicorn.config import Config\n\nif TYPE_CHECKING:\n from uvicorn.protocols.http.h11_impl import H11Protocol\n from uvicorn.protocols.http.httptools_impl import HttpToolsProtocol\n from uvicorn.protocols.websockets.websockets_impl import WebSocketProtocol\n from uvicorn.protocols.websockets.wsproto_impl import WSProtocol\n\n Protocols = Union[H11Protocol, HttpToolsProtocol, WSProtocol, WebSocketProtocol]\n\nHANDLED_SIGNALS = (\n signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.\n signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.\n)\nif sys.platform == \"win32\": # pragma: py-not-win32\n HANDLED_SIGNALS += (signal.SIGBREAK,) # Windows signal 21. 
Sent by Ctrl+Break.\n\nlogger = logging.getLogger(\"uvicorn.error\")\n\n\nclass ServerState:\n \"\"\"\n Shared servers state that is available between all protocol instances.\n \"\"\"\n\n def __init__(self) -> None:\n self.total_requests = 0\n self.connections: set[Protocols] = set()\n self.tasks: set[asyncio.Task[None]] = set()\n self.default_headers: list[tuple[bytes, bytes]] = []\n\n\nclass Server:\n def __init__(self, config: Config) -> None:\n self.config = config\n self.server_state = ServerState()\n\n self.started = False\n self.should_exit = False\n self.force_exit = False\n self.last_notified = 0.0\n\n self._captured_signals: list[int] = []\n\n def run(self, sockets: list[socket.socket] | None = None) -> None:\n self.config.setup_event_loop()\n return asyncio.run(self.serve(sockets=sockets))\n\n async def serve(self, sockets: list[socket.socket] | None = None) -> None:\n with self.capture_signals():\n await self._serve(sockets)\n\n async def _serve(self, sockets: list[socket.socket] | None = None) -> None:\n process_id = os.getpid()\n\n config = self.config\n if not config.loaded:\n config.load()\n\n self.lifespan = config.lifespan_class(config)\n\n message = \"Started server process [%d]\"\n color_message = \"Started server process [\" + click.style(\"%d\", fg=\"cyan\") + \"]\"\n logger.info(message, process_id, extra={\"color_message\": color_message})\n\n await self.startup(sockets=sockets)\n if self.should_exit:\n return\n await self.main_loop()\n await self.shutdown(sockets=sockets)\n\n message = \"Finished server process [%d]\"\n color_message = \"Finished server process [\" + click.style(\"%d\", fg=\"cyan\") + \"]\"\n logger.info(message, process_id, extra={\"color_message\": color_message})\n\n async def startup(self, sockets: list[socket.socket] | None = None) -> None:\n await self.lifespan.startup()\n if self.lifespan.should_exit:\n self.should_exit = True\n return\n\n config = self.config\n\n def create_protocol(\n _loop: asyncio.AbstractEventLoop | None = None,\n ) -> asyncio.Protocol:\n return config.http_protocol_class( # type: ignore[call-arg]\n config=config,\n server_state=self.server_state,\n app_state=self.lifespan.state,\n _loop=_loop,\n )\n\n loop = asyncio.get_running_loop()\n\n listeners: Sequence[socket.SocketType]\n if sockets is not None:\n # Explicitly passed a list of open sockets.\n # We use this when the server is run from a Gunicorn worker.\n\n def _share_socket(\n sock: socket.SocketType,\n ) -> socket.SocketType: # pragma py-linux pragma: py-darwin\n # Windows requires the socket be explicitly shared across\n # multiple workers (processes).\n from socket import fromshare # type: ignore[attr-defined]\n\n sock_data = sock.share(os.getpid()) # type: ignore[attr-defined]\n return fromshare(sock_data)\n\n self.servers: list[asyncio.base_events.Server] = []\n for sock in sockets:\n is_windows = platform.system() == \"Windows\"\n if config.workers > 1 and is_windows: # pragma: py-not-win32\n sock = _share_socket(sock) # type: ignore[assignment]\n server = await loop.create_server(create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog)\n self.servers.append(server)\n listeners = sockets\n\n elif config.fd is not None: # pragma: py-win32\n # Use an existing socket, from a file descriptor.\n sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM)\n server = await loop.create_server(create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog)\n assert server.sockets is not None # mypy\n listeners = server.sockets\n self.servers 
= [server]\n\n elif config.uds is not None: # pragma: py-win32\n # Create a socket using UNIX domain socket.\n uds_perms = 0o666\n if os.path.exists(config.uds):\n uds_perms = os.stat(config.uds).st_mode\n server = await loop.create_unix_server(\n create_protocol, path=config.uds, ssl=config.ssl, backlog=config.backlog\n )\n os.chmod(config.uds, uds_perms)\n assert server.sockets is not None # mypy\n listeners = server.sockets\n self.servers = [server]\n\n else:\n # Standard case. Create a socket from a host/port pair.\n try:\n server = await loop.create_server(\n create_protocol,\n host=config.host,\n port=config.port,\n ssl=config.ssl,\n backlog=config.backlog,\n )\n except OSError as exc:\n logger.error(exc)\n await self.lifespan.shutdown()\n sys.exit(1)\n\n assert server.sockets is not None\n listeners = server.sockets\n self.servers = [server]\n\n if sockets is None:\n self._log_started_message(listeners)\n else:\n # We're most likely running multiple workers, so a message has already been\n # logged by `config.bind_socket()`.\n pass\n\n self.started = True\n\n def _log_started_message(self, listeners: Sequence[socket.SocketType]) -> None:\n config = self.config\n\n if config.fd is not None: # pragma: py-win32\n sock = listeners[0]\n logger.info(\n \"Uvicorn running on socket %s (Press CTRL+C to quit)\",\n sock.getsockname(),\n )\n\n elif config.uds is not None: # pragma: py-win32\n logger.info(\"Uvicorn running on unix socket %s (Press CTRL+C to quit)\", config.uds)\n\n else:\n addr_format = \"%s://%s:%d\"\n host = \"0.0.0.0\" if config.host is None else config.host\n if \":\" in host:\n # It's an IPv6 address.\n addr_format = \"%s://[%s]:%d\"\n\n port = config.port\n if port == 0:\n port = listeners[0].getsockname()[1]\n\n protocol_name = \"https\" if config.ssl else \"http\"\n message = f\"Uvicorn running on {addr_format} (Press CTRL+C to quit)\"\n color_message = \"Uvicorn running on \" + click.style(addr_format, bold=True) + \" (Press CTRL+C to quit)\"\n logger.info(\n message,\n protocol_name,\n host,\n port,\n extra={\"color_message\": color_message},\n )\n\n async def main_loop(self) -> None:\n counter = 0\n should_exit = await self.on_tick(counter)\n while not should_exit:\n counter += 1\n counter = counter % 864000\n await asyncio.sleep(0.1)\n should_exit = await self.on_tick(counter)\n\n async def on_tick(self, counter: int) -> bool:\n # Update the default headers, once per second.\n if counter % 10 == 0:\n current_time = time.time()\n current_date = formatdate(current_time, usegmt=True).encode()\n\n if self.config.date_header:\n date_header = [(b\"date\", current_date)]\n else:\n date_header = []\n\n self.server_state.default_headers = date_header + self.config.encoded_headers\n\n # Callback to `callback_notify` once every `timeout_notify` seconds.\n if self.config.callback_notify is not None:\n if current_time - self.last_notified > self.config.timeout_notify:\n self.last_notified = current_time\n await self.config.callback_notify()\n\n # Determine if we should exit.\n if self.should_exit:\n return True\n if self.config.limit_max_requests is not None:\n return self.server_state.total_requests >= self.config.limit_max_requests\n return False\n\n async def shutdown(self, sockets: list[socket.socket] | None = None) -> None:\n logger.info(\"Shutting down\")\n\n # Stop accepting new connections.\n for server in self.servers:\n server.close()\n for sock in sockets or []:\n sock.close()\n\n # Request shutdown on all existing connections.\n for connection in 
list(self.server_state.connections):\n connection.shutdown()\n await asyncio.sleep(0.1)\n\n # When 3.10 is not supported anymore, use `async with asyncio.timeout(...):`.\n try:\n await asyncio.wait_for(\n self._wait_tasks_to_complete(),\n timeout=self.config.timeout_graceful_shutdown,\n )\n except asyncio.TimeoutError:\n logger.error(\n \"Cancel %s running task(s), timeout graceful shutdown exceeded\",\n len(self.server_state.tasks),\n )\n for t in self.server_state.tasks:\n if sys.version_info < (3, 9): # pragma: py-gte-39\n t.cancel()\n else: # pragma: py-lt-39\n t.cancel(msg=\"Task cancelled, timeout graceful shutdown exceeded\")\n\n # Send the lifespan shutdown event, and wait for application shutdown.\n if not self.force_exit:\n await self.lifespan.shutdown()\n\n async def _wait_tasks_to_complete(self) -> None:\n # Wait for existing connections to finish sending responses.\n if self.server_state.connections and not self.force_exit:\n msg = \"Waiting for connections to close. (CTRL+C to force quit)\"\n logger.info(msg)\n while self.server_state.connections and not self.force_exit:\n await asyncio.sleep(0.1)\n\n # Wait for existing tasks to complete.\n if self.server_state.tasks and not self.force_exit:\n msg = \"Waiting for background tasks to complete. (CTRL+C to force quit)\"\n logger.info(msg)\n while self.server_state.tasks and not self.force_exit:\n await asyncio.sleep(0.1)\n\n for server in self.servers:\n await server.wait_closed()\n\n @contextlib.contextmanager\n def capture_signals(self) -> Generator[None, None, None]:\n # Signals can only be listened to from the main thread.\n if threading.current_thread() is not threading.main_thread():\n yield\n return\n # always use signal.signal, even if loop.add_signal_handler is available\n # this allows to restore previous signal handlers later on\n original_handlers = {sig: signal.signal(sig, self.handle_exit) for sig in HANDLED_SIGNALS}\n try:\n yield\n finally:\n for sig, handler in original_handlers.items():\n signal.signal(sig, handler)\n # If we did gracefully shut down due to a signal, try to\n # trigger the expected behaviour now; multiple signals would be\n # done LIFO, see https://stackoverflow.com/questions/48434964\n for captured_signal in reversed(self._captured_signals):\n signal.raise_signal(captured_signal)\n\n def handle_exit(self, sig: int, frame: FrameType | None) -> None:\n self._captured_signals.append(sig)\n if self.should_exit and sig == signal.SIGINT:\n self.force_exit = True\n else:\n self.should_exit = True\n", "path": "uvicorn/server.py"}]} | 4,059 | 766 |
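To restate the uvicorn patch above in isolation: it swaps the loop-level handlers for a `signal.signal`-based context manager that remembers the previous handlers, restores them on exit, and replays any captured signals. A rough standalone sketch of that pattern (the function name, arguments and defaults here are illustrative, not uvicorn's; requires Python 3.8+ for `signal.raise_signal`) could be:

```python
import contextlib
import signal
from typing import Callable, Generator, Iterable


@contextlib.contextmanager
def capture_signals(
    handler: Callable[[int, object], None],
    sigs: Iterable[int] = (signal.SIGINT, signal.SIGTERM),
) -> Generator[None, None, None]:
    captured: list[int] = []

    def _record(sig: int, frame: object) -> None:
        captured.append(sig)
        handler(sig, frame)

    # signal.signal returns the previous handler, so it can be reinstated later.
    # (A previous handler of None, i.e. one installed from C, would need extra care.)
    original = {sig: signal.signal(sig, _record) for sig in sigs}
    try:
        yield
    finally:
        for sig, previous in original.items():
            signal.signal(sig, previous)
        # Replay captured signals LIFO so that e.g. a Ctrl+C still reaches the
        # now-restored default handler and raises KeyboardInterrupt as expected.
        for sig in reversed(captured):
            signal.raise_signal(sig)
```

A caller would wrap its serve loop as `with capture_signals(my_handle_exit): run()`; after the block exits, the original handlers are back in place and any signal received during the run is re-raised against them.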
gh_patches_debug_40366 | rasdani/github-patches | git_diff | chainer__chainer-321 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hierarchical softmax doesn't have `to_cpu`
Same problem as #276
--- END ISSUE ---
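For orientation only (this sketch is not part of the original report; it assumes a CUDA-enabled chainer and uses the class defined in the file below with made-up arguments): the asymmetry looks roughly like this:

```python
from chainer.functions.hierarchical_softmax import (
    BinaryHierarchicalSoftmax, create_huffman_tree)

tree = create_huffman_tree({0: 8, 1: 5, 2: 6, 3: 4})
func = BinaryHierarchicalSoftmax(3, tree)

func.to_gpu()   # moves W/gW and converts the paths/codes lookup tables to GPU arrays
func.to_cpu()   # only the inherited Function.to_cpu runs: W/gW come back to the host,
                # but the converted lookup tables stay on the GPU, so a later
                # forward_cpu()/backward_cpu() call breaks
```

A natural fix is to give the class a `to_cpu` that mirrors its `to_gpu`, which is the direction the accepted patch takes.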
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/hierarchical_softmax.py`
Content:
```
1 import numpy
2 import six
3
4 from chainer import cuda
5 from chainer import function
6 from chainer.utils import type_check
7
8
9 class TreeParser(object):
10
11 def __init__(self):
12 self.next_id = 0
13
14 def size(self):
15 return self.next_id
16
17 def get_paths(self):
18 return self.paths
19
20 def get_codes(self):
21 return self.codes
22
23 def parse(self, tree):
24 self.next_id = 0
25 self.path = []
26 self.code = []
27 self.paths = {}
28 self.codes = {}
29 self._parse(tree)
30
31 assert(len(self.path) == 0)
32 assert(len(self.code) == 0)
33 assert(len(self.paths) == len(self.codes))
34
35 def _parse(self, node):
36 if isinstance(node, tuple):
37 # internal node
38 if len(node) != 2:
39 raise ValueError(
40 'All internal nodes must have two child nodes')
41 left, right = node
42 self.path.append(self.next_id)
43 self.next_id += 1
44 self.code.append(1.0)
45 self._parse(left)
46
47 self.code[-1] = -1.0
48 self._parse(right)
49
50 self.path.pop()
51 self.code.pop()
52
53 else:
54 # leaf node
55 self.paths[node] = numpy.array(self.path).astype(numpy.int32)
56 self.codes[node] = numpy.array(self.code).astype(numpy.float32)
57
58
59 class BinaryHierarchicalSoftmax(function.Function):
60
61 """Implementation of hierarchical softmax (HSM).
62
63 In natural language applications, vocabulary size is too large to use
64 softmax loss.
65 Instead, the hierarchical softmax uses product of sigmoid functions.
66 It costs only :math:`O(\log(n))` time where :math:`n` is the vocabulary
67 size in average.
68
69     At first, a user needs to prepare a binary tree in which each leaf
70     corresponds to a word in the vocabulary.
71 When a word :math:`x` is given, exactly one path from the root of the tree
72 to the leaf of the word exists.
73 Let :math:`\mbox{path}(x) = ((e_1, b_1), \dots, (e_m, b_m))` be the path of
74 :math:`x`, where :math:`e_i` is an index of :math:`i`-th internal node, and
75 :math:`b_i \in \{-1, 1\}` indicates direction to move at :math:`i`-th
76 internal node (-1 is left, and 1 is right).
77 Then, the probability of :math:`x` is given as below:
78
79 .. math::
80
81 P(x) &= \prod_{(e_i, b_i) \in \mbox{path}(x)}P(b_i | e_i) \\\\
82 &= \prod_{(e_i, b_i) \in \mbox{path}(x)}\sigma(b_i x^\\top
83 w_{e_i}),
84
85 where :math:`\sigma(\\cdot)` is a sigmoid function, and :math:`w` is a
86 weight matrix.
87
88 This function costs :math:`O(\log(n))` time as an average length of paths
89 is :math:`O(\log(n))`, and :math:`O(n)` memory as the number of internal
90 nodes equals :math:`n - 1`.
91
92 Args:
93 in_size (int): Dimension of input vectors.
94 tree: A binary tree made with tuples like `((1, 2), 3)`.
95
96 See: Hierarchical Probabilistic Neural Network Language Model [Morin+,
97 AISTAT2005].
98
99 """
100
101 parameter_names = ('W',)
102 gradient_names = ('gW',)
103
104 def __init__(self, in_size, tree):
105 parser = TreeParser()
106 parser.parse(tree)
107 self.paths = parser.get_paths()
108 self.codes = parser.get_codes()
109
110 self.W = numpy.random.uniform(
111 -1, 1, (parser.size(), in_size)).astype(numpy.float32)
112 self.gW = numpy.zeros(self.W.shape, numpy.float32)
113
114 def check_type_forward(self, in_types):
115 type_check.expect(in_types.size() == 2)
116 x_type, t_type = in_types
117
118 type_check.expect(
119 x_type.dtype == numpy.float32,
120 x_type.ndim == 2,
121 t_type.dtype == numpy.int32,
122 t_type.ndim == 1,
123 x_type.shape[0] == t_type.shape[0]
124 )
125
126 def check_type_backward(self, in_types, out_types):
127 type_check.expect(
128 out_types.size() == 1,
129 out_types[0].dtype == numpy.float32,
130 out_types[0].ndim == 0
131 )
132
133 def forward_cpu(self, args):
134 x, t = args
135
136 loss = numpy.float32(0.0)
137 for ix, it in six.moves.zip(x, t):
138 loss += self._forward_cpu_one(ix, it)
139 return numpy.array(loss),
140
141 def _forward_cpu_one(self, x, t):
142 assert t in self.paths
143
144 w = self.W[self.paths[t]]
145 wxy = w.dot(x) * self.codes[t]
146 loss = numpy.logaddexp(0.0, -wxy) # == log(1 + exp(-wxy))
147 return numpy.sum(loss)
148
149 def backward_cpu(self, args, loss):
150 x, t = args
151 gloss, = loss
152 gx = numpy.empty_like(x)
153 for i, (ix, it) in enumerate(six.moves.zip(x, t)):
154 gx[i] = self._backward_cpu_one(ix, it, gloss)
155 return gx, None
156
157 def _backward_cpu_one(self, x, t, gloss):
158 path = self.paths[t]
159 w = self.W[path]
160 wxy = w.dot(x) * self.codes[t]
161 g = -gloss * self.codes[t] / (1.0 + numpy.exp(wxy))
162 gx = g.dot(w)
163 gw = g.reshape((g.shape[0], 1)).dot(x.reshape(1, x.shape[0]))
164 self.gW[path] += gw
165 return gx
166
167 def to_gpu(self, device=None):
168 function.Function.to_gpu(self, device)
169
170 n_vocab = max(self.paths.keys()) + 1
171 paths = cuda.to_gpu(numpy.concatenate(
172 [self.paths[i] for i in range(n_vocab) if i in self.paths]))
173 codes = cuda.to_gpu(numpy.concatenate(
174 [self.codes[i] for i in range(n_vocab) if i in self.codes]))
175
176 begins = numpy.empty((n_vocab + 1,), dtype=numpy.int32)
177 begins[0] = 0
178 for i in range(0, n_vocab):
179 length = len(self.paths[i]) if i in self.paths else 0
180 begins[i + 1] = begins[i] + length
181
182 self.paths = paths
183 self.codes = codes
184 self.begins = cuda.to_gpu(begins)
185
186 def forward_gpu(self, inputs):
187 x, t = inputs
188
189 max_length = cuda.reduce(
190 'int* t, int* begins', 'begins[t[i] + 1] - begins[t[i]]',
191 'max(a,b)', '0', 'binary_hierarchical_softmax_max_length',
192 numpy.int32
193 )(t, self.begins)
194 max_length = cuda.to_cpu(max_length)[()]
195
196 length = max_length * x.shape[0]
197 ls = cuda.empty((length,), dtype=numpy.float32)
198 n_in = x.shape[1]
199 wxy = cuda.empty((length,), dtype=numpy.float32)
200 cuda.elementwise(
201 '''float* ls, float* wxy, const float* x, const float* w,
202 const int* ts, const int* paths, const float* codes,
203 const int* begins, int c, int max_length''',
204 '''
205 int ind = i / max_length;
206 int offset = i - ind * max_length;
207 int t = ts[ind];
208
209 int begin = begins[t];
210 int length = begins[t + 1] - begins[t];
211
212 if (offset < length) {
213 int p = begin + offset;
214 int node = paths[p];
215
216 x = &x[ind * c];
217
218 float wx = 0;
219 for (int j = 0; j < c; ++j) {
220 wx += w[node * c + j] * x[j];
221 }
222 wxy[i] = wx * codes[p];
223 ls[i] = log(1 + exp(-wxy[i]));
224 } else {
225 ls[i] = 0;
226 }
227 ''',
228 'binary_hierarchical_softmax_forward'
229 )(ls, wxy, x, self.W, t, self.paths, self.codes, self.begins,
230 n_in, max_length)
231 self.max_length = max_length
232 self.wxy = wxy
233 return cuda.gpuarray.sum(ls),
234
235 def backward_gpu(self, inputs, loss):
236 x, t = inputs
237 gloss, = loss
238
239 n_in = x.shape[1]
240 gx = cuda.zeros_like(x)
241 cuda.elementwise(
242 '''const float* wxy, float* gx, float* gw, const float* x,
243 const float* w, const int* ts, const int* paths,
244 const float* codes, const int* begins,
245 const float* gloss, int c, int max_length''',
246 '''
247 int ind = i / max_length;
248 int offset = i - ind * max_length;
249 int t = ts[ind];
250
251 int begin = begins[t];
252 int length = begins[t + 1] - begins[t];
253
254 if (offset < length) {
255 int p = begin + offset;
256 int node = paths[p];
257 float code = codes[p];
258 gx = &gx[ind * c];
259 x = &x[ind * c];
260
261 float g = -*gloss * code / (1.0 + exp(wxy[i]));
262 for (int j = 0; j < c; ++j) {
263 atomicAdd(gx + j, g * w[node * c + j]);
264 atomicAdd(gw + node * c + j, g * x[j]);
265 }
266 }
267 ''',
268 'binary_hierarchical_softmax_bwd'
269 )(self.wxy, gx, self.gW, x, self.W, t, self.paths, self.codes,
270 self.begins, gloss, n_in, self.max_length)
271 return gx, None
272
273
274 def create_huffman_tree(word_counts):
275 """Make a huffman tree from a dictionary containing word counts.
276
277 This method creates a binary huffman tree, that is required for
278 :class:`BinaryHierarchicalSoftmax`.
279 For example, ``{0: 8, 1: 5, 2: 6, 3: 4}`` is converted to
280 ``((3, 1), (2, 0))``.
281
282 Args:
283 word_counts (``dict`` of ``int`` key and ``int`` or ``float`` values.):
284 Dictionary representing counts of words.
285
286 Returns:
287         Binary huffman tree with tuples and keys of ``word_counts``.
288
289 """
290 if len(word_counts) == 0:
291 raise ValueError('Empty vocabulary')
292
293 q = six.moves.queue.PriorityQueue()
294 # Add unique id to each entry so that we can compare two entries with same
295 # counts.
296     # Note that iteritems yields the entries in arbitrary order.
297 for uid, (w, c) in enumerate(six.iteritems(word_counts)):
298 q.put((c, uid, w))
299
300 while q.qsize() >= 2:
301 (count1, id1, word1) = q.get()
302 (count2, id2, word2) = q.get()
303 count = count1 + count2
304 tree = (word1, word2)
305 q.put((count, min(id1, id2), tree))
306
307 return q.get()[2]
308
```
--- END FILES ---
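Before moving on, the docstring's probability formula above can be made concrete with a tiny hand-worked example (plain NumPy, independent of chainer; the tree, weights and input vector are made up):

```python
import numpy

# For the tree ((3, 1), (2, 0)), TreeParser assigns internal-node ids 0 (root),
# 1 (left child) and 2 (right child).  Word 3 sits at root -> left -> left,
# so its path is (0, 1) with codes (+1, +1), where +1 means "go left".
numpy.random.seed(0)
W = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)  # one row per internal node
x = numpy.random.uniform(-1, 1, 4).astype(numpy.float32)       # an input vector (in_size = 4)

path = numpy.array([0, 1])
code = numpy.array([1.0, 1.0], dtype=numpy.float32)

wxy = W[path].dot(x) * code                        # b_i * w_{e_i}^T x for each step
p_word = (1.0 / (1.0 + numpy.exp(-wxy))).prod()    # product of sigmoids = P(x = word 3)
loss = numpy.logaddexp(0.0, -wxy).sum()            # what _forward_cpu_one computes

print(p_word, numpy.exp(-loss))  # the two agree: loss == -log P(x)
```

Only the `len(path)` rows of `W` on the word's root-to-leaf path are touched, which is where the :math:`O(\log(n))` cost per word comes from.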
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
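The heart of the accepted patch, shown next, is representational: the per-word dictionaries become flat `paths`/`codes` arrays plus a `begins` offset table, so the lookup tables are ordinary contiguous buffers that `to_gpu`/`to_cpu` can move wholesale. A self-contained sketch of that layout (toy values only, not taken from the patch):

```python
import numpy

# Toy per-word paths/codes of varying length, as TreeParser would produce them.
paths = {0: numpy.array([0, 2], dtype=numpy.int32),
         1: numpy.array([0, 1, 4], dtype=numpy.int32),
         2: numpy.array([3], dtype=numpy.int32)}
codes = {0: numpy.array([1.0, -1.0], dtype=numpy.float32),
         1: numpy.array([1.0, 1.0, -1.0], dtype=numpy.float32),
         2: numpy.array([-1.0], dtype=numpy.float32)}

n_vocab = max(paths) + 1
flat_paths = numpy.concatenate([paths[i] for i in range(n_vocab)])
flat_codes = numpy.concatenate([codes[i] for i in range(n_vocab)])

begins = numpy.zeros(n_vocab + 1, dtype=numpy.int32)
for i in range(n_vocab):
    begins[i + 1] = begins[i] + len(paths[i])

# Word t's entries come back out by slicing -- the same begins[t]:begins[t + 1]
# indexing the patched _forward_cpu_one / _backward_cpu_one use.
t = 1
assert (flat_paths[begins[t]:begins[t + 1]] == paths[t]).all()
assert (flat_codes[begins[t]:begins[t + 1]] == codes[t]).all()
```

Because all three buffers are plain arrays, `to_gpu` and the new `to_cpu` reduce to calling `cuda.to_gpu` / `cuda.to_cpu` on each of them.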
| diff --git a/chainer/functions/hierarchical_softmax.py b/chainer/functions/hierarchical_softmax.py
--- a/chainer/functions/hierarchical_softmax.py
+++ b/chainer/functions/hierarchical_softmax.py
@@ -104,8 +104,20 @@
def __init__(self, in_size, tree):
parser = TreeParser()
parser.parse(tree)
- self.paths = parser.get_paths()
- self.codes = parser.get_codes()
+ paths = parser.get_paths()
+ codes = parser.get_codes()
+ n_vocab = max(paths.keys()) + 1
+
+ self.paths = numpy.concatenate(
+ [paths[i] for i in range(n_vocab) if i in paths])
+ self.codes = numpy.concatenate(
+ [codes[i] for i in range(n_vocab) if i in codes])
+ begins = numpy.empty((n_vocab + 1,), dtype=numpy.int32)
+ begins[0] = 0
+ for i in range(0, n_vocab):
+ length = len(paths[i]) if i in paths else 0
+ begins[i + 1] = begins[i] + length
+ self.begins = begins
self.W = numpy.random.uniform(
-1, 1, (parser.size(), in_size)).astype(numpy.float32)
@@ -139,10 +151,11 @@
return numpy.array(loss),
def _forward_cpu_one(self, x, t):
- assert t in self.paths
+ begin = self.begins[t]
+ end = self.begins[t + 1]
- w = self.W[self.paths[t]]
- wxy = w.dot(x) * self.codes[t]
+ w = self.W[self.paths[begin:end]]
+ wxy = w.dot(x) * self.codes[begin:end]
loss = numpy.logaddexp(0.0, -wxy) # == log(1 + exp(-wxy))
return numpy.sum(loss)
@@ -155,10 +168,13 @@
return gx, None
def _backward_cpu_one(self, x, t, gloss):
- path = self.paths[t]
+ begin = self.begins[t]
+ end = self.begins[t + 1]
+
+ path = self.paths[begin:end]
w = self.W[path]
- wxy = w.dot(x) * self.codes[t]
- g = -gloss * self.codes[t] / (1.0 + numpy.exp(wxy))
+ wxy = w.dot(x) * self.codes[begin:end]
+ g = -gloss * self.codes[begin:end] / (1.0 + numpy.exp(wxy))
gx = g.dot(w)
gw = g.reshape((g.shape[0], 1)).dot(x.reshape(1, x.shape[0]))
self.gW[path] += gw
@@ -167,21 +183,16 @@
def to_gpu(self, device=None):
function.Function.to_gpu(self, device)
- n_vocab = max(self.paths.keys()) + 1
- paths = cuda.to_gpu(numpy.concatenate(
- [self.paths[i] for i in range(n_vocab) if i in self.paths]))
- codes = cuda.to_gpu(numpy.concatenate(
- [self.codes[i] for i in range(n_vocab) if i in self.codes]))
+ self.paths = cuda.to_gpu(self.paths, device)
+ self.codes = cuda.to_gpu(self.codes, device)
+ self.begins = cuda.to_gpu(self.begins, device)
- begins = numpy.empty((n_vocab + 1,), dtype=numpy.int32)
- begins[0] = 0
- for i in range(0, n_vocab):
- length = len(self.paths[i]) if i in self.paths else 0
- begins[i + 1] = begins[i] + length
+ def to_cpu(self):
+ function.Function.to_cpu(self)
- self.paths = paths
- self.codes = codes
- self.begins = cuda.to_gpu(begins)
+ self.paths = cuda.to_cpu(self.paths)
+ self.codes = cuda.to_cpu(self.codes)
+ self.begins = cuda.to_cpu(self.begins)
def forward_gpu(self, inputs):
x, t = inputs
| {"golden_diff": "diff --git a/chainer/functions/hierarchical_softmax.py b/chainer/functions/hierarchical_softmax.py\n--- a/chainer/functions/hierarchical_softmax.py\n+++ b/chainer/functions/hierarchical_softmax.py\n@@ -104,8 +104,20 @@\n def __init__(self, in_size, tree):\n parser = TreeParser()\n parser.parse(tree)\n- self.paths = parser.get_paths()\n- self.codes = parser.get_codes()\n+ paths = parser.get_paths()\n+ codes = parser.get_codes()\n+ n_vocab = max(paths.keys()) + 1\n+\n+ self.paths = numpy.concatenate(\n+ [paths[i] for i in range(n_vocab) if i in paths])\n+ self.codes = numpy.concatenate(\n+ [codes[i] for i in range(n_vocab) if i in codes])\n+ begins = numpy.empty((n_vocab + 1,), dtype=numpy.int32)\n+ begins[0] = 0\n+ for i in range(0, n_vocab):\n+ length = len(paths[i]) if i in paths else 0\n+ begins[i + 1] = begins[i] + length\n+ self.begins = begins\n \n self.W = numpy.random.uniform(\n -1, 1, (parser.size(), in_size)).astype(numpy.float32)\n@@ -139,10 +151,11 @@\n return numpy.array(loss),\n \n def _forward_cpu_one(self, x, t):\n- assert t in self.paths\n+ begin = self.begins[t]\n+ end = self.begins[t + 1]\n \n- w = self.W[self.paths[t]]\n- wxy = w.dot(x) * self.codes[t]\n+ w = self.W[self.paths[begin:end]]\n+ wxy = w.dot(x) * self.codes[begin:end]\n loss = numpy.logaddexp(0.0, -wxy) # == log(1 + exp(-wxy))\n return numpy.sum(loss)\n \n@@ -155,10 +168,13 @@\n return gx, None\n \n def _backward_cpu_one(self, x, t, gloss):\n- path = self.paths[t]\n+ begin = self.begins[t]\n+ end = self.begins[t + 1]\n+\n+ path = self.paths[begin:end]\n w = self.W[path]\n- wxy = w.dot(x) * self.codes[t]\n- g = -gloss * self.codes[t] / (1.0 + numpy.exp(wxy))\n+ wxy = w.dot(x) * self.codes[begin:end]\n+ g = -gloss * self.codes[begin:end] / (1.0 + numpy.exp(wxy))\n gx = g.dot(w)\n gw = g.reshape((g.shape[0], 1)).dot(x.reshape(1, x.shape[0]))\n self.gW[path] += gw\n@@ -167,21 +183,16 @@\n def to_gpu(self, device=None):\n function.Function.to_gpu(self, device)\n \n- n_vocab = max(self.paths.keys()) + 1\n- paths = cuda.to_gpu(numpy.concatenate(\n- [self.paths[i] for i in range(n_vocab) if i in self.paths]))\n- codes = cuda.to_gpu(numpy.concatenate(\n- [self.codes[i] for i in range(n_vocab) if i in self.codes]))\n+ self.paths = cuda.to_gpu(self.paths, device)\n+ self.codes = cuda.to_gpu(self.codes, device)\n+ self.begins = cuda.to_gpu(self.begins, device)\n \n- begins = numpy.empty((n_vocab + 1,), dtype=numpy.int32)\n- begins[0] = 0\n- for i in range(0, n_vocab):\n- length = len(self.paths[i]) if i in self.paths else 0\n- begins[i + 1] = begins[i] + length\n+ def to_cpu(self):\n+ function.Function.to_cpu(self)\n \n- self.paths = paths\n- self.codes = codes\n- self.begins = cuda.to_gpu(begins)\n+ self.paths = cuda.to_cpu(self.paths)\n+ self.codes = cuda.to_cpu(self.codes)\n+ self.begins = cuda.to_cpu(self.begins)\n \n def forward_gpu(self, inputs):\n x, t = inputs\n", "issue": "Hierarchical softmax doesn't have `to_cpu`\nSame problem as #276 \n\n", "before_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass TreeParser(object):\n\n def __init__(self):\n self.next_id = 0\n\n def size(self):\n return self.next_id\n\n def get_paths(self):\n return self.paths\n\n def get_codes(self):\n return self.codes\n\n def parse(self, tree):\n self.next_id = 0\n self.path = []\n self.code = []\n self.paths = {}\n self.codes = {}\n self._parse(tree)\n\n assert(len(self.path) == 0)\n assert(len(self.code) == 0)\n 
assert(len(self.paths) == len(self.codes))\n\n def _parse(self, node):\n if isinstance(node, tuple):\n # internal node\n if len(node) != 2:\n raise ValueError(\n 'All internal nodes must have two child nodes')\n left, right = node\n self.path.append(self.next_id)\n self.next_id += 1\n self.code.append(1.0)\n self._parse(left)\n\n self.code[-1] = -1.0\n self._parse(right)\n\n self.path.pop()\n self.code.pop()\n\n else:\n # leaf node\n self.paths[node] = numpy.array(self.path).astype(numpy.int32)\n self.codes[node] = numpy.array(self.code).astype(numpy.float32)\n\n\nclass BinaryHierarchicalSoftmax(function.Function):\n\n \"\"\"Implementation of hierarchical softmax (HSM).\n\n In natural language applications, vocabulary size is too large to use\n softmax loss.\n Instead, the hierarchical softmax uses product of sigmoid functions.\n It costs only :math:`O(\\log(n))` time where :math:`n` is the vocabulary\n size in average.\n\n At first a user need to prepare a binary tree whose each leaf is\n corresponding to a word in a vocabulary.\n When a word :math:`x` is given, exactly one path from the root of the tree\n to the leaf of the word exists.\n Let :math:`\\mbox{path}(x) = ((e_1, b_1), \\dots, (e_m, b_m))` be the path of\n :math:`x`, where :math:`e_i` is an index of :math:`i`-th internal node, and\n :math:`b_i \\in \\{-1, 1\\}` indicates direction to move at :math:`i`-th\n internal node (-1 is left, and 1 is right).\n Then, the probability of :math:`x` is given as below:\n\n .. math::\n\n P(x) &= \\prod_{(e_i, b_i) \\in \\mbox{path}(x)}P(b_i | e_i) \\\\\\\\\n &= \\prod_{(e_i, b_i) \\in \\mbox{path}(x)}\\sigma(b_i x^\\\\top\n w_{e_i}),\n\n where :math:`\\sigma(\\\\cdot)` is a sigmoid function, and :math:`w` is a\n weight matrix.\n\n This function costs :math:`O(\\log(n))` time as an average length of paths\n is :math:`O(\\log(n))`, and :math:`O(n)` memory as the number of internal\n nodes equals :math:`n - 1`.\n\n Args:\n in_size (int): Dimension of input vectors.\n tree: A binary tree made with tuples like `((1, 2), 3)`.\n\n See: Hierarchical Probabilistic Neural Network Language Model [Morin+,\n AISTAT2005].\n\n \"\"\"\n\n parameter_names = ('W',)\n gradient_names = ('gW',)\n\n def __init__(self, in_size, tree):\n parser = TreeParser()\n parser.parse(tree)\n self.paths = parser.get_paths()\n self.codes = parser.get_codes()\n\n self.W = numpy.random.uniform(\n -1, 1, (parser.size(), in_size)).astype(numpy.float32)\n self.gW = numpy.zeros(self.W.shape, numpy.float32)\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n x_type, t_type = in_types\n\n type_check.expect(\n x_type.dtype == numpy.float32,\n x_type.ndim == 2,\n t_type.dtype == numpy.int32,\n t_type.ndim == 1,\n x_type.shape[0] == t_type.shape[0]\n )\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(\n out_types.size() == 1,\n out_types[0].dtype == numpy.float32,\n out_types[0].ndim == 0\n )\n\n def forward_cpu(self, args):\n x, t = args\n\n loss = numpy.float32(0.0)\n for ix, it in six.moves.zip(x, t):\n loss += self._forward_cpu_one(ix, it)\n return numpy.array(loss),\n\n def _forward_cpu_one(self, x, t):\n assert t in self.paths\n\n w = self.W[self.paths[t]]\n wxy = w.dot(x) * self.codes[t]\n loss = numpy.logaddexp(0.0, -wxy) # == log(1 + exp(-wxy))\n return numpy.sum(loss)\n\n def backward_cpu(self, args, loss):\n x, t = args\n gloss, = loss\n gx = numpy.empty_like(x)\n for i, (ix, it) in enumerate(six.moves.zip(x, t)):\n gx[i] = self._backward_cpu_one(ix, it, gloss)\n 
return gx, None\n\n def _backward_cpu_one(self, x, t, gloss):\n path = self.paths[t]\n w = self.W[path]\n wxy = w.dot(x) * self.codes[t]\n g = -gloss * self.codes[t] / (1.0 + numpy.exp(wxy))\n gx = g.dot(w)\n gw = g.reshape((g.shape[0], 1)).dot(x.reshape(1, x.shape[0]))\n self.gW[path] += gw\n return gx\n\n def to_gpu(self, device=None):\n function.Function.to_gpu(self, device)\n\n n_vocab = max(self.paths.keys()) + 1\n paths = cuda.to_gpu(numpy.concatenate(\n [self.paths[i] for i in range(n_vocab) if i in self.paths]))\n codes = cuda.to_gpu(numpy.concatenate(\n [self.codes[i] for i in range(n_vocab) if i in self.codes]))\n\n begins = numpy.empty((n_vocab + 1,), dtype=numpy.int32)\n begins[0] = 0\n for i in range(0, n_vocab):\n length = len(self.paths[i]) if i in self.paths else 0\n begins[i + 1] = begins[i] + length\n\n self.paths = paths\n self.codes = codes\n self.begins = cuda.to_gpu(begins)\n\n def forward_gpu(self, inputs):\n x, t = inputs\n\n max_length = cuda.reduce(\n 'int* t, int* begins', 'begins[t[i] + 1] - begins[t[i]]',\n 'max(a,b)', '0', 'binary_hierarchical_softmax_max_length',\n numpy.int32\n )(t, self.begins)\n max_length = cuda.to_cpu(max_length)[()]\n\n length = max_length * x.shape[0]\n ls = cuda.empty((length,), dtype=numpy.float32)\n n_in = x.shape[1]\n wxy = cuda.empty((length,), dtype=numpy.float32)\n cuda.elementwise(\n '''float* ls, float* wxy, const float* x, const float* w,\n const int* ts, const int* paths, const float* codes,\n const int* begins, int c, int max_length''',\n '''\n int ind = i / max_length;\n int offset = i - ind * max_length;\n int t = ts[ind];\n\n int begin = begins[t];\n int length = begins[t + 1] - begins[t];\n\n if (offset < length) {\n int p = begin + offset;\n int node = paths[p];\n\n x = &x[ind * c];\n\n float wx = 0;\n for (int j = 0; j < c; ++j) {\n wx += w[node * c + j] * x[j];\n }\n wxy[i] = wx * codes[p];\n ls[i] = log(1 + exp(-wxy[i]));\n } else {\n ls[i] = 0;\n }\n ''',\n 'binary_hierarchical_softmax_forward'\n )(ls, wxy, x, self.W, t, self.paths, self.codes, self.begins,\n n_in, max_length)\n self.max_length = max_length\n self.wxy = wxy\n return cuda.gpuarray.sum(ls),\n\n def backward_gpu(self, inputs, loss):\n x, t = inputs\n gloss, = loss\n\n n_in = x.shape[1]\n gx = cuda.zeros_like(x)\n cuda.elementwise(\n '''const float* wxy, float* gx, float* gw, const float* x,\n const float* w, const int* ts, const int* paths,\n const float* codes, const int* begins,\n const float* gloss, int c, int max_length''',\n '''\n int ind = i / max_length;\n int offset = i - ind * max_length;\n int t = ts[ind];\n\n int begin = begins[t];\n int length = begins[t + 1] - begins[t];\n\n if (offset < length) {\n int p = begin + offset;\n int node = paths[p];\n float code = codes[p];\n gx = &gx[ind * c];\n x = &x[ind * c];\n\n float g = -*gloss * code / (1.0 + exp(wxy[i]));\n for (int j = 0; j < c; ++j) {\n atomicAdd(gx + j, g * w[node * c + j]);\n atomicAdd(gw + node * c + j, g * x[j]);\n }\n }\n ''',\n 'binary_hierarchical_softmax_bwd'\n )(self.wxy, gx, self.gW, x, self.W, t, self.paths, self.codes,\n self.begins, gloss, n_in, self.max_length)\n return gx, None\n\n\ndef create_huffman_tree(word_counts):\n \"\"\"Make a huffman tree from a dictionary containing word counts.\n\n This method creates a binary huffman tree, that is required for\n :class:`BinaryHierarchicalSoftmax`.\n For example, ``{0: 8, 1: 5, 2: 6, 3: 4}`` is converted to\n ``((3, 1), (2, 0))``.\n\n Args:\n word_counts (``dict`` of ``int`` key and ``int`` or ``float`` values.):\n 
Dictionary representing counts of words.\n\n Returns:\n Binary huffman tree with tuples and keys of ``word_coutns``.\n\n \"\"\"\n if len(word_counts) == 0:\n raise ValueError('Empty vocabulary')\n\n q = six.moves.queue.PriorityQueue()\n # Add unique id to each entry so that we can compare two entries with same\n # counts.\n # Note that itreitems randomly order the entries.\n for uid, (w, c) in enumerate(six.iteritems(word_counts)):\n q.put((c, uid, w))\n\n while q.qsize() >= 2:\n (count1, id1, word1) = q.get()\n (count2, id2, word2) = q.get()\n count = count1 + count2\n tree = (word1, word2)\n q.put((count, min(id1, id2), tree))\n\n return q.get()[2]\n", "path": "chainer/functions/hierarchical_softmax.py"}], "after_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass TreeParser(object):\n\n def __init__(self):\n self.next_id = 0\n\n def size(self):\n return self.next_id\n\n def get_paths(self):\n return self.paths\n\n def get_codes(self):\n return self.codes\n\n def parse(self, tree):\n self.next_id = 0\n self.path = []\n self.code = []\n self.paths = {}\n self.codes = {}\n self._parse(tree)\n\n assert(len(self.path) == 0)\n assert(len(self.code) == 0)\n assert(len(self.paths) == len(self.codes))\n\n def _parse(self, node):\n if isinstance(node, tuple):\n # internal node\n if len(node) != 2:\n raise ValueError(\n 'All internal nodes must have two child nodes')\n left, right = node\n self.path.append(self.next_id)\n self.next_id += 1\n self.code.append(1.0)\n self._parse(left)\n\n self.code[-1] = -1.0\n self._parse(right)\n\n self.path.pop()\n self.code.pop()\n\n else:\n # leaf node\n self.paths[node] = numpy.array(self.path).astype(numpy.int32)\n self.codes[node] = numpy.array(self.code).astype(numpy.float32)\n\n\nclass BinaryHierarchicalSoftmax(function.Function):\n\n \"\"\"Implementation of hierarchical softmax (HSM).\n\n In natural language applications, vocabulary size is too large to use\n softmax loss.\n Instead, the hierarchical softmax uses product of sigmoid functions.\n It costs only :math:`O(\\log(n))` time where :math:`n` is the vocabulary\n size in average.\n\n At first a user need to prepare a binary tree whose each leaf is\n corresponding to a word in a vocabulary.\n When a word :math:`x` is given, exactly one path from the root of the tree\n to the leaf of the word exists.\n Let :math:`\\mbox{path}(x) = ((e_1, b_1), \\dots, (e_m, b_m))` be the path of\n :math:`x`, where :math:`e_i` is an index of :math:`i`-th internal node, and\n :math:`b_i \\in \\{-1, 1\\}` indicates direction to move at :math:`i`-th\n internal node (-1 is left, and 1 is right).\n Then, the probability of :math:`x` is given as below:\n\n .. 
math::\n\n P(x) &= \\prod_{(e_i, b_i) \\in \\mbox{path}(x)}P(b_i | e_i) \\\\\\\\\n &= \\prod_{(e_i, b_i) \\in \\mbox{path}(x)}\\sigma(b_i x^\\\\top\n w_{e_i}),\n\n where :math:`\\sigma(\\\\cdot)` is a sigmoid function, and :math:`w` is a\n weight matrix.\n\n This function costs :math:`O(\\log(n))` time as an average length of paths\n is :math:`O(\\log(n))`, and :math:`O(n)` memory as the number of internal\n nodes equals :math:`n - 1`.\n\n Args:\n in_size (int): Dimension of input vectors.\n tree: A binary tree made with tuples like `((1, 2), 3)`.\n\n See: Hierarchical Probabilistic Neural Network Language Model [Morin+,\n AISTAT2005].\n\n \"\"\"\n\n parameter_names = ('W',)\n gradient_names = ('gW',)\n\n def __init__(self, in_size, tree):\n parser = TreeParser()\n parser.parse(tree)\n paths = parser.get_paths()\n codes = parser.get_codes()\n n_vocab = max(paths.keys()) + 1\n\n self.paths = numpy.concatenate(\n [paths[i] for i in range(n_vocab) if i in paths])\n self.codes = numpy.concatenate(\n [codes[i] for i in range(n_vocab) if i in codes])\n begins = numpy.empty((n_vocab + 1,), dtype=numpy.int32)\n begins[0] = 0\n for i in range(0, n_vocab):\n length = len(paths[i]) if i in paths else 0\n begins[i + 1] = begins[i] + length\n self.begins = begins\n\n self.W = numpy.random.uniform(\n -1, 1, (parser.size(), in_size)).astype(numpy.float32)\n self.gW = numpy.zeros(self.W.shape, numpy.float32)\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n x_type, t_type = in_types\n\n type_check.expect(\n x_type.dtype == numpy.float32,\n x_type.ndim == 2,\n t_type.dtype == numpy.int32,\n t_type.ndim == 1,\n x_type.shape[0] == t_type.shape[0]\n )\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(\n out_types.size() == 1,\n out_types[0].dtype == numpy.float32,\n out_types[0].ndim == 0\n )\n\n def forward_cpu(self, args):\n x, t = args\n\n loss = numpy.float32(0.0)\n for ix, it in six.moves.zip(x, t):\n loss += self._forward_cpu_one(ix, it)\n return numpy.array(loss),\n\n def _forward_cpu_one(self, x, t):\n begin = self.begins[t]\n end = self.begins[t + 1]\n\n w = self.W[self.paths[begin:end]]\n wxy = w.dot(x) * self.codes[begin:end]\n loss = numpy.logaddexp(0.0, -wxy) # == log(1 + exp(-wxy))\n return numpy.sum(loss)\n\n def backward_cpu(self, args, loss):\n x, t = args\n gloss, = loss\n gx = numpy.empty_like(x)\n for i, (ix, it) in enumerate(six.moves.zip(x, t)):\n gx[i] = self._backward_cpu_one(ix, it, gloss)\n return gx, None\n\n def _backward_cpu_one(self, x, t, gloss):\n begin = self.begins[t]\n end = self.begins[t + 1]\n\n path = self.paths[begin:end]\n w = self.W[path]\n wxy = w.dot(x) * self.codes[begin:end]\n g = -gloss * self.codes[begin:end] / (1.0 + numpy.exp(wxy))\n gx = g.dot(w)\n gw = g.reshape((g.shape[0], 1)).dot(x.reshape(1, x.shape[0]))\n self.gW[path] += gw\n return gx\n\n def to_gpu(self, device=None):\n function.Function.to_gpu(self, device)\n\n self.paths = cuda.to_gpu(self.paths, device)\n self.codes = cuda.to_gpu(self.codes, device)\n self.begins = cuda.to_gpu(self.begins, device)\n\n def to_cpu(self):\n function.Function.to_cpu(self)\n\n self.paths = cuda.to_cpu(self.paths)\n self.codes = cuda.to_cpu(self.codes)\n self.begins = cuda.to_cpu(self.begins)\n\n def forward_gpu(self, inputs):\n x, t = inputs\n\n max_length = cuda.reduce(\n 'int* t, int* begins', 'begins[t[i] + 1] - begins[t[i]]',\n 'max(a,b)', '0', 'binary_hierarchical_softmax_max_length',\n numpy.int32\n )(t, self.begins)\n max_length = 
cuda.to_cpu(max_length)[()]\n\n length = max_length * x.shape[0]\n ls = cuda.empty((length,), dtype=numpy.float32)\n n_in = x.shape[1]\n wxy = cuda.empty((length,), dtype=numpy.float32)\n cuda.elementwise(\n '''float* ls, float* wxy, const float* x, const float* w,\n const int* ts, const int* paths, const float* codes,\n const int* begins, int c, int max_length''',\n '''\n int ind = i / max_length;\n int offset = i - ind * max_length;\n int t = ts[ind];\n\n int begin = begins[t];\n int length = begins[t + 1] - begins[t];\n\n if (offset < length) {\n int p = begin + offset;\n int node = paths[p];\n\n x = &x[ind * c];\n\n float wx = 0;\n for (int j = 0; j < c; ++j) {\n wx += w[node * c + j] * x[j];\n }\n wxy[i] = wx * codes[p];\n ls[i] = log(1 + exp(-wxy[i]));\n } else {\n ls[i] = 0;\n }\n ''',\n 'binary_hierarchical_softmax_forward'\n )(ls, wxy, x, self.W, t, self.paths, self.codes, self.begins,\n n_in, max_length)\n self.max_length = max_length\n self.wxy = wxy\n return cuda.gpuarray.sum(ls),\n\n def backward_gpu(self, inputs, loss):\n x, t = inputs\n gloss, = loss\n\n n_in = x.shape[1]\n gx = cuda.zeros_like(x)\n cuda.elementwise(\n '''const float* wxy, float* gx, float* gw, const float* x,\n const float* w, const int* ts, const int* paths,\n const float* codes, const int* begins,\n const float* gloss, int c, int max_length''',\n '''\n int ind = i / max_length;\n int offset = i - ind * max_length;\n int t = ts[ind];\n\n int begin = begins[t];\n int length = begins[t + 1] - begins[t];\n\n if (offset < length) {\n int p = begin + offset;\n int node = paths[p];\n float code = codes[p];\n gx = &gx[ind * c];\n x = &x[ind * c];\n\n float g = -*gloss * code / (1.0 + exp(wxy[i]));\n for (int j = 0; j < c; ++j) {\n atomicAdd(gx + j, g * w[node * c + j]);\n atomicAdd(gw + node * c + j, g * x[j]);\n }\n }\n ''',\n 'binary_hierarchical_softmax_bwd'\n )(self.wxy, gx, self.gW, x, self.W, t, self.paths, self.codes,\n self.begins, gloss, n_in, self.max_length)\n return gx, None\n\n\ndef create_huffman_tree(word_counts):\n \"\"\"Make a huffman tree from a dictionary containing word counts.\n\n This method creates a binary huffman tree, that is required for\n :class:`BinaryHierarchicalSoftmax`.\n For example, ``{0: 8, 1: 5, 2: 6, 3: 4}`` is converted to\n ``((3, 1), (2, 0))``.\n\n Args:\n word_counts (``dict`` of ``int`` key and ``int`` or ``float`` values.):\n Dictionary representing counts of words.\n\n Returns:\n Binary huffman tree with tuples and keys of ``word_coutns``.\n\n \"\"\"\n if len(word_counts) == 0:\n raise ValueError('Empty vocabulary')\n\n q = six.moves.queue.PriorityQueue()\n # Add unique id to each entry so that we can compare two entries with same\n # counts.\n # Note that itreitems randomly order the entries.\n for uid, (w, c) in enumerate(six.iteritems(word_counts)):\n q.put((c, uid, w))\n\n while q.qsize() >= 2:\n (count1, id1, word1) = q.get()\n (count2, id2, word2) = q.get()\n count = count1 + count2\n tree = (word1, word2)\n q.put((count, min(id1, id2), tree))\n\n return q.get()[2]\n", "path": "chainer/functions/hierarchical_softmax.py"}]} | 3,768 | 971 |
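The `after_files` version in the row above replaces the per-word `paths`/`codes` dicts with flat arrays indexed through a `begins` table, so they can be shipped to and from the GPU as plain buffers. A standalone sketch of that layout follows — the toy path dict and node ids are invented for illustration, not taken from any real tree:

```python
import numpy as np

# toy per-word paths, shaped like TreeParser.get_paths() output (ids are made up)
paths = {0: np.array([0, 1], dtype=np.int32),
         1: np.array([0, 2], dtype=np.int32),
         2: np.array([0, 2, 3], dtype=np.int32)}

n_vocab = max(paths.keys()) + 1
flat_paths = np.concatenate([paths[i] for i in range(n_vocab) if i in paths])

begins = np.empty(n_vocab + 1, dtype=np.int32)
begins[0] = 0
for i in range(n_vocab):
    begins[i + 1] = begins[i] + (len(paths[i]) if i in paths else 0)

# word t's path now lives in flat_paths[begins[t]:begins[t + 1]]
print(begins)                           # [0 2 4 7]
print(flat_paths[begins[1]:begins[2]])  # [0 2]
```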
gh_patches_debug_1331 | rasdani/github-patches | git_diff | litestar-org__litestar-1773 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
--- END ISSUE ---
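For context on why a filesystem-path check is too strict: package data read through `importlib.resources` (or the `importlib_resources` backport) may live inside a zip or wheel, so there is no on-disk directory to validate. A minimal sketch, assuming Python 3.9+; the package and file names here are invented for illustration and are not from the issue:

```python
from importlib import resources  # Python 3.9+; older: import importlib_resources as resources


def read_bundled_file(package: str, name: str) -> bytes:
    """Read a file shipped inside a package, even when that package is zipped."""
    # files() returns a Traversable backed by whatever loader the package uses;
    # it is not guaranteed to correspond to a real directory on disk.
    return (resources.files(package) / "static" / name).read_bytes()


# e.g. read_bundled_file("my_package", "style.css"), assuming my_package ships
# static/style.css as package data
```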
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/dto/exceptions.py`
Content:
```
1 from __future__ import annotations
2
3 from litestar.exceptions import ImproperlyConfiguredException
4
5 __all__ = ("DTOException", "UnsupportedType")
6
7
8 class DTOException(ImproperlyConfiguredException):
9 """Base exception for DTO errors."""
10
11
12 class UnsupportedType(DTOException):
13 """Raised when a type is not supported by Litestar."""
14
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/litestar/dto/exceptions.py b/litestar/dto/exceptions.py
deleted file mode 100644
--- a/litestar/dto/exceptions.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from __future__ import annotations
-
-from litestar.exceptions import ImproperlyConfiguredException
-
-__all__ = ("DTOException", "UnsupportedType")
-
-
-class DTOException(ImproperlyConfiguredException):
- """Base exception for DTO errors."""
-
-
-class UnsupportedType(DTOException):
- """Raised when a type is not supported by Litestar."""
| {"golden_diff": "diff --git a/litestar/dto/exceptions.py b/litestar/dto/exceptions.py\ndeleted file mode 100644\n--- a/litestar/dto/exceptions.py\n+++ /dev/null\n@@ -1,13 +0,0 @@\n-from __future__ import annotations\n-\n-from litestar.exceptions import ImproperlyConfiguredException\n-\n-__all__ = (\"DTOException\", \"UnsupportedType\")\n-\n-\n-class DTOException(ImproperlyConfiguredException):\n- \"\"\"Base exception for DTO errors.\"\"\"\n-\n-\n-class UnsupportedType(DTOException):\n- \"\"\"Raised when a type is not supported by Litestar.\"\"\"\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom litestar.exceptions import ImproperlyConfiguredException\n\n__all__ = (\"DTOException\", \"UnsupportedType\")\n\n\nclass DTOException(ImproperlyConfiguredException):\n \"\"\"Base exception for DTO errors.\"\"\"\n\n\nclass UnsupportedType(DTOException):\n \"\"\"Raised when a type is not supported by Litestar.\"\"\"\n", "path": "litestar/dto/exceptions.py"}], "after_files": [{"content": null, "path": "litestar/dto/exceptions.py"}]} | 523 | 139 |
gh_patches_debug_34145 | rasdani/github-patches | git_diff | scikit-hep__awkward-1795 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`ak.run_lengths` does not handle leading/trailing empty sublists
### Version of Awkward Array
main
### Description and code to reproduce
This also exists in v1.
The following code raises an `Exception`:
```python
import awkward as ak
import numpy as np
layout = ak.contents.ListOffsetArray(
ak.index.Index(np.array([0, 2, 4, 4], dtype=np.int64)),
ak.contents.NumpyArray(
np.arange(4)
)
)
ak.run_lengths(layout)
```
Also, we should use `regularize_numpyarray` to handle `NumpyArray`, which currently just raises an Exception.
--- END ISSUE ---
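As a reference for the expected behaviour, a plain-NumPy computation of run lengths within each `[start, stop)` slice (a hypothetical helper, not awkward API): the empty trailing sublist should simply contribute an empty list rather than raise.

```python
import numpy as np


def run_lengths_per_sublist(data, offsets):
    """Reference: run lengths inside each offsets[i]:offsets[i+1] slice."""
    out = []
    for start, stop in zip(offsets[:-1], offsets[1:]):
        sub = data[start:stop]
        if len(sub) == 0:
            out.append([])  # empty sublist -> empty run-length list
            continue
        change = np.nonzero(sub[1:] != sub[:-1])[0] + 1   # indices where the value changes
        bounds = np.concatenate(([0], change, [len(sub)]))
        out.append(np.diff(bounds).tolist())
    return out


print(run_lengths_per_sublist(np.arange(4), np.array([0, 2, 4, 4])))
# [[1, 1], [1, 1], []]
```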
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/awkward/operations/ak_run_lengths.py`
Content:
```
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2
3 import awkward as ak
4
5 np = ak.nplikes.NumpyMetadata.instance()
6
7
8 def run_lengths(array, highlevel=True, behavior=None):
9
10 """
11 Args:
12 array: Data containing runs of numbers to count.
13 highlevel (bool): If True, return an #ak.Array; otherwise, return
14 a low-level #ak.contents.Content subclass.
15 behavior (None or dict): Custom #ak.behavior for the output array, if
16 high-level.
17
18 Computes the lengths of sequences of identical values at the deepest level
19 of nesting, returning an array with the same structure but with `int64` type.
20
21 For example,
22
23 >>> array = ak.Array([1.1, 1.1, 1.1, 2.2, 3.3, 3.3, 4.4, 4.4, 5.5])
24 >>> ak.run_lengths(array)
25 <Array [3, 1, 2, 2, 1] type='5 * int64'>
26
27 There are 3 instances of 1.1, followed by 1 instance of 2.2, 2 instances of 3.3,
28 2 instances of 4.4, and 1 instance of 5.5.
29
30 The order and uniqueness of the input data doesn't matter,
31
32 >>> array = ak.Array([1.1, 1.1, 1.1, 5.5, 4.4, 4.4, 1.1, 1.1, 5.5])
33 >>> ak.run_lengths(array)
34 <Array [3, 1, 2, 2, 1] type='5 * int64'>
35
36 just the difference between each value and its neighbors.
37
38 The data can be nested, but runs don't cross list boundaries.
39
40 >>> array = ak.Array([[1.1, 1.1, 1.1, 2.2, 3.3], [3.3, 4.4], [4.4, 5.5]])
41 >>> ak.run_lengths(array)
42 <Array [[3, 1, 1], [1, 1], [1, 1]] type='3 * var * int64'>
43
44 This function recognizes strings as distinguishable values.
45
46 >>> array = ak.Array([["one", "one"], ["one", "two", "two"], ["three", "two", "two"]])
47 >>> ak.run_lengths(array)
48 <Array [[2], [1, 2], [1, 2]] type='3 * var * int64'>
49
50 Note that this can be combined with #ak.argsort and #ak.unflatten to compute
51 a "group by" operation:
52
53 >>> array = ak.Array([{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 1, "y": 1.1},
54 ... {"x": 3, "y": 3.3}, {"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}])
55 >>> sorted = array[ak.argsort(array.x)]
56 >>> sorted.x
57 <Array [1, 1, 1, 2, 2, 3] type='6 * int64'>
58 >>> ak.run_lengths(sorted.x)
59 <Array [3, 2, 1] type='3 * int64'>
60 >>> ak.unflatten(sorted, ak.run_lengths(sorted.x)).tolist()
61 [[{'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}],
62 [{'x': 2, 'y': 2.2}, {'x': 2, 'y': 2.2}],
63 [{'x': 3, 'y': 3.3}]]
64
65 Unlike a database "group by," this operation can be applied in bulk to many sublists
66 (though the run lengths need to be fully flattened to be used as `counts` for
67 #ak.unflatten, and you need to specify `axis=-1` as the depth).
68
69 >>> array = ak.Array([[{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 1, "y": 1.1}],
70 ... [{"x": 3, "y": 3.3}, {"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}]])
71 >>> sorted = array[ak.argsort(array.x)]
72 >>> sorted.x
73 <Array [[1, 1, 2], [1, 2, 3]] type='2 * var * int64'>
74 >>> ak.run_lengths(sorted.x)
75 <Array [[2, 1], [1, 1, 1]] type='2 * var * int64'>
76 >>> counts = ak.flatten(ak.run_lengths(sorted.x), axis=None)
77 >>> ak.unflatten(sorted, counts, axis=-1).tolist()
78 [[[{'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}],
79 [{'x': 2, 'y': 2.2}]],
80 [[{'x': 1, 'y': 1.1}],
81 [{'x': 2, 'y': 2.2}],
82 [{'x': 3, 'y': 3.3}]]]
83
84 See also #ak.num, #ak.argsort, #ak.unflatten.
85 """
86 with ak._errors.OperationErrorContext(
87 "ak.run_lengths",
88 dict(
89 array=array,
90 highlevel=highlevel,
91 behavior=behavior,
92 ),
93 ):
94 return _impl(array, highlevel, behavior)
95
96
97 def _impl(array, highlevel, behavior):
98 nplike = ak.nplikes.nplike_of(array)
99
100 def lengths_of(data, offsets):
101 if len(data) == 0:
102 return nplike.index_nplike.empty(0, np.int64), offsets
103 else:
104 diffs = data[1:] != data[:-1]
105
106 if isinstance(diffs, ak.highlevel.Array):
107 diffs = nplike.asarray(diffs)
108 if offsets is not None:
109 diffs[offsets[1:-1] - 1] = True
110 positions = nplike.index_nplike.nonzero(diffs)[0]
111 full_positions = nplike.index_nplike.empty(len(positions) + 2, np.int64)
112 full_positions[0] = 0
113 full_positions[-1] = len(data)
114 full_positions[1:-1] = positions + 1
115
116 nextcontent = full_positions[1:] - full_positions[:-1]
117 if offsets is None:
118 nextoffsets = None
119 else:
120 nextoffsets = nplike.index_nplike.searchsorted(
121 full_positions, offsets, side="left"
122 )
123 return nextcontent, nextoffsets
124
125 def action(layout, **kwargs):
126 if layout.branch_depth == (False, 1):
127 if layout.is_IndexedType:
128 layout = layout.project()
129
130 if (
131 layout.parameter("__array__") == "string"
132 or layout.parameter("__array__") == "bytestring"
133 ):
134 nextcontent, _ = lengths_of(ak.highlevel.Array(layout), None)
135 return ak.contents.NumpyArray(nextcontent)
136
137 if not isinstance(layout, (ak.contents.NumpyArray, ak.contents.EmptyArray)):
138 raise ak._errors.wrap_error(
139 NotImplementedError("run_lengths on " + type(layout).__name__)
140 )
141
142 nextcontent, _ = lengths_of(nplike.asarray(layout), None)
143 return ak.contents.NumpyArray(nextcontent)
144
145 elif layout.branch_depth == (False, 2):
146 if layout.is_IndexedType:
147 layout = layout.project()
148
149 if not layout.is_ListType:
150 raise ak._errors.wrap_error(
151 NotImplementedError("run_lengths on " + type(layout).__name__)
152 )
153
154 if (
155 layout.content.parameter("__array__") == "string"
156 or layout.content.parameter("__array__") == "bytestring"
157 ):
158 listoffsetarray = layout.toListOffsetArray64(False)
159 offsets = nplike.index_nplike.asarray(listoffsetarray.offsets)
160 content = listoffsetarray.content[offsets[0] : offsets[-1]]
161
162 if content.is_IndexedType:
163 content = content.project()
164
165 nextcontent, nextoffsets = lengths_of(
166 ak.highlevel.Array(content), offsets - offsets[0]
167 )
168 return ak.contents.ListOffsetArray(
169 ak.index.Index64(nextoffsets),
170 ak.contents.NumpyArray(nextcontent),
171 )
172
173 listoffsetarray = layout.toListOffsetArray64(False)
174 offsets = nplike.index_nplike.asarray(listoffsetarray.offsets)
175 content = listoffsetarray.content[offsets[0] : offsets[-1]]
176
177 if content.is_IndexedType:
178 content = content.project()
179
180 if not isinstance(
181 content, (ak.contents.NumpyArray, ak.contents.EmptyArray)
182 ):
183 raise ak._errors.wrap_error(
184 NotImplementedError(
185 "run_lengths on "
186 + type(layout).__name__
187 + " with content "
188 + type(content).__name__
189 )
190 )
191
192 nextcontent, nextoffsets = lengths_of(
193 nplike.asarray(content), offsets - offsets[0]
194 )
195 return ak.contents.ListOffsetArray(
196 ak.index.Index64(nextoffsets),
197 ak.contents.NumpyArray(nextcontent),
198 )
199 else:
200 return None
201
202 layout = ak.operations.to_layout(array, allow_record=False, allow_other=False)
203
204 out = layout.recursively_apply(action, behavior)
205
206 return ak._util.wrap(out, behavior, highlevel)
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/awkward/operations/ak_run_lengths.py b/src/awkward/operations/ak_run_lengths.py
--- a/src/awkward/operations/ak_run_lengths.py
+++ b/src/awkward/operations/ak_run_lengths.py
@@ -105,8 +105,21 @@
if isinstance(diffs, ak.highlevel.Array):
diffs = nplike.asarray(diffs)
+ # Do we have list boundaries to consider?
if offsets is not None:
- diffs[offsets[1:-1] - 1] = True
+ # When checking to see whether one element equals its following neighbour
+ # we also want to break runs at list boundaries. The comparison for offset `i`
+ # occurs at `i-1` in `diffs`. Consider this example with an empty sublist:
+ # data = [1 1 2 2 2 3 4 4 5 ]
+ # offsets = [0 6 9 9]
+ # (data) diffs = [ 0 1 0 0 1 1 0 1 ]
+ # diffs index = [ 0 1 2 3 4 5 6 7 ]
+ # boundary diff ^
+ # To consider only the interior boundaries, we ignore the start and end
+ # offset values. These can be repeated with empty sublists, so we mask them out.
+ is_interior = nplike.logical_and(0 < offsets, offsets < len(data) - 1)
+ interior_offsets = offsets[is_interior]
+ diffs[interior_offsets - 1] = True
positions = nplike.index_nplike.nonzero(diffs)[0]
full_positions = nplike.index_nplike.empty(len(positions) + 2, np.int64)
full_positions[0] = 0
@@ -155,6 +168,8 @@
layout.content.parameter("__array__") == "string"
or layout.content.parameter("__array__") == "bytestring"
):
+ # We also want to trim the _upper_ bound of content,
+ # so we manually convert the list type to zero-based
listoffsetarray = layout.toListOffsetArray64(False)
offsets = nplike.index_nplike.asarray(listoffsetarray.offsets)
content = listoffsetarray.content[offsets[0] : offsets[-1]]
| {"golden_diff": "diff --git a/src/awkward/operations/ak_run_lengths.py b/src/awkward/operations/ak_run_lengths.py\n--- a/src/awkward/operations/ak_run_lengths.py\n+++ b/src/awkward/operations/ak_run_lengths.py\n@@ -105,8 +105,21 @@\n \n if isinstance(diffs, ak.highlevel.Array):\n diffs = nplike.asarray(diffs)\n+ # Do we have list boundaries to consider?\n if offsets is not None:\n- diffs[offsets[1:-1] - 1] = True\n+ # When checking to see whether one element equals its following neighbour\n+ # we also want to break runs at list boundaries. The comparison for offset `i`\n+ # occurs at `i-1` in `diffs`. Consider this example with an empty sublist:\n+ # data = [1 1 2 2 2 3 4 4 5 ]\n+ # offsets = [0 6 9 9]\n+ # (data) diffs = [ 0 1 0 0 1 1 0 1 ]\n+ # diffs index = [ 0 1 2 3 4 5 6 7 ]\n+ # boundary diff ^\n+ # To consider only the interior boundaries, we ignore the start and end\n+ # offset values. These can be repeated with empty sublists, so we mask them out.\n+ is_interior = nplike.logical_and(0 < offsets, offsets < len(data) - 1)\n+ interior_offsets = offsets[is_interior]\n+ diffs[interior_offsets - 1] = True\n positions = nplike.index_nplike.nonzero(diffs)[0]\n full_positions = nplike.index_nplike.empty(len(positions) + 2, np.int64)\n full_positions[0] = 0\n@@ -155,6 +168,8 @@\n layout.content.parameter(\"__array__\") == \"string\"\n or layout.content.parameter(\"__array__\") == \"bytestring\"\n ):\n+ # We also want to trim the _upper_ bound of content,\n+ # so we manually convert the list type to zero-based\n listoffsetarray = layout.toListOffsetArray64(False)\n offsets = nplike.index_nplike.asarray(listoffsetarray.offsets)\n content = listoffsetarray.content[offsets[0] : offsets[-1]]\n", "issue": "`ak.run_lengths` does not handle leading/trailing empty sublists\n### Version of Awkward Array\r\n\r\nmain\r\n\r\n### Description and code to reproduce\r\n\r\nThis also exists in v1.\r\n\r\nThe following code raises an `Exception`:\r\n```python\r\nimport awkward as ak\r\nimport numpy as np\r\n\r\n\r\nlayout = ak.contents.ListOffsetArray(\r\n ak.index.Index(np.array([0, 2, 4, 4], dtype=np.int64)),\r\n ak.contents.NumpyArray(\r\n np.arange(4)\r\n )\r\n)\r\nak.run_lengths(layout)\r\n```\r\n\r\nAlso, we should use `regularize_numpyarray` to handle `NumpyArray`, which currently just raises an Exception.\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport awkward as ak\n\nnp = ak.nplikes.NumpyMetadata.instance()\n\n\ndef run_lengths(array, highlevel=True, behavior=None):\n\n \"\"\"\n Args:\n array: Data containing runs of numbers to count.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Computes the lengths of sequences of identical values at the deepest level\n of nesting, returning an array with the same structure but with `int64` type.\n\n For example,\n\n >>> array = ak.Array([1.1, 1.1, 1.1, 2.2, 3.3, 3.3, 4.4, 4.4, 5.5])\n >>> ak.run_lengths(array)\n <Array [3, 1, 2, 2, 1] type='5 * int64'>\n\n There are 3 instances of 1.1, followed by 1 instance of 2.2, 2 instances of 3.3,\n 2 instances of 4.4, and 1 instance of 5.5.\n\n The order and uniqueness of the input data doesn't matter,\n\n >>> array = ak.Array([1.1, 1.1, 1.1, 5.5, 4.4, 4.4, 1.1, 1.1, 5.5])\n >>> ak.run_lengths(array)\n <Array [3, 1, 2, 2, 1] type='5 * int64'>\n\n just the difference between each value 
and its neighbors.\n\n The data can be nested, but runs don't cross list boundaries.\n\n >>> array = ak.Array([[1.1, 1.1, 1.1, 2.2, 3.3], [3.3, 4.4], [4.4, 5.5]])\n >>> ak.run_lengths(array)\n <Array [[3, 1, 1], [1, 1], [1, 1]] type='3 * var * int64'>\n\n This function recognizes strings as distinguishable values.\n\n >>> array = ak.Array([[\"one\", \"one\"], [\"one\", \"two\", \"two\"], [\"three\", \"two\", \"two\"]])\n >>> ak.run_lengths(array)\n <Array [[2], [1, 2], [1, 2]] type='3 * var * int64'>\n\n Note that this can be combined with #ak.argsort and #ak.unflatten to compute\n a \"group by\" operation:\n\n >>> array = ak.Array([{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 1, \"y\": 1.1},\n ... {\"x\": 3, \"y\": 3.3}, {\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}])\n >>> sorted = array[ak.argsort(array.x)]\n >>> sorted.x\n <Array [1, 1, 1, 2, 2, 3] type='6 * int64'>\n >>> ak.run_lengths(sorted.x)\n <Array [3, 2, 1] type='3 * int64'>\n >>> ak.unflatten(sorted, ak.run_lengths(sorted.x)).tolist()\n [[{'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}],\n [{'x': 2, 'y': 2.2}, {'x': 2, 'y': 2.2}],\n [{'x': 3, 'y': 3.3}]]\n\n Unlike a database \"group by,\" this operation can be applied in bulk to many sublists\n (though the run lengths need to be fully flattened to be used as `counts` for\n #ak.unflatten, and you need to specify `axis=-1` as the depth).\n\n >>> array = ak.Array([[{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 1, \"y\": 1.1}],\n ... [{\"x\": 3, \"y\": 3.3}, {\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}]])\n >>> sorted = array[ak.argsort(array.x)]\n >>> sorted.x\n <Array [[1, 1, 2], [1, 2, 3]] type='2 * var * int64'>\n >>> ak.run_lengths(sorted.x)\n <Array [[2, 1], [1, 1, 1]] type='2 * var * int64'>\n >>> counts = ak.flatten(ak.run_lengths(sorted.x), axis=None)\n >>> ak.unflatten(sorted, counts, axis=-1).tolist()\n [[[{'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}],\n [{'x': 2, 'y': 2.2}]],\n [[{'x': 1, 'y': 1.1}],\n [{'x': 2, 'y': 2.2}],\n [{'x': 3, 'y': 3.3}]]]\n\n See also #ak.num, #ak.argsort, #ak.unflatten.\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.run_lengths\",\n dict(\n array=array,\n highlevel=highlevel,\n behavior=behavior,\n ),\n ):\n return _impl(array, highlevel, behavior)\n\n\ndef _impl(array, highlevel, behavior):\n nplike = ak.nplikes.nplike_of(array)\n\n def lengths_of(data, offsets):\n if len(data) == 0:\n return nplike.index_nplike.empty(0, np.int64), offsets\n else:\n diffs = data[1:] != data[:-1]\n\n if isinstance(diffs, ak.highlevel.Array):\n diffs = nplike.asarray(diffs)\n if offsets is not None:\n diffs[offsets[1:-1] - 1] = True\n positions = nplike.index_nplike.nonzero(diffs)[0]\n full_positions = nplike.index_nplike.empty(len(positions) + 2, np.int64)\n full_positions[0] = 0\n full_positions[-1] = len(data)\n full_positions[1:-1] = positions + 1\n\n nextcontent = full_positions[1:] - full_positions[:-1]\n if offsets is None:\n nextoffsets = None\n else:\n nextoffsets = nplike.index_nplike.searchsorted(\n full_positions, offsets, side=\"left\"\n )\n return nextcontent, nextoffsets\n\n def action(layout, **kwargs):\n if layout.branch_depth == (False, 1):\n if layout.is_IndexedType:\n layout = layout.project()\n\n if (\n layout.parameter(\"__array__\") == \"string\"\n or layout.parameter(\"__array__\") == \"bytestring\"\n ):\n nextcontent, _ = lengths_of(ak.highlevel.Array(layout), None)\n return ak.contents.NumpyArray(nextcontent)\n\n if not isinstance(layout, (ak.contents.NumpyArray, ak.contents.EmptyArray)):\n 
raise ak._errors.wrap_error(\n NotImplementedError(\"run_lengths on \" + type(layout).__name__)\n )\n\n nextcontent, _ = lengths_of(nplike.asarray(layout), None)\n return ak.contents.NumpyArray(nextcontent)\n\n elif layout.branch_depth == (False, 2):\n if layout.is_IndexedType:\n layout = layout.project()\n\n if not layout.is_ListType:\n raise ak._errors.wrap_error(\n NotImplementedError(\"run_lengths on \" + type(layout).__name__)\n )\n\n if (\n layout.content.parameter(\"__array__\") == \"string\"\n or layout.content.parameter(\"__array__\") == \"bytestring\"\n ):\n listoffsetarray = layout.toListOffsetArray64(False)\n offsets = nplike.index_nplike.asarray(listoffsetarray.offsets)\n content = listoffsetarray.content[offsets[0] : offsets[-1]]\n\n if content.is_IndexedType:\n content = content.project()\n\n nextcontent, nextoffsets = lengths_of(\n ak.highlevel.Array(content), offsets - offsets[0]\n )\n return ak.contents.ListOffsetArray(\n ak.index.Index64(nextoffsets),\n ak.contents.NumpyArray(nextcontent),\n )\n\n listoffsetarray = layout.toListOffsetArray64(False)\n offsets = nplike.index_nplike.asarray(listoffsetarray.offsets)\n content = listoffsetarray.content[offsets[0] : offsets[-1]]\n\n if content.is_IndexedType:\n content = content.project()\n\n if not isinstance(\n content, (ak.contents.NumpyArray, ak.contents.EmptyArray)\n ):\n raise ak._errors.wrap_error(\n NotImplementedError(\n \"run_lengths on \"\n + type(layout).__name__\n + \" with content \"\n + type(content).__name__\n )\n )\n\n nextcontent, nextoffsets = lengths_of(\n nplike.asarray(content), offsets - offsets[0]\n )\n return ak.contents.ListOffsetArray(\n ak.index.Index64(nextoffsets),\n ak.contents.NumpyArray(nextcontent),\n )\n else:\n return None\n\n layout = ak.operations.to_layout(array, allow_record=False, allow_other=False)\n\n out = layout.recursively_apply(action, behavior)\n\n return ak._util.wrap(out, behavior, highlevel)\n", "path": "src/awkward/operations/ak_run_lengths.py"}], "after_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport awkward as ak\n\nnp = ak.nplikes.NumpyMetadata.instance()\n\n\ndef run_lengths(array, highlevel=True, behavior=None):\n\n \"\"\"\n Args:\n array: Data containing runs of numbers to count.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Computes the lengths of sequences of identical values at the deepest level\n of nesting, returning an array with the same structure but with `int64` type.\n\n For example,\n\n >>> array = ak.Array([1.1, 1.1, 1.1, 2.2, 3.3, 3.3, 4.4, 4.4, 5.5])\n >>> ak.run_lengths(array)\n <Array [3, 1, 2, 2, 1] type='5 * int64'>\n\n There are 3 instances of 1.1, followed by 1 instance of 2.2, 2 instances of 3.3,\n 2 instances of 4.4, and 1 instance of 5.5.\n\n The order and uniqueness of the input data doesn't matter,\n\n >>> array = ak.Array([1.1, 1.1, 1.1, 5.5, 4.4, 4.4, 1.1, 1.1, 5.5])\n >>> ak.run_lengths(array)\n <Array [3, 1, 2, 2, 1] type='5 * int64'>\n\n just the difference between each value and its neighbors.\n\n The data can be nested, but runs don't cross list boundaries.\n\n >>> array = ak.Array([[1.1, 1.1, 1.1, 2.2, 3.3], [3.3, 4.4], [4.4, 5.5]])\n >>> ak.run_lengths(array)\n <Array [[3, 1, 1], [1, 1], [1, 1]] type='3 * var * int64'>\n\n This function recognizes strings as distinguishable values.\n\n >>> array = 
ak.Array([[\"one\", \"one\"], [\"one\", \"two\", \"two\"], [\"three\", \"two\", \"two\"]])\n >>> ak.run_lengths(array)\n <Array [[2], [1, 2], [1, 2]] type='3 * var * int64'>\n\n Note that this can be combined with #ak.argsort and #ak.unflatten to compute\n a \"group by\" operation:\n\n >>> array = ak.Array([{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 1, \"y\": 1.1},\n ... {\"x\": 3, \"y\": 3.3}, {\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}])\n >>> sorted = array[ak.argsort(array.x)]\n >>> sorted.x\n <Array [1, 1, 1, 2, 2, 3] type='6 * int64'>\n >>> ak.run_lengths(sorted.x)\n <Array [3, 2, 1] type='3 * int64'>\n >>> ak.unflatten(sorted, ak.run_lengths(sorted.x)).tolist()\n [[{'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}],\n [{'x': 2, 'y': 2.2}, {'x': 2, 'y': 2.2}],\n [{'x': 3, 'y': 3.3}]]\n\n Unlike a database \"group by,\" this operation can be applied in bulk to many sublists\n (though the run lengths need to be fully flattened to be used as `counts` for\n #ak.unflatten, and you need to specify `axis=-1` as the depth).\n\n >>> array = ak.Array([[{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 1, \"y\": 1.1}],\n ... [{\"x\": 3, \"y\": 3.3}, {\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}]])\n >>> sorted = array[ak.argsort(array.x)]\n >>> sorted.x\n <Array [[1, 1, 2], [1, 2, 3]] type='2 * var * int64'>\n >>> ak.run_lengths(sorted.x)\n <Array [[2, 1], [1, 1, 1]] type='2 * var * int64'>\n >>> counts = ak.flatten(ak.run_lengths(sorted.x), axis=None)\n >>> ak.unflatten(sorted, counts, axis=-1).tolist()\n [[[{'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}],\n [{'x': 2, 'y': 2.2}]],\n [[{'x': 1, 'y': 1.1}],\n [{'x': 2, 'y': 2.2}],\n [{'x': 3, 'y': 3.3}]]]\n\n See also #ak.num, #ak.argsort, #ak.unflatten.\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.run_lengths\",\n dict(\n array=array,\n highlevel=highlevel,\n behavior=behavior,\n ),\n ):\n return _impl(array, highlevel, behavior)\n\n\ndef _impl(array, highlevel, behavior):\n nplike = ak.nplikes.nplike_of(array)\n\n def lengths_of(data, offsets):\n if len(data) == 0:\n return nplike.index_nplike.empty(0, np.int64), offsets\n else:\n diffs = data[1:] != data[:-1]\n\n if isinstance(diffs, ak.highlevel.Array):\n diffs = nplike.asarray(diffs)\n # Do we have list boundaries to consider?\n if offsets is not None:\n # When checking to see whether one element equals its following neighbour\n # we also want to break runs at list boundaries. The comparison for offset `i`\n # occurs at `i-1` in `diffs`. Consider this example with an empty sublist:\n # data = [1 1 2 2 2 3 4 4 5 ]\n # offsets = [0 6 9 9]\n # (data) diffs = [ 0 1 0 0 1 1 0 1 ]\n # diffs index = [ 0 1 2 3 4 5 6 7 ]\n # boundary diff ^\n # To consider only the interior boundaries, we ignore the start and end\n # offset values. 
These can be repeated with empty sublists, so we mask them out.\n is_interior = nplike.logical_and(0 < offsets, offsets < len(data) - 1)\n interior_offsets = offsets[is_interior]\n diffs[interior_offsets - 1] = True\n positions = nplike.index_nplike.nonzero(diffs)[0]\n full_positions = nplike.index_nplike.empty(len(positions) + 2, np.int64)\n full_positions[0] = 0\n full_positions[-1] = len(data)\n full_positions[1:-1] = positions + 1\n\n nextcontent = full_positions[1:] - full_positions[:-1]\n if offsets is None:\n nextoffsets = None\n else:\n nextoffsets = nplike.index_nplike.searchsorted(\n full_positions, offsets, side=\"left\"\n )\n return nextcontent, nextoffsets\n\n def action(layout, **kwargs):\n if layout.branch_depth == (False, 1):\n if layout.is_IndexedType:\n layout = layout.project()\n\n if (\n layout.parameter(\"__array__\") == \"string\"\n or layout.parameter(\"__array__\") == \"bytestring\"\n ):\n nextcontent, _ = lengths_of(ak.highlevel.Array(layout), None)\n return ak.contents.NumpyArray(nextcontent)\n\n if not isinstance(layout, (ak.contents.NumpyArray, ak.contents.EmptyArray)):\n raise ak._errors.wrap_error(\n NotImplementedError(\"run_lengths on \" + type(layout).__name__)\n )\n\n nextcontent, _ = lengths_of(nplike.asarray(layout), None)\n return ak.contents.NumpyArray(nextcontent)\n\n elif layout.branch_depth == (False, 2):\n if layout.is_IndexedType:\n layout = layout.project()\n\n if not layout.is_ListType:\n raise ak._errors.wrap_error(\n NotImplementedError(\"run_lengths on \" + type(layout).__name__)\n )\n\n if (\n layout.content.parameter(\"__array__\") == \"string\"\n or layout.content.parameter(\"__array__\") == \"bytestring\"\n ):\n # We also want to trim the _upper_ bound of content,\n # so we manually convert the list type to zero-based\n listoffsetarray = layout.toListOffsetArray64(False)\n offsets = nplike.index_nplike.asarray(listoffsetarray.offsets)\n content = listoffsetarray.content[offsets[0] : offsets[-1]]\n\n if content.is_IndexedType:\n content = content.project()\n\n nextcontent, nextoffsets = lengths_of(\n ak.highlevel.Array(content), offsets - offsets[0]\n )\n return ak.contents.ListOffsetArray(\n ak.index.Index64(nextoffsets),\n ak.contents.NumpyArray(nextcontent),\n )\n\n listoffsetarray = layout.toListOffsetArray64(False)\n offsets = nplike.index_nplike.asarray(listoffsetarray.offsets)\n content = listoffsetarray.content[offsets[0] : offsets[-1]]\n\n if content.is_IndexedType:\n content = content.project()\n\n if not isinstance(\n content, (ak.contents.NumpyArray, ak.contents.EmptyArray)\n ):\n raise ak._errors.wrap_error(\n NotImplementedError(\n \"run_lengths on \"\n + type(layout).__name__\n + \" with content \"\n + type(content).__name__\n )\n )\n\n nextcontent, nextoffsets = lengths_of(\n nplike.asarray(content), offsets - offsets[0]\n )\n return ak.contents.ListOffsetArray(\n ak.index.Index64(nextoffsets),\n ak.contents.NumpyArray(nextcontent),\n )\n else:\n return None\n\n layout = ak.operations.to_layout(array, allow_record=False, allow_other=False)\n\n out = layout.recursively_apply(action, behavior)\n\n return ak._util.wrap(out, behavior, highlevel)\n", "path": "src/awkward/operations/ak_run_lengths.py"}]} | 3,133 | 553 |
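The boundary-masking logic documented in the patch above can be exercised on its own with plain NumPy; this reproduces the worked example from the diff comments (flattened run lengths across the two non-empty sublists):

```python
import numpy as np

data = np.array([1, 1, 2, 2, 2, 3, 4, 4, 5])
offsets = np.array([0, 6, 9, 9])          # note the trailing empty sublist

diffs = data[1:] != data[:-1]
# break runs at interior list boundaries only; leading/trailing (and repeated)
# offsets would otherwise index diffs out of range
is_interior = np.logical_and(0 < offsets, offsets < len(data) - 1)
diffs[offsets[is_interior] - 1] = True

positions = np.nonzero(diffs)[0]
full_positions = np.concatenate(([0], positions + 1, [len(data)]))
print(np.diff(full_positions))            # [2 3 1 2 1]
```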
gh_patches_debug_13767 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1504 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pending follow requests still show up in suggested users
An additional signal is needed to remove users from suggestions when a follow request is created
--- END ISSUE ---
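In outline, the missing piece is a `post_save` receiver on the follow-request model mirroring the existing follow handling. A sketch under the assumption that the model is called `UserFollowRequest` and exposes the same `user_subject`/`user_object` fields as `UserFollows` (the `remove_suggestion_task` it calls is the existing celery task in the module listed below):

```python
# sketch only: assumes models.UserFollowRequest mirrors models.UserFollows
from django.db.models import signals
from django.dispatch import receiver

from bookwyrm import models


@receiver(signals.post_save, sender=models.UserFollowRequest)
def update_suggestions_on_follow_request(sender, instance, created, *args, **kwargs):
    """Drop the requested user from the requester's suggestions immediately."""
    if not created or not instance.user_object.discoverable:
        return
    if instance.user_subject.local:
        # remove_suggestion_task is the existing task defined in suggested_users.py
        remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)
```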
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/suggested_users.py`
Content:
```
1 """ store recommended follows in redis """
2 import math
3 import logging
4 from django.dispatch import receiver
5 from django.db.models import signals, Count, Q
6
7 from bookwyrm import models
8 from bookwyrm.redis_store import RedisStore, r
9 from bookwyrm.tasks import app
10
11
12 logger = logging.getLogger(__name__)
13
14
15 class SuggestedUsers(RedisStore):
16 """suggested users for a user"""
17
18 max_length = 30
19
20 def get_rank(self, obj):
21 """get computed rank"""
22 return obj.mutuals # + (1.0 - (1.0 / (obj.shared_books + 1)))
23
24 def store_id(self, user): # pylint: disable=no-self-use
25 """the key used to store this user's recs"""
26 if isinstance(user, int):
27 return f"{user}-suggestions"
28 return f"{user.id}-suggestions"
29
30 def get_counts_from_rank(self, rank): # pylint: disable=no-self-use
31 """calculate mutuals count and shared books count from rank"""
32 return {
33 "mutuals": math.floor(rank),
34 # "shared_books": int(1 / (-1 * (rank % 1 - 1))) - 1,
35 }
36
37 def get_objects_for_store(self, store):
38 """a list of potential follows for a user"""
39 user = models.User.objects.get(id=store.split("-")[0])
40
41 return get_annotated_users(
42 user,
43 ~Q(id=user.id),
44 ~Q(followers=user),
45 ~Q(follower_requests=user),
46 bookwyrm_user=True,
47 )
48
49 def get_stores_for_object(self, obj):
50 return [self.store_id(u) for u in self.get_users_for_object(obj)]
51
52 def get_users_for_object(self, obj): # pylint: disable=no-self-use
53 """given a user, who might want to follow them"""
54 return models.User.objects.filter(local=True,).exclude(
55 Q(id=obj.id) | Q(followers=obj) | Q(id__in=obj.blocks.all()) | Q(blocks=obj)
56 )
57
58 def rerank_obj(self, obj, update_only=True):
59 """update all the instances of this user with new ranks"""
60 pipeline = r.pipeline()
61 for store_user in self.get_users_for_object(obj):
62 annotated_user = get_annotated_users(
63 store_user,
64 id=obj.id,
65 ).first()
66 if not annotated_user:
67 continue
68
69 pipeline.zadd(
70 self.store_id(store_user),
71 self.get_value(annotated_user),
72 xx=update_only,
73 )
74 pipeline.execute()
75
76 def rerank_user_suggestions(self, user):
77 """update the ranks of the follows suggested to a user"""
78 self.populate_store(self.store_id(user))
79
80 def remove_suggestion(self, user, suggested_user):
81 """take a user out of someone's suggestions"""
82 self.bulk_remove_objects_from_store([suggested_user], self.store_id(user))
83
84 def get_suggestions(self, user):
85 """get suggestions"""
86 values = self.get_store(self.store_id(user), withscores=True)
87 results = []
88 # annotate users with mutuals and shared book counts
89 for user_id, rank in values:
90 counts = self.get_counts_from_rank(rank)
91 try:
92 user = models.User.objects.get(
93 id=user_id, is_active=True, bookwyrm_user=True
94 )
95 except models.User.DoesNotExist as err:
96 # if this happens, the suggestions are janked way up
97 logger.exception(err)
98 continue
99 user.mutuals = counts["mutuals"]
100 # user.shared_books = counts["shared_books"]
101 results.append(user)
102 if len(results) >= 5:
103 break
104 return results
105
106
107 def get_annotated_users(viewer, *args, **kwargs):
108 """Users, annotated with things they have in common"""
109 return (
110 models.User.objects.filter(discoverable=True, is_active=True, *args, **kwargs)
111 .exclude(Q(id__in=viewer.blocks.all()) | Q(blocks=viewer))
112 .annotate(
113 mutuals=Count(
114 "followers",
115 filter=Q(
116 ~Q(id=viewer.id),
117 ~Q(id__in=viewer.following.all()),
118 followers__in=viewer.following.all(),
119 ),
120 distinct=True,
121 ),
122 # shared_books=Count(
123 # "shelfbook",
124 # filter=Q(
125 # ~Q(id=viewer.id),
126 # shelfbook__book__parent_work__in=[
127 # s.book.parent_work for s in viewer.shelfbook_set.all()
128 # ],
129 # ),
130 # distinct=True,
131 # ),
132 )
133 )
134
135
136 suggested_users = SuggestedUsers()
137
138
139 @receiver(signals.post_save, sender=models.UserFollows)
140 # pylint: disable=unused-argument
141 def update_suggestions_on_follow(sender, instance, created, *args, **kwargs):
142 """remove a follow from the recs and update the ranks"""
143 if not created or not instance.user_object.discoverable:
144 return
145
146 if instance.user_subject.local:
147 remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)
148 rerank_user_task.delay(instance.user_object.id, update_only=False)
149
150
151 @receiver(signals.post_save, sender=models.UserBlocks)
152 # pylint: disable=unused-argument
153 def update_suggestions_on_block(sender, instance, *args, **kwargs):
154 """remove blocked users from recs"""
155 if instance.user_subject.local and instance.user_object.discoverable:
156 remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)
157 if instance.user_object.local and instance.user_subject.discoverable:
158 remove_suggestion_task.delay(instance.user_object.id, instance.user_subject.id)
159
160
161 @receiver(signals.post_delete, sender=models.UserFollows)
162 # pylint: disable=unused-argument
163 def update_suggestions_on_unfollow(sender, instance, **kwargs):
164 """update rankings, but don't re-suggest because it was probably intentional"""
165 if instance.user_object.discoverable:
166 rerank_user_task.delay(instance.user_object.id, update_only=False)
167
168
169 # @receiver(signals.post_save, sender=models.ShelfBook)
170 # @receiver(signals.post_delete, sender=models.ShelfBook)
171 # # pylint: disable=unused-argument
172 # def update_rank_on_shelving(sender, instance, *args, **kwargs):
173 # """when a user shelves or unshelves a book, re-compute their rank"""
174 # # if it's a local user, re-calculate who is rec'ed to them
175 # if instance.user.local:
176 # rerank_suggestions_task.delay(instance.user.id)
177 #
178 # # if the user is discoverable, update their rankings
179 # if instance.user.discoverable:
180 # rerank_user_task.delay(instance.user.id)
181
182
183 @receiver(signals.post_save, sender=models.User)
184 # pylint: disable=unused-argument, too-many-arguments
185 def update_user(sender, instance, created, update_fields=None, **kwargs):
186 """an updated user, neat"""
187 # a new user is found, create suggestions for them
188 if created and instance.local:
189 rerank_suggestions_task.delay(instance.id)
190
191 # we know what fields were updated and discoverability didn't change
192 if not instance.bookwyrm_user or (
193 update_fields and not "discoverable" in update_fields
194 ):
195 return
196
197 # deleted the user
198 if not created and not instance.is_active:
199 remove_user_task.delay(instance.id)
200 return
201
202 # this happens on every save, not just when discoverability changes, annoyingly
203 if instance.discoverable:
204 rerank_user_task.delay(instance.id, update_only=False)
205 elif not created:
206 remove_user_task.delay(instance.id)
207
208
209 @receiver(signals.post_save, sender=models.FederatedServer)
210 def domain_level_update(sender, instance, created, update_fields=None, **kwargs):
211 """remove users on a domain block"""
212 if (
213 not update_fields
214 or "status" not in update_fields
215 or instance.application_type != "bookwyrm"
216 ):
217 return
218
219 if instance.status == "blocked":
220 bulk_remove_instance_task.delay(instance.id)
221 return
222 bulk_add_instance_task.delay(instance.id)
223
224
225 # ------------------- TASKS
226
227
228 @app.task(queue="low_priority")
229 def rerank_suggestions_task(user_id):
230 """do the hard work in celery"""
231 suggested_users.rerank_user_suggestions(user_id)
232
233
234 @app.task(queue="low_priority")
235 def rerank_user_task(user_id, update_only=False):
236 """do the hard work in celery"""
237 user = models.User.objects.get(id=user_id)
238 suggested_users.rerank_obj(user, update_only=update_only)
239
240
241 @app.task(queue="low_priority")
242 def remove_user_task(user_id):
243 """do the hard work in celery"""
244 user = models.User.objects.get(id=user_id)
245 suggested_users.remove_object_from_related_stores(user)
246
247
248 @app.task(queue="medium_priority")
249 def remove_suggestion_task(user_id, suggested_user_id):
250 """remove a specific user from a specific user's suggestions"""
251 suggested_user = models.User.objects.get(id=suggested_user_id)
252 suggested_users.remove_suggestion(user_id, suggested_user)
253
254
255 @app.task(queue="low_priority")
256 def bulk_remove_instance_task(instance_id):
257 """remove a bunch of users from recs"""
258 for user in models.User.objects.filter(federated_server__id=instance_id):
259 suggested_users.remove_object_from_related_stores(user)
260
261
262 @app.task(queue="low_priority")
263 def bulk_add_instance_task(instance_id):
264 """remove a bunch of users from recs"""
265 for user in models.User.objects.filter(federated_server__id=instance_id):
266 suggested_users.rerank_obj(user, update_only=False)
267
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/suggested_users.py b/bookwyrm/suggested_users.py
--- a/bookwyrm/suggested_users.py
+++ b/bookwyrm/suggested_users.py
@@ -148,6 +148,17 @@
rerank_user_task.delay(instance.user_object.id, update_only=False)
+@receiver(signals.post_save, sender=models.UserFollowRequest)
+# pylint: disable=unused-argument
+def update_suggestions_on_follow_request(sender, instance, created, *args, **kwargs):
+ """remove a follow from the recs and update the ranks"""
+ if not created or not instance.user_object.discoverable:
+ return
+
+ if instance.user_subject.local:
+ remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)
+
+
@receiver(signals.post_save, sender=models.UserBlocks)
# pylint: disable=unused-argument
def update_suggestions_on_block(sender, instance, *args, **kwargs):
| {"golden_diff": "diff --git a/bookwyrm/suggested_users.py b/bookwyrm/suggested_users.py\n--- a/bookwyrm/suggested_users.py\n+++ b/bookwyrm/suggested_users.py\n@@ -148,6 +148,17 @@\n rerank_user_task.delay(instance.user_object.id, update_only=False)\n \n \n+@receiver(signals.post_save, sender=models.UserFollowRequest)\n+# pylint: disable=unused-argument\n+def update_suggestions_on_follow_request(sender, instance, created, *args, **kwargs):\n+ \"\"\"remove a follow from the recs and update the ranks\"\"\"\n+ if not created or not instance.user_object.discoverable:\n+ return\n+\n+ if instance.user_subject.local:\n+ remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)\n+\n+\n @receiver(signals.post_save, sender=models.UserBlocks)\n # pylint: disable=unused-argument\n def update_suggestions_on_block(sender, instance, *args, **kwargs):\n", "issue": "Pending follow requests still show up in suggested users\nAn additional signal is needed to remove users from suggestions when a follow request is created\n", "before_files": [{"content": "\"\"\" store recommended follows in redis \"\"\"\nimport math\nimport logging\nfrom django.dispatch import receiver\nfrom django.db.models import signals, Count, Q\n\nfrom bookwyrm import models\nfrom bookwyrm.redis_store import RedisStore, r\nfrom bookwyrm.tasks import app\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass SuggestedUsers(RedisStore):\n \"\"\"suggested users for a user\"\"\"\n\n max_length = 30\n\n def get_rank(self, obj):\n \"\"\"get computed rank\"\"\"\n return obj.mutuals # + (1.0 - (1.0 / (obj.shared_books + 1)))\n\n def store_id(self, user): # pylint: disable=no-self-use\n \"\"\"the key used to store this user's recs\"\"\"\n if isinstance(user, int):\n return f\"{user}-suggestions\"\n return f\"{user.id}-suggestions\"\n\n def get_counts_from_rank(self, rank): # pylint: disable=no-self-use\n \"\"\"calculate mutuals count and shared books count from rank\"\"\"\n return {\n \"mutuals\": math.floor(rank),\n # \"shared_books\": int(1 / (-1 * (rank % 1 - 1))) - 1,\n }\n\n def get_objects_for_store(self, store):\n \"\"\"a list of potential follows for a user\"\"\"\n user = models.User.objects.get(id=store.split(\"-\")[0])\n\n return get_annotated_users(\n user,\n ~Q(id=user.id),\n ~Q(followers=user),\n ~Q(follower_requests=user),\n bookwyrm_user=True,\n )\n\n def get_stores_for_object(self, obj):\n return [self.store_id(u) for u in self.get_users_for_object(obj)]\n\n def get_users_for_object(self, obj): # pylint: disable=no-self-use\n \"\"\"given a user, who might want to follow them\"\"\"\n return models.User.objects.filter(local=True,).exclude(\n Q(id=obj.id) | Q(followers=obj) | Q(id__in=obj.blocks.all()) | Q(blocks=obj)\n )\n\n def rerank_obj(self, obj, update_only=True):\n \"\"\"update all the instances of this user with new ranks\"\"\"\n pipeline = r.pipeline()\n for store_user in self.get_users_for_object(obj):\n annotated_user = get_annotated_users(\n store_user,\n id=obj.id,\n ).first()\n if not annotated_user:\n continue\n\n pipeline.zadd(\n self.store_id(store_user),\n self.get_value(annotated_user),\n xx=update_only,\n )\n pipeline.execute()\n\n def rerank_user_suggestions(self, user):\n \"\"\"update the ranks of the follows suggested to a user\"\"\"\n self.populate_store(self.store_id(user))\n\n def remove_suggestion(self, user, suggested_user):\n \"\"\"take a user out of someone's suggestions\"\"\"\n self.bulk_remove_objects_from_store([suggested_user], self.store_id(user))\n\n def get_suggestions(self, 
user):\n \"\"\"get suggestions\"\"\"\n values = self.get_store(self.store_id(user), withscores=True)\n results = []\n # annotate users with mutuals and shared book counts\n for user_id, rank in values:\n counts = self.get_counts_from_rank(rank)\n try:\n user = models.User.objects.get(\n id=user_id, is_active=True, bookwyrm_user=True\n )\n except models.User.DoesNotExist as err:\n # if this happens, the suggestions are janked way up\n logger.exception(err)\n continue\n user.mutuals = counts[\"mutuals\"]\n # user.shared_books = counts[\"shared_books\"]\n results.append(user)\n if len(results) >= 5:\n break\n return results\n\n\ndef get_annotated_users(viewer, *args, **kwargs):\n \"\"\"Users, annotated with things they have in common\"\"\"\n return (\n models.User.objects.filter(discoverable=True, is_active=True, *args, **kwargs)\n .exclude(Q(id__in=viewer.blocks.all()) | Q(blocks=viewer))\n .annotate(\n mutuals=Count(\n \"followers\",\n filter=Q(\n ~Q(id=viewer.id),\n ~Q(id__in=viewer.following.all()),\n followers__in=viewer.following.all(),\n ),\n distinct=True,\n ),\n # shared_books=Count(\n # \"shelfbook\",\n # filter=Q(\n # ~Q(id=viewer.id),\n # shelfbook__book__parent_work__in=[\n # s.book.parent_work for s in viewer.shelfbook_set.all()\n # ],\n # ),\n # distinct=True,\n # ),\n )\n )\n\n\nsuggested_users = SuggestedUsers()\n\n\n@receiver(signals.post_save, sender=models.UserFollows)\n# pylint: disable=unused-argument\ndef update_suggestions_on_follow(sender, instance, created, *args, **kwargs):\n \"\"\"remove a follow from the recs and update the ranks\"\"\"\n if not created or not instance.user_object.discoverable:\n return\n\n if instance.user_subject.local:\n remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)\n rerank_user_task.delay(instance.user_object.id, update_only=False)\n\n\n@receiver(signals.post_save, sender=models.UserBlocks)\n# pylint: disable=unused-argument\ndef update_suggestions_on_block(sender, instance, *args, **kwargs):\n \"\"\"remove blocked users from recs\"\"\"\n if instance.user_subject.local and instance.user_object.discoverable:\n remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)\n if instance.user_object.local and instance.user_subject.discoverable:\n remove_suggestion_task.delay(instance.user_object.id, instance.user_subject.id)\n\n\n@receiver(signals.post_delete, sender=models.UserFollows)\n# pylint: disable=unused-argument\ndef update_suggestions_on_unfollow(sender, instance, **kwargs):\n \"\"\"update rankings, but don't re-suggest because it was probably intentional\"\"\"\n if instance.user_object.discoverable:\n rerank_user_task.delay(instance.user_object.id, update_only=False)\n\n\n# @receiver(signals.post_save, sender=models.ShelfBook)\n# @receiver(signals.post_delete, sender=models.ShelfBook)\n# # pylint: disable=unused-argument\n# def update_rank_on_shelving(sender, instance, *args, **kwargs):\n# \"\"\"when a user shelves or unshelves a book, re-compute their rank\"\"\"\n# # if it's a local user, re-calculate who is rec'ed to them\n# if instance.user.local:\n# rerank_suggestions_task.delay(instance.user.id)\n#\n# # if the user is discoverable, update their rankings\n# if instance.user.discoverable:\n# rerank_user_task.delay(instance.user.id)\n\n\n@receiver(signals.post_save, sender=models.User)\n# pylint: disable=unused-argument, too-many-arguments\ndef update_user(sender, instance, created, update_fields=None, **kwargs):\n \"\"\"an updated user, neat\"\"\"\n # a new user is found, create 
suggestions for them\n if created and instance.local:\n rerank_suggestions_task.delay(instance.id)\n\n # we know what fields were updated and discoverability didn't change\n if not instance.bookwyrm_user or (\n update_fields and not \"discoverable\" in update_fields\n ):\n return\n\n # deleted the user\n if not created and not instance.is_active:\n remove_user_task.delay(instance.id)\n return\n\n # this happens on every save, not just when discoverability changes, annoyingly\n if instance.discoverable:\n rerank_user_task.delay(instance.id, update_only=False)\n elif not created:\n remove_user_task.delay(instance.id)\n\n\n@receiver(signals.post_save, sender=models.FederatedServer)\ndef domain_level_update(sender, instance, created, update_fields=None, **kwargs):\n \"\"\"remove users on a domain block\"\"\"\n if (\n not update_fields\n or \"status\" not in update_fields\n or instance.application_type != \"bookwyrm\"\n ):\n return\n\n if instance.status == \"blocked\":\n bulk_remove_instance_task.delay(instance.id)\n return\n bulk_add_instance_task.delay(instance.id)\n\n\n# ------------------- TASKS\n\n\[email protected](queue=\"low_priority\")\ndef rerank_suggestions_task(user_id):\n \"\"\"do the hard work in celery\"\"\"\n suggested_users.rerank_user_suggestions(user_id)\n\n\[email protected](queue=\"low_priority\")\ndef rerank_user_task(user_id, update_only=False):\n \"\"\"do the hard work in celery\"\"\"\n user = models.User.objects.get(id=user_id)\n suggested_users.rerank_obj(user, update_only=update_only)\n\n\[email protected](queue=\"low_priority\")\ndef remove_user_task(user_id):\n \"\"\"do the hard work in celery\"\"\"\n user = models.User.objects.get(id=user_id)\n suggested_users.remove_object_from_related_stores(user)\n\n\[email protected](queue=\"medium_priority\")\ndef remove_suggestion_task(user_id, suggested_user_id):\n \"\"\"remove a specific user from a specific user's suggestions\"\"\"\n suggested_user = models.User.objects.get(id=suggested_user_id)\n suggested_users.remove_suggestion(user_id, suggested_user)\n\n\[email protected](queue=\"low_priority\")\ndef bulk_remove_instance_task(instance_id):\n \"\"\"remove a bunch of users from recs\"\"\"\n for user in models.User.objects.filter(federated_server__id=instance_id):\n suggested_users.remove_object_from_related_stores(user)\n\n\[email protected](queue=\"low_priority\")\ndef bulk_add_instance_task(instance_id):\n \"\"\"remove a bunch of users from recs\"\"\"\n for user in models.User.objects.filter(federated_server__id=instance_id):\n suggested_users.rerank_obj(user, update_only=False)\n", "path": "bookwyrm/suggested_users.py"}], "after_files": [{"content": "\"\"\" store recommended follows in redis \"\"\"\nimport math\nimport logging\nfrom django.dispatch import receiver\nfrom django.db.models import signals, Count, Q\n\nfrom bookwyrm import models\nfrom bookwyrm.redis_store import RedisStore, r\nfrom bookwyrm.tasks import app\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass SuggestedUsers(RedisStore):\n \"\"\"suggested users for a user\"\"\"\n\n max_length = 30\n\n def get_rank(self, obj):\n \"\"\"get computed rank\"\"\"\n return obj.mutuals # + (1.0 - (1.0 / (obj.shared_books + 1)))\n\n def store_id(self, user): # pylint: disable=no-self-use\n \"\"\"the key used to store this user's recs\"\"\"\n if isinstance(user, int):\n return f\"{user}-suggestions\"\n return f\"{user.id}-suggestions\"\n\n def get_counts_from_rank(self, rank): # pylint: disable=no-self-use\n \"\"\"calculate mutuals count and shared books count 
from rank\"\"\"\n return {\n \"mutuals\": math.floor(rank),\n # \"shared_books\": int(1 / (-1 * (rank % 1 - 1))) - 1,\n }\n\n def get_objects_for_store(self, store):\n \"\"\"a list of potential follows for a user\"\"\"\n user = models.User.objects.get(id=store.split(\"-\")[0])\n\n return get_annotated_users(\n user,\n ~Q(id=user.id),\n ~Q(followers=user),\n ~Q(follower_requests=user),\n bookwyrm_user=True,\n )\n\n def get_stores_for_object(self, obj):\n return [self.store_id(u) for u in self.get_users_for_object(obj)]\n\n def get_users_for_object(self, obj): # pylint: disable=no-self-use\n \"\"\"given a user, who might want to follow them\"\"\"\n return models.User.objects.filter(local=True,).exclude(\n Q(id=obj.id) | Q(followers=obj) | Q(id__in=obj.blocks.all()) | Q(blocks=obj)\n )\n\n def rerank_obj(self, obj, update_only=True):\n \"\"\"update all the instances of this user with new ranks\"\"\"\n pipeline = r.pipeline()\n for store_user in self.get_users_for_object(obj):\n annotated_user = get_annotated_users(\n store_user,\n id=obj.id,\n ).first()\n if not annotated_user:\n continue\n\n pipeline.zadd(\n self.store_id(store_user),\n self.get_value(annotated_user),\n xx=update_only,\n )\n pipeline.execute()\n\n def rerank_user_suggestions(self, user):\n \"\"\"update the ranks of the follows suggested to a user\"\"\"\n self.populate_store(self.store_id(user))\n\n def remove_suggestion(self, user, suggested_user):\n \"\"\"take a user out of someone's suggestions\"\"\"\n self.bulk_remove_objects_from_store([suggested_user], self.store_id(user))\n\n def get_suggestions(self, user):\n \"\"\"get suggestions\"\"\"\n values = self.get_store(self.store_id(user), withscores=True)\n results = []\n # annotate users with mutuals and shared book counts\n for user_id, rank in values:\n counts = self.get_counts_from_rank(rank)\n try:\n user = models.User.objects.get(\n id=user_id, is_active=True, bookwyrm_user=True\n )\n except models.User.DoesNotExist as err:\n # if this happens, the suggestions are janked way up\n logger.exception(err)\n continue\n user.mutuals = counts[\"mutuals\"]\n # user.shared_books = counts[\"shared_books\"]\n results.append(user)\n if len(results) >= 5:\n break\n return results\n\n\ndef get_annotated_users(viewer, *args, **kwargs):\n \"\"\"Users, annotated with things they have in common\"\"\"\n return (\n models.User.objects.filter(discoverable=True, is_active=True, *args, **kwargs)\n .exclude(Q(id__in=viewer.blocks.all()) | Q(blocks=viewer))\n .annotate(\n mutuals=Count(\n \"followers\",\n filter=Q(\n ~Q(id=viewer.id),\n ~Q(id__in=viewer.following.all()),\n followers__in=viewer.following.all(),\n ),\n distinct=True,\n ),\n # shared_books=Count(\n # \"shelfbook\",\n # filter=Q(\n # ~Q(id=viewer.id),\n # shelfbook__book__parent_work__in=[\n # s.book.parent_work for s in viewer.shelfbook_set.all()\n # ],\n # ),\n # distinct=True,\n # ),\n )\n )\n\n\nsuggested_users = SuggestedUsers()\n\n\n@receiver(signals.post_save, sender=models.UserFollows)\n# pylint: disable=unused-argument\ndef update_suggestions_on_follow(sender, instance, created, *args, **kwargs):\n \"\"\"remove a follow from the recs and update the ranks\"\"\"\n if not created or not instance.user_object.discoverable:\n return\n\n if instance.user_subject.local:\n remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)\n rerank_user_task.delay(instance.user_object.id, update_only=False)\n\n\n@receiver(signals.post_save, sender=models.UserFollowRequest)\n# pylint: disable=unused-argument\ndef 
update_suggestions_on_follow_request(sender, instance, created, *args, **kwargs):\n \"\"\"remove a follow from the recs and update the ranks\"\"\"\n if not created or not instance.user_object.discoverable:\n return\n\n if instance.user_subject.local:\n remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)\n\n\n@receiver(signals.post_save, sender=models.UserBlocks)\n# pylint: disable=unused-argument\ndef update_suggestions_on_block(sender, instance, *args, **kwargs):\n \"\"\"remove blocked users from recs\"\"\"\n if instance.user_subject.local and instance.user_object.discoverable:\n remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)\n if instance.user_object.local and instance.user_subject.discoverable:\n remove_suggestion_task.delay(instance.user_object.id, instance.user_subject.id)\n\n\n@receiver(signals.post_delete, sender=models.UserFollows)\n# pylint: disable=unused-argument\ndef update_suggestions_on_unfollow(sender, instance, **kwargs):\n \"\"\"update rankings, but don't re-suggest because it was probably intentional\"\"\"\n if instance.user_object.discoverable:\n rerank_user_task.delay(instance.user_object.id, update_only=False)\n\n\n# @receiver(signals.post_save, sender=models.ShelfBook)\n# @receiver(signals.post_delete, sender=models.ShelfBook)\n# # pylint: disable=unused-argument\n# def update_rank_on_shelving(sender, instance, *args, **kwargs):\n# \"\"\"when a user shelves or unshelves a book, re-compute their rank\"\"\"\n# # if it's a local user, re-calculate who is rec'ed to them\n# if instance.user.local:\n# rerank_suggestions_task.delay(instance.user.id)\n#\n# # if the user is discoverable, update their rankings\n# if instance.user.discoverable:\n# rerank_user_task.delay(instance.user.id)\n\n\n@receiver(signals.post_save, sender=models.User)\n# pylint: disable=unused-argument, too-many-arguments\ndef update_user(sender, instance, created, update_fields=None, **kwargs):\n \"\"\"an updated user, neat\"\"\"\n # a new user is found, create suggestions for them\n if created and instance.local:\n rerank_suggestions_task.delay(instance.id)\n\n # we know what fields were updated and discoverability didn't change\n if not instance.bookwyrm_user or (\n update_fields and not \"discoverable\" in update_fields\n ):\n return\n\n # deleted the user\n if not created and not instance.is_active:\n remove_user_task.delay(instance.id)\n return\n\n # this happens on every save, not just when discoverability changes, annoyingly\n if instance.discoverable:\n rerank_user_task.delay(instance.id, update_only=False)\n elif not created:\n remove_user_task.delay(instance.id)\n\n\n@receiver(signals.post_save, sender=models.FederatedServer)\ndef domain_level_update(sender, instance, created, update_fields=None, **kwargs):\n \"\"\"remove users on a domain block\"\"\"\n if (\n not update_fields\n or \"status\" not in update_fields\n or instance.application_type != \"bookwyrm\"\n ):\n return\n\n if instance.status == \"blocked\":\n bulk_remove_instance_task.delay(instance.id)\n return\n bulk_add_instance_task.delay(instance.id)\n\n\n# ------------------- TASKS\n\n\[email protected](queue=\"low_priority\")\ndef rerank_suggestions_task(user_id):\n \"\"\"do the hard work in celery\"\"\"\n suggested_users.rerank_user_suggestions(user_id)\n\n\[email protected](queue=\"low_priority\")\ndef rerank_user_task(user_id, update_only=False):\n \"\"\"do the hard work in celery\"\"\"\n user = models.User.objects.get(id=user_id)\n suggested_users.rerank_obj(user, 
update_only=update_only)\n\n\[email protected](queue=\"low_priority\")\ndef remove_user_task(user_id):\n \"\"\"do the hard work in celery\"\"\"\n user = models.User.objects.get(id=user_id)\n suggested_users.remove_object_from_related_stores(user)\n\n\[email protected](queue=\"medium_priority\")\ndef remove_suggestion_task(user_id, suggested_user_id):\n \"\"\"remove a specific user from a specific user's suggestions\"\"\"\n suggested_user = models.User.objects.get(id=suggested_user_id)\n suggested_users.remove_suggestion(user_id, suggested_user)\n\n\[email protected](queue=\"low_priority\")\ndef bulk_remove_instance_task(instance_id):\n \"\"\"remove a bunch of users from recs\"\"\"\n for user in models.User.objects.filter(federated_server__id=instance_id):\n suggested_users.remove_object_from_related_stores(user)\n\n\[email protected](queue=\"low_priority\")\ndef bulk_add_instance_task(instance_id):\n \"\"\"remove a bunch of users from recs\"\"\"\n for user in models.User.objects.filter(federated_server__id=instance_id):\n suggested_users.rerank_obj(user, update_only=False)\n", "path": "bookwyrm/suggested_users.py"}]} | 3,103 | 212 |
gh_patches_debug_47653 | rasdani/github-patches | git_diff | DataBiosphere__toil-4528 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
WES ignores host in production
When trying to run `toil server --host 0.0.0.0`, I noticed that it would always only listen on `127.0.0.1` no matter what `--host` is set to, but running with `--debug` didn't have this problem.
```
❯ toil server --host 0.0.0.0
...
[2022-11-11 16:50:46 +0000] [7173] [INFO] Starting gunicorn 20.1.0
[2022-11-11 16:50:46 +0000] [7173] [INFO] Listening at: http://127.0.0.1:8000
...
```
vs
```
❯ toil server --host 0.0.0.0 --debug
...
INFO:werkzeug:WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.
* Running on all addresses (0.0.0.0)
* Running on http://127.0.0.1:8080
...
```
I tracked the problem down to [this line](https://github.com/DataBiosphere/toil/blob/master/src/toil/server/wsgi_app.py#L44). It appears to be overwriting the settings taken from the command line with Gunicorn's defaults before checking to see if anything has been set, which `bind` won't be, as it's been set to `None` in the merge.
Swapping the dictionaries around seems to have fixed it.
```python
for key, value in {**vars(env_args), **self.options}.items():
```
┆Issue is synchronized with this [Jira Story](https://ucsc-cgl.atlassian.net/browse/TOIL-1242)
┆Issue Number: TOIL-1242
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/toil/server/wsgi_app.py`
Content:
```
1 # Copyright (C) 2015-2021 Regents of the University of California
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, Dict, Optional
15
16 from gunicorn.app.base import BaseApplication # type: ignore
17
18
19 class GunicornApplication(BaseApplication): # type: ignore
20 """
21 An entry point to integrate a Gunicorn WSGI server in Python. To start a
22 WSGI application with callable `app`, run the following code:
23
24 WSGIApplication(app, options={
25 ...
26 }).run()
27
28 For more details, see: https://docs.gunicorn.org/en/latest/custom.html
29 """
30 def __init__(self, app: object, options: Optional[Dict[str, Any]] = None):
31 self.options = options or {}
32 self.application = app
33 super().__init__()
34
35 def init(self, *args: Any) -> None:
36 pass
37
38 def load_config(self) -> None:
39 parser = self.cfg.parser()
40 env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())
41
42 # TODO: also read from the Gunicorn config file?
43
44 for key, value in {**self.options, **vars(env_args)}.items():
45 if key in self.cfg.settings and value is not None:
46 self.cfg.set(key.lower(), value)
47
48 def load(self) -> object:
49 return self.application
50
51
52 def run_app(app: object, options: Optional[Dict[str, Any]] = None) -> None:
53 """
54 Run a Gunicorn WSGI server.
55 """
56 GunicornApplication(app, options=options).run()
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/toil/server/wsgi_app.py b/src/toil/server/wsgi_app.py
--- a/src/toil/server/wsgi_app.py
+++ b/src/toil/server/wsgi_app.py
@@ -41,7 +41,7 @@
# TODO: also read from the Gunicorn config file?
- for key, value in {**self.options, **vars(env_args)}.items():
+ for key, value in {**vars(env_args), **self.options}.items():
if key in self.cfg.settings and value is not None:
self.cfg.set(key.lower(), value)
| {"golden_diff": "diff --git a/src/toil/server/wsgi_app.py b/src/toil/server/wsgi_app.py\n--- a/src/toil/server/wsgi_app.py\n+++ b/src/toil/server/wsgi_app.py\n@@ -41,7 +41,7 @@\n \n # TODO: also read from the Gunicorn config file?\n \n- for key, value in {**self.options, **vars(env_args)}.items():\n+ for key, value in {**vars(env_args), **self.options}.items():\n if key in self.cfg.settings and value is not None:\n self.cfg.set(key.lower(), value)\n", "issue": "WES ignores host in production\nWhen trying to run `toil server --host 0.0.0.0`, I noticed that it would always only listen on `127.0.0.1` no matter what `--host` is set to but running with `--debug` didn't have this problem.\n\n```\n\u276f toil server --host 0.0.0.0\n...\n[2022-11-11 16:50:46 +0000] [7173] [INFO] Starting gunicorn 20.1.0\n[2022-11-11 16:50:46 +0000] [7173] [INFO] Listening at: http://127.0.0.1:8000\n...\n```\nvs\n```\n\u276f toil server --host 0.0.0.0 --debug\n...\nINFO:werkzeug:WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.\n * Running on all addresses (0.0.0.0)\n * Running on http://127.0.0.1:8080\n...\n```\n\nI tracked the problem down to [this line](https://github.com/DataBiosphere/toil/blob/master/src/toil/server/wsgi_app.py#L44). It appears to be overwriting the settings taken from the command line with Gunicorn's defaults before checking to see if anything has been set which `bind` won't be as it's been set to `None` in the merge.\n\nSwapping the dictionaries around seems to have fixed it.\n```python\n for key, value in {**vars(env_args), **self.options}.items():\n```\n\n\u2506Issue is synchronized with this [Jira Story](https://ucsc-cgl.atlassian.net/browse/TOIL-1242)\n\u2506Issue Number: TOIL-1242\n\n", "before_files": [{"content": "# Copyright (C) 2015-2021 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional\n\nfrom gunicorn.app.base import BaseApplication # type: ignore\n\n\nclass GunicornApplication(BaseApplication): # type: ignore\n \"\"\"\n An entry point to integrate a Gunicorn WSGI server in Python. 
To start a\n WSGI application with callable `app`, run the following code:\n\n WSGIApplication(app, options={\n ...\n }).run()\n\n For more details, see: https://docs.gunicorn.org/en/latest/custom.html\n \"\"\"\n def __init__(self, app: object, options: Optional[Dict[str, Any]] = None):\n self.options = options or {}\n self.application = app\n super().__init__()\n\n def init(self, *args: Any) -> None:\n pass\n\n def load_config(self) -> None:\n parser = self.cfg.parser()\n env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())\n\n # TODO: also read from the Gunicorn config file?\n\n for key, value in {**self.options, **vars(env_args)}.items():\n if key in self.cfg.settings and value is not None:\n self.cfg.set(key.lower(), value)\n\n def load(self) -> object:\n return self.application\n\n\ndef run_app(app: object, options: Optional[Dict[str, Any]] = None) -> None:\n \"\"\"\n Run a Gunicorn WSGI server.\n \"\"\"\n GunicornApplication(app, options=options).run()\n", "path": "src/toil/server/wsgi_app.py"}], "after_files": [{"content": "# Copyright (C) 2015-2021 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional\n\nfrom gunicorn.app.base import BaseApplication # type: ignore\n\n\nclass GunicornApplication(BaseApplication): # type: ignore\n \"\"\"\n An entry point to integrate a Gunicorn WSGI server in Python. To start a\n WSGI application with callable `app`, run the following code:\n\n WSGIApplication(app, options={\n ...\n }).run()\n\n For more details, see: https://docs.gunicorn.org/en/latest/custom.html\n \"\"\"\n def __init__(self, app: object, options: Optional[Dict[str, Any]] = None):\n self.options = options or {}\n self.application = app\n super().__init__()\n\n def init(self, *args: Any) -> None:\n pass\n\n def load_config(self) -> None:\n parser = self.cfg.parser()\n env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())\n\n # TODO: also read from the Gunicorn config file?\n\n for key, value in {**vars(env_args), **self.options}.items():\n if key in self.cfg.settings and value is not None:\n self.cfg.set(key.lower(), value)\n\n def load(self) -> object:\n return self.application\n\n\ndef run_app(app: object, options: Optional[Dict[str, Any]] = None) -> None:\n \"\"\"\n Run a Gunicorn WSGI server.\n \"\"\"\n GunicornApplication(app, options=options).run()\n", "path": "src/toil/server/wsgi_app.py"}]} | 1,271 | 131 |
gh_patches_debug_40009 | rasdani/github-patches | git_diff | Textualize__textual-1051 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Button.watch_variant does not remove old_variant class
When changing the variant of a Button widget, the new variant gets added to the classes, but the old one is not removed.
Minimal example:
```python
from textual.app import App, ComposeResult
from textual.widgets import Button
class MyApp(App):
def __init__(self):
super().__init__()
self.button = Button()
self.button.variant = "warning"
self.button.variant = "default"
def compose(self) -> ComposeResult:
yield self.button
MyApp().run()
```
This still displays the button in "warning" style as it has both classes.
I believe the problem is the underscore instead of a dash in the watch_variant method.
https://github.com/Textualize/textual/blob/main/src/textual/widgets/_button.py#L218
textual version 0.2.1
terminal: xterm-256color on linux ubuntu 22.04 (Regolith)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/widgets/_button.py`
Content:
```
1 from __future__ import annotations
2
3 import sys
4 from functools import partial
5 from typing import cast
6
7 if sys.version_info >= (3, 8):
8 from typing import Literal
9 else:
10 from typing_extensions import Literal # pragma: no cover
11
12 import rich.repr
13 from rich.console import RenderableType
14 from rich.text import Text, TextType
15
16 from .. import events
17 from ..css._error_tools import friendly_list
18 from ..message import Message
19 from ..reactive import Reactive
20 from ..widgets import Static
21
22 ButtonVariant = Literal["default", "primary", "success", "warning", "error"]
23 _VALID_BUTTON_VARIANTS = {"default", "primary", "success", "warning", "error"}
24
25
26 class InvalidButtonVariant(Exception):
27 pass
28
29
30 class Button(Static, can_focus=True):
31 """A simple clickable button."""
32
33 DEFAULT_CSS = """
34 Button {
35 width: auto;
36 min-width: 16;
37 height: 3;
38 background: $panel;
39 color: $text;
40 border: none;
41 border-top: tall $panel-lighten-2;
42 border-bottom: tall $panel-darken-3;
43 content-align: center middle;
44 text-style: bold;
45 }
46
47 Button.-disabled {
48 opacity: 0.4;
49 text-opacity: 0.7;
50 }
51
52 Button:focus {
53 text-style: bold reverse;
54 }
55
56 Button:hover {
57 border-top: tall $panel-lighten-1;
58 background: $panel-darken-2;
59 color: $text;
60 }
61
62 Button.-active {
63 background: $panel;
64 border-bottom: tall $panel-lighten-2;
65 border-top: tall $panel-darken-2;
66 tint: $background 30%;
67 }
68
69 /* Primary variant */
70 Button.-primary {
71 background: $primary;
72 color: $text;
73 border-top: tall $primary-lighten-3;
74 border-bottom: tall $primary-darken-3;
75
76 }
77
78 Button.-primary:hover {
79 background: $primary-darken-2;
80 color: $text;
81 border-top: tall $primary-lighten-2;
82 }
83
84 Button.-primary.-active {
85 background: $primary;
86 border-bottom: tall $primary-lighten-3;
87 border-top: tall $primary-darken-3;
88 }
89
90
91 /* Success variant */
92 Button.-success {
93 background: $success;
94 color: $text;
95 border-top: tall $success-lighten-2;
96 border-bottom: tall $success-darken-3;
97 }
98
99 Button.-success:hover {
100 background: $success-darken-2;
101 color: $text;
102 }
103
104 Button.-success.-active {
105 background: $success;
106 border-bottom: tall $success-lighten-2;
107 border-top: tall $success-darken-2;
108 }
109
110
111 /* Warning variant */
112 Button.-warning {
113 background: $warning;
114 color: $text;
115 border-top: tall $warning-lighten-2;
116 border-bottom: tall $warning-darken-3;
117 }
118
119 Button.-warning:hover {
120 background: $warning-darken-2;
121 color: $text;
122
123 }
124
125 Button.-warning.-active {
126 background: $warning;
127 border-bottom: tall $warning-lighten-2;
128 border-top: tall $warning-darken-2;
129 }
130
131
132 /* Error variant */
133 Button.-error {
134 background: $error;
135 color: $text;
136 border-top: tall $error-lighten-2;
137 border-bottom: tall $error-darken-3;
138
139 }
140
141 Button.-error:hover {
142 background: $error-darken-1;
143 color: $text;
144
145 }
146
147 Button.-error.-active {
148 background: $error;
149 border-bottom: tall $error-lighten-2;
150 border-top: tall $error-darken-2;
151 }
152
153 """
154
155 ACTIVE_EFFECT_DURATION = 0.3
156 """When buttons are clicked they get the `-active` class for this duration (in seconds)"""
157
158 class Pressed(Message, bubble=True):
159 @property
160 def button(self) -> Button:
161 return cast(Button, self.sender)
162
163 def __init__(
164 self,
165 label: TextType | None = None,
166 disabled: bool = False,
167 variant: ButtonVariant = "default",
168 *,
169 name: str | None = None,
170 id: str | None = None,
171 classes: str | None = None,
172 ):
173 """Create a Button widget.
174
175 Args:
176 label (str): The text that appears within the button.
177 disabled (bool): Whether the button is disabled or not.
178 variant (ButtonVariant): The variant of the button.
179 name: The name of the button.
180 id: The ID of the button in the DOM.
181 classes: The CSS classes of the button.
182 """
183 super().__init__(name=name, id=id, classes=classes)
184
185 if label is None:
186 label = self.css_identifier_styled
187
188 self.label = self.validate_label(label)
189
190 self.disabled = disabled
191 if disabled:
192 self.add_class("-disabled")
193
194 self.variant = variant
195
196 label: Reactive[RenderableType] = Reactive("")
197 variant = Reactive.init("default")
198 disabled = Reactive(False)
199
200 def __rich_repr__(self) -> rich.repr.Result:
201 yield from super().__rich_repr__()
202 yield "variant", self.variant, "default"
203 yield "disabled", self.disabled, False
204
205 def watch_mouse_over(self, value: bool) -> None:
206 """Update from CSS if mouse over state changes."""
207 if self._has_hover_style and not self.disabled:
208 self.app.update_styles(self)
209
210 def validate_variant(self, variant: str) -> str:
211 if variant not in _VALID_BUTTON_VARIANTS:
212 raise InvalidButtonVariant(
213 f"Valid button variants are {friendly_list(_VALID_BUTTON_VARIANTS)}"
214 )
215 return variant
216
217 def watch_variant(self, old_variant: str, variant: str):
218 self.remove_class(f"_{old_variant}")
219 self.add_class(f"-{variant}")
220
221 def watch_disabled(self, disabled: bool) -> None:
222 self.set_class(disabled, "-disabled")
223 self.can_focus = not disabled
224
225 def validate_label(self, label: RenderableType) -> RenderableType:
226 """Parse markup for self.label"""
227 if isinstance(label, str):
228 return Text.from_markup(label)
229 return label
230
231 def render(self) -> RenderableType:
232 label = self.label.copy()
233 label = Text.assemble(" ", label, " ")
234 label.stylize(self.text_style)
235 return label
236
237 async def _on_click(self, event: events.Click) -> None:
238 event.stop()
239 self.press()
240
241 def press(self) -> None:
242 """Respond to a button press."""
243 if self.disabled or not self.display:
244 return
245 # Manage the "active" effect:
246 self._start_active_affect()
247 # ...and let other components know that we've just been clicked:
248 self.emit_no_wait(Button.Pressed(self))
249
250 def _start_active_affect(self) -> None:
251 """Start a small animation to show the button was clicked."""
252 self.add_class("-active")
253 self.set_timer(
254 self.ACTIVE_EFFECT_DURATION, partial(self.remove_class, "-active")
255 )
256
257 async def _on_key(self, event: events.Key) -> None:
258 if event.key == "enter" and not self.disabled:
259 self._start_active_affect()
260 await self.emit(Button.Pressed(self))
261
262 @classmethod
263 def success(
264 cls,
265 label: TextType | None = None,
266 disabled: bool = False,
267 *,
268 name: str | None = None,
269 id: str | None = None,
270 classes: str | None = None,
271 ) -> Button:
272 """Utility constructor for creating a success Button variant.
273
274 Args:
275 label (str): The text that appears within the button.
276 disabled (bool): Whether the button is disabled or not.
277 name: The name of the button.
278 id: The ID of the button in the DOM.
279 classes: The CSS classes of the button.
280
281 Returns:
282 Button: A Button widget of the 'success' variant.
283 """
284 return Button(
285 label=label,
286 disabled=disabled,
287 variant="success",
288 name=name,
289 id=id,
290 classes=classes,
291 )
292
293 @classmethod
294 def warning(
295 cls,
296 label: TextType | None = None,
297 disabled: bool = False,
298 *,
299 name: str | None = None,
300 id: str | None = None,
301 classes: str | None = None,
302 ) -> Button:
303 """Utility constructor for creating a warning Button variant.
304
305 Args:
306 label (str): The text that appears within the button.
307 disabled (bool): Whether the button is disabled or not.
308 name: The name of the button.
309 id: The ID of the button in the DOM.
310 classes: The CSS classes of the button.
311
312 Returns:
313 Button: A Button widget of the 'warning' variant.
314 """
315 return Button(
316 label=label,
317 disabled=disabled,
318 variant="warning",
319 name=name,
320 id=id,
321 classes=classes,
322 )
323
324 @classmethod
325 def error(
326 cls,
327 label: TextType | None = None,
328 disabled: bool = False,
329 *,
330 name: str | None = None,
331 id: str | None = None,
332 classes: str | None = None,
333 ) -> Button:
334 """Utility constructor for creating an error Button variant.
335
336 Args:
337 label (str): The text that appears within the button.
338 disabled (bool): Whether the button is disabled or not.
339 name: The name of the button.
340 id: The ID of the button in the DOM.
341 classes: The CSS classes of the button.
342
343 Returns:
344 Button: A Button widget of the 'error' variant.
345 """
346 return Button(
347 label=label,
348 disabled=disabled,
349 variant="error",
350 name=name,
351 id=id,
352 classes=classes,
353 )
354
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/textual/widgets/_button.py b/src/textual/widgets/_button.py
--- a/src/textual/widgets/_button.py
+++ b/src/textual/widgets/_button.py
@@ -36,12 +36,12 @@
min-width: 16;
height: 3;
background: $panel;
- color: $text;
- border: none;
+ color: $text;
+ border: none;
border-top: tall $panel-lighten-2;
border-bottom: tall $panel-darken-3;
- content-align: center middle;
- text-style: bold;
+ content-align: center middle;
+ text-style: bold;
}
Button.-disabled {
@@ -50,7 +50,7 @@
}
Button:focus {
- text-style: bold reverse;
+ text-style: bold reverse;
}
Button:hover {
@@ -62,7 +62,7 @@
Button.-active {
background: $panel;
border-bottom: tall $panel-lighten-2;
- border-top: tall $panel-darken-2;
+ border-top: tall $panel-darken-2;
tint: $background 30%;
}
@@ -72,7 +72,7 @@
color: $text;
border-top: tall $primary-lighten-3;
border-bottom: tall $primary-darken-3;
-
+
}
Button.-primary:hover {
@@ -107,7 +107,7 @@
border-top: tall $success-darken-2;
}
-
+
/* Warning variant */
Button.-warning {
background: $warning;
@@ -119,7 +119,7 @@
Button.-warning:hover {
background: $warning-darken-2;
color: $text;
-
+
}
Button.-warning.-active {
@@ -127,7 +127,7 @@
border-bottom: tall $warning-lighten-2;
border-top: tall $warning-darken-2;
}
-
+
/* Error variant */
Button.-error {
@@ -135,7 +135,7 @@
color: $text;
border-top: tall $error-lighten-2;
border-bottom: tall $error-darken-3;
-
+
}
Button.-error:hover {
@@ -215,7 +215,7 @@
return variant
def watch_variant(self, old_variant: str, variant: str):
- self.remove_class(f"_{old_variant}")
+ self.remove_class(f"-{old_variant}")
self.add_class(f"-{variant}")
def watch_disabled(self, disabled: bool) -> None:
| {"golden_diff": "diff --git a/src/textual/widgets/_button.py b/src/textual/widgets/_button.py\n--- a/src/textual/widgets/_button.py\n+++ b/src/textual/widgets/_button.py\n@@ -36,12 +36,12 @@\n min-width: 16;\n height: 3;\n background: $panel;\n- color: $text; \n- border: none; \n+ color: $text;\n+ border: none;\n border-top: tall $panel-lighten-2;\n border-bottom: tall $panel-darken-3;\n- content-align: center middle; \n- text-style: bold; \n+ content-align: center middle;\n+ text-style: bold;\n }\n \n Button.-disabled {\n@@ -50,7 +50,7 @@\n }\n \n Button:focus {\n- text-style: bold reverse; \n+ text-style: bold reverse;\n }\n \n Button:hover {\n@@ -62,7 +62,7 @@\n Button.-active {\n background: $panel;\n border-bottom: tall $panel-lighten-2;\n- border-top: tall $panel-darken-2; \n+ border-top: tall $panel-darken-2;\n tint: $background 30%;\n }\n \n@@ -72,7 +72,7 @@\n color: $text;\n border-top: tall $primary-lighten-3;\n border-bottom: tall $primary-darken-3;\n- \n+\n }\n \n Button.-primary:hover {\n@@ -107,7 +107,7 @@\n border-top: tall $success-darken-2;\n }\n \n- \n+\n /* Warning variant */\n Button.-warning {\n background: $warning;\n@@ -119,7 +119,7 @@\n Button.-warning:hover {\n background: $warning-darken-2;\n color: $text;\n- \n+\n }\n \n Button.-warning.-active {\n@@ -127,7 +127,7 @@\n border-bottom: tall $warning-lighten-2;\n border-top: tall $warning-darken-2;\n }\n- \n+\n \n /* Error variant */\n Button.-error {\n@@ -135,7 +135,7 @@\n color: $text;\n border-top: tall $error-lighten-2;\n border-bottom: tall $error-darken-3;\n- \n+\n }\n \n Button.-error:hover {\n@@ -215,7 +215,7 @@\n return variant\n \n def watch_variant(self, old_variant: str, variant: str):\n- self.remove_class(f\"_{old_variant}\")\n+ self.remove_class(f\"-{old_variant}\")\n self.add_class(f\"-{variant}\")\n \n def watch_disabled(self, disabled: bool) -> None:\n", "issue": "Button.watch_variant does not remove old_variant class\nWhen changing the variant of a Button widget, the new variant gets added to the classes, but the old one is not removed.\r\nMinimal example:\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.widgets import Button\r\n\r\n\r\nclass MyApp(App):\r\n\tdef __init__(self):\r\n\t\tsuper().__init__()\r\n\t\tself.button = Button()\r\n\t\tself.button.variant = \"warning\"\r\n\t\tself.button.variant = \"default\"\r\n\r\n\tdef compose(self) -> ComposeResult:\r\n\t\tyield self.button\r\n\r\nMyApp().run()\r\n```\r\nThis still displays the button in \"warning\" style as it has both classes.\r\nI believe the problem is the underscore instead of a dash in the watch_variant method.\r\nhttps://github.com/Textualize/textual/blob/main/src/textual/widgets/_button.py#L218\r\n\r\ntextual version 0.2.1\r\nterminal: xterm-256color on linux ubuntu 22.04 (Regolith)\n", "before_files": [{"content": "from __future__ import annotations\n\nimport sys\nfrom functools import partial\nfrom typing import cast\n\nif sys.version_info >= (3, 8):\n from typing import Literal\nelse:\n from typing_extensions import Literal # pragma: no cover\n\nimport rich.repr\nfrom rich.console import RenderableType\nfrom rich.text import Text, TextType\n\nfrom .. 
import events\nfrom ..css._error_tools import friendly_list\nfrom ..message import Message\nfrom ..reactive import Reactive\nfrom ..widgets import Static\n\nButtonVariant = Literal[\"default\", \"primary\", \"success\", \"warning\", \"error\"]\n_VALID_BUTTON_VARIANTS = {\"default\", \"primary\", \"success\", \"warning\", \"error\"}\n\n\nclass InvalidButtonVariant(Exception):\n pass\n\n\nclass Button(Static, can_focus=True):\n \"\"\"A simple clickable button.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Button {\n width: auto;\n min-width: 16;\n height: 3;\n background: $panel;\n color: $text; \n border: none; \n border-top: tall $panel-lighten-2;\n border-bottom: tall $panel-darken-3;\n content-align: center middle; \n text-style: bold; \n }\n\n Button.-disabled {\n opacity: 0.4;\n text-opacity: 0.7;\n }\n\n Button:focus {\n text-style: bold reverse; \n }\n\n Button:hover {\n border-top: tall $panel-lighten-1;\n background: $panel-darken-2;\n color: $text;\n }\n\n Button.-active {\n background: $panel;\n border-bottom: tall $panel-lighten-2;\n border-top: tall $panel-darken-2; \n tint: $background 30%;\n }\n\n /* Primary variant */\n Button.-primary {\n background: $primary;\n color: $text;\n border-top: tall $primary-lighten-3;\n border-bottom: tall $primary-darken-3;\n \n }\n\n Button.-primary:hover {\n background: $primary-darken-2;\n color: $text;\n border-top: tall $primary-lighten-2;\n }\n\n Button.-primary.-active {\n background: $primary;\n border-bottom: tall $primary-lighten-3;\n border-top: tall $primary-darken-3;\n }\n\n\n /* Success variant */\n Button.-success {\n background: $success;\n color: $text;\n border-top: tall $success-lighten-2;\n border-bottom: tall $success-darken-3;\n }\n\n Button.-success:hover {\n background: $success-darken-2;\n color: $text;\n }\n\n Button.-success.-active {\n background: $success;\n border-bottom: tall $success-lighten-2;\n border-top: tall $success-darken-2;\n }\n\n \n /* Warning variant */\n Button.-warning {\n background: $warning;\n color: $text;\n border-top: tall $warning-lighten-2;\n border-bottom: tall $warning-darken-3;\n }\n\n Button.-warning:hover {\n background: $warning-darken-2;\n color: $text;\n \n }\n\n Button.-warning.-active {\n background: $warning;\n border-bottom: tall $warning-lighten-2;\n border-top: tall $warning-darken-2;\n }\n \n\n /* Error variant */\n Button.-error {\n background: $error;\n color: $text;\n border-top: tall $error-lighten-2;\n border-bottom: tall $error-darken-3;\n \n }\n\n Button.-error:hover {\n background: $error-darken-1;\n color: $text;\n\n }\n\n Button.-error.-active {\n background: $error;\n border-bottom: tall $error-lighten-2;\n border-top: tall $error-darken-2;\n }\n\n \"\"\"\n\n ACTIVE_EFFECT_DURATION = 0.3\n \"\"\"When buttons are clicked they get the `-active` class for this duration (in seconds)\"\"\"\n\n class Pressed(Message, bubble=True):\n @property\n def button(self) -> Button:\n return cast(Button, self.sender)\n\n def __init__(\n self,\n label: TextType | None = None,\n disabled: bool = False,\n variant: ButtonVariant = \"default\",\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ):\n \"\"\"Create a Button widget.\n\n Args:\n label (str): The text that appears within the button.\n disabled (bool): Whether the button is disabled or not.\n variant (ButtonVariant): The variant of the button.\n name: The name of the button.\n id: The ID of the button in the DOM.\n classes: The CSS classes of the button.\n \"\"\"\n super().__init__(name=name, id=id, 
classes=classes)\n\n if label is None:\n label = self.css_identifier_styled\n\n self.label = self.validate_label(label)\n\n self.disabled = disabled\n if disabled:\n self.add_class(\"-disabled\")\n\n self.variant = variant\n\n label: Reactive[RenderableType] = Reactive(\"\")\n variant = Reactive.init(\"default\")\n disabled = Reactive(False)\n\n def __rich_repr__(self) -> rich.repr.Result:\n yield from super().__rich_repr__()\n yield \"variant\", self.variant, \"default\"\n yield \"disabled\", self.disabled, False\n\n def watch_mouse_over(self, value: bool) -> None:\n \"\"\"Update from CSS if mouse over state changes.\"\"\"\n if self._has_hover_style and not self.disabled:\n self.app.update_styles(self)\n\n def validate_variant(self, variant: str) -> str:\n if variant not in _VALID_BUTTON_VARIANTS:\n raise InvalidButtonVariant(\n f\"Valid button variants are {friendly_list(_VALID_BUTTON_VARIANTS)}\"\n )\n return variant\n\n def watch_variant(self, old_variant: str, variant: str):\n self.remove_class(f\"_{old_variant}\")\n self.add_class(f\"-{variant}\")\n\n def watch_disabled(self, disabled: bool) -> None:\n self.set_class(disabled, \"-disabled\")\n self.can_focus = not disabled\n\n def validate_label(self, label: RenderableType) -> RenderableType:\n \"\"\"Parse markup for self.label\"\"\"\n if isinstance(label, str):\n return Text.from_markup(label)\n return label\n\n def render(self) -> RenderableType:\n label = self.label.copy()\n label = Text.assemble(\" \", label, \" \")\n label.stylize(self.text_style)\n return label\n\n async def _on_click(self, event: events.Click) -> None:\n event.stop()\n self.press()\n\n def press(self) -> None:\n \"\"\"Respond to a button press.\"\"\"\n if self.disabled or not self.display:\n return\n # Manage the \"active\" effect:\n self._start_active_affect()\n # ...and let other components know that we've just been clicked:\n self.emit_no_wait(Button.Pressed(self))\n\n def _start_active_affect(self) -> None:\n \"\"\"Start a small animation to show the button was clicked.\"\"\"\n self.add_class(\"-active\")\n self.set_timer(\n self.ACTIVE_EFFECT_DURATION, partial(self.remove_class, \"-active\")\n )\n\n async def _on_key(self, event: events.Key) -> None:\n if event.key == \"enter\" and not self.disabled:\n self._start_active_affect()\n await self.emit(Button.Pressed(self))\n\n @classmethod\n def success(\n cls,\n label: TextType | None = None,\n disabled: bool = False,\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ) -> Button:\n \"\"\"Utility constructor for creating a success Button variant.\n\n Args:\n label (str): The text that appears within the button.\n disabled (bool): Whether the button is disabled or not.\n name: The name of the button.\n id: The ID of the button in the DOM.\n classes: The CSS classes of the button.\n\n Returns:\n Button: A Button widget of the 'success' variant.\n \"\"\"\n return Button(\n label=label,\n disabled=disabled,\n variant=\"success\",\n name=name,\n id=id,\n classes=classes,\n )\n\n @classmethod\n def warning(\n cls,\n label: TextType | None = None,\n disabled: bool = False,\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ) -> Button:\n \"\"\"Utility constructor for creating a warning Button variant.\n\n Args:\n label (str): The text that appears within the button.\n disabled (bool): Whether the button is disabled or not.\n name: The name of the button.\n id: The ID of the button in the DOM.\n classes: The CSS classes of the button.\n\n 
Returns:\n Button: A Button widget of the 'warning' variant.\n \"\"\"\n return Button(\n label=label,\n disabled=disabled,\n variant=\"warning\",\n name=name,\n id=id,\n classes=classes,\n )\n\n @classmethod\n def error(\n cls,\n label: TextType | None = None,\n disabled: bool = False,\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ) -> Button:\n \"\"\"Utility constructor for creating an error Button variant.\n\n Args:\n label (str): The text that appears within the button.\n disabled (bool): Whether the button is disabled or not.\n name: The name of the button.\n id: The ID of the button in the DOM.\n classes: The CSS classes of the button.\n\n Returns:\n Button: A Button widget of the 'error' variant.\n \"\"\"\n return Button(\n label=label,\n disabled=disabled,\n variant=\"error\",\n name=name,\n id=id,\n classes=classes,\n )\n", "path": "src/textual/widgets/_button.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport sys\nfrom functools import partial\nfrom typing import cast\n\nif sys.version_info >= (3, 8):\n from typing import Literal\nelse:\n from typing_extensions import Literal # pragma: no cover\n\nimport rich.repr\nfrom rich.console import RenderableType\nfrom rich.text import Text, TextType\n\nfrom .. import events\nfrom ..css._error_tools import friendly_list\nfrom ..message import Message\nfrom ..reactive import Reactive\nfrom ..widgets import Static\n\nButtonVariant = Literal[\"default\", \"primary\", \"success\", \"warning\", \"error\"]\n_VALID_BUTTON_VARIANTS = {\"default\", \"primary\", \"success\", \"warning\", \"error\"}\n\n\nclass InvalidButtonVariant(Exception):\n pass\n\n\nclass Button(Static, can_focus=True):\n \"\"\"A simple clickable button.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Button {\n width: auto;\n min-width: 16;\n height: 3;\n background: $panel;\n color: $text;\n border: none;\n border-top: tall $panel-lighten-2;\n border-bottom: tall $panel-darken-3;\n content-align: center middle;\n text-style: bold;\n }\n\n Button.-disabled {\n opacity: 0.4;\n text-opacity: 0.7;\n }\n\n Button:focus {\n text-style: bold reverse;\n }\n\n Button:hover {\n border-top: tall $panel-lighten-1;\n background: $panel-darken-2;\n color: $text;\n }\n\n Button.-active {\n background: $panel;\n border-bottom: tall $panel-lighten-2;\n border-top: tall $panel-darken-2;\n tint: $background 30%;\n }\n\n /* Primary variant */\n Button.-primary {\n background: $primary;\n color: $text;\n border-top: tall $primary-lighten-3;\n border-bottom: tall $primary-darken-3;\n\n }\n\n Button.-primary:hover {\n background: $primary-darken-2;\n color: $text;\n border-top: tall $primary-lighten-2;\n }\n\n Button.-primary.-active {\n background: $primary;\n border-bottom: tall $primary-lighten-3;\n border-top: tall $primary-darken-3;\n }\n\n\n /* Success variant */\n Button.-success {\n background: $success;\n color: $text;\n border-top: tall $success-lighten-2;\n border-bottom: tall $success-darken-3;\n }\n\n Button.-success:hover {\n background: $success-darken-2;\n color: $text;\n }\n\n Button.-success.-active {\n background: $success;\n border-bottom: tall $success-lighten-2;\n border-top: tall $success-darken-2;\n }\n\n\n /* Warning variant */\n Button.-warning {\n background: $warning;\n color: $text;\n border-top: tall $warning-lighten-2;\n border-bottom: tall $warning-darken-3;\n }\n\n Button.-warning:hover {\n background: $warning-darken-2;\n color: $text;\n\n }\n\n Button.-warning.-active {\n background: $warning;\n 
border-bottom: tall $warning-lighten-2;\n border-top: tall $warning-darken-2;\n }\n\n\n /* Error variant */\n Button.-error {\n background: $error;\n color: $text;\n border-top: tall $error-lighten-2;\n border-bottom: tall $error-darken-3;\n\n }\n\n Button.-error:hover {\n background: $error-darken-1;\n color: $text;\n\n }\n\n Button.-error.-active {\n background: $error;\n border-bottom: tall $error-lighten-2;\n border-top: tall $error-darken-2;\n }\n\n \"\"\"\n\n ACTIVE_EFFECT_DURATION = 0.3\n \"\"\"When buttons are clicked they get the `-active` class for this duration (in seconds)\"\"\"\n\n class Pressed(Message, bubble=True):\n @property\n def button(self) -> Button:\n return cast(Button, self.sender)\n\n def __init__(\n self,\n label: TextType | None = None,\n disabled: bool = False,\n variant: ButtonVariant = \"default\",\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ):\n \"\"\"Create a Button widget.\n\n Args:\n label (str): The text that appears within the button.\n disabled (bool): Whether the button is disabled or not.\n variant (ButtonVariant): The variant of the button.\n name: The name of the button.\n id: The ID of the button in the DOM.\n classes: The CSS classes of the button.\n \"\"\"\n super().__init__(name=name, id=id, classes=classes)\n\n if label is None:\n label = self.css_identifier_styled\n\n self.label = self.validate_label(label)\n\n self.disabled = disabled\n if disabled:\n self.add_class(\"-disabled\")\n\n self.variant = variant\n\n label: Reactive[RenderableType] = Reactive(\"\")\n variant = Reactive.init(\"default\")\n disabled = Reactive(False)\n\n def __rich_repr__(self) -> rich.repr.Result:\n yield from super().__rich_repr__()\n yield \"variant\", self.variant, \"default\"\n yield \"disabled\", self.disabled, False\n\n def watch_mouse_over(self, value: bool) -> None:\n \"\"\"Update from CSS if mouse over state changes.\"\"\"\n if self._has_hover_style and not self.disabled:\n self.app.update_styles(self)\n\n def validate_variant(self, variant: str) -> str:\n if variant not in _VALID_BUTTON_VARIANTS:\n raise InvalidButtonVariant(\n f\"Valid button variants are {friendly_list(_VALID_BUTTON_VARIANTS)}\"\n )\n return variant\n\n def watch_variant(self, old_variant: str, variant: str):\n self.remove_class(f\"-{old_variant}\")\n self.add_class(f\"-{variant}\")\n\n def watch_disabled(self, disabled: bool) -> None:\n self.set_class(disabled, \"-disabled\")\n self.can_focus = not disabled\n\n def validate_label(self, label: RenderableType) -> RenderableType:\n \"\"\"Parse markup for self.label\"\"\"\n if isinstance(label, str):\n return Text.from_markup(label)\n return label\n\n def render(self) -> RenderableType:\n label = self.label.copy()\n label = Text.assemble(\" \", label, \" \")\n label.stylize(self.text_style)\n return label\n\n async def _on_click(self, event: events.Click) -> None:\n event.stop()\n self.press()\n\n def press(self) -> None:\n \"\"\"Respond to a button press.\"\"\"\n if self.disabled or not self.display:\n return\n # Manage the \"active\" effect:\n self._start_active_affect()\n # ...and let other components know that we've just been clicked:\n self.emit_no_wait(Button.Pressed(self))\n\n def _start_active_affect(self) -> None:\n \"\"\"Start a small animation to show the button was clicked.\"\"\"\n self.add_class(\"-active\")\n self.set_timer(\n self.ACTIVE_EFFECT_DURATION, partial(self.remove_class, \"-active\")\n )\n\n async def _on_key(self, event: events.Key) -> None:\n if event.key == \"enter\" 
and not self.disabled:\n self._start_active_affect()\n await self.emit(Button.Pressed(self))\n\n @classmethod\n def success(\n cls,\n label: TextType | None = None,\n disabled: bool = False,\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ) -> Button:\n \"\"\"Utility constructor for creating a success Button variant.\n\n Args:\n label (str): The text that appears within the button.\n disabled (bool): Whether the button is disabled or not.\n name: The name of the button.\n id: The ID of the button in the DOM.\n classes: The CSS classes of the button.\n\n Returns:\n Button: A Button widget of the 'success' variant.\n \"\"\"\n return Button(\n label=label,\n disabled=disabled,\n variant=\"success\",\n name=name,\n id=id,\n classes=classes,\n )\n\n @classmethod\n def warning(\n cls,\n label: TextType | None = None,\n disabled: bool = False,\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ) -> Button:\n \"\"\"Utility constructor for creating a warning Button variant.\n\n Args:\n label (str): The text that appears within the button.\n disabled (bool): Whether the button is disabled or not.\n name: The name of the button.\n id: The ID of the button in the DOM.\n classes: The CSS classes of the button.\n\n Returns:\n Button: A Button widget of the 'warning' variant.\n \"\"\"\n return Button(\n label=label,\n disabled=disabled,\n variant=\"warning\",\n name=name,\n id=id,\n classes=classes,\n )\n\n @classmethod\n def error(\n cls,\n label: TextType | None = None,\n disabled: bool = False,\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ) -> Button:\n \"\"\"Utility constructor for creating an error Button variant.\n\n Args:\n label (str): The text that appears within the button.\n disabled (bool): Whether the button is disabled or not.\n name: The name of the button.\n id: The ID of the button in the DOM.\n classes: The CSS classes of the button.\n\n Returns:\n Button: A Button widget of the 'error' variant.\n \"\"\"\n return Button(\n label=label,\n disabled=disabled,\n variant=\"error\",\n name=name,\n id=id,\n classes=classes,\n )\n", "path": "src/textual/widgets/_button.py"}]} | 3,654 | 622 |
gh_patches_debug_23573 | rasdani/github-patches | git_diff | translate__pootle-6498 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken PO files are partially loaded without error
I mistakenly made a PO file with duplicate units. The files are mostly like:
N/M.po where N=1 to 3 and M=1 to 20
A bug in my script meant that my files had more than 100 units and they were duplicated.
1. The files loaded, but when checking, only the first 100 units are actually loaded
2. No error was reported and update_stores did not abort or report any issue
Expected:
1. A PO file with duplicate units should throw an error
2. update_stores should report that error and abort the update, or at least the update of that specific store.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_store/diff.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import difflib
10 from collections import OrderedDict
11
12 from django.db import models
13 from django.utils.functional import cached_property
14
15 from pootle.core.delegate import format_diffs
16
17 from .constants import FUZZY, OBSOLETE, TRANSLATED, UNTRANSLATED
18 from .fields import to_python as multistring_to_python
19 from .unit import UnitProxy
20
21
22 class UnitDiffProxy(UnitProxy):
23 """Wraps File/DB Unit dicts used by StoreDiff for equality comparison"""
24
25 match_attrs = ["context", "developer_comment", "locations",
26 "source", "state", "target", "translator_comment"]
27
28 def __eq__(self, other):
29 return all(getattr(self, k) == getattr(other, k)
30 for k in self.match_attrs)
31
32 def __ne__(self, other):
33 return not self == other
34
35 def hasplural(self):
36 return (
37 self.source is not None
38 and (len(self.source.strings) > 1
39 or getattr(self.source, "plural", None)))
40
41 def getnotes(self, origin=None):
42 return self.unit.get("%s_comment" % origin, "")
43
44 def getcontext(self):
45 return self.unit["context"]
46
47 def isfuzzy(self):
48 return self.unit["state"] == FUZZY
49
50 def isobsolete(self):
51 return self.unit["state"] == OBSOLETE
52
53 def getid(self):
54 return self.unit["unitid"]
55
56
57 class DBUnit(UnitDiffProxy):
58 pass
59
60
61 class FileUnit(UnitDiffProxy):
62
63 @property
64 def locations(self):
65 return "\n".join(self.unit["locations"])
66
67 @property
68 def source(self):
69 return multistring_to_python(self.unit["source"])
70
71 @property
72 def target(self):
73 return multistring_to_python(self.unit["target"])
74
75 def hasplural(self):
76 return self.unit["hasplural"]
77
78
79 class DiffableStore(object):
80 """Default Store representation for diffing
81
82 this can be customized per-format using `format_diffs` provider
83 """
84
85 file_unit_class = FileUnit
86 db_unit_class = DBUnit
87
88 unit_fields = (
89 "unitid", "state", "id", "index", "revision",
90 "source_f", "target_f", "developer_comment",
91 "translator_comment", "locations", "context")
92
93 def __init__(self, target_store, source_store):
94 self.target_store = target_store
95 self.source_store = source_store
96
97 def get_db_units(self, unit_qs):
98 diff_units = OrderedDict()
99 units = unit_qs.values(*self.unit_fields).order_by("index")
100 for unit in units:
101 diff_units[unit["unitid"]] = unit
102 return diff_units
103
104 def get_file_unit(self, unit):
105 state = UNTRANSLATED
106 if unit.isobsolete():
107 state = OBSOLETE
108 elif unit.istranslated():
109 state = TRANSLATED
110 elif unit.isfuzzy():
111 state = FUZZY
112 return {
113 "unitid": unit.getid(),
114 "context": unit.getcontext(),
115 "locations": unit.getlocations(),
116 "source": unit.source,
117 "target": unit.target,
118 "state": state,
119 "hasplural": unit.hasplural(),
120 "developer_comment": unit.getnotes(origin="developer"),
121 "translator_comment": unit.getnotes(origin="translator")}
122
123 def get_file_units(self, units):
124 diff_units = OrderedDict()
125 for unit in units:
126 if unit.isheader():
127 continue
128 diff_units[unit.getid()] = self.get_file_unit(unit)
129 return diff_units
130
131 @cached_property
132 def target_units(self):
133 return self.get_db_units(self.target_store.unit_set)
134
135 @cached_property
136 def source_units(self):
137 if isinstance(self.source_store, models.Model):
138 return self.get_db_units(self.source_store.unit_set.live())
139 return self.get_file_units(self.source_store.units)
140
141 @property
142 def target_unit_class(self):
143 return self.db_unit_class
144
145 @property
146 def source_unit_class(self):
147 if isinstance(self.source_store, models.Model):
148 return self.db_unit_class
149 return self.file_unit_class
150
151
152 class StoreDiff(object):
153 """Compares 2 DBStores"""
154
155 def __init__(self, target_store, source_store, source_revision):
156 self.target_store = target_store
157 self.source_store = source_store
158 self.source_revision = source_revision
159 self.target_revision = self.get_target_revision()
160
161 @property
162 def diff_class(self):
163 diffs = format_diffs.gather()
164 differ = diffs.get(
165 self.target_store.filetype.name)
166 if differ:
167 return differ
168 return diffs["default"]
169
170 def get_target_revision(self):
171 return self.target_store.data.max_unit_revision or 0
172
173 @cached_property
174 def active_target_units(self):
175 return [unitid for unitid, unit in self.target_units.items()
176 if unit['state'] != OBSOLETE]
177
178 @cached_property
179 def diffable(self):
180 return self.diff_class(self.target_store, self.source_store)
181
182 @cached_property
183 def target_units(self):
184 """All of the db units regardless of state or revision"""
185 return self.diffable.target_units
186
187 @cached_property
188 def source_units(self):
189 """All of the db units regardless of state or revision"""
190 return self.diffable.source_units
191
192 @cached_property
193 def insert_points(self):
194 """Returns a list of insert points with update index info.
195 :return: a list of tuples
196 ``(insert_at, uids_to_add, next_index, update_index_delta)`` where
197 ``insert_at`` is the point for inserting
198 ``uids_to_add`` are the units to be inserted
199 ``update_index_delta`` is the offset for index updating
200 ``next_index`` is the starting point after which
201 ``update_index_delta`` should be applied.
202 """
203 inserts = []
204 new_unitid_list = self.new_unit_list
205 for (tag, i1, i2, j1, j2) in self.opcodes:
206 if tag == 'insert':
207 update_index_delta = 0
208 insert_at = 0
209 if i1 > 0:
210 insert_at = (
211 self.target_units[
212 self.active_target_units[i1 - 1]]['index'])
213 next_index = insert_at + 1
214 if i1 < len(self.active_target_units):
215 next_index = self.target_units[
216 self.active_target_units[i1]]["index"]
217 update_index_delta = (
218 j2 - j1 - next_index + insert_at + 1)
219
220 inserts.append((insert_at,
221 new_unitid_list[j1:j2],
222 next_index,
223 update_index_delta))
224
225 elif tag == 'replace':
226 insert_at = self.target_units[
227 self.active_target_units[max(i1 - 1, 0)]]['index']
228 next_index = self.target_units[
229 self.active_target_units[i2 - 1]]['index']
230 inserts.append((insert_at,
231 new_unitid_list[j1:j2],
232 next_index,
233 j2 - j1 - insert_at + next_index))
234 return inserts
235
236 @cached_property
237 def new_unit_list(self):
238 # If source_revision is gte than the target_revision then new unit list
239 # will be exactly what is in the file
240 if self.source_revision >= self.target_revision:
241 return self.source_units.keys()
242
243 # These units are kept as they have been updated since source_revision
244 # but do not appear in the file
245 new_units = [u for u in self.updated_target_units
246 if u not in self.source_units]
247
248 # These unit are either present in both or only in the file so are
249 # kept in the file order
250 new_units += [u for u in self.source_units.keys()
251 if u not in self.obsoleted_target_units]
252
253 return new_units
254
255 @cached_property
256 def obsoleted_target_units(self):
257 return [unitid for unitid, unit in self.target_units.items()
258 if (unit['state'] == OBSOLETE
259 and unit["revision"] > self.source_revision)]
260
261 @cached_property
262 def opcodes(self):
263 sm = difflib.SequenceMatcher(None,
264 self.active_target_units,
265 self.new_unit_list)
266 return sm.get_opcodes()
267
268 @cached_property
269 def updated_target_units(self):
270 return [unitid for unitid, unit in self.target_units.items()
271 if (unit['revision'] > self.source_revision
272 and unit["state"] != OBSOLETE)]
273
274 def diff(self):
275 """Return a dictionary of change actions or None if there are no
276 changes to be made.
277 """
278 diff = {"index": self.get_indexes_to_update(),
279 "obsolete": self.get_units_to_obsolete(),
280 "add": self.get_units_to_add(),
281 "update": self.get_units_to_update()}
282 if self.has_changes(diff):
283 return diff
284 return None
285
286 def get_indexes_to_update(self):
287 offset = 0
288 index_updates = []
289 for (insert_at_, uids_add_, next_index, delta) in self.insert_points:
290 if delta > 0:
291 index_updates += [(next_index + offset, delta)]
292 offset += delta
293 return index_updates
294
295 def get_units_to_add(self):
296 offset = 0
297 to_add = []
298 proxy = (
299 isinstance(self.source_store, models.Model)
300 and DBUnit or FileUnit)
301
302 for (insert_at, uids_add, next_index_, delta) in self.insert_points:
303 for index, uid in enumerate(uids_add):
304 source_unit = self.source_units.get(uid)
305 if source_unit and uid not in self.target_units:
306 new_unit_index = insert_at + index + 1 + offset
307 to_add += [(proxy(source_unit), new_unit_index)]
308 if delta > 0:
309 offset += delta
310 return to_add
311
312 def get_units_to_obsolete(self):
313 return [unit['id'] for unitid, unit in self.target_units.items()
314 if ((unitid not in self.source_units
315 or self.source_units[unitid]['state'] == OBSOLETE)
316 and unitid in self.active_target_units
317 and unitid not in self.updated_target_units)]
318
319 def get_units_to_update(self):
320 uid_index_map = {}
321 offset = 0
322
323 for (insert_at, uids_add, next_index_, delta) in self.insert_points:
324 for index, uid in enumerate(uids_add):
325 new_unit_index = insert_at + index + 1 + offset
326 if uid in self.target_units:
327 uid_index_map[uid] = {
328 'dbid': self.target_units[uid]['id'],
329 'index': new_unit_index}
330 if delta > 0:
331 offset += delta
332 update_ids = self.get_updated_sourceids()
333 update_ids.update({x['dbid'] for x in uid_index_map.values()})
334 return (update_ids, uid_index_map)
335
336 def get_updated_sourceids(self):
337 """Returns a set of unit DB ids to be updated.
338 """
339 update_ids = set()
340
341 for (tag, i1, i2, j1_, j2_) in self.opcodes:
342 if tag != 'equal':
343 continue
344 update_ids.update(
345 set(self.target_units[uid]['id']
346 for uid in self.active_target_units[i1:i2]
347 if (uid in self.source_units
348 and (
349 self.diffable.target_unit_class(
350 self.target_units[uid])
351 != self.diffable.source_unit_class(
352 self.source_units[uid])))))
353 return update_ids
354
355 def has_changes(self, diff):
356 for k, v in diff.items():
357 if k == "update":
358 if len(v[0]) > 0:
359 return True
360 else:
361 if len(v) > 0:
362 return True
363 return False
364
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_store/diff.py b/pootle/apps/pootle_store/diff.py
--- a/pootle/apps/pootle_store/diff.py
+++ b/pootle/apps/pootle_store/diff.py
@@ -7,6 +7,7 @@
# AUTHORS file for copyright and authorship information.
import difflib
+import logging
from collections import OrderedDict
from django.db import models
@@ -19,6 +20,9 @@
from .unit import UnitProxy
+logger = logging.getLogger(__name__)
+
+
class UnitDiffProxy(UnitProxy):
"""Wraps File/DB Unit dicts used by StoreDiff for equality comparison"""
@@ -125,6 +129,14 @@
for unit in units:
if unit.isheader():
continue
+ if unit.getid() in diff_units:
+ unitid = unit.getid()
+ logger.warning(
+ "[diff] Duplicate unit found: %s %s",
+ self.target_store.name,
+ (unitid
+ if len(unitid) <= 20
+ else "%s..." % unitid[:17]))
diff_units[unit.getid()] = self.get_file_unit(unit)
return diff_units
| {"golden_diff": "diff --git a/pootle/apps/pootle_store/diff.py b/pootle/apps/pootle_store/diff.py\n--- a/pootle/apps/pootle_store/diff.py\n+++ b/pootle/apps/pootle_store/diff.py\n@@ -7,6 +7,7 @@\n # AUTHORS file for copyright and authorship information.\n \n import difflib\n+import logging\n from collections import OrderedDict\n \n from django.db import models\n@@ -19,6 +20,9 @@\n from .unit import UnitProxy\n \n \n+logger = logging.getLogger(__name__)\n+\n+\n class UnitDiffProxy(UnitProxy):\n \"\"\"Wraps File/DB Unit dicts used by StoreDiff for equality comparison\"\"\"\n \n@@ -125,6 +129,14 @@\n for unit in units:\n if unit.isheader():\n continue\n+ if unit.getid() in diff_units:\n+ unitid = unit.getid()\n+ logger.warning(\n+ \"[diff] Duplicate unit found: %s %s\",\n+ self.target_store.name,\n+ (unitid\n+ if len(unitid) <= 20\n+ else \"%s...\" % unitid[:17]))\n diff_units[unit.getid()] = self.get_file_unit(unit)\n return diff_units\n", "issue": "Broken PO files are partially loaded without error\nI mistakenly made a PO file with duplicate units. The files are mostly like:\r\n\r\nN/M.po where N=1 to 3 and M=1 to 20\r\n\r\nA bug in my script meant that my files had more than 100 units and they where duplicated.\r\n\r\n1. The files loaded but when checking only the first 100 units are actually loaded\r\n2. No error was reported and update_stores did not abort or report any issue\r\n\r\nExpected:\r\n1. PO fil with duplicate units should throw and error\r\n2. update_stores should report that error and abort update or that specific store.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport difflib\nfrom collections import OrderedDict\n\nfrom django.db import models\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.delegate import format_diffs\n\nfrom .constants import FUZZY, OBSOLETE, TRANSLATED, UNTRANSLATED\nfrom .fields import to_python as multistring_to_python\nfrom .unit import UnitProxy\n\n\nclass UnitDiffProxy(UnitProxy):\n \"\"\"Wraps File/DB Unit dicts used by StoreDiff for equality comparison\"\"\"\n\n match_attrs = [\"context\", \"developer_comment\", \"locations\",\n \"source\", \"state\", \"target\", \"translator_comment\"]\n\n def __eq__(self, other):\n return all(getattr(self, k) == getattr(other, k)\n for k in self.match_attrs)\n\n def __ne__(self, other):\n return not self == other\n\n def hasplural(self):\n return (\n self.source is not None\n and (len(self.source.strings) > 1\n or getattr(self.source, \"plural\", None)))\n\n def getnotes(self, origin=None):\n return self.unit.get(\"%s_comment\" % origin, \"\")\n\n def getcontext(self):\n return self.unit[\"context\"]\n\n def isfuzzy(self):\n return self.unit[\"state\"] == FUZZY\n\n def isobsolete(self):\n return self.unit[\"state\"] == OBSOLETE\n\n def getid(self):\n return self.unit[\"unitid\"]\n\n\nclass DBUnit(UnitDiffProxy):\n pass\n\n\nclass FileUnit(UnitDiffProxy):\n\n @property\n def locations(self):\n return \"\\n\".join(self.unit[\"locations\"])\n\n @property\n def source(self):\n return multistring_to_python(self.unit[\"source\"])\n\n @property\n def target(self):\n return multistring_to_python(self.unit[\"target\"])\n\n def hasplural(self):\n return self.unit[\"hasplural\"]\n\n\nclass DiffableStore(object):\n 
\"\"\"Default Store representation for diffing\n\n this can be customized per-format using `format_diffs` provider\n \"\"\"\n\n file_unit_class = FileUnit\n db_unit_class = DBUnit\n\n unit_fields = (\n \"unitid\", \"state\", \"id\", \"index\", \"revision\",\n \"source_f\", \"target_f\", \"developer_comment\",\n \"translator_comment\", \"locations\", \"context\")\n\n def __init__(self, target_store, source_store):\n self.target_store = target_store\n self.source_store = source_store\n\n def get_db_units(self, unit_qs):\n diff_units = OrderedDict()\n units = unit_qs.values(*self.unit_fields).order_by(\"index\")\n for unit in units:\n diff_units[unit[\"unitid\"]] = unit\n return diff_units\n\n def get_file_unit(self, unit):\n state = UNTRANSLATED\n if unit.isobsolete():\n state = OBSOLETE\n elif unit.istranslated():\n state = TRANSLATED\n elif unit.isfuzzy():\n state = FUZZY\n return {\n \"unitid\": unit.getid(),\n \"context\": unit.getcontext(),\n \"locations\": unit.getlocations(),\n \"source\": unit.source,\n \"target\": unit.target,\n \"state\": state,\n \"hasplural\": unit.hasplural(),\n \"developer_comment\": unit.getnotes(origin=\"developer\"),\n \"translator_comment\": unit.getnotes(origin=\"translator\")}\n\n def get_file_units(self, units):\n diff_units = OrderedDict()\n for unit in units:\n if unit.isheader():\n continue\n diff_units[unit.getid()] = self.get_file_unit(unit)\n return diff_units\n\n @cached_property\n def target_units(self):\n return self.get_db_units(self.target_store.unit_set)\n\n @cached_property\n def source_units(self):\n if isinstance(self.source_store, models.Model):\n return self.get_db_units(self.source_store.unit_set.live())\n return self.get_file_units(self.source_store.units)\n\n @property\n def target_unit_class(self):\n return self.db_unit_class\n\n @property\n def source_unit_class(self):\n if isinstance(self.source_store, models.Model):\n return self.db_unit_class\n return self.file_unit_class\n\n\nclass StoreDiff(object):\n \"\"\"Compares 2 DBStores\"\"\"\n\n def __init__(self, target_store, source_store, source_revision):\n self.target_store = target_store\n self.source_store = source_store\n self.source_revision = source_revision\n self.target_revision = self.get_target_revision()\n\n @property\n def diff_class(self):\n diffs = format_diffs.gather()\n differ = diffs.get(\n self.target_store.filetype.name)\n if differ:\n return differ\n return diffs[\"default\"]\n\n def get_target_revision(self):\n return self.target_store.data.max_unit_revision or 0\n\n @cached_property\n def active_target_units(self):\n return [unitid for unitid, unit in self.target_units.items()\n if unit['state'] != OBSOLETE]\n\n @cached_property\n def diffable(self):\n return self.diff_class(self.target_store, self.source_store)\n\n @cached_property\n def target_units(self):\n \"\"\"All of the db units regardless of state or revision\"\"\"\n return self.diffable.target_units\n\n @cached_property\n def source_units(self):\n \"\"\"All of the db units regardless of state or revision\"\"\"\n return self.diffable.source_units\n\n @cached_property\n def insert_points(self):\n \"\"\"Returns a list of insert points with update index info.\n :return: a list of tuples\n ``(insert_at, uids_to_add, next_index, update_index_delta)`` where\n ``insert_at`` is the point for inserting\n ``uids_to_add`` are the units to be inserted\n ``update_index_delta`` is the offset for index updating\n ``next_index`` is the starting point after which\n ``update_index_delta`` should be applied.\n \"\"\"\n 
inserts = []\n new_unitid_list = self.new_unit_list\n for (tag, i1, i2, j1, j2) in self.opcodes:\n if tag == 'insert':\n update_index_delta = 0\n insert_at = 0\n if i1 > 0:\n insert_at = (\n self.target_units[\n self.active_target_units[i1 - 1]]['index'])\n next_index = insert_at + 1\n if i1 < len(self.active_target_units):\n next_index = self.target_units[\n self.active_target_units[i1]][\"index\"]\n update_index_delta = (\n j2 - j1 - next_index + insert_at + 1)\n\n inserts.append((insert_at,\n new_unitid_list[j1:j2],\n next_index,\n update_index_delta))\n\n elif tag == 'replace':\n insert_at = self.target_units[\n self.active_target_units[max(i1 - 1, 0)]]['index']\n next_index = self.target_units[\n self.active_target_units[i2 - 1]]['index']\n inserts.append((insert_at,\n new_unitid_list[j1:j2],\n next_index,\n j2 - j1 - insert_at + next_index))\n return inserts\n\n @cached_property\n def new_unit_list(self):\n # If source_revision is gte than the target_revision then new unit list\n # will be exactly what is in the file\n if self.source_revision >= self.target_revision:\n return self.source_units.keys()\n\n # These units are kept as they have been updated since source_revision\n # but do not appear in the file\n new_units = [u for u in self.updated_target_units\n if u not in self.source_units]\n\n # These unit are either present in both or only in the file so are\n # kept in the file order\n new_units += [u for u in self.source_units.keys()\n if u not in self.obsoleted_target_units]\n\n return new_units\n\n @cached_property\n def obsoleted_target_units(self):\n return [unitid for unitid, unit in self.target_units.items()\n if (unit['state'] == OBSOLETE\n and unit[\"revision\"] > self.source_revision)]\n\n @cached_property\n def opcodes(self):\n sm = difflib.SequenceMatcher(None,\n self.active_target_units,\n self.new_unit_list)\n return sm.get_opcodes()\n\n @cached_property\n def updated_target_units(self):\n return [unitid for unitid, unit in self.target_units.items()\n if (unit['revision'] > self.source_revision\n and unit[\"state\"] != OBSOLETE)]\n\n def diff(self):\n \"\"\"Return a dictionary of change actions or None if there are no\n changes to be made.\n \"\"\"\n diff = {\"index\": self.get_indexes_to_update(),\n \"obsolete\": self.get_units_to_obsolete(),\n \"add\": self.get_units_to_add(),\n \"update\": self.get_units_to_update()}\n if self.has_changes(diff):\n return diff\n return None\n\n def get_indexes_to_update(self):\n offset = 0\n index_updates = []\n for (insert_at_, uids_add_, next_index, delta) in self.insert_points:\n if delta > 0:\n index_updates += [(next_index + offset, delta)]\n offset += delta\n return index_updates\n\n def get_units_to_add(self):\n offset = 0\n to_add = []\n proxy = (\n isinstance(self.source_store, models.Model)\n and DBUnit or FileUnit)\n\n for (insert_at, uids_add, next_index_, delta) in self.insert_points:\n for index, uid in enumerate(uids_add):\n source_unit = self.source_units.get(uid)\n if source_unit and uid not in self.target_units:\n new_unit_index = insert_at + index + 1 + offset\n to_add += [(proxy(source_unit), new_unit_index)]\n if delta > 0:\n offset += delta\n return to_add\n\n def get_units_to_obsolete(self):\n return [unit['id'] for unitid, unit in self.target_units.items()\n if ((unitid not in self.source_units\n or self.source_units[unitid]['state'] == OBSOLETE)\n and unitid in self.active_target_units\n and unitid not in self.updated_target_units)]\n\n def get_units_to_update(self):\n uid_index_map = {}\n offset = 0\n\n 
for (insert_at, uids_add, next_index_, delta) in self.insert_points:\n for index, uid in enumerate(uids_add):\n new_unit_index = insert_at + index + 1 + offset\n if uid in self.target_units:\n uid_index_map[uid] = {\n 'dbid': self.target_units[uid]['id'],\n 'index': new_unit_index}\n if delta > 0:\n offset += delta\n update_ids = self.get_updated_sourceids()\n update_ids.update({x['dbid'] for x in uid_index_map.values()})\n return (update_ids, uid_index_map)\n\n def get_updated_sourceids(self):\n \"\"\"Returns a set of unit DB ids to be updated.\n \"\"\"\n update_ids = set()\n\n for (tag, i1, i2, j1_, j2_) in self.opcodes:\n if tag != 'equal':\n continue\n update_ids.update(\n set(self.target_units[uid]['id']\n for uid in self.active_target_units[i1:i2]\n if (uid in self.source_units\n and (\n self.diffable.target_unit_class(\n self.target_units[uid])\n != self.diffable.source_unit_class(\n self.source_units[uid])))))\n return update_ids\n\n def has_changes(self, diff):\n for k, v in diff.items():\n if k == \"update\":\n if len(v[0]) > 0:\n return True\n else:\n if len(v) > 0:\n return True\n return False\n", "path": "pootle/apps/pootle_store/diff.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport difflib\nimport logging\nfrom collections import OrderedDict\n\nfrom django.db import models\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.delegate import format_diffs\n\nfrom .constants import FUZZY, OBSOLETE, TRANSLATED, UNTRANSLATED\nfrom .fields import to_python as multistring_to_python\nfrom .unit import UnitProxy\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UnitDiffProxy(UnitProxy):\n \"\"\"Wraps File/DB Unit dicts used by StoreDiff for equality comparison\"\"\"\n\n match_attrs = [\"context\", \"developer_comment\", \"locations\",\n \"source\", \"state\", \"target\", \"translator_comment\"]\n\n def __eq__(self, other):\n return all(getattr(self, k) == getattr(other, k)\n for k in self.match_attrs)\n\n def __ne__(self, other):\n return not self == other\n\n def hasplural(self):\n return (\n self.source is not None\n and (len(self.source.strings) > 1\n or getattr(self.source, \"plural\", None)))\n\n def getnotes(self, origin=None):\n return self.unit.get(\"%s_comment\" % origin, \"\")\n\n def getcontext(self):\n return self.unit[\"context\"]\n\n def isfuzzy(self):\n return self.unit[\"state\"] == FUZZY\n\n def isobsolete(self):\n return self.unit[\"state\"] == OBSOLETE\n\n def getid(self):\n return self.unit[\"unitid\"]\n\n\nclass DBUnit(UnitDiffProxy):\n pass\n\n\nclass FileUnit(UnitDiffProxy):\n\n @property\n def locations(self):\n return \"\\n\".join(self.unit[\"locations\"])\n\n @property\n def source(self):\n return multistring_to_python(self.unit[\"source\"])\n\n @property\n def target(self):\n return multistring_to_python(self.unit[\"target\"])\n\n def hasplural(self):\n return self.unit[\"hasplural\"]\n\n\nclass DiffableStore(object):\n \"\"\"Default Store representation for diffing\n\n this can be customized per-format using `format_diffs` provider\n \"\"\"\n\n file_unit_class = FileUnit\n db_unit_class = DBUnit\n\n unit_fields = (\n \"unitid\", \"state\", \"id\", \"index\", \"revision\",\n \"source_f\", \"target_f\", \"developer_comment\",\n \"translator_comment\", 
\"locations\", \"context\")\n\n def __init__(self, target_store, source_store):\n self.target_store = target_store\n self.source_store = source_store\n\n def get_db_units(self, unit_qs):\n diff_units = OrderedDict()\n units = unit_qs.values(*self.unit_fields).order_by(\"index\")\n for unit in units:\n diff_units[unit[\"unitid\"]] = unit\n return diff_units\n\n def get_file_unit(self, unit):\n state = UNTRANSLATED\n if unit.isobsolete():\n state = OBSOLETE\n elif unit.istranslated():\n state = TRANSLATED\n elif unit.isfuzzy():\n state = FUZZY\n return {\n \"unitid\": unit.getid(),\n \"context\": unit.getcontext(),\n \"locations\": unit.getlocations(),\n \"source\": unit.source,\n \"target\": unit.target,\n \"state\": state,\n \"hasplural\": unit.hasplural(),\n \"developer_comment\": unit.getnotes(origin=\"developer\"),\n \"translator_comment\": unit.getnotes(origin=\"translator\")}\n\n def get_file_units(self, units):\n diff_units = OrderedDict()\n for unit in units:\n if unit.isheader():\n continue\n if unit.getid() in diff_units:\n unitid = unit.getid()\n logger.warning(\n \"[diff] Duplicate unit found: %s %s\",\n self.target_store.name,\n (unitid\n if len(unitid) <= 20\n else \"%s...\" % unitid[:17]))\n diff_units[unit.getid()] = self.get_file_unit(unit)\n return diff_units\n\n @cached_property\n def target_units(self):\n return self.get_db_units(self.target_store.unit_set)\n\n @cached_property\n def source_units(self):\n if isinstance(self.source_store, models.Model):\n return self.get_db_units(self.source_store.unit_set.live())\n return self.get_file_units(self.source_store.units)\n\n @property\n def target_unit_class(self):\n return self.db_unit_class\n\n @property\n def source_unit_class(self):\n if isinstance(self.source_store, models.Model):\n return self.db_unit_class\n return self.file_unit_class\n\n\nclass StoreDiff(object):\n \"\"\"Compares 2 DBStores\"\"\"\n\n def __init__(self, target_store, source_store, source_revision):\n self.target_store = target_store\n self.source_store = source_store\n self.source_revision = source_revision\n self.target_revision = self.get_target_revision()\n\n @property\n def diff_class(self):\n diffs = format_diffs.gather()\n differ = diffs.get(\n self.target_store.filetype.name)\n if differ:\n return differ\n return diffs[\"default\"]\n\n def get_target_revision(self):\n return self.target_store.data.max_unit_revision or 0\n\n @cached_property\n def active_target_units(self):\n return [unitid for unitid, unit in self.target_units.items()\n if unit['state'] != OBSOLETE]\n\n @cached_property\n def diffable(self):\n return self.diff_class(self.target_store, self.source_store)\n\n @cached_property\n def target_units(self):\n \"\"\"All of the db units regardless of state or revision\"\"\"\n return self.diffable.target_units\n\n @cached_property\n def source_units(self):\n \"\"\"All of the db units regardless of state or revision\"\"\"\n return self.diffable.source_units\n\n @cached_property\n def insert_points(self):\n \"\"\"Returns a list of insert points with update index info.\n :return: a list of tuples\n ``(insert_at, uids_to_add, next_index, update_index_delta)`` where\n ``insert_at`` is the point for inserting\n ``uids_to_add`` are the units to be inserted\n ``update_index_delta`` is the offset for index updating\n ``next_index`` is the starting point after which\n ``update_index_delta`` should be applied.\n \"\"\"\n inserts = []\n new_unitid_list = self.new_unit_list\n for (tag, i1, i2, j1, j2) in self.opcodes:\n if tag == 'insert':\n 
update_index_delta = 0\n insert_at = 0\n if i1 > 0:\n insert_at = (\n self.target_units[\n self.active_target_units[i1 - 1]]['index'])\n next_index = insert_at + 1\n if i1 < len(self.active_target_units):\n next_index = self.target_units[\n self.active_target_units[i1]][\"index\"]\n update_index_delta = (\n j2 - j1 - next_index + insert_at + 1)\n\n inserts.append((insert_at,\n new_unitid_list[j1:j2],\n next_index,\n update_index_delta))\n\n elif tag == 'replace':\n insert_at = self.target_units[\n self.active_target_units[max(i1 - 1, 0)]]['index']\n next_index = self.target_units[\n self.active_target_units[i2 - 1]]['index']\n inserts.append((insert_at,\n new_unitid_list[j1:j2],\n next_index,\n j2 - j1 - insert_at + next_index))\n return inserts\n\n @cached_property\n def new_unit_list(self):\n # If source_revision is gte than the target_revision then new unit list\n # will be exactly what is in the file\n if self.source_revision >= self.target_revision:\n return self.source_units.keys()\n\n # These units are kept as they have been updated since source_revision\n # but do not appear in the file\n new_units = [u for u in self.updated_target_units\n if u not in self.source_units]\n\n # These unit are either present in both or only in the file so are\n # kept in the file order\n new_units += [u for u in self.source_units.keys()\n if u not in self.obsoleted_target_units]\n\n return new_units\n\n @cached_property\n def obsoleted_target_units(self):\n return [unitid for unitid, unit in self.target_units.items()\n if (unit['state'] == OBSOLETE\n and unit[\"revision\"] > self.source_revision)]\n\n @cached_property\n def opcodes(self):\n sm = difflib.SequenceMatcher(None,\n self.active_target_units,\n self.new_unit_list)\n return sm.get_opcodes()\n\n @cached_property\n def updated_target_units(self):\n return [unitid for unitid, unit in self.target_units.items()\n if (unit['revision'] > self.source_revision\n and unit[\"state\"] != OBSOLETE)]\n\n def diff(self):\n \"\"\"Return a dictionary of change actions or None if there are no\n changes to be made.\n \"\"\"\n diff = {\"index\": self.get_indexes_to_update(),\n \"obsolete\": self.get_units_to_obsolete(),\n \"add\": self.get_units_to_add(),\n \"update\": self.get_units_to_update()}\n if self.has_changes(diff):\n return diff\n return None\n\n def get_indexes_to_update(self):\n offset = 0\n index_updates = []\n for (insert_at_, uids_add_, next_index, delta) in self.insert_points:\n if delta > 0:\n index_updates += [(next_index + offset, delta)]\n offset += delta\n return index_updates\n\n def get_units_to_add(self):\n offset = 0\n to_add = []\n proxy = (\n isinstance(self.source_store, models.Model)\n and DBUnit or FileUnit)\n\n for (insert_at, uids_add, next_index_, delta) in self.insert_points:\n for index, uid in enumerate(uids_add):\n source_unit = self.source_units.get(uid)\n if source_unit and uid not in self.target_units:\n new_unit_index = insert_at + index + 1 + offset\n to_add += [(proxy(source_unit), new_unit_index)]\n if delta > 0:\n offset += delta\n return to_add\n\n def get_units_to_obsolete(self):\n return [unit['id'] for unitid, unit in self.target_units.items()\n if ((unitid not in self.source_units\n or self.source_units[unitid]['state'] == OBSOLETE)\n and unitid in self.active_target_units\n and unitid not in self.updated_target_units)]\n\n def get_units_to_update(self):\n uid_index_map = {}\n offset = 0\n\n for (insert_at, uids_add, next_index_, delta) in self.insert_points:\n for index, uid in enumerate(uids_add):\n 
new_unit_index = insert_at + index + 1 + offset\n if uid in self.target_units:\n uid_index_map[uid] = {\n 'dbid': self.target_units[uid]['id'],\n 'index': new_unit_index}\n if delta > 0:\n offset += delta\n update_ids = self.get_updated_sourceids()\n update_ids.update({x['dbid'] for x in uid_index_map.values()})\n return (update_ids, uid_index_map)\n\n def get_updated_sourceids(self):\n \"\"\"Returns a set of unit DB ids to be updated.\n \"\"\"\n update_ids = set()\n\n for (tag, i1, i2, j1_, j2_) in self.opcodes:\n if tag != 'equal':\n continue\n update_ids.update(\n set(self.target_units[uid]['id']\n for uid in self.active_target_units[i1:i2]\n if (uid in self.source_units\n and (\n self.diffable.target_unit_class(\n self.target_units[uid])\n != self.diffable.source_unit_class(\n self.source_units[uid])))))\n return update_ids\n\n def has_changes(self, diff):\n for k, v in diff.items():\n if k == \"update\":\n if len(v[0]) > 0:\n return True\n else:\n if len(v) > 0:\n return True\n return False\n", "path": "pootle/apps/pootle_store/diff.py"}]} | 4,064 | 278 |
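The golden diff above addresses the silent part of the problem by logging a warning when a duplicate unit id is seen while building the id-to-unit mapping in `get_file_units`. A minimal standalone sketch of that pattern, with `units` and `store_name` standing in for Pootle's real objects:

```python
import logging
from collections import OrderedDict

logger = logging.getLogger(__name__)

def collect_file_units(units, store_name):
    """Map unit id -> unit, warning when the same id appears twice."""
    collected = OrderedDict()
    for unit in units:
        unitid = unit.getid()
        if unitid in collected:
            # Same behaviour as the golden diff: warn (truncating long ids)
            # and let the later unit overwrite the earlier one.
            logger.warning(
                "[diff] Duplicate unit found: %s %s",
                store_name,
                unitid if len(unitid) <= 20 else "%s..." % unitid[:17])
        collected[unitid] = unit
    return collected
```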
gh_patches_debug_22293 | rasdani/github-patches | git_diff | dmlc__dgl-4218 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Example][Bug] Running error on the example case: example/pytorch/arma
## 🐛 Bug
Example of using DGL to implement GNN with convolutional ARMA filters failed.
## To Reproduce
`python citation.py --gpu 0`
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
```
Traceback (most recent call last):
File "citation.py", line 142, in <module>
acc_lists.append(main(args))
File "citation.py", line 84, in main
train_loss.backward()
File "/opt/conda/lib/python3.8/site-packages/torch/_tensor.py", line 396, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
File "/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py", line 173, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.cuda.FloatTensor [2708, 7]], which is output 0 of ReluBackward0, is at version 1; expected version 0 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).
```
`python citation.py --gpu 0 --dataset Citeseer --num-stacks 3`
```
Traceback (most recent call last):
File "citation.py", line 142, in <module>
acc_lists.append(main(args))
File "citation.py", line 84, in main
train_loss.backward()
File "/opt/conda/lib/python3.8/site-packages/torch/_tensor.py", line 396, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
File "/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py", line 173, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.cuda.FloatTensor [3327, 6]], which is output 0 of ReluBackward0, is at version 2; expected version 0 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).
```
## Expected behavior
The case should run through
## Environment
- DGL Version (e.g., 1.0): 0.9
- Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3): 1.12
- OS (e.g., Linux): ubuntu
- How you installed DGL (`conda`, `pip`, source): source
- Build command you used (if compiling from source):
- Python version: 3.8
- CUDA/cuDNN version (if applicable): 11.7
- GPU models and configuration (e.g. V100): A100
- Any other relevant information:
## Additional context
<!-- Add any other context about the problem here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/pytorch/arma/model.py`
Content:
```
1 import torch
2 import torch.nn as nn
3 import torch.nn.functional as F
4 import dgl.function as fn
5 import math
6
7 def glorot(tensor):
8 if tensor is not None:
9 stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))
10 tensor.data.uniform_(-stdv, stdv)
11
12 def zeros(tensor):
13 if tensor is not None:
14 tensor.data.fill_(0)
15
16 class ARMAConv(nn.Module):
17 def __init__(self,
18 in_dim,
19 out_dim,
20 num_stacks,
21 num_layers,
22 activation=None,
23 dropout=0.0,
24 bias=True):
25 super(ARMAConv, self).__init__()
26
27 self.in_dim = in_dim
28 self.out_dim = out_dim
29 self.K = num_stacks
30 self.T = num_layers
31 self.activation = activation
32 self.dropout = nn.Dropout(p=dropout)
33
34 # init weight
35 self.w_0 = nn.ModuleDict({
36 str(k): nn.Linear(in_dim, out_dim, bias=False) for k in range(self.K)
37 })
38 # deeper weight
39 self.w = nn.ModuleDict({
40 str(k): nn.Linear(out_dim, out_dim, bias=False) for k in range(self.K)
41 })
42 # v
43 self.v = nn.ModuleDict({
44 str(k): nn.Linear(in_dim, out_dim, bias=False) for k in range(self.K)
45 })
46 # bias
47 if bias:
48 self.bias = nn.Parameter(torch.Tensor(self.K, self.T, 1, self.out_dim))
49 else:
50 self.register_parameter('bias', None)
51
52 self.reset_parameters()
53
54 def reset_parameters(self):
55 for k in range(self.K):
56 glorot(self.w_0[str(k)].weight)
57 glorot(self.w[str(k)].weight)
58 glorot(self.v[str(k)].weight)
59 zeros(self.bias)
60
61 def forward(self, g, feats):
62 with g.local_scope():
63 init_feats = feats
64 # assume that the graphs are undirected and graph.in_degrees() is the same as graph.out_degrees()
65 degs = g.in_degrees().float().clamp(min=1)
66 norm = torch.pow(degs, -0.5).to(feats.device).unsqueeze(1)
67 output = None
68
69 for k in range(self.K):
70 feats = init_feats
71 for t in range(self.T):
72 feats = feats * norm
73 g.ndata['h'] = feats
74 g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
75 feats = g.ndata.pop('h')
76 feats = feats * norm
77
78 if t == 0:
79 feats = self.w_0[str(k)](feats)
80 else:
81 feats = self.w[str(k)](feats)
82
83 feats += self.dropout(self.v[str(k)](init_feats))
84 feats += self.v[str(k)](self.dropout(init_feats))
85
86 if self.bias is not None:
87 feats += self.bias[k][t]
88
89 if self.activation is not None:
90 feats = self.activation(feats)
91
92 if output is None:
93 output = feats
94 else:
95 output += feats
96
97 return output / self.K
98
99 class ARMA4NC(nn.Module):
100 def __init__(self,
101 in_dim,
102 hid_dim,
103 out_dim,
104 num_stacks,
105 num_layers,
106 activation=None,
107 dropout=0.0):
108 super(ARMA4NC, self).__init__()
109
110 self.conv1 = ARMAConv(in_dim=in_dim,
111 out_dim=hid_dim,
112 num_stacks=num_stacks,
113 num_layers=num_layers,
114 activation=activation,
115 dropout=dropout)
116
117 self.conv2 = ARMAConv(in_dim=hid_dim,
118 out_dim=out_dim,
119 num_stacks=num_stacks,
120 num_layers=num_layers,
121 activation=activation,
122 dropout=dropout)
123
124 self.dropout = nn.Dropout(p=dropout)
125
126 def forward(self, g, feats):
127 feats = F.relu(self.conv1(g, feats))
128 feats = self.dropout(feats)
129 feats = self.conv2(g, feats)
130 return feats
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/pytorch/arma/model.py b/examples/pytorch/arma/model.py
--- a/examples/pytorch/arma/model.py
+++ b/examples/pytorch/arma/model.py
@@ -64,7 +64,7 @@
# assume that the graphs are undirected and graph.in_degrees() is the same as graph.out_degrees()
degs = g.in_degrees().float().clamp(min=1)
norm = torch.pow(degs, -0.5).to(feats.device).unsqueeze(1)
- output = None
+ output = []
for k in range(self.K):
feats = init_feats
@@ -88,13 +88,9 @@
if self.activation is not None:
feats = self.activation(feats)
-
- if output is None:
- output = feats
- else:
- output += feats
-
- return output / self.K
+ output.append(feats)
+
+ return torch.stack(output).mean(dim=0)
class ARMA4NC(nn.Module):
def __init__(self,
| {"golden_diff": "diff --git a/examples/pytorch/arma/model.py b/examples/pytorch/arma/model.py\n--- a/examples/pytorch/arma/model.py\n+++ b/examples/pytorch/arma/model.py\n@@ -64,7 +64,7 @@\n # assume that the graphs are undirected and graph.in_degrees() is the same as graph.out_degrees()\n degs = g.in_degrees().float().clamp(min=1)\n norm = torch.pow(degs, -0.5).to(feats.device).unsqueeze(1)\n- output = None\n+ output = [] \n \n for k in range(self.K):\n feats = init_feats\n@@ -88,13 +88,9 @@\n \n if self.activation is not None:\n feats = self.activation(feats)\n- \n- if output is None:\n- output = feats\n- else:\n- output += feats\n- \n- return output / self.K \n+ output.append(feats)\n+\n+ return torch.stack(output).mean(dim=0)\n \n class ARMA4NC(nn.Module):\n def __init__(self,\n", "issue": "[Example][Bug] Running error on the example case: example/pytorch/arma\n## \ud83d\udc1b Bug\r\n\r\nExample of using DGL to implement GNN with convolutional ARMA filters failed.\r\n\r\n## To Reproduce\r\n\r\n`python citation.py --gpu 0`\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n```\r\nTraceback (most recent call last):\r\n File \"citation.py\", line 142, in <module>\r\n acc_lists.append(main(args))\r\n File \"citation.py\", line 84, in main\r\n train_loss.backward()\r\n File \"/opt/conda/lib/python3.8/site-packages/torch/_tensor.py\", line 396, in backward\r\n torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)\r\n File \"/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py\", line 173, in backward\r\n Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass\r\nRuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.cuda.FloatTensor [2708, 7]], which is output 0 of ReluBackward0, is at version 1; expected version 0 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).\r\n```\r\n\r\n`python citation.py --gpu 0 --dataset Citeseer --num-stacks 3`\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"citation.py\", line 142, in <module>\r\n acc_lists.append(main(args))\r\n File \"citation.py\", line 84, in main\r\n train_loss.backward()\r\n File \"/opt/conda/lib/python3.8/site-packages/torch/_tensor.py\", line 396, in backward\r\n torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)\r\n File \"/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py\", line 173, in backward\r\n Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass\r\nRuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.cuda.FloatTensor [3327, 6]], which is output 0 of ReluBackward0, is at version 2; expected version 0 instead. 
Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).\r\n\r\n```\r\n\r\n## Expected behavior\r\n\r\nThe case should run through\r\n\r\n## Environment\r\n\r\n - DGL Version (e.g., 1.0): 0.9\r\n - Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3): 1.12\r\n - OS (e.g., Linux): ubuntu\r\n - How you installed DGL (`conda`, `pip`, source): source\r\n - Build command you used (if compiling from source):\r\n - Python version: 3.8\r\n - CUDA/cuDNN version (if applicable): 11.7\r\n - GPU models and configuration (e.g. V100): A100\r\n - Any other relevant information:\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport dgl.function as fn\nimport math\n\ndef glorot(tensor):\n if tensor is not None:\n stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))\n tensor.data.uniform_(-stdv, stdv)\n\ndef zeros(tensor):\n if tensor is not None:\n tensor.data.fill_(0)\n\nclass ARMAConv(nn.Module):\n def __init__(self,\n in_dim,\n out_dim,\n num_stacks,\n num_layers,\n activation=None,\n dropout=0.0,\n bias=True):\n super(ARMAConv, self).__init__()\n \n self.in_dim = in_dim\n self.out_dim = out_dim\n self.K = num_stacks\n self.T = num_layers\n self.activation = activation\n self.dropout = nn.Dropout(p=dropout)\n\n # init weight\n self.w_0 = nn.ModuleDict({\n str(k): nn.Linear(in_dim, out_dim, bias=False) for k in range(self.K)\n })\n # deeper weight\n self.w = nn.ModuleDict({\n str(k): nn.Linear(out_dim, out_dim, bias=False) for k in range(self.K)\n })\n # v\n self.v = nn.ModuleDict({\n str(k): nn.Linear(in_dim, out_dim, bias=False) for k in range(self.K)\n })\n # bias\n if bias:\n self.bias = nn.Parameter(torch.Tensor(self.K, self.T, 1, self.out_dim))\n else:\n self.register_parameter('bias', None)\n \n self.reset_parameters()\n\n def reset_parameters(self):\n for k in range(self.K):\n glorot(self.w_0[str(k)].weight)\n glorot(self.w[str(k)].weight)\n glorot(self.v[str(k)].weight)\n zeros(self.bias)\n\n def forward(self, g, feats):\n with g.local_scope():\n init_feats = feats\n # assume that the graphs are undirected and graph.in_degrees() is the same as graph.out_degrees()\n degs = g.in_degrees().float().clamp(min=1)\n norm = torch.pow(degs, -0.5).to(feats.device).unsqueeze(1)\n output = None\n\n for k in range(self.K):\n feats = init_feats\n for t in range(self.T):\n feats = feats * norm\n g.ndata['h'] = feats\n g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))\n feats = g.ndata.pop('h')\n feats = feats * norm\n\n if t == 0:\n feats = self.w_0[str(k)](feats)\n else:\n feats = self.w[str(k)](feats)\n \n feats += self.dropout(self.v[str(k)](init_feats))\n feats += self.v[str(k)](self.dropout(init_feats))\n\n if self.bias is not None:\n feats += self.bias[k][t]\n \n if self.activation is not None:\n feats = self.activation(feats)\n \n if output is None:\n output = feats\n else:\n output += feats\n \n return output / self.K \n\nclass ARMA4NC(nn.Module):\n def __init__(self,\n in_dim,\n hid_dim,\n out_dim,\n num_stacks,\n num_layers,\n activation=None,\n dropout=0.0):\n super(ARMA4NC, self).__init__()\n\n self.conv1 = ARMAConv(in_dim=in_dim,\n out_dim=hid_dim,\n num_stacks=num_stacks,\n num_layers=num_layers,\n activation=activation,\n dropout=dropout)\n\n self.conv2 = ARMAConv(in_dim=hid_dim,\n out_dim=out_dim,\n num_stacks=num_stacks,\n 
num_layers=num_layers,\n activation=activation,\n dropout=dropout)\n \n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, g, feats):\n feats = F.relu(self.conv1(g, feats))\n feats = self.dropout(feats)\n feats = self.conv2(g, feats)\n return feats\n", "path": "examples/pytorch/arma/model.py"}], "after_files": [{"content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport dgl.function as fn\nimport math\n\ndef glorot(tensor):\n if tensor is not None:\n stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))\n tensor.data.uniform_(-stdv, stdv)\n\ndef zeros(tensor):\n if tensor is not None:\n tensor.data.fill_(0)\n\nclass ARMAConv(nn.Module):\n def __init__(self,\n in_dim,\n out_dim,\n num_stacks,\n num_layers,\n activation=None,\n dropout=0.0,\n bias=True):\n super(ARMAConv, self).__init__()\n \n self.in_dim = in_dim\n self.out_dim = out_dim\n self.K = num_stacks\n self.T = num_layers\n self.activation = activation\n self.dropout = nn.Dropout(p=dropout)\n\n # init weight\n self.w_0 = nn.ModuleDict({\n str(k): nn.Linear(in_dim, out_dim, bias=False) for k in range(self.K)\n })\n # deeper weight\n self.w = nn.ModuleDict({\n str(k): nn.Linear(out_dim, out_dim, bias=False) for k in range(self.K)\n })\n # v\n self.v = nn.ModuleDict({\n str(k): nn.Linear(in_dim, out_dim, bias=False) for k in range(self.K)\n })\n # bias\n if bias:\n self.bias = nn.Parameter(torch.Tensor(self.K, self.T, 1, self.out_dim))\n else:\n self.register_parameter('bias', None)\n \n self.reset_parameters()\n\n def reset_parameters(self):\n for k in range(self.K):\n glorot(self.w_0[str(k)].weight)\n glorot(self.w[str(k)].weight)\n glorot(self.v[str(k)].weight)\n zeros(self.bias)\n\n def forward(self, g, feats):\n with g.local_scope():\n init_feats = feats\n # assume that the graphs are undirected and graph.in_degrees() is the same as graph.out_degrees()\n degs = g.in_degrees().float().clamp(min=1)\n norm = torch.pow(degs, -0.5).to(feats.device).unsqueeze(1)\n output = [] \n\n for k in range(self.K):\n feats = init_feats\n for t in range(self.T):\n feats = feats * norm\n g.ndata['h'] = feats\n g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))\n feats = g.ndata.pop('h')\n feats = feats * norm\n\n if t == 0:\n feats = self.w_0[str(k)](feats)\n else:\n feats = self.w[str(k)](feats)\n \n feats += self.dropout(self.v[str(k)](init_feats))\n feats += self.v[str(k)](self.dropout(init_feats))\n\n if self.bias is not None:\n feats += self.bias[k][t]\n \n if self.activation is not None:\n feats = self.activation(feats)\n output.append(feats)\n\n return torch.stack(output).mean(dim=0)\n\nclass ARMA4NC(nn.Module):\n def __init__(self,\n in_dim,\n hid_dim,\n out_dim,\n num_stacks,\n num_layers,\n activation=None,\n dropout=0.0):\n super(ARMA4NC, self).__init__()\n\n self.conv1 = ARMAConv(in_dim=in_dim,\n out_dim=hid_dim,\n num_stacks=num_stacks,\n num_layers=num_layers,\n activation=activation,\n dropout=dropout)\n\n self.conv2 = ARMAConv(in_dim=hid_dim,\n out_dim=out_dim,\n num_stacks=num_stacks,\n num_layers=num_layers,\n activation=activation,\n dropout=dropout)\n \n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, g, feats):\n feats = F.relu(self.conv1(g, feats))\n feats = self.dropout(feats)\n feats = self.conv2(g, feats)\n return feats\n", "path": "examples/pytorch/arma/model.py"}]} | 2,201 | 241 |
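The root cause above is that `output += feats` modifies, in place, a tensor that autograd saved for `ReluBackward0`, so the backward pass fails. The golden diff avoids in-place accumulation by collecting the per-stack results and reducing them once at the end. A small self-contained sketch of that pattern, with made-up shapes and a toy per-stack computation:

```python
import torch

x = torch.randn(8, 4, requires_grad=True)
K = 3

# Instead of `output += feats` (an in-place update of a tensor autograd still needs),
# collect each stack's result and reduce once at the end.
outputs = []
for k in range(K):
    feats = torch.relu(x * (k + 1))   # stand-in for one ARMA stack
    outputs.append(feats)

output = torch.stack(outputs).mean(dim=0)   # equivalent to summing and dividing by K
output.sum().backward()                     # backward now succeeds
print(x.grad.shape)
```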
gh_patches_debug_25085 | rasdani/github-patches | git_diff | pex-tool__pex-703 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pex file looses the executable permission of a binary file if packaged
I have a use case where the pex file I am generating has to package another statically compiled binary.
When the pex file is exploded, the binary file has lost its executable permission. Is there any way to preserve the permissions?
--- END ISSUE ---
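One way to address the behaviour described in this issue (not necessarily the patch applied upstream, which is not shown here) is to restore the Unix mode bits that zip archives keep in each entry's `external_attr`, since copying only the byte stream drops the executable bit. A hedged sketch; the helper name is invented:

```python
import os
import zipfile

def extract_with_mode(zf: zipfile.ZipFile, name: str, target_path: str) -> None:
    """Extract one zip member and restore its Unix permission bits, if recorded."""
    info = zf.getinfo(name)
    with zf.open(info) as src, open(target_path, 'wb') as dst:
        dst.write(src.read())
    mode = info.external_attr >> 16  # high 16 bits hold st_mode for Unix-built zips
    if mode:
        os.chmod(target_path, mode)

# Usage sketch:
# with zipfile.ZipFile('app.pex') as zf:
#     extract_with_mode(zf, 'bin/tool', '/tmp/tool')
```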
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/util.py`
Content:
```
1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import absolute_import
5
6 import contextlib
7 import os
8 import shutil
9 import tempfile
10 import uuid
11 from hashlib import sha1
12 from site import makepath
13 from threading import Lock
14
15 from pex.common import rename_if_empty, safe_mkdir, safe_mkdtemp, safe_open
16 from pex.compatibility import exec_function
17 from pex.finders import register_finders
18 from pex.third_party.pkg_resources import (
19 find_distributions,
20 resource_isdir,
21 resource_listdir,
22 resource_string
23 )
24
25
26 class DistributionHelper(object):
27 @classmethod
28 def walk_data(cls, dist, path='/'):
29 """Yields filename, stream for files identified as data in the distribution"""
30 for rel_fn in filter(None, dist.resource_listdir(path)):
31 full_fn = os.path.join(path, rel_fn)
32 if dist.resource_isdir(full_fn):
33 for fn, stream in cls.walk_data(dist, full_fn):
34 yield fn, stream
35 else:
36 yield full_fn[1:], dist.get_resource_stream(dist._provider, full_fn)
37
38 @staticmethod
39 def zipsafe(dist):
40 """Returns whether or not we determine a distribution is zip-safe."""
41 # zip-safety is only an attribute of eggs. wheels are considered never
42 # zip safe per implications of PEP 427.
43 if hasattr(dist, 'egg_info') and dist.egg_info.endswith('EGG-INFO'):
44 egg_metadata = dist.metadata_listdir('')
45 return 'zip-safe' in egg_metadata and 'native_libs.txt' not in egg_metadata
46 else:
47 return False
48
49 @classmethod
50 def access_zipped_assets(cls, static_module_name, static_path, dir_location=None):
51 """
52 Create a copy of static resource files as we can't serve them from within the pex file.
53
54 :param static_module_name: Module name containing module to cache in a tempdir
55 :type static_module_name: string, for example 'twitter.common.zookeeper' or similar
56 :param static_path: Module name, for example 'serverset'
57 :param dir_location: create a new temporary directory inside, or None to have one created
58 :returns temp_dir: Temporary directory with the zipped assets inside
59 :rtype: str
60 """
61
62 # asset_path is initially a module name that's the same as the static_path, but will be
63 # changed to walk the directory tree
64 def walk_zipped_assets(static_module_name, static_path, asset_path, temp_dir):
65 for asset in resource_listdir(static_module_name, asset_path):
66 asset_target = os.path.normpath(
67 os.path.join(os.path.relpath(asset_path, static_path), asset))
68 if resource_isdir(static_module_name, os.path.join(asset_path, asset)):
69 safe_mkdir(os.path.join(temp_dir, asset_target))
70 walk_zipped_assets(static_module_name, static_path, os.path.join(asset_path, asset),
71 temp_dir)
72 else:
73 with open(os.path.join(temp_dir, asset_target), 'wb') as fp:
74 path = os.path.join(static_path, asset_target)
75 file_data = resource_string(static_module_name, path)
76 fp.write(file_data)
77
78 if dir_location is None:
79 temp_dir = safe_mkdtemp()
80 else:
81 temp_dir = dir_location
82
83 walk_zipped_assets(static_module_name, static_path, static_path, temp_dir)
84
85 return temp_dir
86
87 @classmethod
88 def distribution_from_path(cls, path, name=None):
89 """Return a distribution from a path.
90
91 If name is provided, find the distribution. If none is found matching the name,
92 return None. If name is not provided and there is unambiguously a single
93 distribution, return that distribution otherwise None.
94 """
95 # Monkeypatch pkg_resources finders should it not already be so.
96 register_finders()
97 if name is None:
98 distributions = set(find_distributions(path))
99 if len(distributions) == 1:
100 return distributions.pop()
101 else:
102 for dist in find_distributions(path):
103 if dist.project_name == name:
104 return dist
105
106
107 class CacheHelper(object):
108 @classmethod
109 def update_hash(cls, filelike, digest):
110 """Update the digest of a single file in a memory-efficient manner."""
111 block_size = digest.block_size * 1024
112 for chunk in iter(lambda: filelike.read(block_size), b''):
113 digest.update(chunk)
114
115 @classmethod
116 def hash(cls, path, digest=None, hasher=sha1):
117 """Return the digest of a single file in a memory-efficient manner."""
118 if digest is None:
119 digest = hasher()
120 with open(path, 'rb') as fh:
121 cls.update_hash(fh, digest)
122 return digest.hexdigest()
123
124 @classmethod
125 def _compute_hash(cls, names, stream_factory):
126 digest = sha1()
127 # Always use / as the path separator, since that's what zip uses.
128 hashed_names = [n.replace(os.sep, '/') for n in names]
129 digest.update(''.join(hashed_names).encode('utf-8'))
130 for name in names:
131 with contextlib.closing(stream_factory(name)) as fp:
132 cls.update_hash(fp, digest)
133 return digest.hexdigest()
134
135 @classmethod
136 def zip_hash(cls, zf, prefix=''):
137 """Return the hash of the contents of a zipfile, comparable with a cls.dir_hash."""
138 prefix_length = len(prefix)
139 names = sorted(name[prefix_length:] for name in zf.namelist()
140 if name.startswith(prefix) and not name.endswith('.pyc') and not name.endswith('/'))
141 def stream_factory(name):
142 return zf.open(prefix + name)
143 return cls._compute_hash(names, stream_factory)
144
145 @classmethod
146 def _iter_files(cls, directory):
147 normpath = os.path.realpath(os.path.normpath(directory))
148 for root, _, files in os.walk(normpath):
149 for f in files:
150 yield os.path.relpath(os.path.join(root, f), normpath)
151
152 @classmethod
153 def pex_hash(cls, d):
154 """Return a reproducible hash of the contents of a directory."""
155 names = sorted(f for f in cls._iter_files(d) if not (f.endswith('.pyc') or f.startswith('.')))
156 def stream_factory(name):
157 return open(os.path.join(d, name), 'rb') # noqa: T802
158 return cls._compute_hash(names, stream_factory)
159
160 @classmethod
161 def dir_hash(cls, d):
162 """Return a reproducible hash of the contents of a directory."""
163 names = sorted(f for f in cls._iter_files(d) if not f.endswith('.pyc'))
164 def stream_factory(name):
165 return open(os.path.join(d, name), 'rb') # noqa: T802
166 return cls._compute_hash(names, stream_factory)
167
168 @classmethod
169 def cache_distribution(cls, zf, source, target_dir):
170 """Possibly cache an egg from within a zipfile into target_cache.
171
172 Given a zipfile handle and a filename corresponding to an egg distribution within
173 that zip, maybe write to the target cache and return a Distribution."""
174 dependency_basename = os.path.basename(source)
175 if not os.path.exists(target_dir):
176 target_dir_tmp = target_dir + '.' + uuid.uuid4().hex
177 for name in zf.namelist():
178 if name.startswith(source) and not name.endswith('/'):
179 # strip off prefix + '/'
180 target_name = os.path.join(dependency_basename, name[len(source) + 1:])
181 with contextlib.closing(zf.open(name)) as zi:
182 with safe_open(os.path.join(target_dir_tmp, target_name), 'wb') as fp:
183 shutil.copyfileobj(zi, fp)
184
185 rename_if_empty(target_dir_tmp, target_dir)
186
187 dist = DistributionHelper.distribution_from_path(target_dir)
188 assert dist is not None, 'Failed to cache distribution %s' % source
189 return dist
190
191
192 class Memoizer(object):
193 """A thread safe class for memoizing the results of a computation."""
194
195 def __init__(self):
196 self._data = {}
197 self._lock = Lock()
198
199 def get(self, key, default=None):
200 with self._lock:
201 return self._data.get(key, default)
202
203 def store(self, key, value):
204 with self._lock:
205 self._data[key] = value
206
207
208 @contextlib.contextmanager
209 def named_temporary_file(*args, **kwargs):
210 """
211 Due to a bug in python (https://bugs.python.org/issue14243), we need
212 this to be able to use the temporary file without deleting it.
213 """
214 assert 'delete' not in kwargs
215 kwargs['delete'] = False
216 fp = tempfile.NamedTemporaryFile(*args, **kwargs)
217 try:
218 with fp:
219 yield fp
220 finally:
221 os.remove(fp.name)
222
223
224 def iter_pth_paths(filename):
225 """Given a .pth file, extract and yield all inner paths without honoring imports. This shadows
226 python's site.py behavior, which is invoked at interpreter startup."""
227 try:
228 f = open(filename, 'rU') # noqa
229 except IOError:
230 return
231
232 dirname = os.path.dirname(filename)
233 known_paths = set()
234
235 with f:
236 for line in f:
237 line = line.rstrip()
238 if not line or line.startswith('#'):
239 continue
240 elif line.startswith(('import ', 'import\t')):
241 try:
242 exec_function(line, globals_map={})
243 continue
244 except Exception:
245 # NB: import lines are routinely abused with extra code appended using `;` so the class of
246 # exceptions that might be raised in broader than ImportError. As such we cacth broadly
247 # here.
248
249 # Defer error handling to the higher level site.py logic invoked at startup.
250 return
251 else:
252 extras_dir, extras_dir_case_insensitive = makepath(dirname, line)
253 if extras_dir_case_insensitive not in known_paths and os.path.exists(extras_dir):
254 yield extras_dir
255 known_paths.add(extras_dir_case_insensitive)
256
257
258 def merge_split(*paths):
259 """Merge paths into a single path delimited by colons and split on colons to return
260 a list of paths.
261
262 :param paths: a variable length list of path strings
263 :return: a list of paths from the merged path list split by colons
264 """
265 filtered_paths = filter(None, paths)
266 return [p for p in ':'.join(filtered_paths).split(':') if p]
267
```
--- END FILES ---
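For orientation, the hashing and distribution helpers listed above can be exercised on their own. The following is a hypothetical sketch (the directory path is made up, and it assumes a pex version that still ships these helpers):

```python
from pex.util import CacheHelper, DistributionHelper

unpacked = "/tmp/some_unpacked_dist"  # illustrative path, not from the issue

# Reproducible digests of a directory tree; dir_hash() skips .pyc files,
# while pex_hash() additionally skips dot-files.
print(CacheHelper.dir_hash(unpacked))
print(CacheHelper.pex_hash(unpacked))

# Locate a distribution unpacked at that path; returns None if none is
# unambiguously found.
dist = DistributionHelper.distribution_from_path(unpacked)
print(dist.project_name if dist else "no distribution found")
```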
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/util.py b/pex/util.py
--- a/pex/util.py
+++ b/pex/util.py
@@ -5,14 +5,13 @@
import contextlib
import os
-import shutil
import tempfile
import uuid
from hashlib import sha1
from site import makepath
from threading import Lock
-from pex.common import rename_if_empty, safe_mkdir, safe_mkdtemp, safe_open
+from pex.common import rename_if_empty, safe_mkdir, safe_mkdtemp
from pex.compatibility import exec_function
from pex.finders import register_finders
from pex.third_party.pkg_resources import (
@@ -176,11 +175,9 @@
target_dir_tmp = target_dir + '.' + uuid.uuid4().hex
for name in zf.namelist():
if name.startswith(source) and not name.endswith('/'):
- # strip off prefix + '/'
- target_name = os.path.join(dependency_basename, name[len(source) + 1:])
- with contextlib.closing(zf.open(name)) as zi:
- with safe_open(os.path.join(target_dir_tmp, target_name), 'wb') as fp:
- shutil.copyfileobj(zi, fp)
+ zf.extract(name, target_dir_tmp)
+ os.rename(os.path.join(target_dir_tmp, source),
+ os.path.join(target_dir_tmp, dependency_basename))
rename_if_empty(target_dir_tmp, target_dir)
| {"golden_diff": "diff --git a/pex/util.py b/pex/util.py\n--- a/pex/util.py\n+++ b/pex/util.py\n@@ -5,14 +5,13 @@\n \n import contextlib\n import os\n-import shutil\n import tempfile\n import uuid\n from hashlib import sha1\n from site import makepath\n from threading import Lock\n \n-from pex.common import rename_if_empty, safe_mkdir, safe_mkdtemp, safe_open\n+from pex.common import rename_if_empty, safe_mkdir, safe_mkdtemp\n from pex.compatibility import exec_function\n from pex.finders import register_finders\n from pex.third_party.pkg_resources import (\n@@ -176,11 +175,9 @@\n target_dir_tmp = target_dir + '.' + uuid.uuid4().hex\n for name in zf.namelist():\n if name.startswith(source) and not name.endswith('/'):\n- # strip off prefix + '/'\n- target_name = os.path.join(dependency_basename, name[len(source) + 1:])\n- with contextlib.closing(zf.open(name)) as zi:\n- with safe_open(os.path.join(target_dir_tmp, target_name), 'wb') as fp:\n- shutil.copyfileobj(zi, fp)\n+ zf.extract(name, target_dir_tmp)\n+ os.rename(os.path.join(target_dir_tmp, source),\n+ os.path.join(target_dir_tmp, dependency_basename))\n \n rename_if_empty(target_dir_tmp, target_dir)\n", "issue": "Pex file looses the executable permission of a binary file if packaged \nI have a use case where the pex file I am generating has to package another statically compiled binary. \nWhen the pex file is exploded the binary file has lost the executable permission. Is there anyway to preserve the permissions?\n\n", "before_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import\n\nimport contextlib\nimport os\nimport shutil\nimport tempfile\nimport uuid\nfrom hashlib import sha1\nfrom site import makepath\nfrom threading import Lock\n\nfrom pex.common import rename_if_empty, safe_mkdir, safe_mkdtemp, safe_open\nfrom pex.compatibility import exec_function\nfrom pex.finders import register_finders\nfrom pex.third_party.pkg_resources import (\n find_distributions,\n resource_isdir,\n resource_listdir,\n resource_string\n)\n\n\nclass DistributionHelper(object):\n @classmethod\n def walk_data(cls, dist, path='/'):\n \"\"\"Yields filename, stream for files identified as data in the distribution\"\"\"\n for rel_fn in filter(None, dist.resource_listdir(path)):\n full_fn = os.path.join(path, rel_fn)\n if dist.resource_isdir(full_fn):\n for fn, stream in cls.walk_data(dist, full_fn):\n yield fn, stream\n else:\n yield full_fn[1:], dist.get_resource_stream(dist._provider, full_fn)\n\n @staticmethod\n def zipsafe(dist):\n \"\"\"Returns whether or not we determine a distribution is zip-safe.\"\"\"\n # zip-safety is only an attribute of eggs. 
wheels are considered never\n # zip safe per implications of PEP 427.\n if hasattr(dist, 'egg_info') and dist.egg_info.endswith('EGG-INFO'):\n egg_metadata = dist.metadata_listdir('')\n return 'zip-safe' in egg_metadata and 'native_libs.txt' not in egg_metadata\n else:\n return False\n\n @classmethod\n def access_zipped_assets(cls, static_module_name, static_path, dir_location=None):\n \"\"\"\n Create a copy of static resource files as we can't serve them from within the pex file.\n\n :param static_module_name: Module name containing module to cache in a tempdir\n :type static_module_name: string, for example 'twitter.common.zookeeper' or similar\n :param static_path: Module name, for example 'serverset'\n :param dir_location: create a new temporary directory inside, or None to have one created\n :returns temp_dir: Temporary directory with the zipped assets inside\n :rtype: str\n \"\"\"\n\n # asset_path is initially a module name that's the same as the static_path, but will be\n # changed to walk the directory tree\n def walk_zipped_assets(static_module_name, static_path, asset_path, temp_dir):\n for asset in resource_listdir(static_module_name, asset_path):\n asset_target = os.path.normpath(\n os.path.join(os.path.relpath(asset_path, static_path), asset))\n if resource_isdir(static_module_name, os.path.join(asset_path, asset)):\n safe_mkdir(os.path.join(temp_dir, asset_target))\n walk_zipped_assets(static_module_name, static_path, os.path.join(asset_path, asset),\n temp_dir)\n else:\n with open(os.path.join(temp_dir, asset_target), 'wb') as fp:\n path = os.path.join(static_path, asset_target)\n file_data = resource_string(static_module_name, path)\n fp.write(file_data)\n\n if dir_location is None:\n temp_dir = safe_mkdtemp()\n else:\n temp_dir = dir_location\n\n walk_zipped_assets(static_module_name, static_path, static_path, temp_dir)\n\n return temp_dir\n\n @classmethod\n def distribution_from_path(cls, path, name=None):\n \"\"\"Return a distribution from a path.\n\n If name is provided, find the distribution. If none is found matching the name,\n return None. 
If name is not provided and there is unambiguously a single\n distribution, return that distribution otherwise None.\n \"\"\"\n # Monkeypatch pkg_resources finders should it not already be so.\n register_finders()\n if name is None:\n distributions = set(find_distributions(path))\n if len(distributions) == 1:\n return distributions.pop()\n else:\n for dist in find_distributions(path):\n if dist.project_name == name:\n return dist\n\n\nclass CacheHelper(object):\n @classmethod\n def update_hash(cls, filelike, digest):\n \"\"\"Update the digest of a single file in a memory-efficient manner.\"\"\"\n block_size = digest.block_size * 1024\n for chunk in iter(lambda: filelike.read(block_size), b''):\n digest.update(chunk)\n\n @classmethod\n def hash(cls, path, digest=None, hasher=sha1):\n \"\"\"Return the digest of a single file in a memory-efficient manner.\"\"\"\n if digest is None:\n digest = hasher()\n with open(path, 'rb') as fh:\n cls.update_hash(fh, digest)\n return digest.hexdigest()\n\n @classmethod\n def _compute_hash(cls, names, stream_factory):\n digest = sha1()\n # Always use / as the path separator, since that's what zip uses.\n hashed_names = [n.replace(os.sep, '/') for n in names]\n digest.update(''.join(hashed_names).encode('utf-8'))\n for name in names:\n with contextlib.closing(stream_factory(name)) as fp:\n cls.update_hash(fp, digest)\n return digest.hexdigest()\n\n @classmethod\n def zip_hash(cls, zf, prefix=''):\n \"\"\"Return the hash of the contents of a zipfile, comparable with a cls.dir_hash.\"\"\"\n prefix_length = len(prefix)\n names = sorted(name[prefix_length:] for name in zf.namelist()\n if name.startswith(prefix) and not name.endswith('.pyc') and not name.endswith('/'))\n def stream_factory(name):\n return zf.open(prefix + name)\n return cls._compute_hash(names, stream_factory)\n\n @classmethod\n def _iter_files(cls, directory):\n normpath = os.path.realpath(os.path.normpath(directory))\n for root, _, files in os.walk(normpath):\n for f in files:\n yield os.path.relpath(os.path.join(root, f), normpath)\n\n @classmethod\n def pex_hash(cls, d):\n \"\"\"Return a reproducible hash of the contents of a directory.\"\"\"\n names = sorted(f for f in cls._iter_files(d) if not (f.endswith('.pyc') or f.startswith('.')))\n def stream_factory(name):\n return open(os.path.join(d, name), 'rb') # noqa: T802\n return cls._compute_hash(names, stream_factory)\n\n @classmethod\n def dir_hash(cls, d):\n \"\"\"Return a reproducible hash of the contents of a directory.\"\"\"\n names = sorted(f for f in cls._iter_files(d) if not f.endswith('.pyc'))\n def stream_factory(name):\n return open(os.path.join(d, name), 'rb') # noqa: T802\n return cls._compute_hash(names, stream_factory)\n\n @classmethod\n def cache_distribution(cls, zf, source, target_dir):\n \"\"\"Possibly cache an egg from within a zipfile into target_cache.\n\n Given a zipfile handle and a filename corresponding to an egg distribution within\n that zip, maybe write to the target cache and return a Distribution.\"\"\"\n dependency_basename = os.path.basename(source)\n if not os.path.exists(target_dir):\n target_dir_tmp = target_dir + '.' 
+ uuid.uuid4().hex\n for name in zf.namelist():\n if name.startswith(source) and not name.endswith('/'):\n # strip off prefix + '/'\n target_name = os.path.join(dependency_basename, name[len(source) + 1:])\n with contextlib.closing(zf.open(name)) as zi:\n with safe_open(os.path.join(target_dir_tmp, target_name), 'wb') as fp:\n shutil.copyfileobj(zi, fp)\n\n rename_if_empty(target_dir_tmp, target_dir)\n\n dist = DistributionHelper.distribution_from_path(target_dir)\n assert dist is not None, 'Failed to cache distribution %s' % source\n return dist\n\n\nclass Memoizer(object):\n \"\"\"A thread safe class for memoizing the results of a computation.\"\"\"\n\n def __init__(self):\n self._data = {}\n self._lock = Lock()\n\n def get(self, key, default=None):\n with self._lock:\n return self._data.get(key, default)\n\n def store(self, key, value):\n with self._lock:\n self._data[key] = value\n\n\[email protected]\ndef named_temporary_file(*args, **kwargs):\n \"\"\"\n Due to a bug in python (https://bugs.python.org/issue14243), we need\n this to be able to use the temporary file without deleting it.\n \"\"\"\n assert 'delete' not in kwargs\n kwargs['delete'] = False\n fp = tempfile.NamedTemporaryFile(*args, **kwargs)\n try:\n with fp:\n yield fp\n finally:\n os.remove(fp.name)\n\n\ndef iter_pth_paths(filename):\n \"\"\"Given a .pth file, extract and yield all inner paths without honoring imports. This shadows\n python's site.py behavior, which is invoked at interpreter startup.\"\"\"\n try:\n f = open(filename, 'rU') # noqa\n except IOError:\n return\n\n dirname = os.path.dirname(filename)\n known_paths = set()\n\n with f:\n for line in f:\n line = line.rstrip()\n if not line or line.startswith('#'):\n continue\n elif line.startswith(('import ', 'import\\t')):\n try:\n exec_function(line, globals_map={})\n continue\n except Exception:\n # NB: import lines are routinely abused with extra code appended using `;` so the class of\n # exceptions that might be raised in broader than ImportError. 
As such we cacth broadly\n # here.\n\n # Defer error handling to the higher level site.py logic invoked at startup.\n return\n else:\n extras_dir, extras_dir_case_insensitive = makepath(dirname, line)\n if extras_dir_case_insensitive not in known_paths and os.path.exists(extras_dir):\n yield extras_dir\n known_paths.add(extras_dir_case_insensitive)\n\n\ndef merge_split(*paths):\n \"\"\"Merge paths into a single path delimited by colons and split on colons to return\n a list of paths.\n\n :param paths: a variable length list of path strings\n :return: a list of paths from the merged path list split by colons\n \"\"\"\n filtered_paths = filter(None, paths)\n return [p for p in ':'.join(filtered_paths).split(':') if p]\n", "path": "pex/util.py"}], "after_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import\n\nimport contextlib\nimport os\nimport tempfile\nimport uuid\nfrom hashlib import sha1\nfrom site import makepath\nfrom threading import Lock\n\nfrom pex.common import rename_if_empty, safe_mkdir, safe_mkdtemp\nfrom pex.compatibility import exec_function\nfrom pex.finders import register_finders\nfrom pex.third_party.pkg_resources import (\n find_distributions,\n resource_isdir,\n resource_listdir,\n resource_string\n)\n\n\nclass DistributionHelper(object):\n @classmethod\n def walk_data(cls, dist, path='/'):\n \"\"\"Yields filename, stream for files identified as data in the distribution\"\"\"\n for rel_fn in filter(None, dist.resource_listdir(path)):\n full_fn = os.path.join(path, rel_fn)\n if dist.resource_isdir(full_fn):\n for fn, stream in cls.walk_data(dist, full_fn):\n yield fn, stream\n else:\n yield full_fn[1:], dist.get_resource_stream(dist._provider, full_fn)\n\n @staticmethod\n def zipsafe(dist):\n \"\"\"Returns whether or not we determine a distribution is zip-safe.\"\"\"\n # zip-safety is only an attribute of eggs. 
wheels are considered never\n # zip safe per implications of PEP 427.\n if hasattr(dist, 'egg_info') and dist.egg_info.endswith('EGG-INFO'):\n egg_metadata = dist.metadata_listdir('')\n return 'zip-safe' in egg_metadata and 'native_libs.txt' not in egg_metadata\n else:\n return False\n\n @classmethod\n def access_zipped_assets(cls, static_module_name, static_path, dir_location=None):\n \"\"\"\n Create a copy of static resource files as we can't serve them from within the pex file.\n\n :param static_module_name: Module name containing module to cache in a tempdir\n :type static_module_name: string, for example 'twitter.common.zookeeper' or similar\n :param static_path: Module name, for example 'serverset'\n :param dir_location: create a new temporary directory inside, or None to have one created\n :returns temp_dir: Temporary directory with the zipped assets inside\n :rtype: str\n \"\"\"\n\n # asset_path is initially a module name that's the same as the static_path, but will be\n # changed to walk the directory tree\n def walk_zipped_assets(static_module_name, static_path, asset_path, temp_dir):\n for asset in resource_listdir(static_module_name, asset_path):\n asset_target = os.path.normpath(\n os.path.join(os.path.relpath(asset_path, static_path), asset))\n if resource_isdir(static_module_name, os.path.join(asset_path, asset)):\n safe_mkdir(os.path.join(temp_dir, asset_target))\n walk_zipped_assets(static_module_name, static_path, os.path.join(asset_path, asset),\n temp_dir)\n else:\n with open(os.path.join(temp_dir, asset_target), 'wb') as fp:\n path = os.path.join(static_path, asset_target)\n file_data = resource_string(static_module_name, path)\n fp.write(file_data)\n\n if dir_location is None:\n temp_dir = safe_mkdtemp()\n else:\n temp_dir = dir_location\n\n walk_zipped_assets(static_module_name, static_path, static_path, temp_dir)\n\n return temp_dir\n\n @classmethod\n def distribution_from_path(cls, path, name=None):\n \"\"\"Return a distribution from a path.\n\n If name is provided, find the distribution. If none is found matching the name,\n return None. 
If name is not provided and there is unambiguously a single\n distribution, return that distribution otherwise None.\n \"\"\"\n # Monkeypatch pkg_resources finders should it not already be so.\n register_finders()\n if name is None:\n distributions = set(find_distributions(path))\n if len(distributions) == 1:\n return distributions.pop()\n else:\n for dist in find_distributions(path):\n if dist.project_name == name:\n return dist\n\n\nclass CacheHelper(object):\n @classmethod\n def update_hash(cls, filelike, digest):\n \"\"\"Update the digest of a single file in a memory-efficient manner.\"\"\"\n block_size = digest.block_size * 1024\n for chunk in iter(lambda: filelike.read(block_size), b''):\n digest.update(chunk)\n\n @classmethod\n def hash(cls, path, digest=None, hasher=sha1):\n \"\"\"Return the digest of a single file in a memory-efficient manner.\"\"\"\n if digest is None:\n digest = hasher()\n with open(path, 'rb') as fh:\n cls.update_hash(fh, digest)\n return digest.hexdigest()\n\n @classmethod\n def _compute_hash(cls, names, stream_factory):\n digest = sha1()\n # Always use / as the path separator, since that's what zip uses.\n hashed_names = [n.replace(os.sep, '/') for n in names]\n digest.update(''.join(hashed_names).encode('utf-8'))\n for name in names:\n with contextlib.closing(stream_factory(name)) as fp:\n cls.update_hash(fp, digest)\n return digest.hexdigest()\n\n @classmethod\n def zip_hash(cls, zf, prefix=''):\n \"\"\"Return the hash of the contents of a zipfile, comparable with a cls.dir_hash.\"\"\"\n prefix_length = len(prefix)\n names = sorted(name[prefix_length:] for name in zf.namelist()\n if name.startswith(prefix) and not name.endswith('.pyc') and not name.endswith('/'))\n def stream_factory(name):\n return zf.open(prefix + name)\n return cls._compute_hash(names, stream_factory)\n\n @classmethod\n def _iter_files(cls, directory):\n normpath = os.path.realpath(os.path.normpath(directory))\n for root, _, files in os.walk(normpath):\n for f in files:\n yield os.path.relpath(os.path.join(root, f), normpath)\n\n @classmethod\n def pex_hash(cls, d):\n \"\"\"Return a reproducible hash of the contents of a directory.\"\"\"\n names = sorted(f for f in cls._iter_files(d) if not (f.endswith('.pyc') or f.startswith('.')))\n def stream_factory(name):\n return open(os.path.join(d, name), 'rb') # noqa: T802\n return cls._compute_hash(names, stream_factory)\n\n @classmethod\n def dir_hash(cls, d):\n \"\"\"Return a reproducible hash of the contents of a directory.\"\"\"\n names = sorted(f for f in cls._iter_files(d) if not f.endswith('.pyc'))\n def stream_factory(name):\n return open(os.path.join(d, name), 'rb') # noqa: T802\n return cls._compute_hash(names, stream_factory)\n\n @classmethod\n def cache_distribution(cls, zf, source, target_dir):\n \"\"\"Possibly cache an egg from within a zipfile into target_cache.\n\n Given a zipfile handle and a filename corresponding to an egg distribution within\n that zip, maybe write to the target cache and return a Distribution.\"\"\"\n dependency_basename = os.path.basename(source)\n if not os.path.exists(target_dir):\n target_dir_tmp = target_dir + '.' 
+ uuid.uuid4().hex\n for name in zf.namelist():\n if name.startswith(source) and not name.endswith('/'):\n zf.extract(name, target_dir_tmp)\n os.rename(os.path.join(target_dir_tmp, source),\n os.path.join(target_dir_tmp, dependency_basename))\n\n rename_if_empty(target_dir_tmp, target_dir)\n\n dist = DistributionHelper.distribution_from_path(target_dir)\n assert dist is not None, 'Failed to cache distribution %s' % source\n return dist\n\n\nclass Memoizer(object):\n \"\"\"A thread safe class for memoizing the results of a computation.\"\"\"\n\n def __init__(self):\n self._data = {}\n self._lock = Lock()\n\n def get(self, key, default=None):\n with self._lock:\n return self._data.get(key, default)\n\n def store(self, key, value):\n with self._lock:\n self._data[key] = value\n\n\[email protected]\ndef named_temporary_file(*args, **kwargs):\n \"\"\"\n Due to a bug in python (https://bugs.python.org/issue14243), we need\n this to be able to use the temporary file without deleting it.\n \"\"\"\n assert 'delete' not in kwargs\n kwargs['delete'] = False\n fp = tempfile.NamedTemporaryFile(*args, **kwargs)\n try:\n with fp:\n yield fp\n finally:\n os.remove(fp.name)\n\n\ndef iter_pth_paths(filename):\n \"\"\"Given a .pth file, extract and yield all inner paths without honoring imports. This shadows\n python's site.py behavior, which is invoked at interpreter startup.\"\"\"\n try:\n f = open(filename, 'rU') # noqa\n except IOError:\n return\n\n dirname = os.path.dirname(filename)\n known_paths = set()\n\n with f:\n for line in f:\n line = line.rstrip()\n if not line or line.startswith('#'):\n continue\n elif line.startswith(('import ', 'import\\t')):\n try:\n exec_function(line, globals_map={})\n continue\n except Exception:\n # NB: import lines are routinely abused with extra code appended using `;` so the class of\n # exceptions that might be raised in broader than ImportError. As such we cacth broadly\n # here.\n\n # Defer error handling to the higher level site.py logic invoked at startup.\n return\n else:\n extras_dir, extras_dir_case_insensitive = makepath(dirname, line)\n if extras_dir_case_insensitive not in known_paths and os.path.exists(extras_dir):\n yield extras_dir\n known_paths.add(extras_dir_case_insensitive)\n\n\ndef merge_split(*paths):\n \"\"\"Merge paths into a single path delimited by colons and split on colons to return\n a list of paths.\n\n :param paths: a variable length list of path strings\n :return: a list of paths from the merged path list split by colons\n \"\"\"\n filtered_paths = filter(None, paths)\n return [p for p in ':'.join(filtered_paths).split(':') if p]\n", "path": "pex/util.py"}]} | 3,325 | 317 |
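To make the accepted patch above easier to compare, here is a standalone sketch of the two extraction styles it swaps between, a manual stream copy versus `ZipFile.extract` (the archive name and destination are illustrative, not taken from pex):

```python
import contextlib
import os
import shutil
import zipfile

def copy_member_manually(zf, name, dest_dir):
    # Pre-patch style: open the member and stream its bytes to a new file.
    target = os.path.join(dest_dir, name)
    os.makedirs(os.path.dirname(target) or dest_dir, exist_ok=True)
    with contextlib.closing(zf.open(name)) as src, open(target, "wb") as dst:
        shutil.copyfileobj(src, dst)

def extract_member(zf, name, dest_dir):
    # Post-patch style: let ZipFile recreate the member's path itself.
    zf.extract(name, dest_dir)

with zipfile.ZipFile("example.zip") as zf:  # illustrative archive
    for member in zf.namelist():
        if not member.endswith("/"):
            extract_member(zf, member, "unpacked")
```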
gh_patches_debug_39354 | rasdani/github-patches | git_diff | marshmallow-code__webargs-509 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'Not a valid tuple.' when trying to use marshmallow fields.Tuple for argument validation
I'm trying to use the marshmallow fields.Tuple for querystring argument validation on a GET request using Flask. The issue I'm running into is that no matter what type of object I declare and no matter what I use in the request, I always get the default 'Not a valid tuple.' response. I have tried using tuples of size 1 and 2, and using fields.String and/or fields.Integer, etc., with the same result.
- I'm using Python 3.6.9 with these dependencies:
anyjson==0.3.3
apipkg==1.5
arrow==0.15.5
attrs==19.3.0
backports.functools-lru-cache==1.6.1
cassandra-driver==3.22.0
Cerberus==1.3.2
certifi==2019.11.28
cffi==1.13.2
chardet==3.0.4
click==7.1.1
execnet==1.7.1
Flask==1.1.1
Flask-Cors==3.0.8
funcsigs==1.0.2
futures==3.1.1
geomet==0.1.2
gevent==1.4.0
greenlet==0.4.13
gunicorn==20.0.4
idna==2.9
importlib-metadata==1.6.0
itsdangerous==1.1.0
Jinja2==2.11.1
jsonklog==0.15.0
MarkupSafe==1.1.1
marshmallow==3.5.1
neurolab==0.3.5
numpy==1.18.1
pluggy==0.13.1
py==1.8.1
pyaml==20.3.1
pymongo==3.10.1
pytest==3.3.0
pytest-forked==0.2
pytest-xdist==1.20.1
python-dateutil==2.8.1
PyYAML==5.3.1
readline==6.2.4.1
requests==2.23.0
six==1.14.0
urllib3==1.25.8
webargs==6.0.0
Werkzeug==1.0.0
zipp==3.1.0
- Here is an example of what I'm trying to do:
```
from flask import Flask
from webargs.flaskparser import parser, use_kwargs
from marshmallow import EXCLUDE, fields, Schema
app = Flask(__name__)
@app.errorhandler(422)
def custom_handler(error):
errors = []
if 'query' in error.data['messages']:
for arg in error.data['messages']['query']:
for item in error.data['messages']['query'][arg]:
errors.append(item)
return str(errors), 400
class test_schema(Schema):
class Meta:
unknown = EXCLUDE
strict = True
test_tup = fields.Tuple((fields.Integer(required=True), fields.Integer(required=True)), required=True)
@app.route('/test/', strict_slashes=False)
@parser.use_kwargs(test_schema, location='query')
def test_the_mallow(**kwargs):
return "True"
```
- Finally, here are a couple example url's I've tried:
localhost:2300/test/?test_tup=[0,0]
localhost:2300/test/?test_tup=(0,0)
localhost:2300/test/?test_tup=0,0
--- END ISSUE ---
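A likely explanation for the behavior described above is that each querystring parameter reaches marshmallow as a single string (e.g. `"0,0"` or `"[0,0]"`), which `fields.Tuple` will not split, hence the blanket 'Not a valid tuple.' error. A minimal workaround sketch using the existing `DelimitedList` field (route and schema names are illustrative; behavior assumed for webargs 6.x with marshmallow 3):

```python
from flask import Flask
from marshmallow import EXCLUDE, Schema
from webargs import fields
from webargs.flaskparser import use_kwargs

app = Flask(__name__)

class TestSchema(Schema):
    class Meta:
        unknown = EXCLUDE

    # Splits "0,0" on commas and validates each piece as an integer.
    test_tup = fields.DelimitedList(fields.Integer(), required=True)

@app.route("/test/", strict_slashes=False)
@use_kwargs(TestSchema(), location="query")
def test_the_mallow(test_tup):
    return str(test_tup)  # /test/?test_tup=0,0 -> "[0, 0]"
```

Unlike `fields.Tuple`, this accepts any number of comma-separated integers rather than exactly two; a fixed-size variant is what the patch below adds.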
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/webargs/fields.py`
Content:
```
1 """Field classes.
2
3 Includes all fields from `marshmallow.fields` in addition to a custom
4 `Nested` field and `DelimitedList`.
5
6 All fields can optionally take a special `location` keyword argument, which
7 tells webargs where to parse the request argument from.
8
9 .. code-block:: python
10
11 args = {
12 "active": fields.Bool(location="query"),
13 "content_type": fields.Str(data_key="Content-Type", location="headers"),
14 }
15
16 Note: `data_key` replaced `load_from` in marshmallow 3.
17 When using marshmallow 2, use `load_from`.
18 """
19 import marshmallow as ma
20
21 # Expose all fields from marshmallow.fields.
22 from marshmallow.fields import * # noqa: F40
23 from webargs.compat import MARSHMALLOW_VERSION_INFO
24 from webargs.dict2schema import dict2schema
25
26 __all__ = ["DelimitedList"] + ma.fields.__all__
27
28
29 class Nested(ma.fields.Nested):
30 """Same as `marshmallow.fields.Nested`, except can be passed a dictionary as
31 the first argument, which will be converted to a `marshmallow.Schema`.
32
33 .. note::
34
35 The schema class here will always be `marshmallow.Schema`, regardless
36 of whether a custom schema class is set on the parser. Pass an explicit schema
37 class if necessary.
38 """
39
40 def __init__(self, nested, *args, **kwargs):
41 if isinstance(nested, dict):
42 nested = dict2schema(nested)
43 super().__init__(nested, *args, **kwargs)
44
45
46 class DelimitedList(ma.fields.List):
47 """A field which is similar to a List, but takes its input as a delimited
48 string (e.g. "foo,bar,baz").
49
50 Like List, it can be given a nested field type which it will use to
51 de/serialize each element of the list.
52
53 :param Field cls_or_instance: A field class or instance.
54 :param str delimiter: Delimiter between values.
55 """
56
57 default_error_messages = {"invalid": "Not a valid delimited list."}
58 delimiter = ","
59
60 def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):
61 self.delimiter = delimiter or self.delimiter
62 super().__init__(cls_or_instance, **kwargs)
63
64 def _serialize(self, value, attr, obj):
65 # serializing will start with List serialization, so that we correctly
66 # output lists of non-primitive types, e.g. DelimitedList(DateTime)
67 return self.delimiter.join(
68 format(each) for each in super()._serialize(value, attr, obj)
69 )
70
71 def _deserialize(self, value, attr, data, **kwargs):
72 # attempting to deserialize from a non-string source is an error
73 if not isinstance(value, (str, bytes)):
74 if MARSHMALLOW_VERSION_INFO[0] < 3:
75 self.fail("invalid")
76 else:
77 raise self.make_error("invalid")
78 return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/webargs/fields.py b/src/webargs/fields.py
--- a/src/webargs/fields.py
+++ b/src/webargs/fields.py
@@ -43,26 +43,24 @@
super().__init__(nested, *args, **kwargs)
-class DelimitedList(ma.fields.List):
- """A field which is similar to a List, but takes its input as a delimited
- string (e.g. "foo,bar,baz").
+class DelimitedFieldMixin:
+ """
+ This is a mixin class for subclasses of ma.fields.List and ma.fields.Tuple
+ which split on a pre-specified delimiter. By default, the delimiter will be ","
- Like List, it can be given a nested field type which it will use to
- de/serialize each element of the list.
+ Because we want the MRO to reach this class before the List or Tuple class,
+ it must be listed first in the superclasses
- :param Field cls_or_instance: A field class or instance.
- :param str delimiter: Delimiter between values.
+ For example, a DelimitedList-like type can be defined like so:
+
+ >>> class MyDelimitedList(DelimitedFieldMixin, ma.fields.List):
+ >>> pass
"""
- default_error_messages = {"invalid": "Not a valid delimited list."}
delimiter = ","
- def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):
- self.delimiter = delimiter or self.delimiter
- super().__init__(cls_or_instance, **kwargs)
-
def _serialize(self, value, attr, obj):
- # serializing will start with List serialization, so that we correctly
+ # serializing will start with parent-class serialization, so that we correctly
# output lists of non-primitive types, e.g. DelimitedList(DateTime)
return self.delimiter.join(
format(each) for each in super()._serialize(value, attr, obj)
@@ -76,3 +74,45 @@
else:
raise self.make_error("invalid")
return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)
+
+
+class DelimitedList(DelimitedFieldMixin, ma.fields.List):
+ """A field which is similar to a List, but takes its input as a delimited
+ string (e.g. "foo,bar,baz").
+
+ Like List, it can be given a nested field type which it will use to
+ de/serialize each element of the list.
+
+ :param Field cls_or_instance: A field class or instance.
+ :param str delimiter: Delimiter between values.
+ """
+
+ default_error_messages = {"invalid": "Not a valid delimited list."}
+ delimiter = ","
+
+ def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):
+ self.delimiter = delimiter or self.delimiter
+ super().__init__(cls_or_instance, **kwargs)
+
+
+# DelimitedTuple can only be defined when using marshmallow3, when Tuple was
+# added
+if MARSHMALLOW_VERSION_INFO[0] >= 3:
+
+ class DelimitedTuple(DelimitedFieldMixin, ma.fields.Tuple):
+ """A field which is similar to a Tuple, but takes its input as a delimited
+ string (e.g. "foo,bar,baz").
+
+ Like Tuple, it can be given a tuple of nested field types which it will use to
+ de/serialize each element of the tuple.
+
+ :param Iterable[Field] tuple_fields: An iterable of field classes or instances.
+ :param str delimiter: Delimiter between values.
+ """
+
+ default_error_messages = {"invalid": "Not a valid delimited tuple."}
+ delimiter = ","
+
+ def __init__(self, tuple_fields, *, delimiter=None, **kwargs):
+ self.delimiter = delimiter or self.delimiter
+ super().__init__(tuple_fields, **kwargs)
| {"golden_diff": "diff --git a/src/webargs/fields.py b/src/webargs/fields.py\n--- a/src/webargs/fields.py\n+++ b/src/webargs/fields.py\n@@ -43,26 +43,24 @@\n super().__init__(nested, *args, **kwargs)\n \n \n-class DelimitedList(ma.fields.List):\n- \"\"\"A field which is similar to a List, but takes its input as a delimited\n- string (e.g. \"foo,bar,baz\").\n+class DelimitedFieldMixin:\n+ \"\"\"\n+ This is a mixin class for subclasses of ma.fields.List and ma.fields.Tuple\n+ which split on a pre-specified delimiter. By default, the delimiter will be \",\"\n \n- Like List, it can be given a nested field type which it will use to\n- de/serialize each element of the list.\n+ Because we want the MRO to reach this class before the List or Tuple class,\n+ it must be listed first in the superclasses\n \n- :param Field cls_or_instance: A field class or instance.\n- :param str delimiter: Delimiter between values.\n+ For example, a DelimitedList-like type can be defined like so:\n+\n+ >>> class MyDelimitedList(DelimitedFieldMixin, ma.fields.List):\n+ >>> pass\n \"\"\"\n \n- default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n delimiter = \",\"\n \n- def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):\n- self.delimiter = delimiter or self.delimiter\n- super().__init__(cls_or_instance, **kwargs)\n-\n def _serialize(self, value, attr, obj):\n- # serializing will start with List serialization, so that we correctly\n+ # serializing will start with parent-class serialization, so that we correctly\n # output lists of non-primitive types, e.g. DelimitedList(DateTime)\n return self.delimiter.join(\n format(each) for each in super()._serialize(value, attr, obj)\n@@ -76,3 +74,45 @@\n else:\n raise self.make_error(\"invalid\")\n return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)\n+\n+\n+class DelimitedList(DelimitedFieldMixin, ma.fields.List):\n+ \"\"\"A field which is similar to a List, but takes its input as a delimited\n+ string (e.g. \"foo,bar,baz\").\n+\n+ Like List, it can be given a nested field type which it will use to\n+ de/serialize each element of the list.\n+\n+ :param Field cls_or_instance: A field class or instance.\n+ :param str delimiter: Delimiter between values.\n+ \"\"\"\n+\n+ default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n+ delimiter = \",\"\n+\n+ def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):\n+ self.delimiter = delimiter or self.delimiter\n+ super().__init__(cls_or_instance, **kwargs)\n+\n+\n+# DelimitedTuple can only be defined when using marshmallow3, when Tuple was\n+# added\n+if MARSHMALLOW_VERSION_INFO[0] >= 3:\n+\n+ class DelimitedTuple(DelimitedFieldMixin, ma.fields.Tuple):\n+ \"\"\"A field which is similar to a Tuple, but takes its input as a delimited\n+ string (e.g. \"foo,bar,baz\").\n+\n+ Like Tuple, it can be given a tuple of nested field types which it will use to\n+ de/serialize each element of the tuple.\n+\n+ :param Iterable[Field] tuple_fields: An iterable of field classes or instances.\n+ :param str delimiter: Delimiter between values.\n+ \"\"\"\n+\n+ default_error_messages = {\"invalid\": \"Not a valid delimited tuple.\"}\n+ delimiter = \",\"\n+\n+ def __init__(self, tuple_fields, *, delimiter=None, **kwargs):\n+ self.delimiter = delimiter or self.delimiter\n+ super().__init__(tuple_fields, **kwargs)\n", "issue": "'Not a valid tuple.' 
when trying to use marshmallow fields.Tuple for argument validation\nI'm trying to use the marshmallow fields.Tuple for querystring argument validation on a GET request using Flask. The issue I'm running into is that no matter what type of object I declare and no matter what I use in the request, I always get the default 'Not a valid tuple.' response. I have tried using a tuple of size 1 and 2; using fields.String and/or fields.Integer, etc with the same result.\r\n\r\n- I'm using Python 3.6.9 with these dependencies:\r\nanyjson==0.3.3\r\napipkg==1.5\r\narrow==0.15.5\r\nattrs==19.3.0\r\nbackports.functools-lru-cache==1.6.1\r\ncassandra-driver==3.22.0\r\nCerberus==1.3.2\r\ncertifi==2019.11.28\r\ncffi==1.13.2\r\nchardet==3.0.4\r\nclick==7.1.1\r\nexecnet==1.7.1\r\nFlask==1.1.1\r\nFlask-Cors==3.0.8\r\nfuncsigs==1.0.2\r\nfutures==3.1.1\r\ngeomet==0.1.2\r\ngevent==1.4.0\r\ngreenlet==0.4.13\r\ngunicorn==20.0.4\r\nidna==2.9\r\nimportlib-metadata==1.6.0\r\nitsdangerous==1.1.0\r\nJinja2==2.11.1\r\njsonklog==0.15.0\r\nMarkupSafe==1.1.1\r\nmarshmallow==3.5.1\r\nneurolab==0.3.5\r\nnumpy==1.18.1\r\npluggy==0.13.1\r\npy==1.8.1\r\npyaml==20.3.1\r\npymongo==3.10.1\r\npytest==3.3.0\r\npytest-forked==0.2\r\npytest-xdist==1.20.1\r\npython-dateutil==2.8.1\r\nPyYAML==5.3.1\r\nreadline==6.2.4.1\r\nrequests==2.23.0\r\nsix==1.14.0\r\nurllib3==1.25.8\r\nwebargs==6.0.0\r\nWerkzeug==1.0.0\r\nzipp==3.1.0\r\n\r\n- Here is an example of what I'm trying to do:\r\n```\r\nfrom flask import Flask\r\nfrom webargs.flaskparser import parser, use_kwargs\r\nfrom marshmallow import EXCLUDE, fields, Schema\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n\r\[email protected](422)\r\ndef custom_handler(error):\r\n errors = []\r\n if 'query' in error.data['messages']:\r\n for arg in error.data['messages']['query']:\r\n for item in error.data['messages']['query'][arg]:\r\n errors.append(item)\r\n return str(errors), 400\r\n\r\n\r\nclass test_schema(Schema):\r\n class Meta:\r\n unknown = EXCLUDE\r\n strict = True\r\n \r\n test_tup = fields.Tuple((fields.Integer(required=True), fields.Integer(required=True)), required=True)\r\n\r\n\r\[email protected]('/test/', strict_slashes=False)\r\[email protected]_kwargs(test_schema, location='query')\r\ndef test_the_mallow(**kwargs):\r\n return \"True\"\r\n```\r\n\r\n- Finally, here are a couple example url's I've tried:\r\n localhost:2300/test/?test_tup=[0,0]\r\n localhost:2300/test/?test_tup=(0,0)\r\n localhost:2300/test/?test_tup=0,0\r\n\n", "before_files": [{"content": "\"\"\"Field classes.\n\nIncludes all fields from `marshmallow.fields` in addition to a custom\n`Nested` field and `DelimitedList`.\n\nAll fields can optionally take a special `location` keyword argument, which\ntells webargs where to parse the request argument from.\n\n.. code-block:: python\n\n args = {\n \"active\": fields.Bool(location=\"query\"),\n \"content_type\": fields.Str(data_key=\"Content-Type\", location=\"headers\"),\n }\n\nNote: `data_key` replaced `load_from` in marshmallow 3.\nWhen using marshmallow 2, use `load_from`.\n\"\"\"\nimport marshmallow as ma\n\n# Expose all fields from marshmallow.fields.\nfrom marshmallow.fields import * # noqa: F40\nfrom webargs.compat import MARSHMALLOW_VERSION_INFO\nfrom webargs.dict2schema import dict2schema\n\n__all__ = [\"DelimitedList\"] + ma.fields.__all__\n\n\nclass Nested(ma.fields.Nested):\n \"\"\"Same as `marshmallow.fields.Nested`, except can be passed a dictionary as\n the first argument, which will be converted to a `marshmallow.Schema`.\n\n .. 
note::\n\n The schema class here will always be `marshmallow.Schema`, regardless\n of whether a custom schema class is set on the parser. Pass an explicit schema\n class if necessary.\n \"\"\"\n\n def __init__(self, nested, *args, **kwargs):\n if isinstance(nested, dict):\n nested = dict2schema(nested)\n super().__init__(nested, *args, **kwargs)\n\n\nclass DelimitedList(ma.fields.List):\n \"\"\"A field which is similar to a List, but takes its input as a delimited\n string (e.g. \"foo,bar,baz\").\n\n Like List, it can be given a nested field type which it will use to\n de/serialize each element of the list.\n\n :param Field cls_or_instance: A field class or instance.\n :param str delimiter: Delimiter between values.\n \"\"\"\n\n default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n delimiter = \",\"\n\n def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):\n self.delimiter = delimiter or self.delimiter\n super().__init__(cls_or_instance, **kwargs)\n\n def _serialize(self, value, attr, obj):\n # serializing will start with List serialization, so that we correctly\n # output lists of non-primitive types, e.g. DelimitedList(DateTime)\n return self.delimiter.join(\n format(each) for each in super()._serialize(value, attr, obj)\n )\n\n def _deserialize(self, value, attr, data, **kwargs):\n # attempting to deserialize from a non-string source is an error\n if not isinstance(value, (str, bytes)):\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n self.fail(\"invalid\")\n else:\n raise self.make_error(\"invalid\")\n return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)\n", "path": "src/webargs/fields.py"}], "after_files": [{"content": "\"\"\"Field classes.\n\nIncludes all fields from `marshmallow.fields` in addition to a custom\n`Nested` field and `DelimitedList`.\n\nAll fields can optionally take a special `location` keyword argument, which\ntells webargs where to parse the request argument from.\n\n.. code-block:: python\n\n args = {\n \"active\": fields.Bool(location=\"query\"),\n \"content_type\": fields.Str(data_key=\"Content-Type\", location=\"headers\"),\n }\n\nNote: `data_key` replaced `load_from` in marshmallow 3.\nWhen using marshmallow 2, use `load_from`.\n\"\"\"\nimport marshmallow as ma\n\n# Expose all fields from marshmallow.fields.\nfrom marshmallow.fields import * # noqa: F40\nfrom webargs.compat import MARSHMALLOW_VERSION_INFO\nfrom webargs.dict2schema import dict2schema\n\n__all__ = [\"DelimitedList\"] + ma.fields.__all__\n\n\nclass Nested(ma.fields.Nested):\n \"\"\"Same as `marshmallow.fields.Nested`, except can be passed a dictionary as\n the first argument, which will be converted to a `marshmallow.Schema`.\n\n .. note::\n\n The schema class here will always be `marshmallow.Schema`, regardless\n of whether a custom schema class is set on the parser. Pass an explicit schema\n class if necessary.\n \"\"\"\n\n def __init__(self, nested, *args, **kwargs):\n if isinstance(nested, dict):\n nested = dict2schema(nested)\n super().__init__(nested, *args, **kwargs)\n\n\nclass DelimitedFieldMixin:\n \"\"\"\n This is a mixin class for subclasses of ma.fields.List and ma.fields.Tuple\n which split on a pre-specified delimiter. 
By default, the delimiter will be \",\"\n\n Because we want the MRO to reach this class before the List or Tuple class,\n it must be listed first in the superclasses\n\n For example, a DelimitedList-like type can be defined like so:\n\n >>> class MyDelimitedList(DelimitedFieldMixin, ma.fields.List):\n >>> pass\n \"\"\"\n\n delimiter = \",\"\n\n def _serialize(self, value, attr, obj):\n # serializing will start with parent-class serialization, so that we correctly\n # output lists of non-primitive types, e.g. DelimitedList(DateTime)\n return self.delimiter.join(\n format(each) for each in super()._serialize(value, attr, obj)\n )\n\n def _deserialize(self, value, attr, data, **kwargs):\n # attempting to deserialize from a non-string source is an error\n if not isinstance(value, (str, bytes)):\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n self.fail(\"invalid\")\n else:\n raise self.make_error(\"invalid\")\n return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)\n\n\nclass DelimitedList(DelimitedFieldMixin, ma.fields.List):\n \"\"\"A field which is similar to a List, but takes its input as a delimited\n string (e.g. \"foo,bar,baz\").\n\n Like List, it can be given a nested field type which it will use to\n de/serialize each element of the list.\n\n :param Field cls_or_instance: A field class or instance.\n :param str delimiter: Delimiter between values.\n \"\"\"\n\n default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n delimiter = \",\"\n\n def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):\n self.delimiter = delimiter or self.delimiter\n super().__init__(cls_or_instance, **kwargs)\n\n\n# DelimitedTuple can only be defined when using marshmallow3, when Tuple was\n# added\nif MARSHMALLOW_VERSION_INFO[0] >= 3:\n\n class DelimitedTuple(DelimitedFieldMixin, ma.fields.Tuple):\n \"\"\"A field which is similar to a Tuple, but takes its input as a delimited\n string (e.g. \"foo,bar,baz\").\n\n Like Tuple, it can be given a tuple of nested field types which it will use to\n de/serialize each element of the tuple.\n\n :param Iterable[Field] tuple_fields: An iterable of field classes or instances.\n :param str delimiter: Delimiter between values.\n \"\"\"\n\n default_error_messages = {\"invalid\": \"Not a valid delimited tuple.\"}\n delimiter = \",\"\n\n def __init__(self, tuple_fields, *, delimiter=None, **kwargs):\n self.delimiter = delimiter or self.delimiter\n super().__init__(tuple_fields, **kwargs)\n", "path": "src/webargs/fields.py"}]} | 1,902 | 889 |
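With the `DelimitedTuple` field introduced by the patch above, the fixed-size pair from the original report can be expressed directly. A small sketch (assumes marshmallow 3 and a webargs release containing the patch):

```python
from marshmallow import Schema
from webargs import fields  # assumes a version that includes DelimitedTuple

class PairSchema(Schema):
    # "0,0" -> (0, 0); a wrong length or a non-integer piece fails validation.
    test_tup = fields.DelimitedTuple(
        (fields.Integer(), fields.Integer()), required=True
    )

print(PairSchema().load({"test_tup": "0,0"}))  # {'test_tup': (0, 0)}
```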
gh_patches_debug_7758 | rasdani/github-patches | git_diff | CTFd__CTFd-1934 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error 500 when visiting /admin/users/1 - AttributeError: 'NoneType' object has no attribute 'get_score'
**Environment**:
- CTFd Version/Commit: HEAD
- Operating System: Docker image based off official Dockerfile
- Web Browser and Version: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1 Safari/605.1.15
**What happened?**
500 Internal server error
**What did you expect to happen?**
Show the admin user details when in team mode
**How to reproduce your issue**
* visited `/admin/users/1`
* this seems to be due to the fact that, when in team mode, the admin user does not belong to any team, so the `account` property shown below returns `None`
```python
@hybrid_property
def account(self):
from CTFd.utils import get_config
user_mode = get_config("user_mode")
if user_mode == "teams":
return self.team
elif user_mode == "users":
return self
```
**Any associated stack traces or error logs**
```
ERROR [CTFd] Exception on /admin/users/1 [GET]
--
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask_restx/api.py", line 639, in error_router
return original_handler(e)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/CTFd/CTFd/utils/decorators/__init__.py", line 133, in admins_only_wrapper
return f(*args, **kwargs)
File "/opt/CTFd/CTFd/admin/users.py", line 91, in users_detail
score = user.account.get_score(admin=True)
AttributeError: 'NoneType' object has no attribute 'get_score'
```
--- END ISSUE ---
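The traceback above reduces to calling a method on `None`. The following framework-free sketch reproduces that failure and shows the guard that avoids it (class and attribute names are illustrative stand-ins, not CTFd's actual models):

```python
class FakeUser:
    """Stand-in for a CTFd user; a team-mode admin belongs to no team."""
    team = None

    @property
    def account(self):
        user_mode = "teams"  # pretend the instance is running in team mode
        return self.team if user_mode == "teams" else self

user = FakeUser()

# Unguarded access reproduces the 500:
try:
    user.account.get_score(admin=True)
except AttributeError as exc:
    print(exc)  # 'NoneType' object has no attribute 'get_score'

# Guarded access degrades gracefully instead:
score = user.account.get_score(admin=True) if user.account else None
print(score)  # None
```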
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/admin/users.py`
Content:
```
1 from flask import render_template, request, url_for
2 from sqlalchemy.sql import not_
3
4 from CTFd.admin import admin
5 from CTFd.models import Challenges, Tracking, Users
6 from CTFd.utils import get_config
7 from CTFd.utils.decorators import admins_only
8 from CTFd.utils.modes import TEAMS_MODE
9
10
11 @admin.route("/admin/users")
12 @admins_only
13 def users_listing():
14 q = request.args.get("q")
15 field = request.args.get("field")
16 page = abs(request.args.get("page", 1, type=int))
17 filters = []
18 users = []
19
20 if q:
21 # The field exists as an exposed column
22 if Users.__mapper__.has_property(field):
23 filters.append(getattr(Users, field).like("%{}%".format(q)))
24
25 if q and field == "ip":
26 users = (
27 Users.query.join(Tracking, Users.id == Tracking.user_id)
28 .filter(Tracking.ip.like("%{}%".format(q)))
29 .order_by(Users.id.asc())
30 .paginate(page=page, per_page=50)
31 )
32 else:
33 users = (
34 Users.query.filter(*filters)
35 .order_by(Users.id.asc())
36 .paginate(page=page, per_page=50)
37 )
38
39 args = dict(request.args)
40 args.pop("page", 1)
41
42 return render_template(
43 "admin/users/users.html",
44 users=users,
45 prev_page=url_for(request.endpoint, page=users.prev_num, **args),
46 next_page=url_for(request.endpoint, page=users.next_num, **args),
47 q=q,
48 field=field,
49 )
50
51
52 @admin.route("/admin/users/new")
53 @admins_only
54 def users_new():
55 return render_template("admin/users/new.html")
56
57
58 @admin.route("/admin/users/<int:user_id>")
59 @admins_only
60 def users_detail(user_id):
61 # Get user object
62 user = Users.query.filter_by(id=user_id).first_or_404()
63
64 # Get the user's solves
65 solves = user.get_solves(admin=True)
66
67 # Get challenges that the user is missing
68 if get_config("user_mode") == TEAMS_MODE:
69 if user.team:
70 all_solves = user.team.get_solves(admin=True)
71 else:
72 all_solves = user.get_solves(admin=True)
73 else:
74 all_solves = user.get_solves(admin=True)
75
76 solve_ids = [s.challenge_id for s in all_solves]
77 missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()
78
79 # Get IP addresses that the User has used
80 addrs = (
81 Tracking.query.filter_by(user_id=user_id).order_by(Tracking.date.desc()).all()
82 )
83
84 # Get Fails
85 fails = user.get_fails(admin=True)
86
87 # Get Awards
88 awards = user.get_awards(admin=True)
89
90 # Get user properties
91 score = user.account.get_score(admin=True)
92 place = user.account.get_place(admin=True)
93
94 return render_template(
95 "admin/users/user.html",
96 solves=solves,
97 user=user,
98 addrs=addrs,
99 score=score,
100 missing=missing,
101 place=place,
102 fails=fails,
103 awards=awards,
104 )
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/admin/users.py b/CTFd/admin/users.py
--- a/CTFd/admin/users.py
+++ b/CTFd/admin/users.py
@@ -87,9 +87,14 @@
# Get Awards
awards = user.get_awards(admin=True)
- # Get user properties
- score = user.account.get_score(admin=True)
- place = user.account.get_place(admin=True)
+ # Check if the user has an account (team or user)
+ # so that we don't throw an error if they dont
+ if user.account:
+ score = user.account.get_score(admin=True)
+ place = user.account.get_place(admin=True)
+ else:
+ score = None
+ place = None
return render_template(
"admin/users/user.html",
| {"golden_diff": "diff --git a/CTFd/admin/users.py b/CTFd/admin/users.py\n--- a/CTFd/admin/users.py\n+++ b/CTFd/admin/users.py\n@@ -87,9 +87,14 @@\n # Get Awards\n awards = user.get_awards(admin=True)\n \n- # Get user properties\n- score = user.account.get_score(admin=True)\n- place = user.account.get_place(admin=True)\n+ # Check if the user has an account (team or user)\n+ # so that we don't throw an error if they dont\n+ if user.account:\n+ score = user.account.get_score(admin=True)\n+ place = user.account.get_place(admin=True)\n+ else:\n+ score = None\n+ place = None\n \n return render_template(\n \"admin/users/user.html\",\n", "issue": "Error 500 when visiting /admin/users/1 - AttributeError: 'NoneType' object has no attribute 'get_score'\n**Environment**:\r\n\r\n- CTFd Version/Commit: HEAD\r\n- Operating System: Docker image based off official Dockerfile\r\n- Web Browser and Version: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1 Safari/605.1.15\r\n\r\n**What happened?**\r\n500 Internal server error\r\n\r\n**What did you expect to happen?**\r\nShow the admin user details when in team mode\r\n\r\n**How to reproduce your issue**\r\n* visited the `/admin/users/1`\r\n* this seems due to the fact that, when in team mode, the admin user does not belong to any team and, for some reason, this one returns `None`\r\n\r\n```python\r\n @hybrid_property\r\n def account(self):\r\n from CTFd.utils import get_config\r\n\r\n user_mode = get_config(\"user_mode\")\r\n if user_mode == \"teams\":\r\n return self.team\r\n elif user_mode == \"users\":\r\n return self\r\n```\r\n\r\n**Any associated stack traces or error logs**\r\n```\r\nERROR [CTFd] Exception on /admin/users/1 [GET]\r\n--\r\nTraceback (most recent call last):\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/app.py\", line 2447, in wsgi_app\r\nresponse = self.full_dispatch_request()\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/app.py\", line 1952, in full_dispatch_request\r\nrv = self.handle_user_exception(e)\r\nFile \"/usr/local/lib/python3.7/site-packages/flask_restx/api.py\", line 639, in error_router\r\nreturn original_handler(e)\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/app.py\", line 1821, in handle_user_exception\r\nreraise(exc_type, exc_value, tb)\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/_compat.py\", line 39, in reraise\r\nraise value\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/app.py\", line 1950, in full_dispatch_request\r\nrv = self.dispatch_request()\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/app.py\", line 1936, in dispatch_request\r\nreturn self.view_functions[rule.endpoint](**req.view_args)\r\nFile \"/opt/CTFd/CTFd/utils/decorators/__init__.py\", line 133, in admins_only_wrapper\r\nreturn f(*args, **kwargs)\r\nFile \"/opt/CTFd/CTFd/admin/users.py\", line 91, in users_detail\r\nscore = user.account.get_score(admin=True)\r\nAttributeError: 'NoneType' object has no attribute 'get_score'\r\n```\r\n\n", "before_files": [{"content": "from flask import render_template, request, url_for\nfrom sqlalchemy.sql import not_\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Challenges, Tracking, Users\nfrom CTFd.utils import get_config\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.modes import TEAMS_MODE\n\n\[email protected](\"/admin/users\")\n@admins_only\ndef users_listing():\n q = request.args.get(\"q\")\n field = request.args.get(\"field\")\n page = abs(request.args.get(\"page\", 1, 
type=int))\n filters = []\n users = []\n\n if q:\n # The field exists as an exposed column\n if Users.__mapper__.has_property(field):\n filters.append(getattr(Users, field).like(\"%{}%\".format(q)))\n\n if q and field == \"ip\":\n users = (\n Users.query.join(Tracking, Users.id == Tracking.user_id)\n .filter(Tracking.ip.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .paginate(page=page, per_page=50)\n )\n else:\n users = (\n Users.query.filter(*filters)\n .order_by(Users.id.asc())\n .paginate(page=page, per_page=50)\n )\n\n args = dict(request.args)\n args.pop(\"page\", 1)\n\n return render_template(\n \"admin/users/users.html\",\n users=users,\n prev_page=url_for(request.endpoint, page=users.prev_num, **args),\n next_page=url_for(request.endpoint, page=users.next_num, **args),\n q=q,\n field=field,\n )\n\n\[email protected](\"/admin/users/new\")\n@admins_only\ndef users_new():\n return render_template(\"admin/users/new.html\")\n\n\[email protected](\"/admin/users/<int:user_id>\")\n@admins_only\ndef users_detail(user_id):\n # Get user object\n user = Users.query.filter_by(id=user_id).first_or_404()\n\n # Get the user's solves\n solves = user.get_solves(admin=True)\n\n # Get challenges that the user is missing\n if get_config(\"user_mode\") == TEAMS_MODE:\n if user.team:\n all_solves = user.team.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n\n solve_ids = [s.challenge_id for s in all_solves]\n missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()\n\n # Get IP addresses that the User has used\n addrs = (\n Tracking.query.filter_by(user_id=user_id).order_by(Tracking.date.desc()).all()\n )\n\n # Get Fails\n fails = user.get_fails(admin=True)\n\n # Get Awards\n awards = user.get_awards(admin=True)\n\n # Get user properties\n score = user.account.get_score(admin=True)\n place = user.account.get_place(admin=True)\n\n return render_template(\n \"admin/users/user.html\",\n solves=solves,\n user=user,\n addrs=addrs,\n score=score,\n missing=missing,\n place=place,\n fails=fails,\n awards=awards,\n )\n", "path": "CTFd/admin/users.py"}], "after_files": [{"content": "from flask import render_template, request, url_for\nfrom sqlalchemy.sql import not_\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Challenges, Tracking, Users\nfrom CTFd.utils import get_config\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.modes import TEAMS_MODE\n\n\[email protected](\"/admin/users\")\n@admins_only\ndef users_listing():\n q = request.args.get(\"q\")\n field = request.args.get(\"field\")\n page = abs(request.args.get(\"page\", 1, type=int))\n filters = []\n users = []\n\n if q:\n # The field exists as an exposed column\n if Users.__mapper__.has_property(field):\n filters.append(getattr(Users, field).like(\"%{}%\".format(q)))\n\n if q and field == \"ip\":\n users = (\n Users.query.join(Tracking, Users.id == Tracking.user_id)\n .filter(Tracking.ip.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .paginate(page=page, per_page=50)\n )\n else:\n users = (\n Users.query.filter(*filters)\n .order_by(Users.id.asc())\n .paginate(page=page, per_page=50)\n )\n\n args = dict(request.args)\n args.pop(\"page\", 1)\n\n return render_template(\n \"admin/users/users.html\",\n users=users,\n prev_page=url_for(request.endpoint, page=users.prev_num, **args),\n next_page=url_for(request.endpoint, page=users.next_num, **args),\n q=q,\n field=field,\n )\n\n\[email 
protected](\"/admin/users/new\")\n@admins_only\ndef users_new():\n return render_template(\"admin/users/new.html\")\n\n\[email protected](\"/admin/users/<int:user_id>\")\n@admins_only\ndef users_detail(user_id):\n # Get user object\n user = Users.query.filter_by(id=user_id).first_or_404()\n\n # Get the user's solves\n solves = user.get_solves(admin=True)\n\n # Get challenges that the user is missing\n if get_config(\"user_mode\") == TEAMS_MODE:\n if user.team:\n all_solves = user.team.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n\n solve_ids = [s.challenge_id for s in all_solves]\n missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()\n\n # Get IP addresses that the User has used\n addrs = (\n Tracking.query.filter_by(user_id=user_id).order_by(Tracking.date.desc()).all()\n )\n\n # Get Fails\n fails = user.get_fails(admin=True)\n\n # Get Awards\n awards = user.get_awards(admin=True)\n\n # Check if the user has an account (team or user)\n # so that we don't throw an error if they dont\n if user.account:\n score = user.account.get_score(admin=True)\n place = user.account.get_place(admin=True)\n else:\n score = None\n place = None\n\n return render_template(\n \"admin/users/user.html\",\n solves=solves,\n user=user,\n addrs=addrs,\n score=score,\n missing=missing,\n place=place,\n fails=fails,\n awards=awards,\n )\n", "path": "CTFd/admin/users.py"}]} | 1,802 | 180 |
gh_patches_debug_29719 | rasdani/github-patches | git_diff | vispy__vispy-823 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Text is misplaced in scene/console example

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/basics/scene/console.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # vispy: gallery 30
3 # -----------------------------------------------------------------------------
4 # Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
5 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
6 # -----------------------------------------------------------------------------
7 """
8 Demonstrate the use of the vispy console. Note how the console size is
9 independent of the canvas scaling.
10 """
11 import sys
12
13 from vispy import scene, app
14 from vispy.scene.widgets import Console
15 from vispy.scene.visuals import Text
16
17 canvas = scene.SceneCanvas(keys='interactive', size=(400, 400))
18 grid = canvas.central_widget.add_grid()
19
20 vb = scene.widgets.ViewBox(border_color='b')
21 vb.camera.rect = -1, -1, 2, 2
22 grid.add_widget(vb, row=0, col=0)
23 text = Text('Starting timer...', color='w', font_size=24, parent=vb.scene)
24
25 console = Console(text_color='g', font_size=12., border_color='g')
26 grid.add_widget(console, row=1, col=0)
27
28
29 def on_timer(event):
30 text.text = 'Tick #%s' % event.iteration
31 if event.iteration > 1 and event.iteration % 10 == 0:
32 console.clear()
33 console.write('Elapsed:\n %s' % event.elapsed)
34 canvas.update()
35
36 timer = app.Timer(2.0, connect=on_timer, start=True)
37
38 console.write('This is a line that will be wrapped automatically by the '
39 'console.\n')
40 console.write('This line will be truncated ....................,\n'
41 'but this next line will survive.\n', wrap=False)
42
43 if __name__ == '__main__':
44 canvas.show()
45 if sys.flags.interactive != 1:
46 canvas.app.run()
47
```
Path: `examples/basics/scene/grid_large.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # vispy: testskip # disabled due to segfaults on travis
3 # -----------------------------------------------------------------------------
4 # Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
5 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
6 # -----------------------------------------------------------------------------
7 """
8 Test automatic layout of multiple viewboxes using Grid.
9 """
10
11 import sys
12 from vispy import scene
13 from vispy import app
14 import numpy as np
15
16 canvas = scene.SceneCanvas(keys='interactive')
17 canvas.size = 600, 600
18 canvas.show()
19
20 grid = canvas.central_widget.add_grid()
21
22
23 N = 10000
24 lines = []
25 for i in range(10):
26 lines.append([])
27 for j in range(10):
28 vb = grid.add_view(row=i, col=j)
29 vb.camera.rect = (0, -5), (100, 10)
30 vb.border = (1, 1, 1, 0.4)
31
32 pos = np.empty((N, 2), dtype=np.float32)
33 pos[:, 0] = np.linspace(0, 100, N)
34 pos[:, 1] = np.random.normal(size=N)
35 line = scene.visuals.Line(pos=pos, color=(1, 1, 1, 0.5), mode='gl')
36 vb.add(line)
37
38
39 if __name__ == '__main__' and sys.flags.interactive == 0:
40 app.run()
41
```
Path: `examples/basics/scene/grid.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # vispy: gallery 30
3 # -----------------------------------------------------------------------------
4 # Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
5 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
6 # -----------------------------------------------------------------------------
7 """
8 Test automatic layout of multiple viewboxes using Grid.
9 """
10 import sys
11 import numpy as np
12
13 from vispy import scene, app
14
15 canvas = scene.SceneCanvas(keys='interactive')
16 canvas.size = 600, 600
17 canvas.show()
18
19 # This is the top-level widget that will hold three ViewBoxes, which will
20 # be automatically resized whenever the grid is resized.
21 grid = canvas.central_widget.add_grid()
22
23
24 # Add 3 ViewBoxes to the grid
25 b1 = grid.add_view(row=0, col=0, col_span=2)
26 b1.border_color = (0.5, 0.5, 0.5, 1)
27 b1.camera = scene.PanZoomCamera(rect=(-0.5, -5, 11, 10))
28 b1.border = (1, 0, 0, 1)
29
30 b2 = grid.add_view(row=1, col=0)
31 b2.border_color = (0.5, 0.5, 0.5, 1)
32 b2.camera = scene.PanZoomCamera(rect=(-10, -5, 15, 10))
33 b2.border = (1, 0, 0, 1)
34
35 b3 = grid.add_view(row=1, col=1)
36 b3.border_color = (0.5, 0.5, 0.5, 1)
37 b3.camera = scene.PanZoomCamera(rect=(-5, -5, 10, 10))
38 b3.border = (1, 0, 0, 1)
39
40
41 # Generate some random vertex data and a color gradient
42 N = 10000
43 pos = np.empty((N, 2), dtype=np.float32)
44 pos[:, 0] = np.linspace(0, 10, N)
45 pos[:, 1] = np.random.normal(size=N)
46 pos[5000, 1] += 50
47
48 color = np.ones((N, 4), dtype=np.float32)
49 color[:, 0] = np.linspace(0, 1, N)
50 color[:, 1] = color[::-1, 0]
51
52 # Top grid cell shows plot data in a rectangular coordinate system.
53 l1 = scene.visuals.Line(pos=pos, color=color, antialias=False, mode='gl')
54 b1.add(l1)
55 grid1 = scene.visuals.GridLines(parent=b1.scene)
56
57 # Bottom-left grid cell shows the same data with log-transformed X
58 e2 = scene.Node(parent=b2.scene)
59 e2.transform = scene.transforms.LogTransform(base=(2, 0, 0))
60 l2 = scene.visuals.Line(pos=pos, color=color, antialias=False, parent=e2,
61 mode='gl')
62 grid2 = scene.visuals.GridLines(parent=e2)
63
64 # Bottom-right grid cell shows the same data again, but with a much more
65 # interesting transformation.
66 e3 = scene.Node(parent=b3.scene)
67 affine = scene.transforms.AffineTransform()
68 affine.scale((1, 0.1))
69 affine.rotate(10, (0, 0, 1))
70 affine.translate((0, 1))
71 e3.transform = scene.transforms.ChainTransform([
72 scene.transforms.PolarTransform(),
73 affine])
74 l3 = scene.visuals.Line(pos=pos, color=color, antialias=False, parent=e3,
75 mode='gl')
76 grid3 = scene.visuals.GridLines(scale=(np.pi/6., 1.0), parent=e3)
77
78 if __name__ == '__main__' and sys.flags.interactive == 0:
79 app.run()
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/basics/scene/console.py b/examples/basics/scene/console.py
--- a/examples/basics/scene/console.py
+++ b/examples/basics/scene/console.py
@@ -18,6 +18,7 @@
grid = canvas.central_widget.add_grid()
vb = scene.widgets.ViewBox(border_color='b')
+vb.camera = 'panzoom'
vb.camera.rect = -1, -1, 2, 2
grid.add_widget(vb, row=0, col=0)
text = Text('Starting timer...', color='w', font_size=24, parent=vb.scene)
diff --git a/examples/basics/scene/grid.py b/examples/basics/scene/grid.py
--- a/examples/basics/scene/grid.py
+++ b/examples/basics/scene/grid.py
@@ -23,16 +23,19 @@
# Add 3 ViewBoxes to the grid
b1 = grid.add_view(row=0, col=0, col_span=2)
+b1.camera = 'panzoom'
b1.border_color = (0.5, 0.5, 0.5, 1)
b1.camera = scene.PanZoomCamera(rect=(-0.5, -5, 11, 10))
b1.border = (1, 0, 0, 1)
b2 = grid.add_view(row=1, col=0)
+b2.camera = 'panzoom'
b2.border_color = (0.5, 0.5, 0.5, 1)
b2.camera = scene.PanZoomCamera(rect=(-10, -5, 15, 10))
b2.border = (1, 0, 0, 1)
b3 = grid.add_view(row=1, col=1)
+b3.camera = 'panzoom'
b3.border_color = (0.5, 0.5, 0.5, 1)
b3.camera = scene.PanZoomCamera(rect=(-5, -5, 10, 10))
b3.border = (1, 0, 0, 1)
diff --git a/examples/basics/scene/grid_large.py b/examples/basics/scene/grid_large.py
--- a/examples/basics/scene/grid_large.py
+++ b/examples/basics/scene/grid_large.py
@@ -26,6 +26,7 @@
lines.append([])
for j in range(10):
vb = grid.add_view(row=i, col=j)
+ vb.camera = 'panzoom'
vb.camera.rect = (0, -5), (100, 10)
vb.border = (1, 1, 1, 0.4)
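Taken together, the three hunks above make one and the same change: every `ViewBox` is given a `'panzoom'` camera before its `camera.rect` is configured, so the rectangle is set on an interactive camera instead of whatever default the `ViewBox` starts with, and the `Text` visual lands where expected. A minimal sketch of the corrected pattern, reusing only names that already appear in `console.py`:

```python
# Sketch of the pattern the fix applies in console.py: assign the camera
# first, then configure its view rectangle.
from vispy import scene

canvas = scene.SceneCanvas(keys='interactive', size=(400, 400))
grid = canvas.central_widget.add_grid()

vb = scene.widgets.ViewBox(border_color='b')
vb.camera = 'panzoom'            # the one line each hunk adds
vb.camera.rect = -1, -1, 2, 2    # now targets the PanZoom camera
grid.add_widget(vb, row=0, col=0)
```

The grid examples follow the same two-step order for every view they create.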
| {"golden_diff": "diff --git a/examples/basics/scene/console.py b/examples/basics/scene/console.py\n--- a/examples/basics/scene/console.py\n+++ b/examples/basics/scene/console.py\n@@ -18,6 +18,7 @@\n grid = canvas.central_widget.add_grid()\n \n vb = scene.widgets.ViewBox(border_color='b')\n+vb.camera = 'panzoom'\n vb.camera.rect = -1, -1, 2, 2\n grid.add_widget(vb, row=0, col=0)\n text = Text('Starting timer...', color='w', font_size=24, parent=vb.scene)\ndiff --git a/examples/basics/scene/grid.py b/examples/basics/scene/grid.py\n--- a/examples/basics/scene/grid.py\n+++ b/examples/basics/scene/grid.py\n@@ -23,16 +23,19 @@\n \n # Add 3 ViewBoxes to the grid\n b1 = grid.add_view(row=0, col=0, col_span=2)\n+b1.camera = 'panzoom'\n b1.border_color = (0.5, 0.5, 0.5, 1)\n b1.camera = scene.PanZoomCamera(rect=(-0.5, -5, 11, 10))\n b1.border = (1, 0, 0, 1)\n \n b2 = grid.add_view(row=1, col=0)\n+b2.camera = 'panzoom'\n b2.border_color = (0.5, 0.5, 0.5, 1)\n b2.camera = scene.PanZoomCamera(rect=(-10, -5, 15, 10))\n b2.border = (1, 0, 0, 1)\n \n b3 = grid.add_view(row=1, col=1)\n+b3.camera = 'panzoom'\n b3.border_color = (0.5, 0.5, 0.5, 1)\n b3.camera = scene.PanZoomCamera(rect=(-5, -5, 10, 10))\n b3.border = (1, 0, 0, 1)\ndiff --git a/examples/basics/scene/grid_large.py b/examples/basics/scene/grid_large.py\n--- a/examples/basics/scene/grid_large.py\n+++ b/examples/basics/scene/grid_large.py\n@@ -26,6 +26,7 @@\n lines.append([])\n for j in range(10):\n vb = grid.add_view(row=i, col=j)\n+ vb.camera = 'panzoom'\n vb.camera.rect = (0, -5), (100, 10)\n vb.border = (1, 1, 1, 0.4)\n", "issue": "Text is misplaced in scene/console example\n\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# vispy: gallery 30\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\"\"\"\nDemonstrate the use of the vispy console. Note how the console size is\nindependent of the canvas scaling.\n\"\"\"\nimport sys\n\nfrom vispy import scene, app\nfrom vispy.scene.widgets import Console\nfrom vispy.scene.visuals import Text\n\ncanvas = scene.SceneCanvas(keys='interactive', size=(400, 400))\ngrid = canvas.central_widget.add_grid()\n\nvb = scene.widgets.ViewBox(border_color='b')\nvb.camera.rect = -1, -1, 2, 2\ngrid.add_widget(vb, row=0, col=0)\ntext = Text('Starting timer...', color='w', font_size=24, parent=vb.scene)\n\nconsole = Console(text_color='g', font_size=12., border_color='g')\ngrid.add_widget(console, row=1, col=0)\n\n\ndef on_timer(event):\n text.text = 'Tick #%s' % event.iteration\n if event.iteration > 1 and event.iteration % 10 == 0:\n console.clear()\n console.write('Elapsed:\\n %s' % event.elapsed)\n canvas.update()\n\ntimer = app.Timer(2.0, connect=on_timer, start=True)\n\nconsole.write('This is a line that will be wrapped automatically by the '\n 'console.\\n')\nconsole.write('This line will be truncated ....................,\\n'\n 'but this next line will survive.\\n', wrap=False)\n\nif __name__ == '__main__':\n canvas.show()\n if sys.flags.interactive != 1:\n canvas.app.run()\n", "path": "examples/basics/scene/console.py"}, {"content": "# -*- coding: utf-8 -*-\n# vispy: testskip # disabled due to segfaults on travis\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Vispy Development Team. 
All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\"\"\"\nTest automatic layout of multiple viewboxes using Grid.\n\"\"\"\n\nimport sys\nfrom vispy import scene\nfrom vispy import app\nimport numpy as np\n\ncanvas = scene.SceneCanvas(keys='interactive')\ncanvas.size = 600, 600\ncanvas.show()\n\ngrid = canvas.central_widget.add_grid()\n\n\nN = 10000\nlines = []\nfor i in range(10):\n lines.append([])\n for j in range(10):\n vb = grid.add_view(row=i, col=j)\n vb.camera.rect = (0, -5), (100, 10)\n vb.border = (1, 1, 1, 0.4)\n\n pos = np.empty((N, 2), dtype=np.float32)\n pos[:, 0] = np.linspace(0, 100, N)\n pos[:, 1] = np.random.normal(size=N)\n line = scene.visuals.Line(pos=pos, color=(1, 1, 1, 0.5), mode='gl')\n vb.add(line)\n\n\nif __name__ == '__main__' and sys.flags.interactive == 0:\n app.run()\n", "path": "examples/basics/scene/grid_large.py"}, {"content": "# -*- coding: utf-8 -*-\n# vispy: gallery 30\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\"\"\"\nTest automatic layout of multiple viewboxes using Grid.\n\"\"\"\nimport sys\nimport numpy as np\n\nfrom vispy import scene, app\n\ncanvas = scene.SceneCanvas(keys='interactive')\ncanvas.size = 600, 600\ncanvas.show()\n\n# This is the top-level widget that will hold three ViewBoxes, which will\n# be automatically resized whenever the grid is resized.\ngrid = canvas.central_widget.add_grid()\n\n\n# Add 3 ViewBoxes to the grid\nb1 = grid.add_view(row=0, col=0, col_span=2)\nb1.border_color = (0.5, 0.5, 0.5, 1)\nb1.camera = scene.PanZoomCamera(rect=(-0.5, -5, 11, 10))\nb1.border = (1, 0, 0, 1)\n\nb2 = grid.add_view(row=1, col=0)\nb2.border_color = (0.5, 0.5, 0.5, 1)\nb2.camera = scene.PanZoomCamera(rect=(-10, -5, 15, 10))\nb2.border = (1, 0, 0, 1)\n\nb3 = grid.add_view(row=1, col=1)\nb3.border_color = (0.5, 0.5, 0.5, 1)\nb3.camera = scene.PanZoomCamera(rect=(-5, -5, 10, 10))\nb3.border = (1, 0, 0, 1)\n\n\n# Generate some random vertex data and a color gradient\nN = 10000\npos = np.empty((N, 2), dtype=np.float32)\npos[:, 0] = np.linspace(0, 10, N)\npos[:, 1] = np.random.normal(size=N)\npos[5000, 1] += 50\n\ncolor = np.ones((N, 4), dtype=np.float32)\ncolor[:, 0] = np.linspace(0, 1, N)\ncolor[:, 1] = color[::-1, 0]\n\n# Top grid cell shows plot data in a rectangular coordinate system.\nl1 = scene.visuals.Line(pos=pos, color=color, antialias=False, mode='gl')\nb1.add(l1)\ngrid1 = scene.visuals.GridLines(parent=b1.scene)\n\n# Bottom-left grid cell shows the same data with log-transformed X\ne2 = scene.Node(parent=b2.scene)\ne2.transform = scene.transforms.LogTransform(base=(2, 0, 0))\nl2 = scene.visuals.Line(pos=pos, color=color, antialias=False, parent=e2,\n mode='gl')\ngrid2 = scene.visuals.GridLines(parent=e2)\n\n# Bottom-right grid cell shows the same data again, but with a much more\n# interesting transformation.\ne3 = scene.Node(parent=b3.scene)\naffine = scene.transforms.AffineTransform()\naffine.scale((1, 0.1))\naffine.rotate(10, (0, 0, 1))\naffine.translate((0, 1))\ne3.transform = scene.transforms.ChainTransform([\n scene.transforms.PolarTransform(),\n affine])\nl3 = scene.visuals.Line(pos=pos, color=color, antialias=False, parent=e3,\n mode='gl')\ngrid3 = 
scene.visuals.GridLines(scale=(np.pi/6., 1.0), parent=e3)\n\nif __name__ == '__main__' and sys.flags.interactive == 0:\n app.run()\n", "path": "examples/basics/scene/grid.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# vispy: gallery 30\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\"\"\"\nDemonstrate the use of the vispy console. Note how the console size is\nindependent of the canvas scaling.\n\"\"\"\nimport sys\n\nfrom vispy import scene, app\nfrom vispy.scene.widgets import Console\nfrom vispy.scene.visuals import Text\n\ncanvas = scene.SceneCanvas(keys='interactive', size=(400, 400))\ngrid = canvas.central_widget.add_grid()\n\nvb = scene.widgets.ViewBox(border_color='b')\nvb.camera = 'panzoom'\nvb.camera.rect = -1, -1, 2, 2\ngrid.add_widget(vb, row=0, col=0)\ntext = Text('Starting timer...', color='w', font_size=24, parent=vb.scene)\n\nconsole = Console(text_color='g', font_size=12., border_color='g')\ngrid.add_widget(console, row=1, col=0)\n\n\ndef on_timer(event):\n text.text = 'Tick #%s' % event.iteration\n if event.iteration > 1 and event.iteration % 10 == 0:\n console.clear()\n console.write('Elapsed:\\n %s' % event.elapsed)\n canvas.update()\n\ntimer = app.Timer(2.0, connect=on_timer, start=True)\n\nconsole.write('This is a line that will be wrapped automatically by the '\n 'console.\\n')\nconsole.write('This line will be truncated ....................,\\n'\n 'but this next line will survive.\\n', wrap=False)\n\nif __name__ == '__main__':\n canvas.show()\n if sys.flags.interactive != 1:\n canvas.app.run()\n", "path": "examples/basics/scene/console.py"}, {"content": "# -*- coding: utf-8 -*-\n# vispy: testskip # disabled due to segfaults on travis\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\"\"\"\nTest automatic layout of multiple viewboxes using Grid.\n\"\"\"\n\nimport sys\nfrom vispy import scene\nfrom vispy import app\nimport numpy as np\n\ncanvas = scene.SceneCanvas(keys='interactive')\ncanvas.size = 600, 600\ncanvas.show()\n\ngrid = canvas.central_widget.add_grid()\n\n\nN = 10000\nlines = []\nfor i in range(10):\n lines.append([])\n for j in range(10):\n vb = grid.add_view(row=i, col=j)\n vb.camera = 'panzoom'\n vb.camera.rect = (0, -5), (100, 10)\n vb.border = (1, 1, 1, 0.4)\n\n pos = np.empty((N, 2), dtype=np.float32)\n pos[:, 0] = np.linspace(0, 100, N)\n pos[:, 1] = np.random.normal(size=N)\n line = scene.visuals.Line(pos=pos, color=(1, 1, 1, 0.5), mode='gl')\n vb.add(line)\n\n\nif __name__ == '__main__' and sys.flags.interactive == 0:\n app.run()\n", "path": "examples/basics/scene/grid_large.py"}, {"content": "# -*- coding: utf-8 -*-\n# vispy: gallery 30\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. 
See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\"\"\"\nTest automatic layout of multiple viewboxes using Grid.\n\"\"\"\nimport sys\nimport numpy as np\n\nfrom vispy import scene, app\n\ncanvas = scene.SceneCanvas(keys='interactive')\ncanvas.size = 600, 600\ncanvas.show()\n\n# This is the top-level widget that will hold three ViewBoxes, which will\n# be automatically resized whenever the grid is resized.\ngrid = canvas.central_widget.add_grid()\n\n\n# Add 3 ViewBoxes to the grid\nb1 = grid.add_view(row=0, col=0, col_span=2)\nb1.camera = 'panzoom'\nb1.border_color = (0.5, 0.5, 0.5, 1)\nb1.camera = scene.PanZoomCamera(rect=(-0.5, -5, 11, 10))\nb1.border = (1, 0, 0, 1)\n\nb2 = grid.add_view(row=1, col=0)\nb2.camera = 'panzoom'\nb2.border_color = (0.5, 0.5, 0.5, 1)\nb2.camera = scene.PanZoomCamera(rect=(-10, -5, 15, 10))\nb2.border = (1, 0, 0, 1)\n\nb3 = grid.add_view(row=1, col=1)\nb3.camera = 'panzoom'\nb3.border_color = (0.5, 0.5, 0.5, 1)\nb3.camera = scene.PanZoomCamera(rect=(-5, -5, 10, 10))\nb3.border = (1, 0, 0, 1)\n\n\n# Generate some random vertex data and a color gradient\nN = 10000\npos = np.empty((N, 2), dtype=np.float32)\npos[:, 0] = np.linspace(0, 10, N)\npos[:, 1] = np.random.normal(size=N)\npos[5000, 1] += 50\n\ncolor = np.ones((N, 4), dtype=np.float32)\ncolor[:, 0] = np.linspace(0, 1, N)\ncolor[:, 1] = color[::-1, 0]\n\n# Top grid cell shows plot data in a rectangular coordinate system.\nl1 = scene.visuals.Line(pos=pos, color=color, antialias=False, mode='gl')\nb1.add(l1)\ngrid1 = scene.visuals.GridLines(parent=b1.scene)\n\n# Bottom-left grid cell shows the same data with log-transformed X\ne2 = scene.Node(parent=b2.scene)\ne2.transform = scene.transforms.LogTransform(base=(2, 0, 0))\nl2 = scene.visuals.Line(pos=pos, color=color, antialias=False, parent=e2,\n mode='gl')\ngrid2 = scene.visuals.GridLines(parent=e2)\n\n# Bottom-right grid cell shows the same data again, but with a much more\n# interesting transformation.\ne3 = scene.Node(parent=b3.scene)\naffine = scene.transforms.AffineTransform()\naffine.scale((1, 0.1))\naffine.rotate(10, (0, 0, 1))\naffine.translate((0, 1))\ne3.transform = scene.transforms.ChainTransform([\n scene.transforms.PolarTransform(),\n affine])\nl3 = scene.visuals.Line(pos=pos, color=color, antialias=False, parent=e3,\n mode='gl')\ngrid3 = scene.visuals.GridLines(scale=(np.pi/6., 1.0), parent=e3)\n\nif __name__ == '__main__' and sys.flags.interactive == 0:\n app.run()\n", "path": "examples/basics/scene/grid.py"}]} | 2,231 | 590 |
gh_patches_debug_28351 | rasdani/github-patches | git_diff | openai__gym-3024 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug Report] Total order of keys for `spaces.Dict`
If you are submitting a bug report, please fill in the following details and use the tag [bug].
**Describe the bug**
A clear and concise description of what the bug is.
This is a companion to issue google/jax#11871: a total order is required for dictionary keys.
Equal `Dict` spaces `dict1 == dict2` do not imply `dict1.sample() == dict2.sample()` and `flatten_space(dict1) == flatten_space(dict2)`.
```python
In [1]: from gym import spaces
In [2]: dict1 = spaces.Dict({1: spaces.Box(0, 1, shape=(1,)), 'a': spaces.Box(0, 2, shape=(1,))})
In [3]: dict2 = spaces.Dict({'a': spaces.Box(0, 2, shape=(1,)), 1: spaces.Box(0, 1, shape=(1,))})
In [4]: dict1
Out[4]: Dict(1: Box(0.0, 1.0, (1,), float32), a: Box(0.0, 2.0, (1,), float32))
In [5]: dict2
Out[5]: Dict(a: Box(0.0, 2.0, (1,), float32), 1: Box(0.0, 1.0, (1,), float32))
In [6]: dict1 == dict2
Out[6]: True
```
Not equally seeded in subspaces:
```python
In [7]: dict1.seed(0)
Out[7]: [0, 2488343231644625808, 5874934615388537134]
In [8]: dict2.seed(0)
Out[8]: [0, 2488343231644625808, 5874934615388537134]
In [9]: dict1.sample() == dict2.sample()
Out[9]: False
```
Different order while flattening:
```python
In [10]: spaces.flatten_space(dict1)
Out[10]: Box(0.0, [1. 2.], (2,), float32)
In [11]: spaces.flatten_space(dict2)
Out[11]: Box(0.0, [2. 1.], (2,), float32)
In [12]: spaces.flatten_space(dict1) == spaces.flatten_space(dict2)
Out[12]: False
```
The order of keys is important when seeding the subspaces and flattening the space.
------
In `spaces.Dict.__init__` method, we always convert the inputs into an `OrderedDict`:
https://github.com/openai/gym/blob/8b744130bc25d0f4fe2eac8bce8e47527a8eee2c/gym/spaces/dict.py#L82-L92
However, function `sorted` will fail when sorting with uncomparable types (e.g. `int` vs. `str`):
```python
In [1]: d = {1: 1, 'a': 2}
In [2]: sorted(d.items())
TypeError: '<' not supported between instances of 'str' and 'int'
```
So we add a fallback choice at line 88 in PR #2491. This means that when the keys are not sortable, they are kept in insertion order (guaranteed since Python 3.6). However, the order of keys matters when seeding the subspaces and flattening the space.
`seed`: (seed in order of `dict.spaces.values()`)
https://github.com/openai/gym/blob/8b744130bc25d0f4fe2eac8bce8e47527a8eee2c/gym/spaces/dict.py#L131-L135
`flatten_space`: (flatten in order of `dict.spaces.values()`)
https://github.com/openai/gym/blob/8b744130bc25d0f4fe2eac8bce8e47527a8eee2c/gym/spaces/utils.py#L326-L333
As a result, `dict1 == dict2` does not imply `flatten_space(dict1) == flatten_space(dict2)`.
**Code example**
Please try to provide a minimal example to reproduce the bug. Error messages and stack traces are also helpful.
See Description above.
**System Info**
Describe the characteristic of your environment:
* Describe how Gym was installed: pip
* What OS/version of Linux you're using: Ubuntu 20.04 LTS
* Python version: 3.9
**Additional context**
Add any other context about the problem here.
See also:
- google/jax#4085
- google/jax#11871
### Checklist
- [X] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)
--- END ISSUE ---
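The inconsistency described above ultimately reduces to a plain Python fact: equality of built-in dicts ignores insertion order, while iteration order — which seeding, sampling and `flatten_space` all follow — does not. A tiny, gym-independent sketch of that asymmetry, and of the order-sensitive comparison an `OrderedDict` gives instead:

```python
from collections import OrderedDict

# Same key/value pairs, different insertion order.
plain_a = {1: 'x', 'a': 'y'}
plain_b = {'a': 'y', 1: 'x'}

# Built-in dict equality ignores insertion order ...
assert plain_a == plain_b
# ... yet iteration order differs, which is what seeding/flattening follow.
assert list(plain_a) != list(plain_b)

# An OrderedDict comparison, by contrast, is order-sensitive, so spaces
# that iterate differently would no longer compare equal.
assert OrderedDict(plain_a) != OrderedDict(plain_b)
```

The patch shown further below takes exactly this route, delegating `Dict.__eq__` to the `OrderedDict` comparison of the underlying spaces.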
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/spaces/dict.py`
Content:
```
1 """Implementation of a space that represents the cartesian product of other spaces as a dictionary."""
2 from collections import OrderedDict
3 from collections.abc import Mapping, Sequence
4 from typing import Any
5 from typing import Dict as TypingDict
6 from typing import Optional, Union
7
8 import numpy as np
9
10 from gym.spaces.space import Space
11 from gym.utils import seeding
12
13
14 class Dict(Space[TypingDict[str, Space]], Mapping):
15 """A dictionary of :class:`Space` instances.
16
17 Elements of this space are (ordered) dictionaries of elements from the constituent spaces.
18
19 Example usage:
20
21 >>> from gym.spaces import Dict, Discrete
22 >>> observation_space = Dict({"position": Discrete(2), "velocity": Discrete(3)})
23 >>> observation_space.sample()
24 OrderedDict([('position', 1), ('velocity', 2)])
25
26 Example usage [nested]::
27
28 >>> from gym.spaces import Box, Dict, Discrete, MultiBinary, MultiDiscrete
29 >>> Dict(
30 ... {
31 ... "ext_controller": MultiDiscrete([5, 2, 2]),
32 ... "inner_state": Dict(
33 ... {
34 ... "charge": Discrete(100),
35 ... "system_checks": MultiBinary(10),
36 ... "job_status": Dict(
37 ... {
38 ... "task": Discrete(5),
39 ... "progress": Box(low=0, high=100, shape=()),
40 ... }
41 ... ),
42 ... }
43 ... ),
44 ... }
45 ... )
46
47 It can be convenient to use :class:`Dict` spaces if you want to make complex observations or actions more human-readable.
48 Usually, it will not be possible to use elements of this space directly in learning code. However, you can easily
49 convert `Dict` observations to flat arrays by using a :class:`gym.wrappers.FlattenObservation` wrapper. Similar wrappers can be
50 implemented to deal with :class:`Dict` actions.
51 """
52
53 def __init__(
54 self,
55 spaces: Optional[TypingDict[str, Space]] = None,
56 seed: Optional[Union[dict, int, seeding.RandomNumberGenerator]] = None,
57 **spaces_kwargs: Space,
58 ):
59 """Constructor of :class:`Dict` space.
60
61 This space can be instantiated in one of two ways: Either you pass a dictionary
62 of spaces to :meth:`__init__` via the ``spaces`` argument, or you pass the spaces as separate
63 keyword arguments (where you will need to avoid the keys ``spaces`` and ``seed``)
64
65 Example::
66
67 >>> from gym.spaces import Box, Discrete
68 >>> Dict({"position": Box(-1, 1, shape=(2,)), "color": Discrete(3)})
69 Dict(color:Discrete(3), position:Box(-1.0, 1.0, (2,), float32))
70 >>> Dict(position=Box(-1, 1, shape=(2,)), color=Discrete(3))
71 Dict(color:Discrete(3), position:Box(-1.0, 1.0, (2,), float32))
72
73 Args:
74 spaces: A dictionary of spaces. This specifies the structure of the :class:`Dict` space
75 seed: Optionally, you can use this argument to seed the RNGs of the spaces that make up the :class:`Dict` space.
76 **spaces_kwargs: If ``spaces`` is ``None``, you need to pass the constituent spaces as keyword arguments, as described above.
77 """
78 assert (spaces is None) or (
79 not spaces_kwargs
80 ), "Use either Dict(spaces=dict(...)) or Dict(foo=x, bar=z)"
81
82 if spaces is None:
83 spaces = spaces_kwargs
84 if isinstance(spaces, dict) and not isinstance(spaces, OrderedDict):
85 try:
86 spaces = OrderedDict(sorted(spaces.items()))
87 except TypeError: # raise when sort by different types of keys
88 spaces = OrderedDict(spaces.items())
89 if isinstance(spaces, Sequence):
90 spaces = OrderedDict(spaces)
91
92 assert isinstance(spaces, OrderedDict), "spaces must be a dictionary"
93
94 self.spaces = spaces
95 for space in spaces.values():
96 assert isinstance(
97 space, Space
98 ), "Values of the dict should be instances of gym.Space"
99 super().__init__(
100 None, None, seed # type: ignore
101 ) # None for shape and dtype, since it'll require special handling
102
103 def seed(self, seed: Optional[Union[dict, int]] = None) -> list:
104 """Seed the PRNG of this space and all subspaces."""
105 seeds = []
106 if isinstance(seed, dict):
107 for key, seed_key in zip(self.spaces, seed):
108 assert key == seed_key, print(
109 "Key value",
110 seed_key,
111 "in passed seed dict did not match key value",
112 key,
113 "in spaces Dict.",
114 )
115 seeds += self.spaces[key].seed(seed[seed_key])
116 elif isinstance(seed, int):
117 seeds = super().seed(seed)
118 try:
119 subseeds = self.np_random.choice(
120 np.iinfo(int).max,
121 size=len(self.spaces),
122 replace=False, # unique subseed for each subspace
123 )
124 except ValueError:
125 subseeds = self.np_random.choice(
126 np.iinfo(int).max,
127 size=len(self.spaces),
128 replace=True, # we get more than INT_MAX subspaces
129 )
130
131 for subspace, subseed in zip(self.spaces.values(), subseeds):
132 seeds.append(subspace.seed(int(subseed))[0])
133 elif seed is None:
134 for space in self.spaces.values():
135 seeds += space.seed(seed)
136 else:
137 raise TypeError("Passed seed not of an expected type: dict or int or None")
138
139 return seeds
140
141 def sample(self, mask: Optional[TypingDict[str, Any]] = None) -> dict:
142 """Generates a single random sample from this space.
143
144 The sample is an ordered dictionary of independent samples from the constituent spaces.
145
146 Args:
147 mask: An optional mask for each of the subspaces, expects the same keys as the space
148
149 Returns:
150 A dictionary with the same key and sampled values from :attr:`self.spaces`
151 """
152 if mask is not None:
153 assert isinstance(
154 mask, dict
155 ), f"Expects mask to be a dict, actual type: {type(mask)}"
156 assert (
157 mask.keys() == self.spaces.keys()
158 ), f"Expect mask keys to be same as space keys, mask keys: {mask.keys()}, space keys: {self.spaces.keys()}"
159 return OrderedDict(
160 [(k, space.sample(mask[k])) for k, space in self.spaces.items()]
161 )
162
163 return OrderedDict([(k, space.sample()) for k, space in self.spaces.items()])
164
165 def contains(self, x) -> bool:
166 """Return boolean specifying if x is a valid member of this space."""
167 if not isinstance(x, dict) or len(x) != len(self.spaces):
168 return False
169 for k, space in self.spaces.items():
170 if k not in x:
171 return False
172 if not space.contains(x[k]):
173 return False
174 return True
175
176 def __getitem__(self, key: str) -> Space:
177 """Get the space that is associated to `key`."""
178 return self.spaces[key]
179
180 def __setitem__(self, key: str, value: Space):
181 """Set the space that is associated to `key`."""
182 self.spaces[key] = value
183
184 def __iter__(self):
185 """Iterator through the keys of the subspaces."""
186 yield from self.spaces
187
188 def __len__(self) -> int:
189 """Gives the number of simpler spaces that make up the `Dict` space."""
190 return len(self.spaces)
191
192 def __repr__(self) -> str:
193 """Gives a string representation of this space."""
194 return "Dict(" + ", ".join([f"{k}: {s}" for k, s in self.spaces.items()]) + ")"
195
196 def to_jsonable(self, sample_n: list) -> dict:
197 """Convert a batch of samples from this space to a JSONable data type."""
198 # serialize as dict-repr of vectors
199 return {
200 key: space.to_jsonable([sample[key] for sample in sample_n])
201 for key, space in self.spaces.items()
202 }
203
204 def from_jsonable(self, sample_n: TypingDict[str, list]) -> list:
205 """Convert a JSONable data type to a batch of samples from this space."""
206 dict_of_list: TypingDict[str, list] = {}
207 for key, space in self.spaces.items():
208 dict_of_list[key] = space.from_jsonable(sample_n[key])
209 ret = []
210 n_elements = len(next(iter(dict_of_list.values())))
211 for i in range(n_elements):
212 entry = {}
213 for key, value in dict_of_list.items():
214 entry[key] = value[i]
215 ret.append(entry)
216 return ret
217
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gym/spaces/dict.py b/gym/spaces/dict.py
--- a/gym/spaces/dict.py
+++ b/gym/spaces/dict.py
@@ -81,10 +81,12 @@
if spaces is None:
spaces = spaces_kwargs
- if isinstance(spaces, dict) and not isinstance(spaces, OrderedDict):
+ if isinstance(spaces, Mapping) and not isinstance(spaces, OrderedDict):
try:
spaces = OrderedDict(sorted(spaces.items()))
- except TypeError: # raise when sort by different types of keys
+ except TypeError:
+ # Incomparable types (e.g. `int` vs. `str`, or user-defined types) found.
+ # The keys remain in the insertion order.
spaces = OrderedDict(spaces.items())
if isinstance(spaces, Sequence):
spaces = OrderedDict(spaces)
@@ -191,7 +193,17 @@
def __repr__(self) -> str:
"""Gives a string representation of this space."""
- return "Dict(" + ", ".join([f"{k}: {s}" for k, s in self.spaces.items()]) + ")"
+ return (
+ "Dict(" + ", ".join([f"{k!r}: {s}" for k, s in self.spaces.items()]) + ")"
+ )
+
+ def __eq__(self, other) -> bool:
+ """Check whether `other` is equivalent to this instance."""
+ return (
+ isinstance(other, Dict)
+ # Comparison of `OrderedDict`s is order-sensitive
+ and self.spaces == other.spaces # OrderedDict.__eq__
+ )
def to_jsonable(self, sample_n: list) -> dict:
"""Convert a batch of samples from this space to a JSONable data type."""
| {"golden_diff": "diff --git a/gym/spaces/dict.py b/gym/spaces/dict.py\n--- a/gym/spaces/dict.py\n+++ b/gym/spaces/dict.py\n@@ -81,10 +81,12 @@\n \n if spaces is None:\n spaces = spaces_kwargs\n- if isinstance(spaces, dict) and not isinstance(spaces, OrderedDict):\n+ if isinstance(spaces, Mapping) and not isinstance(spaces, OrderedDict):\n try:\n spaces = OrderedDict(sorted(spaces.items()))\n- except TypeError: # raise when sort by different types of keys\n+ except TypeError:\n+ # Incomparable types (e.g. `int` vs. `str`, or user-defined types) found.\n+ # The keys remain in the insertion order.\n spaces = OrderedDict(spaces.items())\n if isinstance(spaces, Sequence):\n spaces = OrderedDict(spaces)\n@@ -191,7 +193,17 @@\n \n def __repr__(self) -> str:\n \"\"\"Gives a string representation of this space.\"\"\"\n- return \"Dict(\" + \", \".join([f\"{k}: {s}\" for k, s in self.spaces.items()]) + \")\"\n+ return (\n+ \"Dict(\" + \", \".join([f\"{k!r}: {s}\" for k, s in self.spaces.items()]) + \")\"\n+ )\n+\n+ def __eq__(self, other) -> bool:\n+ \"\"\"Check whether `other` is equivalent to this instance.\"\"\"\n+ return (\n+ isinstance(other, Dict)\n+ # Comparison of `OrderedDict`s is order-sensitive\n+ and self.spaces == other.spaces # OrderedDict.__eq__\n+ )\n \n def to_jsonable(self, sample_n: list) -> dict:\n \"\"\"Convert a batch of samples from this space to a JSONable data type.\"\"\"\n", "issue": "[Bug Report] Total order of keys for `spaces.Dict`\nIf you are submitting a bug report, please fill in the following details and use the tag [bug].\r\n\r\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\nPair of issue google/jax#11871. A total order is required for dictionary keys.\r\n\r\nEqual `Dict` spaces `dict1 == dict2` do not imply `dict1.sample() == dict2.sample()` and `flatten_space(dict1) == flatten_space(dict2)`.\r\n\r\n```python\r\nIn [1]: from gym import spaces\r\n\r\nIn [2]: dict1 = spaces.Dict({1: spaces.Box(0, 1, shape=(1,)), 'a': spaces.Box(0, 2, shape=(1,))})\r\n\r\nIn [3]: dict2 = spaces.Dict({'a': spaces.Box(0, 2, shape=(1,)), 1: spaces.Box(0, 1, shape=(1,))})\r\n\r\nIn [4]: dict1\r\nOut[4]: Dict(1: Box(0.0, 1.0, (1,), float32), a: Box(0.0, 2.0, (1,), float32))\r\n\r\nIn [5]: dict2\r\nOut[5]: Dict(a: Box(0.0, 2.0, (1,), float32), 1: Box(0.0, 1.0, (1,), float32))\r\n\r\nIn [6]: dict1 == dict2\r\nOut[6]: True\r\n```\r\n\r\nNot equally seeded in subspaces:\r\n\r\n```python\r\nIn [7]: dict1.seed(0)\r\nOut[7]: [0, 2488343231644625808, 5874934615388537134]\r\n\r\nIn [8]: dict2.seed(0)\r\nOut[8]: [0, 2488343231644625808, 5874934615388537134]\r\n\r\nIn [9]: dict1.sample() == dict2.sample()\r\nOut[9]: False\r\n```\r\n\r\nDifferent order while flattening:\r\n\r\n```python\r\nIn [10]: spaces.flatten_space(dict1)\r\nOut[10]: Box(0.0, [1. 2.], (2,), float32)\r\n\r\nIn [11]: spaces.flatten_space(dict2)\r\nOut[11]: Box(0.0, [2. 1.], (2,), float32)\r\n\r\nIn [12]: spaces.flatten_space(dict1) == spaces.flatten_space(dict2)\r\nOut[12]: False\r\n```\r\n\r\nThe order of keys is important when seeding the subspaces and flattening the space.\r\n\r\n------\r\n\r\nIn `spaces.Dict.__init__` method, we always convert the inputs into an `OrderedDict`:\r\n\r\nhttps://github.com/openai/gym/blob/8b744130bc25d0f4fe2eac8bce8e47527a8eee2c/gym/spaces/dict.py#L82-L92\r\n\r\nHowever, function `sorted` will fail when sorting with uncomparable types (e.g. `int` vs. 
`str`):\r\n\r\n```python\r\nIn [1]: d = {1: 1, 'a': 2}\r\n\r\nIn [2]: sorted(d.items())\r\nTypeError: '<' not supported between instances of 'str' and 'int'\r\n```\r\n\r\nSo we add a failback choice at line 88 in PR #2491. This means when the keys are not sortable, the keys are ordered by the insertion order (since Python 3.6). However, the order of keys is important when seeding the subspaces and flattening the space.\r\n\r\n`seed`: (seed in order of `dict.spaces.values()`)\r\n\r\nhttps://github.com/openai/gym/blob/8b744130bc25d0f4fe2eac8bce8e47527a8eee2c/gym/spaces/dict.py#L131-L135\r\n\r\n`flatten_space`: (flatten in order of `dict.spaces.values()`)\r\n\r\nhttps://github.com/openai/gym/blob/8b744130bc25d0f4fe2eac8bce8e47527a8eee2c/gym/spaces/utils.py#L326-L333\r\n\r\nThis will cause `dict1 == dict2` does not imply `flatten_space(dict1) == flatten_space(dict2)`.\r\n\r\n**Code example**\r\nPlease try to provide a minimal example to reproduce the bug. Error messages and stack traces are also helpful.\r\n\r\nSee Description above.\r\n\r\n**System Info**\r\nDescribe the characteristic of your environment:\r\n * Describe how Gym was installed: pip\r\n * What OS/version of Linux you're using: Ubuntu 20.04 LTS\r\n * Python version: 3.9\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\r\nSee also:\r\n\r\n- google/jax#4085\r\n- google/jax#11871\r\n\r\n### Checklist\r\n\r\n- [X] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)\r\n\n", "before_files": [{"content": "\"\"\"Implementation of a space that represents the cartesian product of other spaces as a dictionary.\"\"\"\nfrom collections import OrderedDict\nfrom collections.abc import Mapping, Sequence\nfrom typing import Any\nfrom typing import Dict as TypingDict\nfrom typing import Optional, Union\n\nimport numpy as np\n\nfrom gym.spaces.space import Space\nfrom gym.utils import seeding\n\n\nclass Dict(Space[TypingDict[str, Space]], Mapping):\n \"\"\"A dictionary of :class:`Space` instances.\n\n Elements of this space are (ordered) dictionaries of elements from the constituent spaces.\n\n Example usage:\n\n >>> from gym.spaces import Dict, Discrete\n >>> observation_space = Dict({\"position\": Discrete(2), \"velocity\": Discrete(3)})\n >>> observation_space.sample()\n OrderedDict([('position', 1), ('velocity', 2)])\n\n Example usage [nested]::\n\n >>> from gym.spaces import Box, Dict, Discrete, MultiBinary, MultiDiscrete\n >>> Dict(\n ... {\n ... \"ext_controller\": MultiDiscrete([5, 2, 2]),\n ... \"inner_state\": Dict(\n ... {\n ... \"charge\": Discrete(100),\n ... \"system_checks\": MultiBinary(10),\n ... \"job_status\": Dict(\n ... {\n ... \"task\": Discrete(5),\n ... \"progress\": Box(low=0, high=100, shape=()),\n ... }\n ... ),\n ... }\n ... ),\n ... }\n ... )\n\n It can be convenient to use :class:`Dict` spaces if you want to make complex observations or actions more human-readable.\n Usually, it will not be possible to use elements of this space directly in learning code. However, you can easily\n convert `Dict` observations to flat arrays by using a :class:`gym.wrappers.FlattenObservation` wrapper. 
Similar wrappers can be\n implemented to deal with :class:`Dict` actions.\n \"\"\"\n\n def __init__(\n self,\n spaces: Optional[TypingDict[str, Space]] = None,\n seed: Optional[Union[dict, int, seeding.RandomNumberGenerator]] = None,\n **spaces_kwargs: Space,\n ):\n \"\"\"Constructor of :class:`Dict` space.\n\n This space can be instantiated in one of two ways: Either you pass a dictionary\n of spaces to :meth:`__init__` via the ``spaces`` argument, or you pass the spaces as separate\n keyword arguments (where you will need to avoid the keys ``spaces`` and ``seed``)\n\n Example::\n\n >>> from gym.spaces import Box, Discrete\n >>> Dict({\"position\": Box(-1, 1, shape=(2,)), \"color\": Discrete(3)})\n Dict(color:Discrete(3), position:Box(-1.0, 1.0, (2,), float32))\n >>> Dict(position=Box(-1, 1, shape=(2,)), color=Discrete(3))\n Dict(color:Discrete(3), position:Box(-1.0, 1.0, (2,), float32))\n\n Args:\n spaces: A dictionary of spaces. This specifies the structure of the :class:`Dict` space\n seed: Optionally, you can use this argument to seed the RNGs of the spaces that make up the :class:`Dict` space.\n **spaces_kwargs: If ``spaces`` is ``None``, you need to pass the constituent spaces as keyword arguments, as described above.\n \"\"\"\n assert (spaces is None) or (\n not spaces_kwargs\n ), \"Use either Dict(spaces=dict(...)) or Dict(foo=x, bar=z)\"\n\n if spaces is None:\n spaces = spaces_kwargs\n if isinstance(spaces, dict) and not isinstance(spaces, OrderedDict):\n try:\n spaces = OrderedDict(sorted(spaces.items()))\n except TypeError: # raise when sort by different types of keys\n spaces = OrderedDict(spaces.items())\n if isinstance(spaces, Sequence):\n spaces = OrderedDict(spaces)\n\n assert isinstance(spaces, OrderedDict), \"spaces must be a dictionary\"\n\n self.spaces = spaces\n for space in spaces.values():\n assert isinstance(\n space, Space\n ), \"Values of the dict should be instances of gym.Space\"\n super().__init__(\n None, None, seed # type: ignore\n ) # None for shape and dtype, since it'll require special handling\n\n def seed(self, seed: Optional[Union[dict, int]] = None) -> list:\n \"\"\"Seed the PRNG of this space and all subspaces.\"\"\"\n seeds = []\n if isinstance(seed, dict):\n for key, seed_key in zip(self.spaces, seed):\n assert key == seed_key, print(\n \"Key value\",\n seed_key,\n \"in passed seed dict did not match key value\",\n key,\n \"in spaces Dict.\",\n )\n seeds += self.spaces[key].seed(seed[seed_key])\n elif isinstance(seed, int):\n seeds = super().seed(seed)\n try:\n subseeds = self.np_random.choice(\n np.iinfo(int).max,\n size=len(self.spaces),\n replace=False, # unique subseed for each subspace\n )\n except ValueError:\n subseeds = self.np_random.choice(\n np.iinfo(int).max,\n size=len(self.spaces),\n replace=True, # we get more than INT_MAX subspaces\n )\n\n for subspace, subseed in zip(self.spaces.values(), subseeds):\n seeds.append(subspace.seed(int(subseed))[0])\n elif seed is None:\n for space in self.spaces.values():\n seeds += space.seed(seed)\n else:\n raise TypeError(\"Passed seed not of an expected type: dict or int or None\")\n\n return seeds\n\n def sample(self, mask: Optional[TypingDict[str, Any]] = None) -> dict:\n \"\"\"Generates a single random sample from this space.\n\n The sample is an ordered dictionary of independent samples from the constituent spaces.\n\n Args:\n mask: An optional mask for each of the subspaces, expects the same keys as the space\n\n Returns:\n A dictionary with the same key and sampled values from 
:attr:`self.spaces`\n \"\"\"\n if mask is not None:\n assert isinstance(\n mask, dict\n ), f\"Expects mask to be a dict, actual type: {type(mask)}\"\n assert (\n mask.keys() == self.spaces.keys()\n ), f\"Expect mask keys to be same as space keys, mask keys: {mask.keys()}, space keys: {self.spaces.keys()}\"\n return OrderedDict(\n [(k, space.sample(mask[k])) for k, space in self.spaces.items()]\n )\n\n return OrderedDict([(k, space.sample()) for k, space in self.spaces.items()])\n\n def contains(self, x) -> bool:\n \"\"\"Return boolean specifying if x is a valid member of this space.\"\"\"\n if not isinstance(x, dict) or len(x) != len(self.spaces):\n return False\n for k, space in self.spaces.items():\n if k not in x:\n return False\n if not space.contains(x[k]):\n return False\n return True\n\n def __getitem__(self, key: str) -> Space:\n \"\"\"Get the space that is associated to `key`.\"\"\"\n return self.spaces[key]\n\n def __setitem__(self, key: str, value: Space):\n \"\"\"Set the space that is associated to `key`.\"\"\"\n self.spaces[key] = value\n\n def __iter__(self):\n \"\"\"Iterator through the keys of the subspaces.\"\"\"\n yield from self.spaces\n\n def __len__(self) -> int:\n \"\"\"Gives the number of simpler spaces that make up the `Dict` space.\"\"\"\n return len(self.spaces)\n\n def __repr__(self) -> str:\n \"\"\"Gives a string representation of this space.\"\"\"\n return \"Dict(\" + \", \".join([f\"{k}: {s}\" for k, s in self.spaces.items()]) + \")\"\n\n def to_jsonable(self, sample_n: list) -> dict:\n \"\"\"Convert a batch of samples from this space to a JSONable data type.\"\"\"\n # serialize as dict-repr of vectors\n return {\n key: space.to_jsonable([sample[key] for sample in sample_n])\n for key, space in self.spaces.items()\n }\n\n def from_jsonable(self, sample_n: TypingDict[str, list]) -> list:\n \"\"\"Convert a JSONable data type to a batch of samples from this space.\"\"\"\n dict_of_list: TypingDict[str, list] = {}\n for key, space in self.spaces.items():\n dict_of_list[key] = space.from_jsonable(sample_n[key])\n ret = []\n n_elements = len(next(iter(dict_of_list.values())))\n for i in range(n_elements):\n entry = {}\n for key, value in dict_of_list.items():\n entry[key] = value[i]\n ret.append(entry)\n return ret\n", "path": "gym/spaces/dict.py"}], "after_files": [{"content": "\"\"\"Implementation of a space that represents the cartesian product of other spaces as a dictionary.\"\"\"\nfrom collections import OrderedDict\nfrom collections.abc import Mapping, Sequence\nfrom typing import Any\nfrom typing import Dict as TypingDict\nfrom typing import Optional, Union\n\nimport numpy as np\n\nfrom gym.spaces.space import Space\nfrom gym.utils import seeding\n\n\nclass Dict(Space[TypingDict[str, Space]], Mapping):\n \"\"\"A dictionary of :class:`Space` instances.\n\n Elements of this space are (ordered) dictionaries of elements from the constituent spaces.\n\n Example usage:\n\n >>> from gym.spaces import Dict, Discrete\n >>> observation_space = Dict({\"position\": Discrete(2), \"velocity\": Discrete(3)})\n >>> observation_space.sample()\n OrderedDict([('position', 1), ('velocity', 2)])\n\n Example usage [nested]::\n\n >>> from gym.spaces import Box, Dict, Discrete, MultiBinary, MultiDiscrete\n >>> Dict(\n ... {\n ... \"ext_controller\": MultiDiscrete([5, 2, 2]),\n ... \"inner_state\": Dict(\n ... {\n ... \"charge\": Discrete(100),\n ... \"system_checks\": MultiBinary(10),\n ... \"job_status\": Dict(\n ... {\n ... \"task\": Discrete(5),\n ... 
\"progress\": Box(low=0, high=100, shape=()),\n ... }\n ... ),\n ... }\n ... ),\n ... }\n ... )\n\n It can be convenient to use :class:`Dict` spaces if you want to make complex observations or actions more human-readable.\n Usually, it will not be possible to use elements of this space directly in learning code. However, you can easily\n convert `Dict` observations to flat arrays by using a :class:`gym.wrappers.FlattenObservation` wrapper. Similar wrappers can be\n implemented to deal with :class:`Dict` actions.\n \"\"\"\n\n def __init__(\n self,\n spaces: Optional[TypingDict[str, Space]] = None,\n seed: Optional[Union[dict, int, seeding.RandomNumberGenerator]] = None,\n **spaces_kwargs: Space,\n ):\n \"\"\"Constructor of :class:`Dict` space.\n\n This space can be instantiated in one of two ways: Either you pass a dictionary\n of spaces to :meth:`__init__` via the ``spaces`` argument, or you pass the spaces as separate\n keyword arguments (where you will need to avoid the keys ``spaces`` and ``seed``)\n\n Example::\n\n >>> from gym.spaces import Box, Discrete\n >>> Dict({\"position\": Box(-1, 1, shape=(2,)), \"color\": Discrete(3)})\n Dict(color:Discrete(3), position:Box(-1.0, 1.0, (2,), float32))\n >>> Dict(position=Box(-1, 1, shape=(2,)), color=Discrete(3))\n Dict(color:Discrete(3), position:Box(-1.0, 1.0, (2,), float32))\n\n Args:\n spaces: A dictionary of spaces. This specifies the structure of the :class:`Dict` space\n seed: Optionally, you can use this argument to seed the RNGs of the spaces that make up the :class:`Dict` space.\n **spaces_kwargs: If ``spaces`` is ``None``, you need to pass the constituent spaces as keyword arguments, as described above.\n \"\"\"\n assert (spaces is None) or (\n not spaces_kwargs\n ), \"Use either Dict(spaces=dict(...)) or Dict(foo=x, bar=z)\"\n\n if spaces is None:\n spaces = spaces_kwargs\n if isinstance(spaces, Mapping) and not isinstance(spaces, OrderedDict):\n try:\n spaces = OrderedDict(sorted(spaces.items()))\n except TypeError:\n # Incomparable types (e.g. `int` vs. 
`str`, or user-defined types) found.\n # The keys remain in the insertion order.\n spaces = OrderedDict(spaces.items())\n if isinstance(spaces, Sequence):\n spaces = OrderedDict(spaces)\n\n assert isinstance(spaces, OrderedDict), \"spaces must be a dictionary\"\n\n self.spaces = spaces\n for space in spaces.values():\n assert isinstance(\n space, Space\n ), \"Values of the dict should be instances of gym.Space\"\n super().__init__(\n None, None, seed # type: ignore\n ) # None for shape and dtype, since it'll require special handling\n\n def seed(self, seed: Optional[Union[dict, int]] = None) -> list:\n \"\"\"Seed the PRNG of this space and all subspaces.\"\"\"\n seeds = []\n if isinstance(seed, dict):\n for key, seed_key in zip(self.spaces, seed):\n assert key == seed_key, print(\n \"Key value\",\n seed_key,\n \"in passed seed dict did not match key value\",\n key,\n \"in spaces Dict.\",\n )\n seeds += self.spaces[key].seed(seed[seed_key])\n elif isinstance(seed, int):\n seeds = super().seed(seed)\n try:\n subseeds = self.np_random.choice(\n np.iinfo(int).max,\n size=len(self.spaces),\n replace=False, # unique subseed for each subspace\n )\n except ValueError:\n subseeds = self.np_random.choice(\n np.iinfo(int).max,\n size=len(self.spaces),\n replace=True, # we get more than INT_MAX subspaces\n )\n\n for subspace, subseed in zip(self.spaces.values(), subseeds):\n seeds.append(subspace.seed(int(subseed))[0])\n elif seed is None:\n for space in self.spaces.values():\n seeds += space.seed(seed)\n else:\n raise TypeError(\"Passed seed not of an expected type: dict or int or None\")\n\n return seeds\n\n def sample(self, mask: Optional[TypingDict[str, Any]] = None) -> dict:\n \"\"\"Generates a single random sample from this space.\n\n The sample is an ordered dictionary of independent samples from the constituent spaces.\n\n Args:\n mask: An optional mask for each of the subspaces, expects the same keys as the space\n\n Returns:\n A dictionary with the same key and sampled values from :attr:`self.spaces`\n \"\"\"\n if mask is not None:\n assert isinstance(\n mask, dict\n ), f\"Expects mask to be a dict, actual type: {type(mask)}\"\n assert (\n mask.keys() == self.spaces.keys()\n ), f\"Expect mask keys to be same as space keys, mask keys: {mask.keys()}, space keys: {self.spaces.keys()}\"\n return OrderedDict(\n [(k, space.sample(mask[k])) for k, space in self.spaces.items()]\n )\n\n return OrderedDict([(k, space.sample()) for k, space in self.spaces.items()])\n\n def contains(self, x) -> bool:\n \"\"\"Return boolean specifying if x is a valid member of this space.\"\"\"\n if not isinstance(x, dict) or len(x) != len(self.spaces):\n return False\n for k, space in self.spaces.items():\n if k not in x:\n return False\n if not space.contains(x[k]):\n return False\n return True\n\n def __getitem__(self, key: str) -> Space:\n \"\"\"Get the space that is associated to `key`.\"\"\"\n return self.spaces[key]\n\n def __setitem__(self, key: str, value: Space):\n \"\"\"Set the space that is associated to `key`.\"\"\"\n self.spaces[key] = value\n\n def __iter__(self):\n \"\"\"Iterator through the keys of the subspaces.\"\"\"\n yield from self.spaces\n\n def __len__(self) -> int:\n \"\"\"Gives the number of simpler spaces that make up the `Dict` space.\"\"\"\n return len(self.spaces)\n\n def __repr__(self) -> str:\n \"\"\"Gives a string representation of this space.\"\"\"\n return (\n \"Dict(\" + \", \".join([f\"{k!r}: {s}\" for k, s in self.spaces.items()]) + \")\"\n )\n\n def __eq__(self, other) -> bool:\n 
\"\"\"Check whether `other` is equivalent to this instance.\"\"\"\n return (\n isinstance(other, Dict)\n # Comparison of `OrderedDict`s is order-sensitive\n and self.spaces == other.spaces # OrderedDict.__eq__\n )\n\n def to_jsonable(self, sample_n: list) -> dict:\n \"\"\"Convert a batch of samples from this space to a JSONable data type.\"\"\"\n # serialize as dict-repr of vectors\n return {\n key: space.to_jsonable([sample[key] for sample in sample_n])\n for key, space in self.spaces.items()\n }\n\n def from_jsonable(self, sample_n: TypingDict[str, list]) -> list:\n \"\"\"Convert a JSONable data type to a batch of samples from this space.\"\"\"\n dict_of_list: TypingDict[str, list] = {}\n for key, space in self.spaces.items():\n dict_of_list[key] = space.from_jsonable(sample_n[key])\n ret = []\n n_elements = len(next(iter(dict_of_list.values())))\n for i in range(n_elements):\n entry = {}\n for key, value in dict_of_list.items():\n entry[key] = value[i]\n ret.append(entry)\n return ret\n", "path": "gym/spaces/dict.py"}]} | 3,946 | 405 |
gh_patches_debug_21854 | rasdani/github-patches | git_diff | pydantic__pydantic-1994 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
data.json file appears after `make docs`
# Step to reproduce
Follow the steps [here](https://pydantic-docs.helpmanual.io/contributing/).
When running `make docs`, a new file `data.json` appears at `/`. This should not appear there, or should already be committed there, or should be excluded by `.gitignore`.
For context, see [this comment](https://github.com/samuelcolvin/pydantic/pull/1987/files#r502764774).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/build/exec_examples.py`
Content:
```
1 #!/usr/bin/env python3
2 import importlib
3 import inspect
4 import json
5 import os
6 import re
7 import shutil
8 import subprocess
9 import sys
10 import textwrap
11 import traceback
12 from pathlib import Path
13 from typing import Any, List, Tuple
14 from unittest.mock import patch
15
16 from ansi2html import Ansi2HTMLConverter
17 from devtools import PrettyFormat
18
19 THIS_DIR = Path(__file__).parent
20 DOCS_DIR = (THIS_DIR / '..').resolve()
21 EXAMPLES_DIR = DOCS_DIR / 'examples'
22 TMP_EXAMPLES_DIR = DOCS_DIR / '.tmp_examples'
23 MAX_LINE_LENGTH = int(re.search(r'max_line_length = (\d+)', (EXAMPLES_DIR / '.editorconfig').read_text()).group(1))
24 LONG_LINE = 50
25 pformat = PrettyFormat(simple_cutoff=LONG_LINE)
26
27
28 def to_string(value: Any) -> str:
29 # attempt to build a pretty equivalent of the print output
30 if isinstance(value, (dict, list, tuple, set)):
31 return pformat(value)
32 elif isinstance(value, str) and any(re.fullmatch(r, value, flags=re.DOTALL) for r in ['{".+}', r'\[.+\]']):
33 try:
34 obj = json.loads(value)
35 except ValueError:
36 # not JSON, not a problem
37 pass
38 else:
39 s = json.dumps(obj)
40 if len(s) > LONG_LINE:
41 json.dumps(obj, indent=2)
42 else:
43 return s
44
45 return str(value)
46
47
48 class MockPrint:
49 def __init__(self, file: Path):
50 self.file = file
51 self.statements = []
52
53 def __call__(self, *args, file=None, flush=None):
54 frame = inspect.currentframe().f_back.f_back.f_back
55 if sys.version_info >= (3, 8):
56 frame = frame.f_back
57 if not self.file.samefile(frame.f_code.co_filename):
58 # happens when index_error.py imports index_main.py
59 return
60 s = ' '.join(map(to_string, args))
61
62 self.statements.append((frame.f_lineno, s))
63
64
65 def build_print_lines(s: str, max_len_reduction: int = 0):
66 print_lines = []
67 max_len = MAX_LINE_LENGTH - 3 - max_len_reduction
68 for line in s.split('\n'):
69 if len(line) > max_len:
70 print_lines += textwrap.wrap(line, width=max_len)
71 else:
72 print_lines.append(line)
73 return print_lines
74
75
76 def build_print_statement(line_no: int, s: str, lines: List[str]) -> None:
77 indent = ''
78 for back in range(1, 100):
79 m = re.search(r'^( *)print\(', lines[line_no - back])
80 if m:
81 indent = m.group(1)
82 break
83 print_lines = build_print_lines(s, len(indent))
84
85 if len(print_lines) > 2:
86 text = textwrap.indent('"""\n{}\n"""'.format('\n'.join(print_lines)), indent)
87 else:
88 text = '\n'.join(f'{indent}#> {line}' for line in print_lines)
89 lines.insert(line_no, text)
90
91
92 def all_md_contents() -> str:
93 file_contents = []
94 for f in DOCS_DIR.glob('**/*.md'):
95 file_contents.append(f.read_text())
96 return '\n\n\n'.join(file_contents)
97
98
99 def gen_ansi_output():
100
101 conv = Ansi2HTMLConverter()
102
103 input_file = EXAMPLES_DIR / 'devtools_main.py'
104 os.environ['PY_DEVTOOLS_HIGHLIGHT'] = 'true'
105 p = subprocess.run((sys.executable, str(input_file)), stdout=subprocess.PIPE, check=True, encoding='utf8')
106 html = conv.convert(p.stdout, full=False).strip('\r\n')
107 full_html = f'<div class="terminal">\n<pre class="terminal-content">\n{html}\n</pre>\n</div>'
108 path = TMP_EXAMPLES_DIR / f'{input_file.stem}.html'
109 path.write_text(full_html)
110 print(f'generated ansi output to {path}')
111
112
113 dont_execute_re = re.compile(r'^# dont-execute\n', flags=re.M | re.I)
114 required_py_re = re.compile(r'^# *requires *python *(\d+).(\d+)', flags=re.M)
115
116
117 def should_execute(file_name: str, file_text: str) -> Tuple[str, bool]:
118 if dont_execute_re.search(file_text):
119 return dont_execute_re.sub('', file_text), False
120 m = required_py_re.search(file_text)
121 if m:
122 if sys.version_info >= tuple(int(v) for v in m.groups()):
123 return required_py_re.sub('', file_text), True
124 else:
125 v = '.'.join(m.groups())
126 print(f'WARNING: {file_name} requires python {v}, not running')
127 return required_py_re.sub(f'# requires python {v}, NOT EXECUTED!', file_text), False
128 else:
129 return file_text, True
130
131
132 def exec_examples():
133 errors = []
134 all_md = all_md_contents()
135 new_files = {}
136 os.environ.update({'my_auth_key': 'xxx', 'my_api_key': 'xxx'})
137
138 sys.path.append(str(EXAMPLES_DIR))
139 for file in sorted(EXAMPLES_DIR.iterdir()):
140
141 def error(desc: str):
142 errors.append((file, desc))
143 sys.stderr.write(f'error in {file.name}: {desc}\n')
144
145 if not file.is_file():
146 # __pycache__, maybe others
147 continue
148
149 if file.suffix != '.py':
150 # just copy
151 new_files[file.name] = file.read_text()
152 continue
153
154 if f'{{!.tmp_examples/{file.name}!}}' not in all_md:
155 error('file not used anywhere')
156
157 file_text = file.read_text('utf-8')
158 if '\n\n\n\n' in file_text:
159 error('too many new lines')
160 if not file_text.endswith('\n'):
161 error('no trailing new line')
162 if re.search('^ *# *>', file_text, flags=re.M):
163 error('contains comments with print output, please remove')
164
165 file_text, execute = should_execute(file.name, file_text)
166 if execute:
167 no_print_intercept_re = re.compile(r'^# no-print-intercept\n', flags=re.M)
168 print_intercept = not bool(no_print_intercept_re.search(file_text))
169 if not print_intercept:
170 file_text = no_print_intercept_re.sub('', file_text)
171
172 if file.stem in sys.modules:
173 del sys.modules[file.stem]
174 mp = MockPrint(file)
175 mod = None
176 with patch('builtins.print') as mock_print:
177 if print_intercept:
178 mock_print.side_effect = mp
179 try:
180 mod = importlib.import_module(file.stem)
181 except Exception:
182 tb = traceback.format_exception(*sys.exc_info())
183 error(''.join(e for e in tb if '/pydantic/docs/examples/' in e or not e.startswith(' File ')))
184
185 if mod and not mod.__file__.startswith(str(EXAMPLES_DIR)):
186 error(f'module path "{mod.__file__}" not inside "{EXAMPLES_DIR}", name may shadow another module?')
187
188 lines = file_text.split('\n')
189
190 to_json_line = '# output-json'
191 if to_json_line in lines:
192 lines = [line for line in lines if line != to_json_line]
193 if len(mp.statements) != 1:
194 error('should have exactly one print statement')
195 print_lines = build_print_lines(mp.statements[0][1])
196 new_files[file.stem + '.json'] = '\n'.join(print_lines) + '\n'
197 else:
198 for line_no, print_string in reversed(mp.statements):
199 build_print_statement(line_no, print_string, lines)
200 else:
201 lines = file_text.split('\n')
202
203 try:
204 ignore_above = lines.index('# ignore-above')
205 except ValueError:
206 pass
207 else:
208 lines = lines[ignore_above + 1 :]
209
210 lines = '\n'.join(lines).split('\n')
211 if any(len(l) > MAX_LINE_LENGTH for l in lines):
212 error(f'lines longer than {MAX_LINE_LENGTH} characters')
213
214 new_files[file.name] = '\n'.join(lines)
215
216 if errors:
217 print(f'\n{len(errors)} errors, not writing files\n')
218 return 1
219
220 if TMP_EXAMPLES_DIR.exists():
221 shutil.rmtree(TMP_EXAMPLES_DIR)
222
223 print(f'writing {len(new_files)} example files to {TMP_EXAMPLES_DIR}')
224 TMP_EXAMPLES_DIR.mkdir()
225 for file_name, content in new_files.items():
226 (TMP_EXAMPLES_DIR / file_name).write_text(content, 'utf-8')
227 gen_ansi_output()
228 return 0
229
230
231 if __name__ == '__main__':
232 sys.exit(exec_examples())
233
```
Path: `docs/examples/models_parse.py`
Content:
```
1 import pickle
2 from datetime import datetime
3 from pydantic import BaseModel, ValidationError
4
5
6 class User(BaseModel):
7 id: int
8 name = 'John Doe'
9 signup_ts: datetime = None
10
11
12 m = User.parse_obj({'id': 123, 'name': 'James'})
13 print(m)
14
15 try:
16 User.parse_obj(['not', 'a', 'dict'])
17 except ValidationError as e:
18 print(e)
19
20 # assumes json as no content type passed
21 m = User.parse_raw('{"id": 123, "name": "James"}')
22 print(m)
23
24 pickle_data = pickle.dumps({
25 'id': 123,
26 'name': 'James',
27 'signup_ts': datetime(2017, 7, 14)
28 })
29 m = User.parse_raw(
30 pickle_data, content_type='application/pickle', allow_pickle=True
31 )
32 print(m)
33
34 path = 'data.json'
35 with open(path, 'w') as f:
36 f.write('{"id": 123, "name": "James"}')
37 m = User.parse_file(path)
38 print(m)
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/build/exec_examples.py b/docs/build/exec_examples.py
--- a/docs/build/exec_examples.py
+++ b/docs/build/exec_examples.py
@@ -207,6 +207,13 @@
else:
lines = lines[ignore_above + 1 :]
+ try:
+ ignore_below = lines.index('# ignore-below')
+ except ValueError:
+ pass
+ else:
+ lines = lines[:ignore_below]
+
lines = '\n'.join(lines).split('\n')
if any(len(l) > MAX_LINE_LENGTH for l in lines):
error(f'lines longer than {MAX_LINE_LENGTH} characters')
diff --git a/docs/examples/models_parse.py b/docs/examples/models_parse.py
--- a/docs/examples/models_parse.py
+++ b/docs/examples/models_parse.py
@@ -1,5 +1,7 @@
import pickle
from datetime import datetime
+from pathlib import Path
+
from pydantic import BaseModel, ValidationError
@@ -31,8 +33,10 @@
)
print(m)
-path = 'data.json'
-with open(path, 'w') as f:
- f.write('{"id": 123, "name": "James"}')
+path = Path('data.json')
+path.write_text('{"id": 123, "name": "James"}')
m = User.parse_file(path)
print(m)
+# ignore-below
+if path.exists():
+ path.unlink()
| {"golden_diff": "diff --git a/docs/build/exec_examples.py b/docs/build/exec_examples.py\n--- a/docs/build/exec_examples.py\n+++ b/docs/build/exec_examples.py\n@@ -207,6 +207,13 @@\n else:\n lines = lines[ignore_above + 1 :]\n \n+ try:\n+ ignore_below = lines.index('# ignore-below')\n+ except ValueError:\n+ pass\n+ else:\n+ lines = lines[:ignore_below]\n+\n lines = '\\n'.join(lines).split('\\n')\n if any(len(l) > MAX_LINE_LENGTH for l in lines):\n error(f'lines longer than {MAX_LINE_LENGTH} characters')\ndiff --git a/docs/examples/models_parse.py b/docs/examples/models_parse.py\n--- a/docs/examples/models_parse.py\n+++ b/docs/examples/models_parse.py\n@@ -1,5 +1,7 @@\n import pickle\n from datetime import datetime\n+from pathlib import Path\n+\n from pydantic import BaseModel, ValidationError\n \n \n@@ -31,8 +33,10 @@\n )\n print(m)\n \n-path = 'data.json'\n-with open(path, 'w') as f:\n- f.write('{\"id\": 123, \"name\": \"James\"}')\n+path = Path('data.json')\n+path.write_text('{\"id\": 123, \"name\": \"James\"}')\n m = User.parse_file(path)\n print(m)\n+# ignore-below\n+if path.exists():\n+ path.unlink()\n", "issue": "data.json file appears after `make docs`\n# Step to reproduce\r\n\r\nFollow the steps [here](https://pydantic-docs.helpmanual.io/contributing/).\r\n\r\nWhen running `make docs`, a new file `data.json` appears at `/`. This should not appear there, or should already be committed there, or should be excluded by `.gitignore`.\r\n\r\nFor context, see [this comment](https://github.com/samuelcolvin/pydantic/pull/1987/files#r502764774).\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport importlib\nimport inspect\nimport json\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport textwrap\nimport traceback\nfrom pathlib import Path\nfrom typing import Any, List, Tuple\nfrom unittest.mock import patch\n\nfrom ansi2html import Ansi2HTMLConverter\nfrom devtools import PrettyFormat\n\nTHIS_DIR = Path(__file__).parent\nDOCS_DIR = (THIS_DIR / '..').resolve()\nEXAMPLES_DIR = DOCS_DIR / 'examples'\nTMP_EXAMPLES_DIR = DOCS_DIR / '.tmp_examples'\nMAX_LINE_LENGTH = int(re.search(r'max_line_length = (\\d+)', (EXAMPLES_DIR / '.editorconfig').read_text()).group(1))\nLONG_LINE = 50\npformat = PrettyFormat(simple_cutoff=LONG_LINE)\n\n\ndef to_string(value: Any) -> str:\n # attempt to build a pretty equivalent of the print output\n if isinstance(value, (dict, list, tuple, set)):\n return pformat(value)\n elif isinstance(value, str) and any(re.fullmatch(r, value, flags=re.DOTALL) for r in ['{\".+}', r'\\[.+\\]']):\n try:\n obj = json.loads(value)\n except ValueError:\n # not JSON, not a problem\n pass\n else:\n s = json.dumps(obj)\n if len(s) > LONG_LINE:\n json.dumps(obj, indent=2)\n else:\n return s\n\n return str(value)\n\n\nclass MockPrint:\n def __init__(self, file: Path):\n self.file = file\n self.statements = []\n\n def __call__(self, *args, file=None, flush=None):\n frame = inspect.currentframe().f_back.f_back.f_back\n if sys.version_info >= (3, 8):\n frame = frame.f_back\n if not self.file.samefile(frame.f_code.co_filename):\n # happens when index_error.py imports index_main.py\n return\n s = ' '.join(map(to_string, args))\n\n self.statements.append((frame.f_lineno, s))\n\n\ndef build_print_lines(s: str, max_len_reduction: int = 0):\n print_lines = []\n max_len = MAX_LINE_LENGTH - 3 - max_len_reduction\n for line in s.split('\\n'):\n if len(line) > max_len:\n print_lines += textwrap.wrap(line, width=max_len)\n else:\n print_lines.append(line)\n return 
print_lines\n\n\ndef build_print_statement(line_no: int, s: str, lines: List[str]) -> None:\n indent = ''\n for back in range(1, 100):\n m = re.search(r'^( *)print\\(', lines[line_no - back])\n if m:\n indent = m.group(1)\n break\n print_lines = build_print_lines(s, len(indent))\n\n if len(print_lines) > 2:\n text = textwrap.indent('\"\"\"\\n{}\\n\"\"\"'.format('\\n'.join(print_lines)), indent)\n else:\n text = '\\n'.join(f'{indent}#> {line}' for line in print_lines)\n lines.insert(line_no, text)\n\n\ndef all_md_contents() -> str:\n file_contents = []\n for f in DOCS_DIR.glob('**/*.md'):\n file_contents.append(f.read_text())\n return '\\n\\n\\n'.join(file_contents)\n\n\ndef gen_ansi_output():\n\n conv = Ansi2HTMLConverter()\n\n input_file = EXAMPLES_DIR / 'devtools_main.py'\n os.environ['PY_DEVTOOLS_HIGHLIGHT'] = 'true'\n p = subprocess.run((sys.executable, str(input_file)), stdout=subprocess.PIPE, check=True, encoding='utf8')\n html = conv.convert(p.stdout, full=False).strip('\\r\\n')\n full_html = f'<div class=\"terminal\">\\n<pre class=\"terminal-content\">\\n{html}\\n</pre>\\n</div>'\n path = TMP_EXAMPLES_DIR / f'{input_file.stem}.html'\n path.write_text(full_html)\n print(f'generated ansi output to {path}')\n\n\ndont_execute_re = re.compile(r'^# dont-execute\\n', flags=re.M | re.I)\nrequired_py_re = re.compile(r'^# *requires *python *(\\d+).(\\d+)', flags=re.M)\n\n\ndef should_execute(file_name: str, file_text: str) -> Tuple[str, bool]:\n if dont_execute_re.search(file_text):\n return dont_execute_re.sub('', file_text), False\n m = required_py_re.search(file_text)\n if m:\n if sys.version_info >= tuple(int(v) for v in m.groups()):\n return required_py_re.sub('', file_text), True\n else:\n v = '.'.join(m.groups())\n print(f'WARNING: {file_name} requires python {v}, not running')\n return required_py_re.sub(f'# requires python {v}, NOT EXECUTED!', file_text), False\n else:\n return file_text, True\n\n\ndef exec_examples():\n errors = []\n all_md = all_md_contents()\n new_files = {}\n os.environ.update({'my_auth_key': 'xxx', 'my_api_key': 'xxx'})\n\n sys.path.append(str(EXAMPLES_DIR))\n for file in sorted(EXAMPLES_DIR.iterdir()):\n\n def error(desc: str):\n errors.append((file, desc))\n sys.stderr.write(f'error in {file.name}: {desc}\\n')\n\n if not file.is_file():\n # __pycache__, maybe others\n continue\n\n if file.suffix != '.py':\n # just copy\n new_files[file.name] = file.read_text()\n continue\n\n if f'{{!.tmp_examples/{file.name}!}}' not in all_md:\n error('file not used anywhere')\n\n file_text = file.read_text('utf-8')\n if '\\n\\n\\n\\n' in file_text:\n error('too many new lines')\n if not file_text.endswith('\\n'):\n error('no trailing new line')\n if re.search('^ *# *>', file_text, flags=re.M):\n error('contains comments with print output, please remove')\n\n file_text, execute = should_execute(file.name, file_text)\n if execute:\n no_print_intercept_re = re.compile(r'^# no-print-intercept\\n', flags=re.M)\n print_intercept = not bool(no_print_intercept_re.search(file_text))\n if not print_intercept:\n file_text = no_print_intercept_re.sub('', file_text)\n\n if file.stem in sys.modules:\n del sys.modules[file.stem]\n mp = MockPrint(file)\n mod = None\n with patch('builtins.print') as mock_print:\n if print_intercept:\n mock_print.side_effect = mp\n try:\n mod = importlib.import_module(file.stem)\n except Exception:\n tb = traceback.format_exception(*sys.exc_info())\n error(''.join(e for e in tb if '/pydantic/docs/examples/' in e or not e.startswith(' File ')))\n\n if mod and 
not mod.__file__.startswith(str(EXAMPLES_DIR)):\n error(f'module path \"{mod.__file__}\" not inside \"{EXAMPLES_DIR}\", name may shadow another module?')\n\n lines = file_text.split('\\n')\n\n to_json_line = '# output-json'\n if to_json_line in lines:\n lines = [line for line in lines if line != to_json_line]\n if len(mp.statements) != 1:\n error('should have exactly one print statement')\n print_lines = build_print_lines(mp.statements[0][1])\n new_files[file.stem + '.json'] = '\\n'.join(print_lines) + '\\n'\n else:\n for line_no, print_string in reversed(mp.statements):\n build_print_statement(line_no, print_string, lines)\n else:\n lines = file_text.split('\\n')\n\n try:\n ignore_above = lines.index('# ignore-above')\n except ValueError:\n pass\n else:\n lines = lines[ignore_above + 1 :]\n\n lines = '\\n'.join(lines).split('\\n')\n if any(len(l) > MAX_LINE_LENGTH for l in lines):\n error(f'lines longer than {MAX_LINE_LENGTH} characters')\n\n new_files[file.name] = '\\n'.join(lines)\n\n if errors:\n print(f'\\n{len(errors)} errors, not writing files\\n')\n return 1\n\n if TMP_EXAMPLES_DIR.exists():\n shutil.rmtree(TMP_EXAMPLES_DIR)\n\n print(f'writing {len(new_files)} example files to {TMP_EXAMPLES_DIR}')\n TMP_EXAMPLES_DIR.mkdir()\n for file_name, content in new_files.items():\n (TMP_EXAMPLES_DIR / file_name).write_text(content, 'utf-8')\n gen_ansi_output()\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(exec_examples())\n", "path": "docs/build/exec_examples.py"}, {"content": "import pickle\nfrom datetime import datetime\nfrom pydantic import BaseModel, ValidationError\n\n\nclass User(BaseModel):\n id: int\n name = 'John Doe'\n signup_ts: datetime = None\n\n\nm = User.parse_obj({'id': 123, 'name': 'James'})\nprint(m)\n\ntry:\n User.parse_obj(['not', 'a', 'dict'])\nexcept ValidationError as e:\n print(e)\n\n# assumes json as no content type passed\nm = User.parse_raw('{\"id\": 123, \"name\": \"James\"}')\nprint(m)\n\npickle_data = pickle.dumps({\n 'id': 123,\n 'name': 'James',\n 'signup_ts': datetime(2017, 7, 14)\n})\nm = User.parse_raw(\n pickle_data, content_type='application/pickle', allow_pickle=True\n)\nprint(m)\n\npath = 'data.json'\nwith open(path, 'w') as f:\n f.write('{\"id\": 123, \"name\": \"James\"}')\nm = User.parse_file(path)\nprint(m)\n", "path": "docs/examples/models_parse.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport importlib\nimport inspect\nimport json\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport textwrap\nimport traceback\nfrom pathlib import Path\nfrom typing import Any, List, Tuple\nfrom unittest.mock import patch\n\nfrom ansi2html import Ansi2HTMLConverter\nfrom devtools import PrettyFormat\n\nTHIS_DIR = Path(__file__).parent\nDOCS_DIR = (THIS_DIR / '..').resolve()\nEXAMPLES_DIR = DOCS_DIR / 'examples'\nTMP_EXAMPLES_DIR = DOCS_DIR / '.tmp_examples'\nMAX_LINE_LENGTH = int(re.search(r'max_line_length = (\\d+)', (EXAMPLES_DIR / '.editorconfig').read_text()).group(1))\nLONG_LINE = 50\npformat = PrettyFormat(simple_cutoff=LONG_LINE)\n\n\ndef to_string(value: Any) -> str:\n # attempt to build a pretty equivalent of the print output\n if isinstance(value, (dict, list, tuple, set)):\n return pformat(value)\n elif isinstance(value, str) and any(re.fullmatch(r, value, flags=re.DOTALL) for r in ['{\".+}', r'\\[.+\\]']):\n try:\n obj = json.loads(value)\n except ValueError:\n # not JSON, not a problem\n pass\n else:\n s = json.dumps(obj)\n if len(s) > LONG_LINE:\n json.dumps(obj, indent=2)\n else:\n return s\n\n return 
str(value)\n\n\nclass MockPrint:\n def __init__(self, file: Path):\n self.file = file\n self.statements = []\n\n def __call__(self, *args, file=None, flush=None):\n frame = inspect.currentframe().f_back.f_back.f_back\n if sys.version_info >= (3, 8):\n frame = frame.f_back\n if not self.file.samefile(frame.f_code.co_filename):\n # happens when index_error.py imports index_main.py\n return\n s = ' '.join(map(to_string, args))\n\n self.statements.append((frame.f_lineno, s))\n\n\ndef build_print_lines(s: str, max_len_reduction: int = 0):\n print_lines = []\n max_len = MAX_LINE_LENGTH - 3 - max_len_reduction\n for line in s.split('\\n'):\n if len(line) > max_len:\n print_lines += textwrap.wrap(line, width=max_len)\n else:\n print_lines.append(line)\n return print_lines\n\n\ndef build_print_statement(line_no: int, s: str, lines: List[str]) -> None:\n indent = ''\n for back in range(1, 100):\n m = re.search(r'^( *)print\\(', lines[line_no - back])\n if m:\n indent = m.group(1)\n break\n print_lines = build_print_lines(s, len(indent))\n\n if len(print_lines) > 2:\n text = textwrap.indent('\"\"\"\\n{}\\n\"\"\"'.format('\\n'.join(print_lines)), indent)\n else:\n text = '\\n'.join(f'{indent}#> {line}' for line in print_lines)\n lines.insert(line_no, text)\n\n\ndef all_md_contents() -> str:\n file_contents = []\n for f in DOCS_DIR.glob('**/*.md'):\n file_contents.append(f.read_text())\n return '\\n\\n\\n'.join(file_contents)\n\n\ndef gen_ansi_output():\n\n conv = Ansi2HTMLConverter()\n\n input_file = EXAMPLES_DIR / 'devtools_main.py'\n os.environ['PY_DEVTOOLS_HIGHLIGHT'] = 'true'\n p = subprocess.run((sys.executable, str(input_file)), stdout=subprocess.PIPE, check=True, encoding='utf8')\n html = conv.convert(p.stdout, full=False).strip('\\r\\n')\n full_html = f'<div class=\"terminal\">\\n<pre class=\"terminal-content\">\\n{html}\\n</pre>\\n</div>'\n path = TMP_EXAMPLES_DIR / f'{input_file.stem}.html'\n path.write_text(full_html)\n print(f'generated ansi output to {path}')\n\n\ndont_execute_re = re.compile(r'^# dont-execute\\n', flags=re.M | re.I)\nrequired_py_re = re.compile(r'^# *requires *python *(\\d+).(\\d+)', flags=re.M)\n\n\ndef should_execute(file_name: str, file_text: str) -> Tuple[str, bool]:\n if dont_execute_re.search(file_text):\n return dont_execute_re.sub('', file_text), False\n m = required_py_re.search(file_text)\n if m:\n if sys.version_info >= tuple(int(v) for v in m.groups()):\n return required_py_re.sub('', file_text), True\n else:\n v = '.'.join(m.groups())\n print(f'WARNING: {file_name} requires python {v}, not running')\n return required_py_re.sub(f'# requires python {v}, NOT EXECUTED!', file_text), False\n else:\n return file_text, True\n\n\ndef exec_examples():\n errors = []\n all_md = all_md_contents()\n new_files = {}\n os.environ.update({'my_auth_key': 'xxx', 'my_api_key': 'xxx'})\n\n sys.path.append(str(EXAMPLES_DIR))\n for file in sorted(EXAMPLES_DIR.iterdir()):\n\n def error(desc: str):\n errors.append((file, desc))\n sys.stderr.write(f'error in {file.name}: {desc}\\n')\n\n if not file.is_file():\n # __pycache__, maybe others\n continue\n\n if file.suffix != '.py':\n # just copy\n new_files[file.name] = file.read_text()\n continue\n\n if f'{{!.tmp_examples/{file.name}!}}' not in all_md:\n error('file not used anywhere')\n\n file_text = file.read_text('utf-8')\n if '\\n\\n\\n\\n' in file_text:\n error('too many new lines')\n if not file_text.endswith('\\n'):\n error('no trailing new line')\n if re.search('^ *# *>', file_text, flags=re.M):\n error('contains comments with 
print output, please remove')\n\n file_text, execute = should_execute(file.name, file_text)\n if execute:\n no_print_intercept_re = re.compile(r'^# no-print-intercept\\n', flags=re.M)\n print_intercept = not bool(no_print_intercept_re.search(file_text))\n if not print_intercept:\n file_text = no_print_intercept_re.sub('', file_text)\n\n if file.stem in sys.modules:\n del sys.modules[file.stem]\n mp = MockPrint(file)\n mod = None\n with patch('builtins.print') as mock_print:\n if print_intercept:\n mock_print.side_effect = mp\n try:\n mod = importlib.import_module(file.stem)\n except Exception:\n tb = traceback.format_exception(*sys.exc_info())\n error(''.join(e for e in tb if '/pydantic/docs/examples/' in e or not e.startswith(' File ')))\n\n if mod and not mod.__file__.startswith(str(EXAMPLES_DIR)):\n error(f'module path \"{mod.__file__}\" not inside \"{EXAMPLES_DIR}\", name may shadow another module?')\n\n lines = file_text.split('\\n')\n\n to_json_line = '# output-json'\n if to_json_line in lines:\n lines = [line for line in lines if line != to_json_line]\n if len(mp.statements) != 1:\n error('should have exactly one print statement')\n print_lines = build_print_lines(mp.statements[0][1])\n new_files[file.stem + '.json'] = '\\n'.join(print_lines) + '\\n'\n else:\n for line_no, print_string in reversed(mp.statements):\n build_print_statement(line_no, print_string, lines)\n else:\n lines = file_text.split('\\n')\n\n try:\n ignore_above = lines.index('# ignore-above')\n except ValueError:\n pass\n else:\n lines = lines[ignore_above + 1 :]\n\n try:\n ignore_below = lines.index('# ignore-below')\n except ValueError:\n pass\n else:\n lines = lines[:ignore_below]\n\n lines = '\\n'.join(lines).split('\\n')\n if any(len(l) > MAX_LINE_LENGTH for l in lines):\n error(f'lines longer than {MAX_LINE_LENGTH} characters')\n\n new_files[file.name] = '\\n'.join(lines)\n\n if errors:\n print(f'\\n{len(errors)} errors, not writing files\\n')\n return 1\n\n if TMP_EXAMPLES_DIR.exists():\n shutil.rmtree(TMP_EXAMPLES_DIR)\n\n print(f'writing {len(new_files)} example files to {TMP_EXAMPLES_DIR}')\n TMP_EXAMPLES_DIR.mkdir()\n for file_name, content in new_files.items():\n (TMP_EXAMPLES_DIR / file_name).write_text(content, 'utf-8')\n gen_ansi_output()\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(exec_examples())\n", "path": "docs/build/exec_examples.py"}, {"content": "import pickle\nfrom datetime import datetime\nfrom pathlib import Path\n\nfrom pydantic import BaseModel, ValidationError\n\n\nclass User(BaseModel):\n id: int\n name = 'John Doe'\n signup_ts: datetime = None\n\n\nm = User.parse_obj({'id': 123, 'name': 'James'})\nprint(m)\n\ntry:\n User.parse_obj(['not', 'a', 'dict'])\nexcept ValidationError as e:\n print(e)\n\n# assumes json as no content type passed\nm = User.parse_raw('{\"id\": 123, \"name\": \"James\"}')\nprint(m)\n\npickle_data = pickle.dumps({\n 'id': 123,\n 'name': 'James',\n 'signup_ts': datetime(2017, 7, 14)\n})\nm = User.parse_raw(\n pickle_data, content_type='application/pickle', allow_pickle=True\n)\nprint(m)\n\npath = Path('data.json')\npath.write_text('{\"id\": 123, \"name\": \"James\"}')\nm = User.parse_file(path)\nprint(m)\n# ignore-below\nif path.exists():\n path.unlink()\n", "path": "docs/examples/models_parse.py"}]} | 3,239 | 317 |
gh_patches_debug_24602 | rasdani/github-patches | git_diff | enthought__chaco-215 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError when using PanTool with restrict_to_data and empty data source
This can come up when a plot's data sources contain empty arrays. Non-empty data sources may be visible on screen, and the user may wish to pan based on that view. The empty data sources should not keep this from happening. Here's a minimal example.
``` python
import numpy as np
from chaco.array_plot_data import ArrayPlotData
from chaco.plot import Plot
from chaco.tools.pan_tool import PanTool
plot_data = ArrayPlotData()
plot = Plot(plot_data)
arr = np.arange(4.0)
plot_data.set_data("x", arr)
plot_data.set_data("y", arr)
plot_data.set_data("z", np.array([], np.float64)
plot.plot(('x', 'y'))
plot.plot(('z', 'z'))
tool = PanTool(plot, restrict_to_data=True)
plot.tools.append(tool)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chaco/tools/pan_tool.py`
Content:
```
1 """ Defines the PanTool class.
2 """
3
4 from numpy import inf
5
6 # Enthought library imports
7 from enable.api import BaseTool, Pointer, KeySpec
8 from traits.api import Bool, Enum, Float, Tuple, Instance
9
10
11 class PanTool(BaseTool):
12 """ A tool that enables the user to pan a plot by clicking a mouse
13 button and dragging.
14 """
15
16 # The mouse button that initiates the drag operation.
17 drag_button = Enum("left", "middle", "right")
18
19 # The cursor to use when panning.
20 drag_pointer = Pointer("hand")
21
22 # Scaling factor on the panning "speed".
23 speed = Float(1.0)
24
25 # The modifier key that, if depressed when the drag is initiated, constrains
26 # the panning to happen in the only direction of largest initial motion.
27 # It is possible to permanently restrict this tool to always drag along one
28 # direction. To do so, set constrain=True, constrain_key=None, and
29 # constrain_direction to the desired direction.
30 constrain_key = Enum(None, "shift", "control", "alt")
31
32 # Keys to Pan via keyboard
33 pan_right_key = Instance(KeySpec, args=("Right",))
34 pan_left_key = Instance(KeySpec, args=("Left",))
35 pan_up_key = Instance(KeySpec, args=("Up",))
36 pan_down_key = Instance(KeySpec, args=("Down",))
37
38 # number of pixels the keys should pan
39 # disabled if 0.0
40 pan_keys_step = Float(0.0)
41
42 # Constrain the panning to one direction?
43 constrain = Bool(False)
44
45 # The direction of constrained draw. A value of None means that the user
46 # has initiated the drag and pressed the constrain_key, but hasn't moved
47 # the mouse yet; the magnitude of the components of the next mouse_move
48 # event will determine the constrain_direction.
49 constrain_direction = Enum(None, "x", "y")
50
51 # Restrict to the bounds of the plot data
52 restrict_to_data = Bool(False)
53
54 # (x,y) of the point where the mouse button was pressed.
55 _original_xy = Tuple
56
57 # Data coordinates of **_original_xy**. This may be either (index,value)
58 # or (value,index) depending on the component's orientation.
59 _original_data = Tuple
60
61 # Was constrain=True triggered by the **contrain_key**? If False, it was
62 # set programmatically.
63 _auto_constrain = Bool(False)
64
65
66 #------------------------------------------------------------------------
67 # Inherited BaseTool traits
68 #------------------------------------------------------------------------
69
70 # The tool does not have a visual representation (overrides
71 # BaseTool).
72 draw_mode = "none"
73
74 # The tool is not visible (overrides BaseTool).
75 visible = False
76
77 # The possible event states of this tool (overrides enable.Interactor).
78 event_state = Enum("normal", "panning")
79
80 def normal_key_pressed(self, event):
81 """ Handles a key being pressed when the tool is in the 'normal'
82 state.
83 """
84 if self.pan_keys_step == 0.0:
85 return
86 src = self.component.bounds[0]/2, self.component.bounds[1]/2
87 dest = src
88 if self.pan_left_key.match(event):
89 dest = (src[0] - self.pan_keys_step,
90 src[1])
91 elif self.pan_right_key.match(event):
92 dest = (src[0] + self.pan_keys_step,
93 src[1])
94 elif self.pan_down_key.match(event):
95 dest = (src[0],
96 src[1] - self.pan_keys_step)
97 elif self.pan_up_key.match(event):
98 dest = (src[0],
99 src[1] + self.pan_keys_step)
100 if src != dest:
101 self._original_xy = src
102 event.x = dest[0]
103 event.y = dest[1]
104 self.panning_mouse_move(event)
105 return
106
107 def normal_left_down(self, event):
108 """ Handles the left mouse button being pressed when the tool is in
109 the 'normal' state.
110
111 Starts panning if the left mouse button is the drag button.
112 """
113 if self.drag_button == "left":
114 self._start_pan(event)
115 return
116
117 def normal_right_down(self, event):
118 """ Handles the right mouse button being pressed when the tool is in
119 the 'normal' state.
120
121 Starts panning if the right mouse button is the drag button.
122 """
123 if self.drag_button == "right":
124 self._start_pan(event)
125 return
126
127 def normal_middle_down(self, event):
128 """ Handles the middle mouse button being pressed when the tool is in
129 the 'normal' state.
130
131 Starts panning if the middle mouse button is the drag button.
132 """
133 if self.drag_button == "middle":
134 self._start_pan(event)
135 return
136
137 def panning_left_up(self, event):
138 """ Handles the left mouse button coming up when the tool is in the
139 'panning' state.
140
141 Stops panning if the left mouse button is the drag button.
142 """
143 if self.drag_button == "left":
144 self._end_pan(event)
145 return
146
147 def panning_right_up(self, event):
148 """ Handles the right mouse button coming up when the tool is in the
149 'panning' state.
150
151 Stops panning if the right mouse button is the drag button.
152 """
153 if self.drag_button == "right":
154 self._end_pan(event)
155 return
156
157 def panning_middle_up(self, event):
158 """ Handles the middle mouse button coming up when the tool is in the
159 'panning' state.
160
161 Stops panning if the middle mouse button is the drag button.
162 """
163 if self.drag_button == "middle":
164 self._end_pan(event)
165 return
166
167 def panning_mouse_move(self, event):
168 """ Handles the mouse being moved when the tool is in the 'panning'
169 state.
170 """
171 plot = self.component
172
173 if self._auto_constrain and self.constrain_direction is None:
174 # Determine the constraint direction
175 x_orig, y_orig = self._original_xy
176 if abs(event.x - x_orig) > abs(event.y - y_orig):
177 self.constrain_direction = "x"
178 else:
179 self.constrain_direction = "y"
180
181 direction_info = [("x", "width", 0), ("y", "height", 1)]
182 for direction, bound_name, index in direction_info:
183 if not self.constrain or self.constrain_direction == direction:
184 mapper = getattr(plot, direction + "_mapper")
185 domain_min, domain_max = mapper.domain_limits
186 eventpos = getattr(event, direction)
187 origpos = self._original_xy[index]
188
189 screenlow, screenhigh = mapper.screen_bounds
190 screendelta = self.speed * (eventpos - origpos)
191
192 newlow = mapper.map_data(screenlow - screendelta)
193 newhigh = mapper.map_data(screenhigh - screendelta)
194
195 # Don't set the range in this dimension if the panning
196 # would exceed the domain limits.
197 # To do this offset properly, we would need to iteratively
198 # solve for a root using map_data on successive trial
199 # values. As a first approximation, we're just going to
200 # use a linear approximation, which works perfectly for
201 # linear mappers (which is used 99% of the time).
202 if domain_min is None:
203 if self.restrict_to_data:
204 domain_min = min([source.get_data().min()
205 for source in mapper.range.sources])
206 else:
207 domain_min = -inf
208 if domain_max is None:
209 if self.restrict_to_data:
210 domain_max = max([source.get_data().max()
211 for source in mapper.range.sources])
212 else:
213 domain_max = inf
214
215 if (newlow <= domain_min) and (newhigh >= domain_max):
216 # Don't do anything; effectively, freeze the pan
217 continue
218
219 if newlow <= domain_min:
220 newlow = domain_min
221 # Calculate delta in screen space, which is always linear.
222 screen_delta = mapper.map_screen(domain_min) - screenlow
223 newhigh = mapper.map_data(screenhigh + screen_delta)
224 elif newhigh >= domain_max:
225 newhigh = domain_max
226 # Calculate delta in screen space, which is always linear.
227 screen_delta = mapper.map_screen(domain_max) - screenhigh
228 newlow = mapper.map_data(screenlow + screen_delta)
229
230 # Use .set_bounds() so that we don't generate two range_changed
231 # events on the DataRange
232 mapper.range.set_bounds(newlow, newhigh)
233
234 event.handled = True
235
236 self._original_xy = (event.x, event.y)
237 plot.request_redraw()
238 return
239
240 def panning_mouse_leave(self, event):
241 """ Handles the mouse leaving the plot when the tool is in the 'panning'
242 state.
243
244 Ends panning.
245 """
246 return self._end_pan(event)
247
248 def _start_pan(self, event, capture_mouse=True):
249 self._original_xy = (event.x, event.y)
250 if self.constrain_key is not None:
251 if getattr(event, self.constrain_key + "_down"):
252 self.constrain = True
253 self._auto_constrain = True
254 self.constrain_direction = None
255 self.event_state = "panning"
256 if capture_mouse:
257 event.window.set_pointer(self.drag_pointer)
258 event.window.set_mouse_owner(self, event.net_transform())
259 event.handled = True
260 return
261
262 def _end_pan(self, event):
263 if self._auto_constrain:
264 self.constrain = False
265 self.constrain_direction = None
266 self.event_state = "normal"
267 event.window.set_pointer("arrow")
268 if event.window.mouse_owner == self:
269 event.window.set_mouse_owner(None)
270 event.handled = True
271 return
272
273 # EOF
274
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chaco/tools/pan_tool.py b/chaco/tools/pan_tool.py
--- a/chaco/tools/pan_tool.py
+++ b/chaco/tools/pan_tool.py
@@ -199,16 +199,17 @@
# values. As a first approximation, we're just going to
# use a linear approximation, which works perfectly for
# linear mappers (which is used 99% of the time).
+ data = [arr for arr in
+ (source.get_data() for source in mapper.range.sources)
+ if arr.size > 0]
if domain_min is None:
if self.restrict_to_data:
- domain_min = min([source.get_data().min()
- for source in mapper.range.sources])
+ domain_min = min([arr.min() for arr in data])
else:
domain_min = -inf
if domain_max is None:
if self.restrict_to_data:
- domain_max = max([source.get_data().max()
- for source in mapper.range.sources])
+ domain_max = max([arr.max() for arr in data])
else:
domain_max = inf
| {"golden_diff": "diff --git a/chaco/tools/pan_tool.py b/chaco/tools/pan_tool.py\n--- a/chaco/tools/pan_tool.py\n+++ b/chaco/tools/pan_tool.py\n@@ -199,16 +199,17 @@\n # values. As a first approximation, we're just going to\n # use a linear approximation, which works perfectly for\n # linear mappers (which is used 99% of the time).\n+ data = [arr for arr in\n+ (source.get_data() for source in mapper.range.sources)\n+ if arr.size > 0]\n if domain_min is None:\n if self.restrict_to_data:\n- domain_min = min([source.get_data().min()\n- for source in mapper.range.sources])\n+ domain_min = min([arr.min() for arr in data])\n else:\n domain_min = -inf\n if domain_max is None:\n if self.restrict_to_data:\n- domain_max = max([source.get_data().max()\n- for source in mapper.range.sources])\n+ domain_max = max([arr.max() for arr in data])\n else:\n domain_max = inf\n", "issue": "ValueError when using PanTool with restrict_to_data and empty data source\nThis can come up when a plot's data sources contain empty arrays. None-empty data sources may be visible on screen, and the user may wish to pan based on that view. The empty data sources should not keep this from happening. Here's a minimal example.\n\n``` python\n\nimport numpy as np\n\nfrom chaco.array_plot_data import ArrayPlotData\nfrom chaco.plot import Plot\nfrom chaco.tools.pan_tool import PanTool\n\nplot_data = ArrayPlotData()\nplot = Plot(plot_data)\narr = np.arange(4.0)\nplot_data.set_data(\"x\", arr)\nplot_data.set_data(\"y\", arr)\nplot_data.set_data(\"z\", np.array([], np.float64)\nplot.plot(('x', 'y'))\nplot.plot(('z', 'z'))\ntool = PanTool(plot, restrict_to_data=True)\nplot.tools.append(tool)\n\n```\n\n", "before_files": [{"content": "\"\"\" Defines the PanTool class.\n\"\"\"\n\nfrom numpy import inf\n\n# Enthought library imports\nfrom enable.api import BaseTool, Pointer, KeySpec\nfrom traits.api import Bool, Enum, Float, Tuple, Instance\n\n\nclass PanTool(BaseTool):\n \"\"\" A tool that enables the user to pan a plot by clicking a mouse\n button and dragging.\n \"\"\"\n\n # The mouse button that initiates the drag operation.\n drag_button = Enum(\"left\", \"middle\", \"right\")\n\n # The cursor to use when panning.\n drag_pointer = Pointer(\"hand\")\n\n # Scaling factor on the panning \"speed\".\n speed = Float(1.0)\n\n # The modifier key that, if depressed when the drag is initiated, constrains\n # the panning to happen in the only direction of largest initial motion.\n # It is possible to permanently restrict this tool to always drag along one\n # direction. To do so, set constrain=True, constrain_key=None, and\n # constrain_direction to the desired direction.\n constrain_key = Enum(None, \"shift\", \"control\", \"alt\")\n\n # Keys to Pan via keyboard\n pan_right_key = Instance(KeySpec, args=(\"Right\",))\n pan_left_key = Instance(KeySpec, args=(\"Left\",))\n pan_up_key = Instance(KeySpec, args=(\"Up\",))\n pan_down_key = Instance(KeySpec, args=(\"Down\",))\n\n # number of pixels the keys should pan\n # disabled if 0.0\n pan_keys_step = Float(0.0)\n\n # Constrain the panning to one direction?\n constrain = Bool(False)\n\n # The direction of constrained draw. 
A value of None means that the user\n # has initiated the drag and pressed the constrain_key, but hasn't moved\n # the mouse yet; the magnitude of the components of the next mouse_move\n # event will determine the constrain_direction.\n constrain_direction = Enum(None, \"x\", \"y\")\n\n # Restrict to the bounds of the plot data\n restrict_to_data = Bool(False)\n\n # (x,y) of the point where the mouse button was pressed.\n _original_xy = Tuple\n\n # Data coordinates of **_original_xy**. This may be either (index,value)\n # or (value,index) depending on the component's orientation.\n _original_data = Tuple\n\n # Was constrain=True triggered by the **contrain_key**? If False, it was\n # set programmatically.\n _auto_constrain = Bool(False)\n\n\n #------------------------------------------------------------------------\n # Inherited BaseTool traits\n #------------------------------------------------------------------------\n\n # The tool does not have a visual representation (overrides\n # BaseTool).\n draw_mode = \"none\"\n\n # The tool is not visible (overrides BaseTool).\n visible = False\n\n # The possible event states of this tool (overrides enable.Interactor).\n event_state = Enum(\"normal\", \"panning\")\n\n def normal_key_pressed(self, event):\n \"\"\" Handles a key being pressed when the tool is in the 'normal'\n state.\n \"\"\"\n if self.pan_keys_step == 0.0:\n return\n src = self.component.bounds[0]/2, self.component.bounds[1]/2\n dest = src\n if self.pan_left_key.match(event):\n dest = (src[0] - self.pan_keys_step,\n src[1])\n elif self.pan_right_key.match(event):\n dest = (src[0] + self.pan_keys_step,\n src[1])\n elif self.pan_down_key.match(event):\n dest = (src[0],\n src[1] - self.pan_keys_step)\n elif self.pan_up_key.match(event):\n dest = (src[0],\n src[1] + self.pan_keys_step)\n if src != dest:\n self._original_xy = src\n event.x = dest[0]\n event.y = dest[1]\n self.panning_mouse_move(event)\n return\n\n def normal_left_down(self, event):\n \"\"\" Handles the left mouse button being pressed when the tool is in\n the 'normal' state.\n\n Starts panning if the left mouse button is the drag button.\n \"\"\"\n if self.drag_button == \"left\":\n self._start_pan(event)\n return\n\n def normal_right_down(self, event):\n \"\"\" Handles the right mouse button being pressed when the tool is in\n the 'normal' state.\n\n Starts panning if the right mouse button is the drag button.\n \"\"\"\n if self.drag_button == \"right\":\n self._start_pan(event)\n return\n\n def normal_middle_down(self, event):\n \"\"\" Handles the middle mouse button being pressed when the tool is in\n the 'normal' state.\n\n Starts panning if the middle mouse button is the drag button.\n \"\"\"\n if self.drag_button == \"middle\":\n self._start_pan(event)\n return\n\n def panning_left_up(self, event):\n \"\"\" Handles the left mouse button coming up when the tool is in the\n 'panning' state.\n\n Stops panning if the left mouse button is the drag button.\n \"\"\"\n if self.drag_button == \"left\":\n self._end_pan(event)\n return\n\n def panning_right_up(self, event):\n \"\"\" Handles the right mouse button coming up when the tool is in the\n 'panning' state.\n\n Stops panning if the right mouse button is the drag button.\n \"\"\"\n if self.drag_button == \"right\":\n self._end_pan(event)\n return\n\n def panning_middle_up(self, event):\n \"\"\" Handles the middle mouse button coming up when the tool is in the\n 'panning' state.\n\n Stops panning if the middle mouse button is the drag button.\n \"\"\"\n if 
self.drag_button == \"middle\":\n self._end_pan(event)\n return\n\n def panning_mouse_move(self, event):\n \"\"\" Handles the mouse being moved when the tool is in the 'panning'\n state.\n \"\"\"\n plot = self.component\n\n if self._auto_constrain and self.constrain_direction is None:\n # Determine the constraint direction\n x_orig, y_orig = self._original_xy\n if abs(event.x - x_orig) > abs(event.y - y_orig):\n self.constrain_direction = \"x\"\n else:\n self.constrain_direction = \"y\"\n\n direction_info = [(\"x\", \"width\", 0), (\"y\", \"height\", 1)]\n for direction, bound_name, index in direction_info:\n if not self.constrain or self.constrain_direction == direction:\n mapper = getattr(plot, direction + \"_mapper\")\n domain_min, domain_max = mapper.domain_limits\n eventpos = getattr(event, direction)\n origpos = self._original_xy[index]\n\n screenlow, screenhigh = mapper.screen_bounds\n screendelta = self.speed * (eventpos - origpos)\n\n newlow = mapper.map_data(screenlow - screendelta)\n newhigh = mapper.map_data(screenhigh - screendelta)\n\n # Don't set the range in this dimension if the panning\n # would exceed the domain limits.\n # To do this offset properly, we would need to iteratively\n # solve for a root using map_data on successive trial\n # values. As a first approximation, we're just going to\n # use a linear approximation, which works perfectly for\n # linear mappers (which is used 99% of the time).\n if domain_min is None:\n if self.restrict_to_data:\n domain_min = min([source.get_data().min()\n for source in mapper.range.sources])\n else:\n domain_min = -inf\n if domain_max is None:\n if self.restrict_to_data:\n domain_max = max([source.get_data().max()\n for source in mapper.range.sources])\n else:\n domain_max = inf\n\n if (newlow <= domain_min) and (newhigh >= domain_max):\n # Don't do anything; effectively, freeze the pan\n continue\n\n if newlow <= domain_min:\n newlow = domain_min\n # Calculate delta in screen space, which is always linear.\n screen_delta = mapper.map_screen(domain_min) - screenlow\n newhigh = mapper.map_data(screenhigh + screen_delta)\n elif newhigh >= domain_max:\n newhigh = domain_max\n # Calculate delta in screen space, which is always linear.\n screen_delta = mapper.map_screen(domain_max) - screenhigh\n newlow = mapper.map_data(screenlow + screen_delta)\n\n # Use .set_bounds() so that we don't generate two range_changed\n # events on the DataRange\n mapper.range.set_bounds(newlow, newhigh)\n\n event.handled = True\n\n self._original_xy = (event.x, event.y)\n plot.request_redraw()\n return\n\n def panning_mouse_leave(self, event):\n \"\"\" Handles the mouse leaving the plot when the tool is in the 'panning'\n state.\n\n Ends panning.\n \"\"\"\n return self._end_pan(event)\n\n def _start_pan(self, event, capture_mouse=True):\n self._original_xy = (event.x, event.y)\n if self.constrain_key is not None:\n if getattr(event, self.constrain_key + \"_down\"):\n self.constrain = True\n self._auto_constrain = True\n self.constrain_direction = None\n self.event_state = \"panning\"\n if capture_mouse:\n event.window.set_pointer(self.drag_pointer)\n event.window.set_mouse_owner(self, event.net_transform())\n event.handled = True\n return\n\n def _end_pan(self, event):\n if self._auto_constrain:\n self.constrain = False\n self.constrain_direction = None\n self.event_state = \"normal\"\n event.window.set_pointer(\"arrow\")\n if event.window.mouse_owner == self:\n event.window.set_mouse_owner(None)\n event.handled = True\n return\n\n# EOF\n", "path": 
"chaco/tools/pan_tool.py"}], "after_files": [{"content": "\"\"\" Defines the PanTool class.\n\"\"\"\n\nfrom numpy import inf\n\n# Enthought library imports\nfrom enable.api import BaseTool, Pointer, KeySpec\nfrom traits.api import Bool, Enum, Float, Tuple, Instance\n\n\nclass PanTool(BaseTool):\n \"\"\" A tool that enables the user to pan a plot by clicking a mouse\n button and dragging.\n \"\"\"\n\n # The mouse button that initiates the drag operation.\n drag_button = Enum(\"left\", \"middle\", \"right\")\n\n # The cursor to use when panning.\n drag_pointer = Pointer(\"hand\")\n\n # Scaling factor on the panning \"speed\".\n speed = Float(1.0)\n\n # The modifier key that, if depressed when the drag is initiated, constrains\n # the panning to happen in the only direction of largest initial motion.\n # It is possible to permanently restrict this tool to always drag along one\n # direction. To do so, set constrain=True, constrain_key=None, and\n # constrain_direction to the desired direction.\n constrain_key = Enum(None, \"shift\", \"control\", \"alt\")\n\n # Keys to Pan via keyboard\n pan_right_key = Instance(KeySpec, args=(\"Right\",))\n pan_left_key = Instance(KeySpec, args=(\"Left\",))\n pan_up_key = Instance(KeySpec, args=(\"Up\",))\n pan_down_key = Instance(KeySpec, args=(\"Down\",))\n\n # number of pixels the keys should pan\n # disabled if 0.0\n pan_keys_step = Float(0.0)\n\n # Constrain the panning to one direction?\n constrain = Bool(False)\n\n # The direction of constrained draw. A value of None means that the user\n # has initiated the drag and pressed the constrain_key, but hasn't moved\n # the mouse yet; the magnitude of the components of the next mouse_move\n # event will determine the constrain_direction.\n constrain_direction = Enum(None, \"x\", \"y\")\n\n # Restrict to the bounds of the plot data\n restrict_to_data = Bool(False)\n\n # (x,y) of the point where the mouse button was pressed.\n _original_xy = Tuple\n\n # Data coordinates of **_original_xy**. This may be either (index,value)\n # or (value,index) depending on the component's orientation.\n _original_data = Tuple\n\n # Was constrain=True triggered by the **contrain_key**? 
If False, it was\n # set programmatically.\n _auto_constrain = Bool(False)\n\n\n #------------------------------------------------------------------------\n # Inherited BaseTool traits\n #------------------------------------------------------------------------\n\n # The tool does not have a visual representation (overrides\n # BaseTool).\n draw_mode = \"none\"\n\n # The tool is not visible (overrides BaseTool).\n visible = False\n\n # The possible event states of this tool (overrides enable.Interactor).\n event_state = Enum(\"normal\", \"panning\")\n\n def normal_key_pressed(self, event):\n \"\"\" Handles a key being pressed when the tool is in the 'normal'\n state.\n \"\"\"\n if self.pan_keys_step == 0.0:\n return\n src = self.component.bounds[0]/2, self.component.bounds[1]/2\n dest = src\n if self.pan_left_key.match(event):\n dest = (src[0] - self.pan_keys_step,\n src[1])\n elif self.pan_right_key.match(event):\n dest = (src[0] + self.pan_keys_step,\n src[1])\n elif self.pan_down_key.match(event):\n dest = (src[0],\n src[1] - self.pan_keys_step)\n elif self.pan_up_key.match(event):\n dest = (src[0],\n src[1] + self.pan_keys_step)\n if src != dest:\n self._original_xy = src\n event.x = dest[0]\n event.y = dest[1]\n self.panning_mouse_move(event)\n return\n\n def normal_left_down(self, event):\n \"\"\" Handles the left mouse button being pressed when the tool is in\n the 'normal' state.\n\n Starts panning if the left mouse button is the drag button.\n \"\"\"\n if self.drag_button == \"left\":\n self._start_pan(event)\n return\n\n def normal_right_down(self, event):\n \"\"\" Handles the right mouse button being pressed when the tool is in\n the 'normal' state.\n\n Starts panning if the right mouse button is the drag button.\n \"\"\"\n if self.drag_button == \"right\":\n self._start_pan(event)\n return\n\n def normal_middle_down(self, event):\n \"\"\" Handles the middle mouse button being pressed when the tool is in\n the 'normal' state.\n\n Starts panning if the middle mouse button is the drag button.\n \"\"\"\n if self.drag_button == \"middle\":\n self._start_pan(event)\n return\n\n def panning_left_up(self, event):\n \"\"\" Handles the left mouse button coming up when the tool is in the\n 'panning' state.\n\n Stops panning if the left mouse button is the drag button.\n \"\"\"\n if self.drag_button == \"left\":\n self._end_pan(event)\n return\n\n def panning_right_up(self, event):\n \"\"\" Handles the right mouse button coming up when the tool is in the\n 'panning' state.\n\n Stops panning if the right mouse button is the drag button.\n \"\"\"\n if self.drag_button == \"right\":\n self._end_pan(event)\n return\n\n def panning_middle_up(self, event):\n \"\"\" Handles the middle mouse button coming up when the tool is in the\n 'panning' state.\n\n Stops panning if the middle mouse button is the drag button.\n \"\"\"\n if self.drag_button == \"middle\":\n self._end_pan(event)\n return\n\n def panning_mouse_move(self, event):\n \"\"\" Handles the mouse being moved when the tool is in the 'panning'\n state.\n \"\"\"\n plot = self.component\n\n if self._auto_constrain and self.constrain_direction is None:\n # Determine the constraint direction\n x_orig, y_orig = self._original_xy\n if abs(event.x - x_orig) > abs(event.y - y_orig):\n self.constrain_direction = \"x\"\n else:\n self.constrain_direction = \"y\"\n\n direction_info = [(\"x\", \"width\", 0), (\"y\", \"height\", 1)]\n for direction, bound_name, index in direction_info:\n if not self.constrain or self.constrain_direction == direction:\n 
mapper = getattr(plot, direction + \"_mapper\")\n domain_min, domain_max = mapper.domain_limits\n eventpos = getattr(event, direction)\n origpos = self._original_xy[index]\n\n screenlow, screenhigh = mapper.screen_bounds\n screendelta = self.speed * (eventpos - origpos)\n\n newlow = mapper.map_data(screenlow - screendelta)\n newhigh = mapper.map_data(screenhigh - screendelta)\n\n # Don't set the range in this dimension if the panning\n # would exceed the domain limits.\n # To do this offset properly, we would need to iteratively\n # solve for a root using map_data on successive trial\n # values. As a first approximation, we're just going to\n # use a linear approximation, which works perfectly for\n # linear mappers (which is used 99% of the time).\n data = [arr for arr in\n (source.get_data() for source in mapper.range.sources)\n if arr.size > 0]\n if domain_min is None:\n if self.restrict_to_data:\n domain_min = min([arr.min() for arr in data])\n else:\n domain_min = -inf\n if domain_max is None:\n if self.restrict_to_data:\n domain_max = max([arr.max() for arr in data])\n else:\n domain_max = inf\n\n if (newlow <= domain_min) and (newhigh >= domain_max):\n # Don't do anything; effectively, freeze the pan\n continue\n\n if newlow <= domain_min:\n newlow = domain_min\n # Calculate delta in screen space, which is always linear.\n screen_delta = mapper.map_screen(domain_min) - screenlow\n newhigh = mapper.map_data(screenhigh + screen_delta)\n elif newhigh >= domain_max:\n newhigh = domain_max\n # Calculate delta in screen space, which is always linear.\n screen_delta = mapper.map_screen(domain_max) - screenhigh\n newlow = mapper.map_data(screenlow + screen_delta)\n\n # Use .set_bounds() so that we don't generate two range_changed\n # events on the DataRange\n mapper.range.set_bounds(newlow, newhigh)\n\n event.handled = True\n\n self._original_xy = (event.x, event.y)\n plot.request_redraw()\n return\n\n def panning_mouse_leave(self, event):\n \"\"\" Handles the mouse leaving the plot when the tool is in the 'panning'\n state.\n\n Ends panning.\n \"\"\"\n return self._end_pan(event)\n\n def _start_pan(self, event, capture_mouse=True):\n self._original_xy = (event.x, event.y)\n if self.constrain_key is not None:\n if getattr(event, self.constrain_key + \"_down\"):\n self.constrain = True\n self._auto_constrain = True\n self.constrain_direction = None\n self.event_state = \"panning\"\n if capture_mouse:\n event.window.set_pointer(self.drag_pointer)\n event.window.set_mouse_owner(self, event.net_transform())\n event.handled = True\n return\n\n def _end_pan(self, event):\n if self._auto_constrain:\n self.constrain = False\n self.constrain_direction = None\n self.event_state = \"normal\"\n event.window.set_pointer(\"arrow\")\n if event.window.mouse_owner == self:\n event.window.set_mouse_owner(None)\n event.handled = True\n return\n\n# EOF\n", "path": "chaco/tools/pan_tool.py"}]} | 3,351 | 257 |
gh_patches_debug_33037 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-3884 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Checking for new version fails
After doing a crash report:
> There was an error while getting the newest version: Invalid JSON received in reply: Expecting value: line 1 column 1 (char 0)!. Please check for a new version on qutebrowser.org by yourself.
Probably something changed on PyPI?
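A plausible cause (stated here as an assumption, though the golden diff further down in this entry points the same way): the legacy `https://pypi.python.org/pypi/<package>/json` endpoint now answers with an HTTP redirect to `pypi.org`, and `QNetworkAccessManager` does not follow redirects by default, so the body handed to `json.loads` is the redirect response rather than JSON. A minimal PyQt5 sketch of opting in to redirect following (`url` is assumed to be a `QUrl`):

```python
from PyQt5.QtNetwork import QNetworkRequest

# Sketch only: ask Qt to follow (secure) redirects so the JSON body,
# not the redirect response, is what ends up being decoded.
request = QNetworkRequest(url)
try:
    request.setAttribute(QNetworkRequest.RedirectPolicyAttribute,
                         QNetworkRequest.NoLessSafeRedirectPolicy)
except AttributeError:
    # RedirectPolicyAttribute needs Qt >= 5.9; older versions only
    # offer FollowRedirectsAttribute.
    request.setAttribute(QNetworkRequest.FollowRedirectsAttribute, True)
```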
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutebrowser/misc/httpclient.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """An HTTP client based on QNetworkAccessManager."""
21
22 import functools
23 import urllib.request
24 import urllib.parse
25
26 from PyQt5.QtCore import pyqtSignal, QObject, QTimer
27 from PyQt5.QtNetwork import (QNetworkAccessManager, QNetworkRequest,
28 QNetworkReply)
29
30
31 class HTTPClient(QObject):
32
33 """An HTTP client based on QNetworkAccessManager.
34
35 Intended for APIs, automatically decodes data.
36
37 Attributes:
38 _nam: The QNetworkAccessManager used.
39 _timers: A {QNetworkReply: QTimer} dict.
40
41 Signals:
42 success: Emitted when the operation succeeded.
43 arg: The received data.
44 error: Emitted when the request failed.
45 arg: The error message, as string.
46 """
47
48 success = pyqtSignal(str)
49 error = pyqtSignal(str)
50
51 def __init__(self, parent=None):
52 super().__init__(parent)
53 self._nam = QNetworkAccessManager(self)
54 self._timers = {}
55
56 def post(self, url, data=None):
57 """Create a new POST request.
58
59 Args:
60 url: The URL to post to, as QUrl.
61 data: A dict of data to send.
62 """
63 if data is None:
64 data = {}
65 encoded_data = urllib.parse.urlencode(data).encode('utf-8')
66 request = QNetworkRequest(url)
67 request.setHeader(QNetworkRequest.ContentTypeHeader,
68 'application/x-www-form-urlencoded;charset=utf-8')
69 reply = self._nam.post(request, encoded_data)
70 self._handle_reply(reply)
71
72 def get(self, url):
73 """Create a new GET request.
74
75 Emits success/error when done.
76
77 Args:
78 url: The URL to access, as QUrl.
79 """
80 request = QNetworkRequest(url)
81 reply = self._nam.get(request)
82 self._handle_reply(reply)
83
84 def _handle_reply(self, reply):
85 """Handle a new QNetworkReply."""
86 if reply.isFinished():
87 self.on_reply_finished(reply)
88 else:
89 timer = QTimer(self)
90 timer.setInterval(10000)
91 timer.timeout.connect(reply.abort)
92 timer.start()
93 self._timers[reply] = timer
94 reply.finished.connect(functools.partial(
95 self.on_reply_finished, reply))
96
97 def on_reply_finished(self, reply):
98 """Read the data and finish when the reply finished.
99
100 Args:
101 reply: The QNetworkReply which finished.
102 """
103 timer = self._timers.pop(reply)
104 if timer is not None:
105 timer.stop()
106 timer.deleteLater()
107 if reply.error() != QNetworkReply.NoError:
108 self.error.emit(reply.errorString())
109 return
110 try:
111 data = bytes(reply.readAll()).decode('utf-8')
112 except UnicodeDecodeError:
113 self.error.emit("Invalid UTF-8 data received in reply!")
114 return
115 self.success.emit(data)
116
```
Path: `qutebrowser/misc/autoupdate.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Classes related to auto-updating and getting the latest version."""
21
22 import json
23
24 from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QUrl
25
26 from qutebrowser.misc import httpclient
27
28
29 class PyPIVersionClient(QObject):
30
31 """A client for the PyPI API using HTTPClient.
32
33 It gets the latest version of qutebrowser from PyPI.
34
35 Attributes:
36 _client: The HTTPClient used.
37
38 Class attributes:
39 API_URL: The base API URL.
40
41 Signals:
42 success: Emitted when getting the version info succeeded.
43 arg: The newest version.
44 error: Emitted when getting the version info failed.
45 arg: The error message, as string.
46 """
47
48 API_URL = 'https://pypi.python.org/pypi/{}/json'
49 success = pyqtSignal(str)
50 error = pyqtSignal(str)
51
52 def __init__(self, parent=None, client=None):
53 super().__init__(parent)
54 if client is None:
55 self._client = httpclient.HTTPClient(self)
56 else:
57 self._client = client
58 self._client.error.connect(self.error)
59 self._client.success.connect(self.on_client_success)
60
61 def get_version(self, package='qutebrowser'):
62 """Get the newest version of a given package.
63
64 Emits success/error when done.
65
66 Args:
67 package: The name of the package to check.
68 """
69 url = QUrl(self.API_URL.format(package))
70 self._client.get(url)
71
72 @pyqtSlot(str)
73 def on_client_success(self, data):
74 """Process the data and finish when the client finished.
75
76 Args:
77 data: A string with the received data.
78 """
79 try:
80 json_data = json.loads(data)
81 except ValueError as e:
82 self.error.emit("Invalid JSON received in reply: {}!".format(e))
83 return
84 try:
85 self.success.emit(json_data['info']['version'])
86 except KeyError as e:
87 self.error.emit("Malformed data received in reply "
88 "({!r} not found)!".format(e))
89 return
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qutebrowser/misc/autoupdate.py b/qutebrowser/misc/autoupdate.py
--- a/qutebrowser/misc/autoupdate.py
+++ b/qutebrowser/misc/autoupdate.py
@@ -45,7 +45,7 @@
arg: The error message, as string.
"""
- API_URL = 'https://pypi.python.org/pypi/{}/json'
+ API_URL = 'https://pypi.org/pypi/{}/json'
success = pyqtSignal(str)
error = pyqtSignal(str)
diff --git a/qutebrowser/misc/httpclient.py b/qutebrowser/misc/httpclient.py
--- a/qutebrowser/misc/httpclient.py
+++ b/qutebrowser/misc/httpclient.py
@@ -28,6 +28,21 @@
QNetworkReply)
+class HTTPRequest(QNetworkRequest):
+ """A QNetworkRquest that follows (secure) redirects by default."""
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ try:
+ self.setAttribute(QNetworkRequest.RedirectPolicyAttribute,
+ QNetworkRequest.NoLessSafeRedirectPolicy)
+ except AttributeError:
+ # RedirectPolicyAttribute was introduced in 5.9 to replace
+ # FollowRedirectsAttribute.
+ self.setAttribute(QNetworkRequest.FollowRedirectsAttribute,
+ True)
+
+
class HTTPClient(QObject):
"""An HTTP client based on QNetworkAccessManager.
@@ -63,7 +78,7 @@
if data is None:
data = {}
encoded_data = urllib.parse.urlencode(data).encode('utf-8')
- request = QNetworkRequest(url)
+ request = HTTPRequest(url)
request.setHeader(QNetworkRequest.ContentTypeHeader,
'application/x-www-form-urlencoded;charset=utf-8')
reply = self._nam.post(request, encoded_data)
@@ -77,7 +92,7 @@
Args:
url: The URL to access, as QUrl.
"""
- request = QNetworkRequest(url)
+ request = HTTPRequest(url)
reply = self._nam.get(request)
self._handle_reply(reply)
| {"golden_diff": "diff --git a/qutebrowser/misc/autoupdate.py b/qutebrowser/misc/autoupdate.py\n--- a/qutebrowser/misc/autoupdate.py\n+++ b/qutebrowser/misc/autoupdate.py\n@@ -45,7 +45,7 @@\n arg: The error message, as string.\n \"\"\"\n \n- API_URL = 'https://pypi.python.org/pypi/{}/json'\n+ API_URL = 'https://pypi.org/pypi/{}/json'\n success = pyqtSignal(str)\n error = pyqtSignal(str)\n \ndiff --git a/qutebrowser/misc/httpclient.py b/qutebrowser/misc/httpclient.py\n--- a/qutebrowser/misc/httpclient.py\n+++ b/qutebrowser/misc/httpclient.py\n@@ -28,6 +28,21 @@\n QNetworkReply)\n \n \n+class HTTPRequest(QNetworkRequest):\n+ \"\"\"A QNetworkRquest that follows (secure) redirects by default.\"\"\"\n+\n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ try:\n+ self.setAttribute(QNetworkRequest.RedirectPolicyAttribute,\n+ QNetworkRequest.NoLessSafeRedirectPolicy)\n+ except AttributeError:\n+ # RedirectPolicyAttribute was introduced in 5.9 to replace\n+ # FollowRedirectsAttribute.\n+ self.setAttribute(QNetworkRequest.FollowRedirectsAttribute,\n+ True)\n+\n+\n class HTTPClient(QObject):\n \n \"\"\"An HTTP client based on QNetworkAccessManager.\n@@ -63,7 +78,7 @@\n if data is None:\n data = {}\n encoded_data = urllib.parse.urlencode(data).encode('utf-8')\n- request = QNetworkRequest(url)\n+ request = HTTPRequest(url)\n request.setHeader(QNetworkRequest.ContentTypeHeader,\n 'application/x-www-form-urlencoded;charset=utf-8')\n reply = self._nam.post(request, encoded_data)\n@@ -77,7 +92,7 @@\n Args:\n url: The URL to access, as QUrl.\n \"\"\"\n- request = QNetworkRequest(url)\n+ request = HTTPRequest(url)\n reply = self._nam.get(request)\n self._handle_reply(reply)\n", "issue": "Checking for new version fails\nAfter doing a crash report:\r\n\r\n> There was an error while getting the newest version: Invalid JSON received in reply: Expecting value: line 1 column 1 (char 0)!. Please check for a new version on qutebrowser.org by yourself.\r\n\r\nProbably something changed on PyPI?\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"An HTTP client based on QNetworkAccessManager.\"\"\"\n\nimport functools\nimport urllib.request\nimport urllib.parse\n\nfrom PyQt5.QtCore import pyqtSignal, QObject, QTimer\nfrom PyQt5.QtNetwork import (QNetworkAccessManager, QNetworkRequest,\n QNetworkReply)\n\n\nclass HTTPClient(QObject):\n\n \"\"\"An HTTP client based on QNetworkAccessManager.\n\n Intended for APIs, automatically decodes data.\n\n Attributes:\n _nam: The QNetworkAccessManager used.\n _timers: A {QNetworkReply: QTimer} dict.\n\n Signals:\n success: Emitted when the operation succeeded.\n arg: The received data.\n error: Emitted when the request failed.\n arg: The error message, as string.\n \"\"\"\n\n success = pyqtSignal(str)\n error = pyqtSignal(str)\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self._nam = QNetworkAccessManager(self)\n self._timers = {}\n\n def post(self, url, data=None):\n \"\"\"Create a new POST request.\n\n Args:\n url: The URL to post to, as QUrl.\n data: A dict of data to send.\n \"\"\"\n if data is None:\n data = {}\n encoded_data = urllib.parse.urlencode(data).encode('utf-8')\n request = QNetworkRequest(url)\n request.setHeader(QNetworkRequest.ContentTypeHeader,\n 'application/x-www-form-urlencoded;charset=utf-8')\n reply = self._nam.post(request, encoded_data)\n self._handle_reply(reply)\n\n def get(self, url):\n \"\"\"Create a new GET request.\n\n Emits success/error when done.\n\n Args:\n url: The URL to access, as QUrl.\n \"\"\"\n request = QNetworkRequest(url)\n reply = self._nam.get(request)\n self._handle_reply(reply)\n\n def _handle_reply(self, reply):\n \"\"\"Handle a new QNetworkReply.\"\"\"\n if reply.isFinished():\n self.on_reply_finished(reply)\n else:\n timer = QTimer(self)\n timer.setInterval(10000)\n timer.timeout.connect(reply.abort)\n timer.start()\n self._timers[reply] = timer\n reply.finished.connect(functools.partial(\n self.on_reply_finished, reply))\n\n def on_reply_finished(self, reply):\n \"\"\"Read the data and finish when the reply finished.\n\n Args:\n reply: The QNetworkReply which finished.\n \"\"\"\n timer = self._timers.pop(reply)\n if timer is not None:\n timer.stop()\n timer.deleteLater()\n if reply.error() != QNetworkReply.NoError:\n self.error.emit(reply.errorString())\n return\n try:\n data = bytes(reply.readAll()).decode('utf-8')\n except UnicodeDecodeError:\n self.error.emit(\"Invalid UTF-8 data received in reply!\")\n return\n self.success.emit(data)\n", "path": "qutebrowser/misc/httpclient.py"}, {"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Classes related to auto-updating and getting the latest version.\"\"\"\n\nimport json\n\nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QUrl\n\nfrom qutebrowser.misc import httpclient\n\n\nclass PyPIVersionClient(QObject):\n\n \"\"\"A client for the PyPI API using HTTPClient.\n\n It gets the latest version of qutebrowser from PyPI.\n\n Attributes:\n _client: The HTTPClient used.\n\n Class attributes:\n API_URL: The base API URL.\n\n Signals:\n success: Emitted when getting the version info succeeded.\n arg: The newest version.\n error: Emitted when getting the version info failed.\n arg: The error message, as string.\n \"\"\"\n\n API_URL = 'https://pypi.python.org/pypi/{}/json'\n success = pyqtSignal(str)\n error = pyqtSignal(str)\n\n def __init__(self, parent=None, client=None):\n super().__init__(parent)\n if client is None:\n self._client = httpclient.HTTPClient(self)\n else:\n self._client = client\n self._client.error.connect(self.error)\n self._client.success.connect(self.on_client_success)\n\n def get_version(self, package='qutebrowser'):\n \"\"\"Get the newest version of a given package.\n\n Emits success/error when done.\n\n Args:\n package: The name of the package to check.\n \"\"\"\n url = QUrl(self.API_URL.format(package))\n self._client.get(url)\n\n @pyqtSlot(str)\n def on_client_success(self, data):\n \"\"\"Process the data and finish when the client finished.\n\n Args:\n data: A string with the received data.\n \"\"\"\n try:\n json_data = json.loads(data)\n except ValueError as e:\n self.error.emit(\"Invalid JSON received in reply: {}!\".format(e))\n return\n try:\n self.success.emit(json_data['info']['version'])\n except KeyError as e:\n self.error.emit(\"Malformed data received in reply \"\n \"({!r} not found)!\".format(e))\n return\n", "path": "qutebrowser/misc/autoupdate.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"An HTTP client based on QNetworkAccessManager.\"\"\"\n\nimport functools\nimport urllib.request\nimport urllib.parse\n\nfrom PyQt5.QtCore import pyqtSignal, QObject, QTimer\nfrom PyQt5.QtNetwork import (QNetworkAccessManager, QNetworkRequest,\n QNetworkReply)\n\n\nclass HTTPRequest(QNetworkRequest):\n \"\"\"A QNetworkRquest that follows (secure) redirects by default.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n try:\n self.setAttribute(QNetworkRequest.RedirectPolicyAttribute,\n QNetworkRequest.NoLessSafeRedirectPolicy)\n except AttributeError:\n # RedirectPolicyAttribute was introduced in 5.9 to replace\n # FollowRedirectsAttribute.\n self.setAttribute(QNetworkRequest.FollowRedirectsAttribute,\n True)\n\n\nclass HTTPClient(QObject):\n\n \"\"\"An HTTP client based on QNetworkAccessManager.\n\n Intended for APIs, automatically decodes data.\n\n Attributes:\n _nam: The QNetworkAccessManager used.\n _timers: A {QNetworkReply: QTimer} dict.\n\n Signals:\n success: Emitted when the operation succeeded.\n arg: The received data.\n error: Emitted when the request failed.\n arg: The error message, as string.\n \"\"\"\n\n success = pyqtSignal(str)\n error = pyqtSignal(str)\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self._nam = QNetworkAccessManager(self)\n self._timers = {}\n\n def post(self, url, data=None):\n \"\"\"Create a new POST request.\n\n Args:\n url: The URL to post to, as QUrl.\n data: A dict of data to send.\n \"\"\"\n if data is None:\n data = {}\n encoded_data = urllib.parse.urlencode(data).encode('utf-8')\n request = HTTPRequest(url)\n request.setHeader(QNetworkRequest.ContentTypeHeader,\n 'application/x-www-form-urlencoded;charset=utf-8')\n reply = self._nam.post(request, encoded_data)\n self._handle_reply(reply)\n\n def get(self, url):\n \"\"\"Create a new GET request.\n\n Emits success/error when done.\n\n Args:\n url: The URL to access, as QUrl.\n \"\"\"\n request = HTTPRequest(url)\n reply = self._nam.get(request)\n self._handle_reply(reply)\n\n def _handle_reply(self, reply):\n \"\"\"Handle a new QNetworkReply.\"\"\"\n if reply.isFinished():\n self.on_reply_finished(reply)\n else:\n timer = QTimer(self)\n timer.setInterval(10000)\n timer.timeout.connect(reply.abort)\n timer.start()\n self._timers[reply] = timer\n reply.finished.connect(functools.partial(\n self.on_reply_finished, reply))\n\n def on_reply_finished(self, reply):\n \"\"\"Read the data and finish when the reply finished.\n\n Args:\n reply: The QNetworkReply which finished.\n \"\"\"\n timer = self._timers.pop(reply)\n if timer is not None:\n timer.stop()\n timer.deleteLater()\n if reply.error() != QNetworkReply.NoError:\n self.error.emit(reply.errorString())\n return\n try:\n data = bytes(reply.readAll()).decode('utf-8')\n except UnicodeDecodeError:\n self.error.emit(\"Invalid UTF-8 data received in reply!\")\n return\n self.success.emit(data)\n", "path": "qutebrowser/misc/httpclient.py"}, {"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY 
WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Classes related to auto-updating and getting the latest version.\"\"\"\n\nimport json\n\nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QUrl\n\nfrom qutebrowser.misc import httpclient\n\n\nclass PyPIVersionClient(QObject):\n\n \"\"\"A client for the PyPI API using HTTPClient.\n\n It gets the latest version of qutebrowser from PyPI.\n\n Attributes:\n _client: The HTTPClient used.\n\n Class attributes:\n API_URL: The base API URL.\n\n Signals:\n success: Emitted when getting the version info succeeded.\n arg: The newest version.\n error: Emitted when getting the version info failed.\n arg: The error message, as string.\n \"\"\"\n\n API_URL = 'https://pypi.org/pypi/{}/json'\n success = pyqtSignal(str)\n error = pyqtSignal(str)\n\n def __init__(self, parent=None, client=None):\n super().__init__(parent)\n if client is None:\n self._client = httpclient.HTTPClient(self)\n else:\n self._client = client\n self._client.error.connect(self.error)\n self._client.success.connect(self.on_client_success)\n\n def get_version(self, package='qutebrowser'):\n \"\"\"Get the newest version of a given package.\n\n Emits success/error when done.\n\n Args:\n package: The name of the package to check.\n \"\"\"\n url = QUrl(self.API_URL.format(package))\n self._client.get(url)\n\n @pyqtSlot(str)\n def on_client_success(self, data):\n \"\"\"Process the data and finish when the client finished.\n\n Args:\n data: A string with the received data.\n \"\"\"\n try:\n json_data = json.loads(data)\n except ValueError as e:\n self.error.emit(\"Invalid JSON received in reply: {}!\".format(e))\n return\n try:\n self.success.emit(json_data['info']['version'])\n except KeyError as e:\n self.error.emit(\"Malformed data received in reply \"\n \"({!r} not found)!\".format(e))\n return\n", "path": "qutebrowser/misc/autoupdate.py"}]} | 2,250 | 475 |
gh_patches_debug_7512 | rasdani/github-patches | git_diff | lhotse-speech__lhotse-728 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
JSONDecodeError when running feat extract after doing kaldi import
Hi!
If I run `lhotse kaldi import <kaldidir> 16000 <newdir>` and then
`lhotse feat extract <newdir>/recordings.jsonl.gz <newdir>/feat` I get an error
```
[..]
File "/home/rudolf/.local/lib/python3.8/site-packages/lhotse/bin/modes/features.py", line 86, in extract
recordings: RecordingSet = RecordingSet.from_json(recording_manifest)
[..]
File "/usr/lib/python3.8/json/decoder.py", line 340, in decode
raise JSONDecodeError("Extra data", s, end)
json.decoder.JSONDecodeError: Extra data: line 2 column 1 (char 285)
```
There's nothing wrong with the kaldi dir (it was fixed and I've validated it). This is lhotse 1.2, I tried out both `pip install lhotse` and `pip install lhotse[orjson]`.
This is what the first 3 lines of `zcat` look like:
```
{"id": "5ccae615b4e948578998a20f", "sources": [{"type": "file", "channels": [0], "source": "/path/to/wav/5ccae615b4e948578998a20f-wav.wav"}], "sampling_rate": 16000, "num_samples": 24992427, "duration": 1562.0266875}
{"id": "5ccae699b4e948578998a211", "sources": [{"type": "file", "channels": [0], "source": "/path/to/wav/wav/5ccae699b4e948578998a211-wav.wav"}], "sampling_rate": 16000, "num_samples": 25830741, "duration": 1614.4213125}
{"id": "5ccae7b2b4e948578998a215", "sources": [{"type": "file", "channels": [0], "source": "/path/to/wav/5ccae7b2b4e948578998a215-wav.wav"}], "sampling_rate": 16000, "num_samples": 11936427, "duration": 746.0266875}
```
Any ideas?
edit: Seems it's just loading it as a json when it should be reading line by line? should I be passing a specific cli flag?
edit2: I think the **fix** is to change `from_json` to `from_file` [here](https://github.com/lhotse-speech/lhotse/blob/e400cb7ca8f2ecfe9c67b0589b208574c3f997da/lhotse/bin/modes/features.py#L86), I could make a PR?
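For illustration, a minimal sketch of the failure mode described above (the path below is a placeholder): a `.jsonl.gz` manifest holds one JSON object per line, so decoding the whole stream as a single document stops with "Extra data" as soon as line 2 begins, while reading it line by line works:

```python
import gzip
import json

path = "recordings.jsonl.gz"  # placeholder path

# json.load() expects exactly one JSON document, so on a JSONL manifest
# it raises JSONDecodeError("Extra data: line 2 column 1 ...").
# with gzip.open(path, "rt") as f:
#     json.load(f)

# A JSONL-aware reader parses each line separately and succeeds.
with gzip.open(path, "rt") as f:
    records = [json.loads(line) for line in f if line.strip()]
```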
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lhotse/bin/modes/features.py`
Content:
```
1 from pathlib import Path
2 from typing import Optional
3
4 import click
5
6 from lhotse import Features, FeatureSet, LilcomURLWriter, Seconds
7 from lhotse.audio import RecordingSet
8 from lhotse.bin.modes.cli_base import cli
9 from lhotse.features import (
10 Fbank,
11 FeatureExtractor,
12 FeatureSetBuilder,
13 create_default_feature_extractor,
14 )
15 from lhotse.features.base import FEATURE_EXTRACTORS
16 from lhotse.features.io import available_storage_backends, get_writer
17 from lhotse.utils import Pathlike, fastcopy
18
19
20 @cli.group()
21 def feat():
22 """Feature extraction related commands."""
23 pass
24
25
26 @feat.command(context_settings=dict(show_default=True))
27 @click.argument("output_config", type=click.Path())
28 @click.option(
29 "-f",
30 "--feature-type",
31 type=click.Choice(list(FEATURE_EXTRACTORS)),
32 default="fbank",
33 help="Which feature extractor type to use.",
34 )
35 def write_default_config(output_config: Pathlike, feature_type: str):
36 """Save a default feature extraction config to OUTPUT_CONFIG."""
37 create_default_feature_extractor(feature_type).to_yaml(output_config)
38
39
40 @feat.command(context_settings=dict(show_default=True))
41 @click.argument("recording_manifest", type=click.Path(exists=True, dir_okay=False))
42 @click.argument("output_dir", type=click.Path())
43 @click.option(
44 "-f",
45 "--feature-manifest",
46 type=click.Path(exists=True, dir_okay=False),
47 help="Optional manifest specifying feature extractor configuration.",
48 )
49 @click.option(
50 "--storage-type",
51 type=click.Choice(available_storage_backends()),
52 default="lilcom_files",
53 help="Select a storage backend for the feature matrices.",
54 )
55 @click.option(
56 "-t",
57 "--lilcom-tick-power",
58 type=int,
59 default=-5,
60 help="Determines the compression accuracy; "
61 "the input will be compressed to integer multiples of 2^tick_power",
62 )
63 @click.option(
64 "-r",
65 "--root-dir",
66 type=click.Path(exists=True, file_okay=False),
67 default=None,
68 help="Root directory - all paths in the manifest will use this as prefix.",
69 )
70 @click.option(
71 "-j", "--num-jobs", type=int, default=1, help="Number of parallel processes."
72 )
73 def extract(
74 recording_manifest: Pathlike,
75 output_dir: Pathlike,
76 feature_manifest: Optional[Pathlike],
77 storage_type: str,
78 lilcom_tick_power: int,
79 root_dir: Optional[Pathlike],
80 num_jobs: int,
81 ):
82 """
83 Extract features for recordings in a given AUDIO_MANIFEST. The features are stored in OUTPUT_DIR,
84 with one file per recording (or segment).
85 """
86 recordings: RecordingSet = RecordingSet.from_json(recording_manifest)
87 if root_dir is not None:
88 recordings = recordings.with_path_prefix(root_dir)
89
90 feature_extractor = (
91 FeatureExtractor.from_yaml(feature_manifest)
92 if feature_manifest is not None
93 else Fbank()
94 )
95
96 output_dir = Path(output_dir)
97 output_dir.mkdir(exist_ok=True, parents=True)
98 storage_path = (
99 output_dir / "feats.h5" if "hdf5" in storage_type else output_dir / "storage"
100 )
101
102 with get_writer(storage_type)(
103 storage_path, tick_power=lilcom_tick_power
104 ) as storage:
105 feature_set_builder = FeatureSetBuilder(
106 feature_extractor=feature_extractor,
107 storage=storage,
108 )
109 feature_set_builder.process_and_store_recordings(
110 recordings=recordings,
111 output_manifest=output_dir / "feature_manifest.json.gz",
112 num_jobs=num_jobs,
113 )
114
115
116 @feat.command(context_settings=dict(show_default=True))
117 @click.argument("cutset", type=click.Path(exists=True, dir_okay=False))
118 @click.argument("output_cutset", type=click.Path())
119 @click.argument("storage_path", type=click.Path())
120 @click.option(
121 "-f",
122 "--feature-manifest",
123 type=click.Path(exists=True, dir_okay=False),
124 help="Optional manifest specifying feature extractor configuration.",
125 )
126 @click.option(
127 "--storage-type",
128 type=click.Choice(available_storage_backends()),
129 default="lilcom_hdf5",
130 help="Select a storage backend for the feature matrices.",
131 )
132 @click.option(
133 "-j", "--num-jobs", type=int, default=1, help="Number of parallel processes."
134 )
135 def extract_cuts(
136 cutset: Pathlike,
137 output_cutset: Pathlike,
138 storage_path: Pathlike,
139 feature_manifest: Optional[Pathlike],
140 storage_type: str,
141 num_jobs: int,
142 ):
143 """
144 Extract features for cuts in a given CUTSET manifest.
145 The features are stored in STORAGE_PATH, and the output manifest
146 with features is stored in OUTPUT_CUTSET.
147 """
148 from lhotse import CutSet
149
150 cuts: CutSet = CutSet.from_file(cutset)
151 feature_extractor = (
152 FeatureExtractor.from_yaml(feature_manifest)
153 if feature_manifest is not None
154 else Fbank()
155 )
156 cuts = cuts.compute_and_store_features(
157 extractor=feature_extractor,
158 storage_path=storage_path,
159 num_jobs=num_jobs,
160 storage_type=get_writer(storage_type),
161 )
162 Path(output_cutset).parent.mkdir(parents=True, exist_ok=True)
163 cuts.to_file(output_cutset)
164
165
166 @feat.command(context_settings=dict(show_default=True))
167 @click.argument("cutset", type=click.Path(exists=True, dir_okay=False))
168 @click.argument("output_cutset", type=click.Path())
169 @click.argument("storage_path", type=click.Path())
170 @click.option(
171 "-f",
172 "--feature-manifest",
173 type=click.Path(exists=True, dir_okay=False),
174 help="Optional manifest specifying feature extractor configuration. "
175 "If you want to use CUDA, you should specify the device in this "
176 "config.",
177 )
178 @click.option(
179 "--storage-type",
180 type=click.Choice(available_storage_backends()),
181 default="lilcom_hdf5",
182 help="Select a storage backend for the feature matrices.",
183 )
184 @click.option(
185 "-j", "--num-jobs", type=int, default=4, help="Number of dataloader workers."
186 )
187 @click.option(
188 "-b",
189 "--batch-duration",
190 type=float,
191 default=600.0,
192 help="At most this many seconds of audio will be processed in each batch.",
193 )
194 def extract_cuts_batch(
195 cutset: Pathlike,
196 output_cutset: Pathlike,
197 storage_path: Pathlike,
198 feature_manifest: Optional[Pathlike],
199 storage_type: str,
200 num_jobs: int,
201 batch_duration: Seconds,
202 ):
203 """
204 Extract features for cuts in a given CUTSET manifest.
205 The features are stored in STORAGE_PATH, and the output manifest
206 with features is stored in OUTPUT_CUTSET.
207
208 This version enables CUDA acceleration for feature extractors
209 that support it (e.g., kaldifeat extractors).
210
211 \b
212 Example usage of kaldifeat fbank with CUDA:
213
214 $ pip install kaldifeat # note: ensure it's compiled with CUDA
215
216 $ lhotse feat write-default-config -f kaldifeat-fbank feat.yml
217
218 $ sed 's/device: cpu/device: cuda/' feat.yml feat-cuda.yml
219
220 $ lhotse feat extract-cuts-batch -f feat-cuda.yml cuts.jsonl cuts_with_feats.jsonl feats.h5
221 """
222 from lhotse import CutSet
223
224 cuts: CutSet = CutSet.from_file(cutset)
225 feature_extractor = (
226 FeatureExtractor.from_yaml(feature_manifest)
227 if feature_manifest is not None
228 else Fbank()
229 )
230 cuts = cuts.compute_and_store_features_batch(
231 extractor=feature_extractor,
232 storage_path=storage_path,
233 batch_duration=batch_duration,
234 num_workers=num_jobs,
235 storage_type=get_writer(storage_type),
236 )
237 Path(output_cutset).parent.mkdir(parents=True, exist_ok=True)
238 cuts.to_file(output_cutset)
239
240
241 @feat.command(context_settings=dict(show_default=True))
242 @click.argument("feature_manifest", type=click.Path(exists=True, dir_okay=False))
243 @click.argument("url")
244 @click.argument("output_manifest", type=click.Path())
245 @click.option("--num-jobs", "-j", type=int, default=1)
246 def upload(
247 feature_manifest: Pathlike, url: str, output_manifest: Pathlike, num_jobs: int
248 ):
249 """
250 Read an existing FEATURE_MANIFEST, upload the feature matrices it contains to a URL location,
251 and save a new feature OUTPUT_MANIFEST that refers to the uploaded features.
252
253 The URL can refer to endpoints such as AWS S3, GCP, Azure, etc.
254 For example: "s3://my-bucket/my-features" is a valid URL.
255
256 This script does not currently support credentials,
257 and assumes that you have the write permissions.
258 """
259 from concurrent.futures import ProcessPoolExecutor
260
261 from tqdm import tqdm
262
263 output_manifest = Path(output_manifest)
264 assert (
265 ".jsonl" in output_manifest.suffixes
266 ), "This mode only supports writing to JSONL feature manifests."
267
268 local_features: FeatureSet = FeatureSet.from_file(feature_manifest)
269
270 with FeatureSet.open_writer(
271 output_manifest
272 ) as manifest_writer, ProcessPoolExecutor(num_jobs) as ex:
273 futures = []
274 for item in tqdm(local_features, desc="Submitting parallel uploading tasks..."):
275 futures.append(ex.submit(_upload_one, item, url))
276 for item in tqdm(futures, desc=f"Uploading features to {url}"):
277 manifest_writer.write(item.result())
278
279
280 def _upload_one(item: Features, url: str) -> Features:
281 feats_mat = item.load()
282 feats_writer = LilcomURLWriter(url)
283 new_key = feats_writer.write(key=item.storage_key, value=feats_mat)
284 return fastcopy(
285 item, storage_path=url, storage_key=new_key, storage_type=feats_writer.name
286 )
287
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lhotse/bin/modes/features.py b/lhotse/bin/modes/features.py
--- a/lhotse/bin/modes/features.py
+++ b/lhotse/bin/modes/features.py
@@ -83,7 +83,7 @@
Extract features for recordings in a given AUDIO_MANIFEST. The features are stored in OUTPUT_DIR,
with one file per recording (or segment).
"""
- recordings: RecordingSet = RecordingSet.from_json(recording_manifest)
+ recordings: RecordingSet = RecordingSet.from_file(recording_manifest)
if root_dir is not None:
recordings = recordings.with_path_prefix(root_dir)
| {"golden_diff": "diff --git a/lhotse/bin/modes/features.py b/lhotse/bin/modes/features.py\n--- a/lhotse/bin/modes/features.py\n+++ b/lhotse/bin/modes/features.py\n@@ -83,7 +83,7 @@\n Extract features for recordings in a given AUDIO_MANIFEST. The features are stored in OUTPUT_DIR,\n with one file per recording (or segment).\n \"\"\"\n- recordings: RecordingSet = RecordingSet.from_json(recording_manifest)\n+ recordings: RecordingSet = RecordingSet.from_file(recording_manifest)\n if root_dir is not None:\n recordings = recordings.with_path_prefix(root_dir)\n", "issue": "JSONDecodeError when running feat extract after doing kaldi import\nHi!\r\nIf I run `lhotse kaldi import <kaldidir> 16000 <newdir>` and then\r\n`lhotse feat extract <newdir>/recordings.jsonl.gz <newdir>/feat` I get an error\r\n\r\n```\r\n[..]\r\n File \"/home/rudolf/.local/lib/python3.8/site-packages/lhotse/bin/modes/features.py\", line 86, in extract\r\n recordings: RecordingSet = RecordingSet.from_json(recording_manifest)\r\n[..]\r\n File \"/usr/lib/python3.8/json/decoder.py\", line 340, in decode\r\n raise JSONDecodeError(\"Extra data\", s, end)\r\njson.decoder.JSONDecodeError: Extra data: line 2 column 1 (char 285)\r\n```\r\n\r\nThere's nothing wrong with the kaldi dir (it was fixed and I've validated it). This is lhotse 1.2, I tried out both `pip install lhotse` and `pip install lhotse[orjson]`.\r\n\r\nThis is what the first 3 lines of `zcat` look like:\r\n```\r\n{\"id\": \"5ccae615b4e948578998a20f\", \"sources\": [{\"type\": \"file\", \"channels\": [0], \"source\": \"/path/to/wav/5ccae615b4e948578998a20f-wav.wav\"}], \"sampling_rate\": 16000, \"num_samples\": 24992427, \"duration\": 1562.0266875}\r\n{\"id\": \"5ccae699b4e948578998a211\", \"sources\": [{\"type\": \"file\", \"channels\": [0], \"source\": \"/path/to/wav/wav/5ccae699b4e948578998a211-wav.wav\"}], \"sampling_rate\": 16000, \"num_samples\": 25830741, \"duration\": 1614.4213125}\r\n{\"id\": \"5ccae7b2b4e948578998a215\", \"sources\": [{\"type\": \"file\", \"channels\": [0], \"source\": \"/path/to/wav/5ccae7b2b4e948578998a215-wav.wav\"}], \"sampling_rate\": 16000, \"num_samples\": 11936427, \"duration\": 746.0266875}\r\n```\r\n\r\nAny ideas?\r\n\r\nedit: Seems it's just loading it as a json when it should be reading line by line? 
should I be passing a specific cli flag?\r\n\r\nedit2: I think the **fix** is to change `from_json` to `from_file` [here](https://github.com/lhotse-speech/lhotse/blob/e400cb7ca8f2ecfe9c67b0589b208574c3f997da/lhotse/bin/modes/features.py#L86), I could make a PR?\n", "before_files": [{"content": "from pathlib import Path\nfrom typing import Optional\n\nimport click\n\nfrom lhotse import Features, FeatureSet, LilcomURLWriter, Seconds\nfrom lhotse.audio import RecordingSet\nfrom lhotse.bin.modes.cli_base import cli\nfrom lhotse.features import (\n Fbank,\n FeatureExtractor,\n FeatureSetBuilder,\n create_default_feature_extractor,\n)\nfrom lhotse.features.base import FEATURE_EXTRACTORS\nfrom lhotse.features.io import available_storage_backends, get_writer\nfrom lhotse.utils import Pathlike, fastcopy\n\n\[email protected]()\ndef feat():\n \"\"\"Feature extraction related commands.\"\"\"\n pass\n\n\[email protected](context_settings=dict(show_default=True))\[email protected](\"output_config\", type=click.Path())\[email protected](\n \"-f\",\n \"--feature-type\",\n type=click.Choice(list(FEATURE_EXTRACTORS)),\n default=\"fbank\",\n help=\"Which feature extractor type to use.\",\n)\ndef write_default_config(output_config: Pathlike, feature_type: str):\n \"\"\"Save a default feature extraction config to OUTPUT_CONFIG.\"\"\"\n create_default_feature_extractor(feature_type).to_yaml(output_config)\n\n\[email protected](context_settings=dict(show_default=True))\[email protected](\"recording_manifest\", type=click.Path(exists=True, dir_okay=False))\[email protected](\"output_dir\", type=click.Path())\[email protected](\n \"-f\",\n \"--feature-manifest\",\n type=click.Path(exists=True, dir_okay=False),\n help=\"Optional manifest specifying feature extractor configuration.\",\n)\[email protected](\n \"--storage-type\",\n type=click.Choice(available_storage_backends()),\n default=\"lilcom_files\",\n help=\"Select a storage backend for the feature matrices.\",\n)\[email protected](\n \"-t\",\n \"--lilcom-tick-power\",\n type=int,\n default=-5,\n help=\"Determines the compression accuracy; \"\n \"the input will be compressed to integer multiples of 2^tick_power\",\n)\[email protected](\n \"-r\",\n \"--root-dir\",\n type=click.Path(exists=True, file_okay=False),\n default=None,\n help=\"Root directory - all paths in the manifest will use this as prefix.\",\n)\[email protected](\n \"-j\", \"--num-jobs\", type=int, default=1, help=\"Number of parallel processes.\"\n)\ndef extract(\n recording_manifest: Pathlike,\n output_dir: Pathlike,\n feature_manifest: Optional[Pathlike],\n storage_type: str,\n lilcom_tick_power: int,\n root_dir: Optional[Pathlike],\n num_jobs: int,\n):\n \"\"\"\n Extract features for recordings in a given AUDIO_MANIFEST. 
The features are stored in OUTPUT_DIR,\n with one file per recording (or segment).\n \"\"\"\n recordings: RecordingSet = RecordingSet.from_json(recording_manifest)\n if root_dir is not None:\n recordings = recordings.with_path_prefix(root_dir)\n\n feature_extractor = (\n FeatureExtractor.from_yaml(feature_manifest)\n if feature_manifest is not None\n else Fbank()\n )\n\n output_dir = Path(output_dir)\n output_dir.mkdir(exist_ok=True, parents=True)\n storage_path = (\n output_dir / \"feats.h5\" if \"hdf5\" in storage_type else output_dir / \"storage\"\n )\n\n with get_writer(storage_type)(\n storage_path, tick_power=lilcom_tick_power\n ) as storage:\n feature_set_builder = FeatureSetBuilder(\n feature_extractor=feature_extractor,\n storage=storage,\n )\n feature_set_builder.process_and_store_recordings(\n recordings=recordings,\n output_manifest=output_dir / \"feature_manifest.json.gz\",\n num_jobs=num_jobs,\n )\n\n\[email protected](context_settings=dict(show_default=True))\[email protected](\"cutset\", type=click.Path(exists=True, dir_okay=False))\[email protected](\"output_cutset\", type=click.Path())\[email protected](\"storage_path\", type=click.Path())\[email protected](\n \"-f\",\n \"--feature-manifest\",\n type=click.Path(exists=True, dir_okay=False),\n help=\"Optional manifest specifying feature extractor configuration.\",\n)\[email protected](\n \"--storage-type\",\n type=click.Choice(available_storage_backends()),\n default=\"lilcom_hdf5\",\n help=\"Select a storage backend for the feature matrices.\",\n)\[email protected](\n \"-j\", \"--num-jobs\", type=int, default=1, help=\"Number of parallel processes.\"\n)\ndef extract_cuts(\n cutset: Pathlike,\n output_cutset: Pathlike,\n storage_path: Pathlike,\n feature_manifest: Optional[Pathlike],\n storage_type: str,\n num_jobs: int,\n):\n \"\"\"\n Extract features for cuts in a given CUTSET manifest.\n The features are stored in STORAGE_PATH, and the output manifest\n with features is stored in OUTPUT_CUTSET.\n \"\"\"\n from lhotse import CutSet\n\n cuts: CutSet = CutSet.from_file(cutset)\n feature_extractor = (\n FeatureExtractor.from_yaml(feature_manifest)\n if feature_manifest is not None\n else Fbank()\n )\n cuts = cuts.compute_and_store_features(\n extractor=feature_extractor,\n storage_path=storage_path,\n num_jobs=num_jobs,\n storage_type=get_writer(storage_type),\n )\n Path(output_cutset).parent.mkdir(parents=True, exist_ok=True)\n cuts.to_file(output_cutset)\n\n\[email protected](context_settings=dict(show_default=True))\[email protected](\"cutset\", type=click.Path(exists=True, dir_okay=False))\[email protected](\"output_cutset\", type=click.Path())\[email protected](\"storage_path\", type=click.Path())\[email protected](\n \"-f\",\n \"--feature-manifest\",\n type=click.Path(exists=True, dir_okay=False),\n help=\"Optional manifest specifying feature extractor configuration. 
\"\n \"If you want to use CUDA, you should specify the device in this \"\n \"config.\",\n)\[email protected](\n \"--storage-type\",\n type=click.Choice(available_storage_backends()),\n default=\"lilcom_hdf5\",\n help=\"Select a storage backend for the feature matrices.\",\n)\[email protected](\n \"-j\", \"--num-jobs\", type=int, default=4, help=\"Number of dataloader workers.\"\n)\[email protected](\n \"-b\",\n \"--batch-duration\",\n type=float,\n default=600.0,\n help=\"At most this many seconds of audio will be processed in each batch.\",\n)\ndef extract_cuts_batch(\n cutset: Pathlike,\n output_cutset: Pathlike,\n storage_path: Pathlike,\n feature_manifest: Optional[Pathlike],\n storage_type: str,\n num_jobs: int,\n batch_duration: Seconds,\n):\n \"\"\"\n Extract features for cuts in a given CUTSET manifest.\n The features are stored in STORAGE_PATH, and the output manifest\n with features is stored in OUTPUT_CUTSET.\n\n This version enables CUDA acceleration for feature extractors\n that support it (e.g., kaldifeat extractors).\n\n \\b\n Example usage of kaldifeat fbank with CUDA:\n\n $ pip install kaldifeat # note: ensure it's compiled with CUDA\n\n $ lhotse feat write-default-config -f kaldifeat-fbank feat.yml\n\n $ sed 's/device: cpu/device: cuda/' feat.yml feat-cuda.yml\n\n $ lhotse feat extract-cuts-batch -f feat-cuda.yml cuts.jsonl cuts_with_feats.jsonl feats.h5\n \"\"\"\n from lhotse import CutSet\n\n cuts: CutSet = CutSet.from_file(cutset)\n feature_extractor = (\n FeatureExtractor.from_yaml(feature_manifest)\n if feature_manifest is not None\n else Fbank()\n )\n cuts = cuts.compute_and_store_features_batch(\n extractor=feature_extractor,\n storage_path=storage_path,\n batch_duration=batch_duration,\n num_workers=num_jobs,\n storage_type=get_writer(storage_type),\n )\n Path(output_cutset).parent.mkdir(parents=True, exist_ok=True)\n cuts.to_file(output_cutset)\n\n\[email protected](context_settings=dict(show_default=True))\[email protected](\"feature_manifest\", type=click.Path(exists=True, dir_okay=False))\[email protected](\"url\")\[email protected](\"output_manifest\", type=click.Path())\[email protected](\"--num-jobs\", \"-j\", type=int, default=1)\ndef upload(\n feature_manifest: Pathlike, url: str, output_manifest: Pathlike, num_jobs: int\n):\n \"\"\"\n Read an existing FEATURE_MANIFEST, upload the feature matrices it contains to a URL location,\n and save a new feature OUTPUT_MANIFEST that refers to the uploaded features.\n\n The URL can refer to endpoints such as AWS S3, GCP, Azure, etc.\n For example: \"s3://my-bucket/my-features\" is a valid URL.\n\n This script does not currently support credentials,\n and assumes that you have the write permissions.\n \"\"\"\n from concurrent.futures import ProcessPoolExecutor\n\n from tqdm import tqdm\n\n output_manifest = Path(output_manifest)\n assert (\n \".jsonl\" in output_manifest.suffixes\n ), \"This mode only supports writing to JSONL feature manifests.\"\n\n local_features: FeatureSet = FeatureSet.from_file(feature_manifest)\n\n with FeatureSet.open_writer(\n output_manifest\n ) as manifest_writer, ProcessPoolExecutor(num_jobs) as ex:\n futures = []\n for item in tqdm(local_features, desc=\"Submitting parallel uploading tasks...\"):\n futures.append(ex.submit(_upload_one, item, url))\n for item in tqdm(futures, desc=f\"Uploading features to {url}\"):\n manifest_writer.write(item.result())\n\n\ndef _upload_one(item: Features, url: str) -> Features:\n feats_mat = item.load()\n feats_writer = LilcomURLWriter(url)\n new_key = 
feats_writer.write(key=item.storage_key, value=feats_mat)\n return fastcopy(\n item, storage_path=url, storage_key=new_key, storage_type=feats_writer.name\n )\n", "path": "lhotse/bin/modes/features.py"}], "after_files": [{"content": "from pathlib import Path\nfrom typing import Optional\n\nimport click\n\nfrom lhotse import Features, FeatureSet, LilcomURLWriter, Seconds\nfrom lhotse.audio import RecordingSet\nfrom lhotse.bin.modes.cli_base import cli\nfrom lhotse.features import (\n Fbank,\n FeatureExtractor,\n FeatureSetBuilder,\n create_default_feature_extractor,\n)\nfrom lhotse.features.base import FEATURE_EXTRACTORS\nfrom lhotse.features.io import available_storage_backends, get_writer\nfrom lhotse.utils import Pathlike, fastcopy\n\n\[email protected]()\ndef feat():\n \"\"\"Feature extraction related commands.\"\"\"\n pass\n\n\[email protected](context_settings=dict(show_default=True))\[email protected](\"output_config\", type=click.Path())\[email protected](\n \"-f\",\n \"--feature-type\",\n type=click.Choice(list(FEATURE_EXTRACTORS)),\n default=\"fbank\",\n help=\"Which feature extractor type to use.\",\n)\ndef write_default_config(output_config: Pathlike, feature_type: str):\n \"\"\"Save a default feature extraction config to OUTPUT_CONFIG.\"\"\"\n create_default_feature_extractor(feature_type).to_yaml(output_config)\n\n\[email protected](context_settings=dict(show_default=True))\[email protected](\"recording_manifest\", type=click.Path(exists=True, dir_okay=False))\[email protected](\"output_dir\", type=click.Path())\[email protected](\n \"-f\",\n \"--feature-manifest\",\n type=click.Path(exists=True, dir_okay=False),\n help=\"Optional manifest specifying feature extractor configuration.\",\n)\[email protected](\n \"--storage-type\",\n type=click.Choice(available_storage_backends()),\n default=\"lilcom_files\",\n help=\"Select a storage backend for the feature matrices.\",\n)\[email protected](\n \"-t\",\n \"--lilcom-tick-power\",\n type=int,\n default=-5,\n help=\"Determines the compression accuracy; \"\n \"the input will be compressed to integer multiples of 2^tick_power\",\n)\[email protected](\n \"-r\",\n \"--root-dir\",\n type=click.Path(exists=True, file_okay=False),\n default=None,\n help=\"Root directory - all paths in the manifest will use this as prefix.\",\n)\[email protected](\n \"-j\", \"--num-jobs\", type=int, default=1, help=\"Number of parallel processes.\"\n)\ndef extract(\n recording_manifest: Pathlike,\n output_dir: Pathlike,\n feature_manifest: Optional[Pathlike],\n storage_type: str,\n lilcom_tick_power: int,\n root_dir: Optional[Pathlike],\n num_jobs: int,\n):\n \"\"\"\n Extract features for recordings in a given AUDIO_MANIFEST. 
The features are stored in OUTPUT_DIR,\n with one file per recording (or segment).\n \"\"\"\n recordings: RecordingSet = RecordingSet.from_file(recording_manifest)\n if root_dir is not None:\n recordings = recordings.with_path_prefix(root_dir)\n\n feature_extractor = (\n FeatureExtractor.from_yaml(feature_manifest)\n if feature_manifest is not None\n else Fbank()\n )\n\n output_dir = Path(output_dir)\n output_dir.mkdir(exist_ok=True, parents=True)\n storage_path = (\n output_dir / \"feats.h5\" if \"hdf5\" in storage_type else output_dir / \"storage\"\n )\n\n with get_writer(storage_type)(\n storage_path, tick_power=lilcom_tick_power\n ) as storage:\n feature_set_builder = FeatureSetBuilder(\n feature_extractor=feature_extractor,\n storage=storage,\n )\n feature_set_builder.process_and_store_recordings(\n recordings=recordings,\n output_manifest=output_dir / \"feature_manifest.json.gz\",\n num_jobs=num_jobs,\n )\n\n\[email protected](context_settings=dict(show_default=True))\[email protected](\"cutset\", type=click.Path(exists=True, dir_okay=False))\[email protected](\"output_cutset\", type=click.Path())\[email protected](\"storage_path\", type=click.Path())\[email protected](\n \"-f\",\n \"--feature-manifest\",\n type=click.Path(exists=True, dir_okay=False),\n help=\"Optional manifest specifying feature extractor configuration.\",\n)\[email protected](\n \"--storage-type\",\n type=click.Choice(available_storage_backends()),\n default=\"lilcom_hdf5\",\n help=\"Select a storage backend for the feature matrices.\",\n)\[email protected](\n \"-j\", \"--num-jobs\", type=int, default=1, help=\"Number of parallel processes.\"\n)\ndef extract_cuts(\n cutset: Pathlike,\n output_cutset: Pathlike,\n storage_path: Pathlike,\n feature_manifest: Optional[Pathlike],\n storage_type: str,\n num_jobs: int,\n):\n \"\"\"\n Extract features for cuts in a given CUTSET manifest.\n The features are stored in STORAGE_PATH, and the output manifest\n with features is stored in OUTPUT_CUTSET.\n \"\"\"\n from lhotse import CutSet\n\n cuts: CutSet = CutSet.from_file(cutset)\n feature_extractor = (\n FeatureExtractor.from_yaml(feature_manifest)\n if feature_manifest is not None\n else Fbank()\n )\n cuts = cuts.compute_and_store_features(\n extractor=feature_extractor,\n storage_path=storage_path,\n num_jobs=num_jobs,\n storage_type=get_writer(storage_type),\n )\n Path(output_cutset).parent.mkdir(parents=True, exist_ok=True)\n cuts.to_file(output_cutset)\n\n\[email protected](context_settings=dict(show_default=True))\[email protected](\"cutset\", type=click.Path(exists=True, dir_okay=False))\[email protected](\"output_cutset\", type=click.Path())\[email protected](\"storage_path\", type=click.Path())\[email protected](\n \"-f\",\n \"--feature-manifest\",\n type=click.Path(exists=True, dir_okay=False),\n help=\"Optional manifest specifying feature extractor configuration. 
\"\n \"If you want to use CUDA, you should specify the device in this \"\n \"config.\",\n)\[email protected](\n \"--storage-type\",\n type=click.Choice(available_storage_backends()),\n default=\"lilcom_hdf5\",\n help=\"Select a storage backend for the feature matrices.\",\n)\[email protected](\n \"-j\", \"--num-jobs\", type=int, default=4, help=\"Number of dataloader workers.\"\n)\[email protected](\n \"-b\",\n \"--batch-duration\",\n type=float,\n default=600.0,\n help=\"At most this many seconds of audio will be processed in each batch.\",\n)\ndef extract_cuts_batch(\n cutset: Pathlike,\n output_cutset: Pathlike,\n storage_path: Pathlike,\n feature_manifest: Optional[Pathlike],\n storage_type: str,\n num_jobs: int,\n batch_duration: Seconds,\n):\n \"\"\"\n Extract features for cuts in a given CUTSET manifest.\n The features are stored in STORAGE_PATH, and the output manifest\n with features is stored in OUTPUT_CUTSET.\n\n This version enables CUDA acceleration for feature extractors\n that support it (e.g., kaldifeat extractors).\n\n \\b\n Example usage of kaldifeat fbank with CUDA:\n\n $ pip install kaldifeat # note: ensure it's compiled with CUDA\n\n $ lhotse feat write-default-config -f kaldifeat-fbank feat.yml\n\n $ sed 's/device: cpu/device: cuda/' feat.yml feat-cuda.yml\n\n $ lhotse feat extract-cuts-batch -f feat-cuda.yml cuts.jsonl cuts_with_feats.jsonl feats.h5\n \"\"\"\n from lhotse import CutSet\n\n cuts: CutSet = CutSet.from_file(cutset)\n feature_extractor = (\n FeatureExtractor.from_yaml(feature_manifest)\n if feature_manifest is not None\n else Fbank()\n )\n cuts = cuts.compute_and_store_features_batch(\n extractor=feature_extractor,\n storage_path=storage_path,\n batch_duration=batch_duration,\n num_workers=num_jobs,\n storage_type=get_writer(storage_type),\n )\n Path(output_cutset).parent.mkdir(parents=True, exist_ok=True)\n cuts.to_file(output_cutset)\n\n\[email protected](context_settings=dict(show_default=True))\[email protected](\"feature_manifest\", type=click.Path(exists=True, dir_okay=False))\[email protected](\"url\")\[email protected](\"output_manifest\", type=click.Path())\[email protected](\"--num-jobs\", \"-j\", type=int, default=1)\ndef upload(\n feature_manifest: Pathlike, url: str, output_manifest: Pathlike, num_jobs: int\n):\n \"\"\"\n Read an existing FEATURE_MANIFEST, upload the feature matrices it contains to a URL location,\n and save a new feature OUTPUT_MANIFEST that refers to the uploaded features.\n\n The URL can refer to endpoints such as AWS S3, GCP, Azure, etc.\n For example: \"s3://my-bucket/my-features\" is a valid URL.\n\n This script does not currently support credentials,\n and assumes that you have the write permissions.\n \"\"\"\n from concurrent.futures import ProcessPoolExecutor\n\n from tqdm import tqdm\n\n output_manifest = Path(output_manifest)\n assert (\n \".jsonl\" in output_manifest.suffixes\n ), \"This mode only supports writing to JSONL feature manifests.\"\n\n local_features: FeatureSet = FeatureSet.from_file(feature_manifest)\n\n with FeatureSet.open_writer(\n output_manifest\n ) as manifest_writer, ProcessPoolExecutor(num_jobs) as ex:\n futures = []\n for item in tqdm(local_features, desc=\"Submitting parallel uploading tasks...\"):\n futures.append(ex.submit(_upload_one, item, url))\n for item in tqdm(futures, desc=f\"Uploading features to {url}\"):\n manifest_writer.write(item.result())\n\n\ndef _upload_one(item: Features, url: str) -> Features:\n feats_mat = item.load()\n feats_writer = LilcomURLWriter(url)\n new_key = 
feats_writer.write(key=item.storage_key, value=feats_mat)\n return fastcopy(\n item, storage_path=url, storage_key=new_key, storage_type=feats_writer.name\n )\n", "path": "lhotse/bin/modes/features.py"}]} | 3,935 | 136 |
gh_patches_debug_13796 | rasdani/github-patches | git_diff | Mailu__Mailu-1874 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Weblate instance is down
I tried accessing the Weblate instance to potentially add another language, but it looks like it is down.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup/server.py`
Content:
```
1 import flask
2 import flask_bootstrap
3 import redis
4 import json
5 import os
6 import jinja2
7 import uuid
8 import string
9 import random
10 import ipaddress
11 import hashlib
12 import time
13
14
15 version = os.getenv("this_version", "master")
16 static_url_path = "/" + version + "/static"
17 app = flask.Flask(__name__, static_url_path=static_url_path)
18 flask_bootstrap.Bootstrap(app)
19 db = redis.StrictRedis(host='redis', port=6379, db=0)
20
21
22 def render_flavor(flavor, template, data):
23 return flask.render_template(
24 os.path.join(flavor, template),
25 **data
26 )
27
28
29 @app.add_template_global
30 def secret(length=16):
31 charset = string.ascii_uppercase + string.digits
32 return ''.join(
33 random.SystemRandom().choice(charset)
34 for _ in range(length)
35 )
36
37 #Original copied from https://github.com/andrewlkho/ulagen
38 def random_ipv6_subnet():
39 eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff
40 eui64_canon = "-".join([format(eui64, "02X")[i:i+2] for i in range(0, 18, 2)])
41
42 h = hashlib.sha1()
43 h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8'))
44 globalid = h.hexdigest()[0:10]
45
46 prefix = ":".join(("fd" + globalid[0:2], globalid[2:6], globalid[6:10]))
47 return prefix
48
49 def build_app(path):
50
51 app.jinja_env.trim_blocks = True
52 app.jinja_env.lstrip_blocks = True
53
54 @app.context_processor
55 def app_context():
56 return dict(
57 versions=os.getenv("VERSIONS","master").split(','),
58 stable_version = os.getenv("stable_version", "master")
59 )
60
61 prefix_bp = flask.Blueprint(version, __name__)
62 prefix_bp.jinja_loader = jinja2.ChoiceLoader([
63 jinja2.FileSystemLoader(os.path.join(path, "templates")),
64 jinja2.FileSystemLoader(os.path.join(path, "flavors"))
65 ])
66
67 root_bp = flask.Blueprint("root", __name__)
68 root_bp.jinja_loader = jinja2.ChoiceLoader([
69 jinja2.FileSystemLoader(os.path.join(path, "templates")),
70 jinja2.FileSystemLoader(os.path.join(path, "flavors"))
71 ])
72
73 @prefix_bp.context_processor
74 @root_bp.context_processor
75 def bp_context(version=version):
76 return dict(version=version)
77
78 @prefix_bp.route("/")
79 @root_bp.route("/")
80 def wizard():
81 return flask.render_template('wizard.html')
82
83 @prefix_bp.route("/submit_flavor", methods=["POST"])
84 @root_bp.route("/submit_flavor", methods=["POST"])
85 def submit_flavor():
86 data = flask.request.form.copy()
87 subnet6 = random_ipv6_subnet()
88 steps = sorted(os.listdir(os.path.join(path, "templates", "steps", data["flavor"])))
89 return flask.render_template('wizard.html', flavor=data["flavor"], steps=steps, subnet6=subnet6)
90
91 @prefix_bp.route("/submit", methods=["POST"])
92 @root_bp.route("/submit", methods=["POST"])
93 def submit():
94 data = flask.request.form.copy()
95 data['uid'] = str(uuid.uuid4())
96 try:
97 data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])
98 except ValueError as err:
99 return "Error while generating files: " + str(err)
100 db.set(data['uid'], json.dumps(data))
101 return flask.redirect(flask.url_for('.setup', uid=data['uid']))
102
103 @prefix_bp.route("/setup/<uid>", methods=["GET"])
104 @root_bp.route("/setup/<uid>", methods=["GET"])
105 def setup(uid):
106 data = json.loads(db.get(uid))
107 flavor = data.get("flavor", "compose")
108 rendered = render_flavor(flavor, "setup.html", data)
109 return flask.render_template("setup.html", contents=rendered)
110
111 @prefix_bp.route("/file/<uid>/<filepath>", methods=["GET"])
112 @root_bp.route("/file/<uid>/<filepath>", methods=["GET"])
113 def file(uid, filepath):
114 data = json.loads(db.get(uid))
115 flavor = data.get("flavor", "compose")
116 return flask.Response(
117 render_flavor(flavor, filepath, data),
118 mimetype="application/text"
119 )
120
121 app.register_blueprint(prefix_bp, url_prefix="/{}".format(version))
122 app.register_blueprint(root_bp)
123
124
125 if __name__ == "__main__":
126 build_app("/tmp/mailutest")
127 app.run(debug=True)
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup/server.py b/setup/server.py
--- a/setup/server.py
+++ b/setup/server.py
@@ -54,11 +54,11 @@
@app.context_processor
def app_context():
return dict(
- versions=os.getenv("VERSIONS","master").split(','),
+ versions=os.getenv("VERSIONS","master").split(','),
stable_version = os.getenv("stable_version", "master")
)
- prefix_bp = flask.Blueprint(version, __name__)
+ prefix_bp = flask.Blueprint(version.replace(".", "_"), __name__)
prefix_bp.jinja_loader = jinja2.ChoiceLoader([
jinja2.FileSystemLoader(os.path.join(path, "templates")),
jinja2.FileSystemLoader(os.path.join(path, "flavors"))
| {"golden_diff": "diff --git a/setup/server.py b/setup/server.py\n--- a/setup/server.py\n+++ b/setup/server.py\n@@ -54,11 +54,11 @@\n @app.context_processor\n def app_context():\n return dict(\n- versions=os.getenv(\"VERSIONS\",\"master\").split(','), \n+ versions=os.getenv(\"VERSIONS\",\"master\").split(','),\n stable_version = os.getenv(\"stable_version\", \"master\")\n )\n \n- prefix_bp = flask.Blueprint(version, __name__)\n+ prefix_bp = flask.Blueprint(version.replace(\".\", \"_\"), __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n", "issue": "Weblate instance is down\nI tried accessing the Weblate instance and potentially add another language but it looks down.\n", "before_files": [{"content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\nimport hashlib\nimport time\n\n\nversion = os.getenv(\"this_version\", \"master\")\nstatic_url_path = \"/\" + version + \"/static\"\napp = flask.Flask(__name__, static_url_path=static_url_path)\nflask_bootstrap.Bootstrap(app)\ndb = redis.StrictRedis(host='redis', port=6379, db=0)\n\n\ndef render_flavor(flavor, template, data):\n return flask.render_template(\n os.path.join(flavor, template),\n **data\n )\n\n\[email protected]_template_global\ndef secret(length=16):\n charset = string.ascii_uppercase + string.digits\n return ''.join(\n random.SystemRandom().choice(charset)\n for _ in range(length)\n )\n\n#Original copied from https://github.com/andrewlkho/ulagen\ndef random_ipv6_subnet():\n eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff\n eui64_canon = \"-\".join([format(eui64, \"02X\")[i:i+2] for i in range(0, 18, 2)])\n\n h = hashlib.sha1()\n h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8'))\n globalid = h.hexdigest()[0:10]\n\n prefix = \":\".join((\"fd\" + globalid[0:2], globalid[2:6], globalid[6:10]))\n return prefix\n\ndef build_app(path):\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n\n @app.context_processor\n def app_context():\n return dict(\n versions=os.getenv(\"VERSIONS\",\"master\").split(','), \n stable_version = os.getenv(\"stable_version\", \"master\")\n )\n\n prefix_bp = flask.Blueprint(version, __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n root_bp = flask.Blueprint(\"root\", __name__)\n root_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n @prefix_bp.context_processor\n @root_bp.context_processor\n def bp_context(version=version):\n return dict(version=version)\n\n @prefix_bp.route(\"/\")\n @root_bp.route(\"/\")\n def wizard():\n return flask.render_template('wizard.html')\n\n @prefix_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n @root_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n def submit_flavor():\n data = flask.request.form.copy()\n subnet6 = random_ipv6_subnet()\n steps = sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", data[\"flavor\"])))\n return flask.render_template('wizard.html', flavor=data[\"flavor\"], steps=steps, subnet6=subnet6)\n\n @prefix_bp.route(\"/submit\", methods=[\"POST\"])\n 
@root_bp.route(\"/submit\", methods=[\"POST\"])\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n try:\n data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])\n except ValueError as err:\n return \"Error while generating files: \" + str(err)\n db.set(data['uid'], json.dumps(data))\n return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n\n @prefix_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n @root_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n def setup(uid):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n rendered = render_flavor(flavor, \"setup.html\", data)\n return flask.render_template(\"setup.html\", contents=rendered)\n\n @prefix_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n @root_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n def file(uid, filepath):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n return flask.Response(\n render_flavor(flavor, filepath, data),\n mimetype=\"application/text\"\n )\n\n app.register_blueprint(prefix_bp, url_prefix=\"/{}\".format(version))\n app.register_blueprint(root_bp)\n\n\nif __name__ == \"__main__\":\n build_app(\"/tmp/mailutest\")\n app.run(debug=True)\n", "path": "setup/server.py"}], "after_files": [{"content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\nimport hashlib\nimport time\n\n\nversion = os.getenv(\"this_version\", \"master\")\nstatic_url_path = \"/\" + version + \"/static\"\napp = flask.Flask(__name__, static_url_path=static_url_path)\nflask_bootstrap.Bootstrap(app)\ndb = redis.StrictRedis(host='redis', port=6379, db=0)\n\n\ndef render_flavor(flavor, template, data):\n return flask.render_template(\n os.path.join(flavor, template),\n **data\n )\n\n\[email protected]_template_global\ndef secret(length=16):\n charset = string.ascii_uppercase + string.digits\n return ''.join(\n random.SystemRandom().choice(charset)\n for _ in range(length)\n )\n\n#Original copied from https://github.com/andrewlkho/ulagen\ndef random_ipv6_subnet():\n eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff\n eui64_canon = \"-\".join([format(eui64, \"02X\")[i:i+2] for i in range(0, 18, 2)])\n\n h = hashlib.sha1()\n h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8'))\n globalid = h.hexdigest()[0:10]\n\n prefix = \":\".join((\"fd\" + globalid[0:2], globalid[2:6], globalid[6:10]))\n return prefix\n\ndef build_app(path):\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n\n @app.context_processor\n def app_context():\n return dict(\n versions=os.getenv(\"VERSIONS\",\"master\").split(','),\n stable_version = os.getenv(\"stable_version\", \"master\")\n )\n\n prefix_bp = flask.Blueprint(version.replace(\".\", \"_\"), __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n root_bp = flask.Blueprint(\"root\", __name__)\n root_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n @prefix_bp.context_processor\n @root_bp.context_processor\n def bp_context(version=version):\n return dict(version=version)\n\n @prefix_bp.route(\"/\")\n @root_bp.route(\"/\")\n def 
wizard():\n return flask.render_template('wizard.html')\n\n @prefix_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n @root_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n def submit_flavor():\n data = flask.request.form.copy()\n subnet6 = random_ipv6_subnet()\n steps = sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", data[\"flavor\"])))\n return flask.render_template('wizard.html', flavor=data[\"flavor\"], steps=steps, subnet6=subnet6)\n\n @prefix_bp.route(\"/submit\", methods=[\"POST\"])\n @root_bp.route(\"/submit\", methods=[\"POST\"])\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n try:\n data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])\n except ValueError as err:\n return \"Error while generating files: \" + str(err)\n db.set(data['uid'], json.dumps(data))\n return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n\n @prefix_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n @root_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n def setup(uid):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n rendered = render_flavor(flavor, \"setup.html\", data)\n return flask.render_template(\"setup.html\", contents=rendered)\n\n @prefix_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n @root_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n def file(uid, filepath):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n return flask.Response(\n render_flavor(flavor, filepath, data),\n mimetype=\"application/text\"\n )\n\n app.register_blueprint(prefix_bp, url_prefix=\"/{}\".format(version))\n app.register_blueprint(root_bp)\n\n\nif __name__ == \"__main__\":\n build_app(\"/tmp/mailutest\")\n app.run(debug=True)\n", "path": "setup/server.py"}]} | 1,648 | 170 |
gh_patches_debug_31183 | rasdani/github-patches | git_diff | modin-project__modin-3156 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`ValueError` during initialization of Ray 1.4 in Modin
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux Ubuntu
- **Modin version** (`modin.__version__`): 0.10.0
- **Python version**: 3.8.10
- **Code we can use to reproduce**:
```python
import modin.pandas as pd
df = pd.DataFrame([0])
```
<!--
You can obtain the Modin version with
python -c "import modin; print(modin.__version__)"
-->
### Describe the problem
<!-- Describe the problem clearly here. -->
During the initialization of Ray in Modin, a `ValueError` occurs:
```
ValueError: The configured object store size (486.0 GB) exceeds /dev/shm size (405.07611136 GB). This will harm performance. Consider deleting files in /dev/shm or increasing its size with --shm-size in Docker. To ignore this warning, set RAY_OBJECT_STORE_ALLOW_SLOW_STORAGE=1.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modin/engines/ray/utils.py`
Content:
```
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 """The module holds utility and initialization routines for Modin on Ray."""
15
16 import os
17 import sys
18
19 from modin.config import (
20 Backend,
21 IsRayCluster,
22 RayRedisAddress,
23 RayRedisPassword,
24 CpuCount,
25 GpuCount,
26 Memory,
27 NPartitions,
28 )
29
30
31 def _move_stdlib_ahead_of_site_packages(*args):
32 """
33 Ensure packages from stdlib have higher import priority than from site-packages.
34
35 Parameters
36 ----------
37 *args : tuple
38 Ignored, added for compatibility with Ray.
39
40 Notes
41 -----
42 This function is expected to be run on all workers including the driver.
43 This is a hack solution to fix GH-#647, GH-#746.
44 """
45 site_packages_path = None
46 site_packages_path_index = -1
47 for i, path in enumerate(sys.path):
48 if sys.exec_prefix in path and path.endswith("site-packages"):
49 site_packages_path = path
50 site_packages_path_index = i
51 # break on first found
52 break
53
54 if site_packages_path is not None:
55 # stdlib packages layout as follows:
56 # - python3.x
57 # - typing.py
58 # - site-packages/
59 # - pandas
60 # So extracting the dirname of the site_packages can point us
61 # to the directory containing standard libraries.
62 sys.path.insert(site_packages_path_index, os.path.dirname(site_packages_path))
63
64
65 def _import_pandas(*args):
66 """
67 Import pandas to make sure all its machinery is ready.
68
69 This prevents a race condition between two threads deserializing functions
70 and trying to import pandas at the same time.
71
72 Parameters
73 ----------
74 *args : tuple
75 Ignored, added for compatibility with Ray.
76
77 Notes
78 -----
79 This function is expected to be run on all workers before any
80 serialization or deserialization starts.
81 """
82 import pandas # noqa F401
83
84
85 def initialize_ray(
86 override_is_cluster=False,
87 override_redis_address: str = None,
88 override_redis_password: str = None,
89 ):
90 """
91 Initialize Ray based on parameters, ``modin.config`` variables and internal defaults.
92
93 Parameters
94 ----------
95 override_is_cluster : bool, default: False
96 Whether to override the detection of Modin being run in a cluster
97 and always assume this runs on cluster head node.
98 This also overrides Ray worker detection and always runs the initialization
99 function (runs from main thread only by default).
100 If not specified, ``modin.config.IsRayCluster`` variable is used.
101 override_redis_address : str, optional
102 What Redis address to connect to when running in Ray cluster.
103 If not specified, ``modin.config.RayRedisAddress`` is used.
104 override_redis_password : str, optional
105 What password to use when connecting to Redis.
106 If not specified, ``modin.config.RayRedisPassword`` is used.
107 """
108 import ray
109
110 if not ray.is_initialized() or override_is_cluster:
111 cluster = override_is_cluster or IsRayCluster.get()
112 redis_address = override_redis_address or RayRedisAddress.get()
113 redis_password = override_redis_password or RayRedisPassword.get()
114
115 if cluster:
116 # We only start ray in a cluster setting for the head node.
117 ray.init(
118 address=redis_address or "auto",
119 include_dashboard=False,
120 ignore_reinit_error=True,
121 _redis_password=redis_password,
122 )
123 else:
124 from modin.error_message import ErrorMessage
125
126 # This string is intentionally formatted this way. We want it indented in
127 # the warning message.
128 ErrorMessage.not_initialized(
129 "Ray",
130 """
131 import ray
132 ray.init()
133 """,
134 )
135 object_store_memory = Memory.get()
136 # In case anything failed above, we can still improve the memory for Modin.
137 if object_store_memory is None:
138 # Round down to the nearest Gigabyte.
139 system_memory = ray._private.utils.get_system_memory()
140 object_store_memory = int(0.6 * system_memory // 10 ** 9 * 10 ** 9)
141 # If the memory pool is smaller than 2GB, just use the default in ray.
142 if object_store_memory == 0:
143 object_store_memory = None
144 else:
145 object_store_memory = int(object_store_memory)
146
147 ray_init_kwargs = {
148 "num_cpus": CpuCount.get(),
149 "num_gpus": GpuCount.get(),
150 "include_dashboard": False,
151 "ignore_reinit_error": True,
152 "object_store_memory": object_store_memory,
153 "address": redis_address,
154 "_redis_password": redis_password,
155 "_memory": object_store_memory,
156 }
157 ray.init(**ray_init_kwargs)
158
159 _move_stdlib_ahead_of_site_packages()
160 ray.worker.global_worker.run_function_on_all_workers(
161 _move_stdlib_ahead_of_site_packages
162 )
163 if Backend.get() == "Cudf":
164 from modin.engines.ray.cudf_on_ray.frame.gpu_manager import GPUManager
165 from modin.engines.ray.cudf_on_ray.frame.partition_manager import (
166 GPU_MANAGERS,
167 )
168
169 # Check that GPU_MANAGERS is empty because _update_engine can be called multiple times
170 if not GPU_MANAGERS:
171 for i in range(GpuCount.get()):
172 GPU_MANAGERS.append(GPUManager.remote(i))
173 ray.worker.global_worker.run_function_on_all_workers(_import_pandas)
174 num_cpus = int(ray.cluster_resources()["CPU"])
175 num_gpus = int(ray.cluster_resources().get("GPU", 0))
176 if Backend.get() == "Cudf":
177 NPartitions._put(num_gpus)
178 else:
179 NPartitions._put(num_cpus)
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modin/engines/ray/utils.py b/modin/engines/ray/utils.py
--- a/modin/engines/ray/utils.py
+++ b/modin/engines/ray/utils.py
@@ -15,6 +15,8 @@
import os
import sys
+import psutil
+import warnings
from modin.config import (
Backend,
@@ -135,9 +137,24 @@
object_store_memory = Memory.get()
# In case anything failed above, we can still improve the memory for Modin.
if object_store_memory is None:
- # Round down to the nearest Gigabyte.
- system_memory = ray._private.utils.get_system_memory()
- object_store_memory = int(0.6 * system_memory // 10 ** 9 * 10 ** 9)
+ virtual_memory = psutil.virtual_memory().total
+ if sys.platform.startswith("linux"):
+ shm_fd = os.open("/dev/shm", os.O_RDONLY)
+ try:
+ shm_stats = os.fstatvfs(shm_fd)
+ system_memory = shm_stats.f_bsize * shm_stats.f_bavail
+ if system_memory / (virtual_memory / 2) < 0.99:
+ warnings.warn(
+ f"The size of /dev/shm is too small ({system_memory} bytes). The required size "
+ f"at least half of RAM ({virtual_memory // 2} bytes). Please, delete files in /dev/shm or "
+ "increase size of /dev/shm with --shm-size in Docker. Also, you can set "
+ "the required memory size for each Ray worker in bytes to MODIN_MEMORY environment variable."
+ )
+ finally:
+ os.close(shm_fd)
+ else:
+ system_memory = virtual_memory
+ object_store_memory = int(0.6 * system_memory // 1e9 * 1e9)
# If the memory pool is smaller than 2GB, just use the default in ray.
if object_store_memory == 0:
object_store_memory = None
| {"golden_diff": "diff --git a/modin/engines/ray/utils.py b/modin/engines/ray/utils.py\n--- a/modin/engines/ray/utils.py\n+++ b/modin/engines/ray/utils.py\n@@ -15,6 +15,8 @@\n \n import os\n import sys\n+import psutil\n+import warnings\n \n from modin.config import (\n Backend,\n@@ -135,9 +137,24 @@\n object_store_memory = Memory.get()\n # In case anything failed above, we can still improve the memory for Modin.\n if object_store_memory is None:\n- # Round down to the nearest Gigabyte.\n- system_memory = ray._private.utils.get_system_memory()\n- object_store_memory = int(0.6 * system_memory // 10 ** 9 * 10 ** 9)\n+ virtual_memory = psutil.virtual_memory().total\n+ if sys.platform.startswith(\"linux\"):\n+ shm_fd = os.open(\"/dev/shm\", os.O_RDONLY)\n+ try:\n+ shm_stats = os.fstatvfs(shm_fd)\n+ system_memory = shm_stats.f_bsize * shm_stats.f_bavail\n+ if system_memory / (virtual_memory / 2) < 0.99:\n+ warnings.warn(\n+ f\"The size of /dev/shm is too small ({system_memory} bytes). The required size \"\n+ f\"at least half of RAM ({virtual_memory // 2} bytes). Please, delete files in /dev/shm or \"\n+ \"increase size of /dev/shm with --shm-size in Docker. Also, you can set \"\n+ \"the required memory size for each Ray worker in bytes to MODIN_MEMORY environment variable.\"\n+ )\n+ finally:\n+ os.close(shm_fd)\n+ else:\n+ system_memory = virtual_memory\n+ object_store_memory = int(0.6 * system_memory // 1e9 * 1e9)\n # If the memory pool is smaller than 2GB, just use the default in ray.\n if object_store_memory == 0:\n object_store_memory = None\n", "issue": "`ValueError` during initialization of Ray 1.4 in Modin\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux Ubuntu\r\n- **Modin version** (`modin.__version__`): 0.10.0\r\n- **Python version**: 3.8.10\r\n- **Code we can use to reproduce**:\r\n\r\n```python\r\nimport modin.pandas as pd\r\ndf = pd.DataFrame([0])\r\n```\r\n\r\n<!--\r\nYou can obtain the Modin version with\r\n\r\npython -c \"import modin; print(modin.__version__)\"\r\n-->\r\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\nDuring initialization of Ray in Modin `ValueError` is occurred:\r\n```\r\nValueError: The configured object store size (486.0 GB) exceeds /dev/shm size (405.07611136 GB). This will harm performance. Consider deleting files in /dev/shm or increasing its size with --shm-size in Docker. To ignore this warning, set RAY_OBJECT_STORE_ALLOW_SLOW_STORAGE=1.\r\n```\r\n\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"The module holds utility and initialization routines for Modin on Ray.\"\"\"\n\nimport os\nimport sys\n\nfrom modin.config import (\n Backend,\n IsRayCluster,\n RayRedisAddress,\n RayRedisPassword,\n CpuCount,\n GpuCount,\n Memory,\n NPartitions,\n)\n\n\ndef _move_stdlib_ahead_of_site_packages(*args):\n \"\"\"\n Ensure packages from stdlib have higher import priority than from site-packages.\n\n Parameters\n ----------\n *args : tuple\n Ignored, added for compatibility with Ray.\n\n Notes\n -----\n This function is expected to be run on all workers including the driver.\n This is a hack solution to fix GH-#647, GH-#746.\n \"\"\"\n site_packages_path = None\n site_packages_path_index = -1\n for i, path in enumerate(sys.path):\n if sys.exec_prefix in path and path.endswith(\"site-packages\"):\n site_packages_path = path\n site_packages_path_index = i\n # break on first found\n break\n\n if site_packages_path is not None:\n # stdlib packages layout as follows:\n # - python3.x\n # - typing.py\n # - site-packages/\n # - pandas\n # So extracting the dirname of the site_packages can point us\n # to the directory containing standard libraries.\n sys.path.insert(site_packages_path_index, os.path.dirname(site_packages_path))\n\n\ndef _import_pandas(*args):\n \"\"\"\n Import pandas to make sure all its machinery is ready.\n\n This prevents a race condition between two threads deserializing functions\n and trying to import pandas at the same time.\n\n Parameters\n ----------\n *args : tuple\n Ignored, added for compatibility with Ray.\n\n Notes\n -----\n This function is expected to be run on all workers before any\n serialization or deserialization starts.\n \"\"\"\n import pandas # noqa F401\n\n\ndef initialize_ray(\n override_is_cluster=False,\n override_redis_address: str = None,\n override_redis_password: str = None,\n):\n \"\"\"\n Initialize Ray based on parameters, ``modin.config`` variables and internal defaults.\n\n Parameters\n ----------\n override_is_cluster : bool, default: False\n Whether to override the detection of Modin being run in a cluster\n and always assume this runs on cluster head node.\n This also overrides Ray worker detection and always runs the initialization\n function (runs from main thread only by default).\n If not specified, ``modin.config.IsRayCluster`` variable is used.\n override_redis_address : str, optional\n What Redis address to connect to when running in Ray cluster.\n If not specified, ``modin.config.RayRedisAddress`` is used.\n override_redis_password : str, optional\n What password to use when connecting to Redis.\n If not specified, ``modin.config.RayRedisPassword`` is used.\n \"\"\"\n import ray\n\n if not ray.is_initialized() or override_is_cluster:\n cluster = override_is_cluster or IsRayCluster.get()\n redis_address = override_redis_address or RayRedisAddress.get()\n redis_password = override_redis_password or RayRedisPassword.get()\n\n if cluster:\n # We only start ray in a cluster setting for the head node.\n ray.init(\n address=redis_address or \"auto\",\n include_dashboard=False,\n ignore_reinit_error=True,\n _redis_password=redis_password,\n )\n else:\n from modin.error_message import ErrorMessage\n\n # This string is intentionally formatted this way. 
We want it indented in\n # the warning message.\n ErrorMessage.not_initialized(\n \"Ray\",\n \"\"\"\n import ray\n ray.init()\n\"\"\",\n )\n object_store_memory = Memory.get()\n # In case anything failed above, we can still improve the memory for Modin.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n system_memory = ray._private.utils.get_system_memory()\n object_store_memory = int(0.6 * system_memory // 10 ** 9 * 10 ** 9)\n # If the memory pool is smaller than 2GB, just use the default in ray.\n if object_store_memory == 0:\n object_store_memory = None\n else:\n object_store_memory = int(object_store_memory)\n\n ray_init_kwargs = {\n \"num_cpus\": CpuCount.get(),\n \"num_gpus\": GpuCount.get(),\n \"include_dashboard\": False,\n \"ignore_reinit_error\": True,\n \"object_store_memory\": object_store_memory,\n \"address\": redis_address,\n \"_redis_password\": redis_password,\n \"_memory\": object_store_memory,\n }\n ray.init(**ray_init_kwargs)\n\n _move_stdlib_ahead_of_site_packages()\n ray.worker.global_worker.run_function_on_all_workers(\n _move_stdlib_ahead_of_site_packages\n )\n if Backend.get() == \"Cudf\":\n from modin.engines.ray.cudf_on_ray.frame.gpu_manager import GPUManager\n from modin.engines.ray.cudf_on_ray.frame.partition_manager import (\n GPU_MANAGERS,\n )\n\n # Check that GPU_MANAGERS is empty because _update_engine can be called multiple times\n if not GPU_MANAGERS:\n for i in range(GpuCount.get()):\n GPU_MANAGERS.append(GPUManager.remote(i))\n ray.worker.global_worker.run_function_on_all_workers(_import_pandas)\n num_cpus = int(ray.cluster_resources()[\"CPU\"])\n num_gpus = int(ray.cluster_resources().get(\"GPU\", 0))\n if Backend.get() == \"Cudf\":\n NPartitions._put(num_gpus)\n else:\n NPartitions._put(num_cpus)\n", "path": "modin/engines/ray/utils.py"}], "after_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"The module holds utility and initialization routines for Modin on Ray.\"\"\"\n\nimport os\nimport sys\nimport psutil\nimport warnings\n\nfrom modin.config import (\n Backend,\n IsRayCluster,\n RayRedisAddress,\n RayRedisPassword,\n CpuCount,\n GpuCount,\n Memory,\n NPartitions,\n)\n\n\ndef _move_stdlib_ahead_of_site_packages(*args):\n \"\"\"\n Ensure packages from stdlib have higher import priority than from site-packages.\n\n Parameters\n ----------\n *args : tuple\n Ignored, added for compatibility with Ray.\n\n Notes\n -----\n This function is expected to be run on all workers including the driver.\n This is a hack solution to fix GH-#647, GH-#746.\n \"\"\"\n site_packages_path = None\n site_packages_path_index = -1\n for i, path in enumerate(sys.path):\n if sys.exec_prefix in path and path.endswith(\"site-packages\"):\n site_packages_path = path\n site_packages_path_index = i\n # break on first found\n break\n\n if site_packages_path is not None:\n # stdlib packages layout as follows:\n # - python3.x\n # - typing.py\n # - site-packages/\n # - pandas\n # So extracting the dirname of the site_packages can point us\n # to the directory containing standard libraries.\n sys.path.insert(site_packages_path_index, os.path.dirname(site_packages_path))\n\n\ndef _import_pandas(*args):\n \"\"\"\n Import pandas to make sure all its machinery is ready.\n\n This prevents a race condition between two threads deserializing functions\n and trying to import pandas at the same time.\n\n Parameters\n ----------\n *args : tuple\n Ignored, added for compatibility with Ray.\n\n Notes\n -----\n This function is expected to be run on all workers before any\n serialization or deserialization starts.\n \"\"\"\n import pandas # noqa F401\n\n\ndef initialize_ray(\n override_is_cluster=False,\n override_redis_address: str = None,\n override_redis_password: str = None,\n):\n \"\"\"\n Initialize Ray based on parameters, ``modin.config`` variables and internal defaults.\n\n Parameters\n ----------\n override_is_cluster : bool, default: False\n Whether to override the detection of Modin being run in a cluster\n and always assume this runs on cluster head node.\n This also overrides Ray worker detection and always runs the initialization\n function (runs from main thread only by default).\n If not specified, ``modin.config.IsRayCluster`` variable is used.\n override_redis_address : str, optional\n What Redis address to connect to when running in Ray cluster.\n If not specified, ``modin.config.RayRedisAddress`` is used.\n override_redis_password : str, optional\n What password to use when connecting to Redis.\n If not specified, ``modin.config.RayRedisPassword`` is used.\n \"\"\"\n import ray\n\n if not ray.is_initialized() or override_is_cluster:\n cluster = override_is_cluster or IsRayCluster.get()\n redis_address = override_redis_address or RayRedisAddress.get()\n redis_password = override_redis_password or RayRedisPassword.get()\n\n if cluster:\n # We only start ray in a cluster setting for the head node.\n ray.init(\n address=redis_address or \"auto\",\n include_dashboard=False,\n ignore_reinit_error=True,\n _redis_password=redis_password,\n )\n else:\n from modin.error_message import ErrorMessage\n\n # This string is intentionally formatted this way. 
We want it indented in\n # the warning message.\n ErrorMessage.not_initialized(\n \"Ray\",\n \"\"\"\n import ray\n ray.init()\n\"\"\",\n )\n object_store_memory = Memory.get()\n # In case anything failed above, we can still improve the memory for Modin.\n if object_store_memory is None:\n virtual_memory = psutil.virtual_memory().total\n if sys.platform.startswith(\"linux\"):\n shm_fd = os.open(\"/dev/shm\", os.O_RDONLY)\n try:\n shm_stats = os.fstatvfs(shm_fd)\n system_memory = shm_stats.f_bsize * shm_stats.f_bavail\n if system_memory / (virtual_memory / 2) < 0.99:\n warnings.warn(\n f\"The size of /dev/shm is too small ({system_memory} bytes). The required size \"\n f\"at least half of RAM ({virtual_memory // 2} bytes). Please, delete files in /dev/shm or \"\n \"increase size of /dev/shm with --shm-size in Docker. Also, you can set \"\n \"the required memory size for each Ray worker in bytes to MODIN_MEMORY environment variable.\"\n )\n finally:\n os.close(shm_fd)\n else:\n system_memory = virtual_memory\n object_store_memory = int(0.6 * system_memory // 1e9 * 1e9)\n # If the memory pool is smaller than 2GB, just use the default in ray.\n if object_store_memory == 0:\n object_store_memory = None\n else:\n object_store_memory = int(object_store_memory)\n\n ray_init_kwargs = {\n \"num_cpus\": CpuCount.get(),\n \"num_gpus\": GpuCount.get(),\n \"include_dashboard\": False,\n \"ignore_reinit_error\": True,\n \"object_store_memory\": object_store_memory,\n \"address\": redis_address,\n \"_redis_password\": redis_password,\n \"_memory\": object_store_memory,\n }\n ray.init(**ray_init_kwargs)\n\n _move_stdlib_ahead_of_site_packages()\n ray.worker.global_worker.run_function_on_all_workers(\n _move_stdlib_ahead_of_site_packages\n )\n if Backend.get() == \"Cudf\":\n from modin.engines.ray.cudf_on_ray.frame.gpu_manager import GPUManager\n from modin.engines.ray.cudf_on_ray.frame.partition_manager import (\n GPU_MANAGERS,\n )\n\n # Check that GPU_MANAGERS is empty because _update_engine can be called multiple times\n if not GPU_MANAGERS:\n for i in range(GpuCount.get()):\n GPU_MANAGERS.append(GPUManager.remote(i))\n ray.worker.global_worker.run_function_on_all_workers(_import_pandas)\n num_cpus = int(ray.cluster_resources()[\"CPU\"])\n num_gpus = int(ray.cluster_resources().get(\"GPU\", 0))\n if Backend.get() == \"Cudf\":\n NPartitions._put(num_gpus)\n else:\n NPartitions._put(num_cpus)\n", "path": "modin/engines/ray/utils.py"}]} | 2,353 | 466 |
gh_patches_debug_28968 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2710 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
testing 2394: moderation cannot add module
**URL:** https://meinberlin-dev.liqd.net/dashboard/projects/testing-dashbaord/basic/
**user:** moderator
**expected behaviour:** can create module
**behaviour:** cannot create module
**important screensize:**
**device & browser:**
**Comment/Question:** We should allow this. In the long run we should reconsider granting general project-changing rights to moderators. Now that we have groups, we might no longer need it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/dashboard/views.py`
Content:
```
1 from django.apps import apps
2 from django.contrib import messages
3 from django.contrib.messages.views import SuccessMessageMixin
4 from django.http import HttpResponseRedirect
5 from django.urls import resolve
6 from django.urls import reverse
7 from django.utils.translation import ugettext_lazy as _
8 from django.views import generic
9 from django.views.generic.detail import SingleObjectMixin
10
11 from adhocracy4.dashboard import mixins
12 from adhocracy4.dashboard import signals
13 from adhocracy4.dashboard import views as a4dashboard_views
14 from adhocracy4.dashboard.blueprints import get_blueprints
15 from adhocracy4.modules import models as module_models
16 from adhocracy4.phases import models as phase_models
17 from adhocracy4.projects import models as project_models
18 from adhocracy4.projects.mixins import ProjectMixin
19 from meinberlin.apps.dashboard.forms import DashboardProjectCreateForm
20
21
22 class ModuleBlueprintListView(ProjectMixin,
23 mixins.DashboardBaseMixin,
24 mixins.BlueprintMixin,
25 generic.DetailView):
26 template_name = 'meinberlin_dashboard/module_blueprint_list_dashboard.html'
27 permission_required = 'a4projects.add_project'
28 model = project_models.Project
29 slug_url_kwarg = 'project_slug'
30 menu_item = 'project'
31
32 @property
33 def blueprints(self):
34 return get_blueprints()
35
36 def get_permission_object(self):
37 return self.organisation
38
39
40 class ModuleCreateView(ProjectMixin,
41 mixins.DashboardBaseMixin,
42 mixins.BlueprintMixin,
43 SingleObjectMixin,
44 generic.View):
45 permission_required = 'a4projects.add_project'
46 model = project_models.Project
47 slug_url_kwarg = 'project_slug'
48
49 def post(self, request, *args, **kwargs):
50 project = self.get_object()
51 weight = 1
52 if project.modules:
53 weight = max(
54 project.modules.values_list('weight', flat=True)
55 ) + 1
56 module = module_models.Module(
57 name=self.blueprint.title,
58 weight=weight,
59 project=project,
60 is_draft=True,
61 )
62 module.save()
63 signals.module_created.send(sender=None,
64 module=module,
65 user=self.request.user)
66
67 self._create_module_settings(module)
68 self._create_phases(module, self.blueprint.content)
69
70 return HttpResponseRedirect(self.get_next(module))
71
72 def _create_module_settings(self, module):
73 if self.blueprint.settings_model:
74 settings_model = apps.get_model(*self.blueprint.settings_model)
75 module_settings = settings_model(module=module)
76 module_settings.save()
77
78 def _create_phases(self, module, blueprint_phases):
79 for index, phase_content in enumerate(blueprint_phases):
80 phase = phase_models.Phase(
81 type=phase_content.identifier,
82 name=phase_content.name,
83 description=phase_content.description,
84 weight=index,
85 module=module,
86 )
87 phase.save()
88
89 def get_next(self, module):
90 return reverse('a4dashboard:dashboard-module_basic-edit', kwargs={
91 'module_slug': module.slug
92 })
93
94 def get_permission_object(self):
95 return self.organisation
96
97
98 class ModulePublishView(SingleObjectMixin,
99 generic.View):
100 permission_required = 'a4projects.change_project'
101 model = module_models.Module
102 slug_url_kwarg = 'module_slug'
103
104 def get_permission_object(self):
105 return self.get_object().project
106
107 def post(self, request, *args, **kwargs):
108 action = request.POST.get('action', None)
109 if action == 'publish':
110 self.publish_module()
111 elif action == 'unpublish':
112 self.unpublish_module()
113 else:
114 messages.warning(self.request, _('Invalid action'))
115
116 return HttpResponseRedirect(self.get_next())
117
118 def get_next(self):
119 if 'referrer' in self.request.POST:
120 return self.request.POST['referrer']
121 elif 'HTTP_REFERER' in self.request.META:
122 return self.request.META['HTTP_REFERER']
123
124 return reverse('a4dashboard:project-edit', kwargs={
125 'project_slug': self.project.slug
126 })
127
128 def publish_module(self):
129 module = self.get_object()
130 if not module.is_draft:
131 messages.info(self.request, _('Module is already added'))
132 return
133
134 module.is_draft = False
135 module.save()
136
137 signals.module_published.send(sender=None,
138 module=module,
139 user=self.request.user)
140
141 messages.success(self.request,
142 _('Module successfully added.'))
143
144 def unpublish_module(self):
145 module = self.get_object()
146 if module.is_draft:
147 messages.info(self.request, _('Module is already removed'))
148 return
149
150 module.is_draft = True
151 module.save()
152
153 signals.module_unpublished.send(sender=None,
154 module=module,
155 user=self.request.user)
156
157 messages.success(self.request,
158 _('Module successfully removed.'))
159
160
161 class ModuleDeleteView(generic.DeleteView):
162 permission_required = 'a4projects.change_project'
163 model = module_models.Module
164 success_message = _('The module has been deleted')
165
166 def delete(self, request, *args, **kwargs):
167 messages.success(self.request, self.success_message)
168 return super().delete(request, *args, **kwargs)
169
170 def get_permission_object(self):
171 return self.get_object().project
172
173 def get_success_url(self):
174 referrer = self.request.POST.get('referrer', None) \
175 or self.request.META.get('HTTP_REFERER', None)
176 if referrer:
177 view, args, kwargs = resolve(referrer)
178 if 'module_slug' not in kwargs \
179 or not kwargs['module_slug'] == self.get_object().slug:
180 return referrer
181
182 return reverse('a4dashboard:project-edit', kwargs={
183 'project_slug': self.get_object().project.slug
184 })
185
186
187 class DashboardProjectListView(a4dashboard_views.ProjectListView):
188 def get_queryset(self):
189 return super().get_queryset().filter(
190 projectcontainer=None,
191 externalproject=None
192 )
193
194
195 class ProjectCreateView(mixins.DashboardBaseMixin,
196 SuccessMessageMixin,
197 generic.CreateView):
198 model = project_models.Project
199 slug_url_kwarg = 'project_slug'
200 form_class = DashboardProjectCreateForm
201 template_name = 'a4dashboard/project_create_form.html'
202 permission_required = 'a4projects.add_project'
203 menu_item = 'project'
204 success_message = _('Project successfully created.')
205
206 def get_permission_object(self):
207 return self.organisation
208
209 def get_form_kwargs(self):
210 kwargs = super().get_form_kwargs()
211 kwargs['organisation'] = self.organisation
212 kwargs['creator'] = self.request.user
213 return kwargs
214
215 def get_success_url(self):
216 return reverse('a4dashboard:project-edit',
217 kwargs={'project_slug': self.object.slug})
218
219 def form_valid(self, form):
220 response = super().form_valid(form)
221 signals.project_created.send(sender=None,
222 project=self.object,
223 user=self.request.user)
224
225 return response
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/dashboard/views.py b/meinberlin/apps/dashboard/views.py
--- a/meinberlin/apps/dashboard/views.py
+++ b/meinberlin/apps/dashboard/views.py
@@ -24,7 +24,7 @@
mixins.BlueprintMixin,
generic.DetailView):
template_name = 'meinberlin_dashboard/module_blueprint_list_dashboard.html'
- permission_required = 'a4projects.add_project'
+ permission_required = 'a4projects.change_project'
model = project_models.Project
slug_url_kwarg = 'project_slug'
menu_item = 'project'
@@ -34,7 +34,7 @@
return get_blueprints()
def get_permission_object(self):
- return self.organisation
+ return self.project
class ModuleCreateView(ProjectMixin,
@@ -42,7 +42,7 @@
mixins.BlueprintMixin,
SingleObjectMixin,
generic.View):
- permission_required = 'a4projects.add_project'
+ permission_required = 'a4projects.change_project'
model = project_models.Project
slug_url_kwarg = 'project_slug'
@@ -92,7 +92,7 @@
})
def get_permission_object(self):
- return self.organisation
+ return self.project
class ModulePublishView(SingleObjectMixin,
| {"golden_diff": "diff --git a/meinberlin/apps/dashboard/views.py b/meinberlin/apps/dashboard/views.py\n--- a/meinberlin/apps/dashboard/views.py\n+++ b/meinberlin/apps/dashboard/views.py\n@@ -24,7 +24,7 @@\n mixins.BlueprintMixin,\n generic.DetailView):\n template_name = 'meinberlin_dashboard/module_blueprint_list_dashboard.html'\n- permission_required = 'a4projects.add_project'\n+ permission_required = 'a4projects.change_project'\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n menu_item = 'project'\n@@ -34,7 +34,7 @@\n return get_blueprints()\n \n def get_permission_object(self):\n- return self.organisation\n+ return self.project\n \n \n class ModuleCreateView(ProjectMixin,\n@@ -42,7 +42,7 @@\n mixins.BlueprintMixin,\n SingleObjectMixin,\n generic.View):\n- permission_required = 'a4projects.add_project'\n+ permission_required = 'a4projects.change_project'\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n \n@@ -92,7 +92,7 @@\n })\n \n def get_permission_object(self):\n- return self.organisation\n+ return self.project\n \n \n class ModulePublishView(SingleObjectMixin,\n", "issue": "testing 2394: moderation cannot add module\n**URL:** https://meinberlin-dev.liqd.net/dashboard/projects/testing-dashbaord/basic/\r\n**user:** moderator\r\n**expected behaviour:** can create module\r\n**behaviour:** cannot create module\r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** We should allow this. In the long run we should reconsider general project changing rights to moderators. Now that we have groups we might no longer need it. \r\n\r\n\n", "before_files": [{"content": "from django.apps import apps\nfrom django.contrib import messages\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.http import HttpResponseRedirect\nfrom django.urls import resolve\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom django.views.generic.detail import SingleObjectMixin\n\nfrom adhocracy4.dashboard import mixins\nfrom adhocracy4.dashboard import signals\nfrom adhocracy4.dashboard import views as a4dashboard_views\nfrom adhocracy4.dashboard.blueprints import get_blueprints\nfrom adhocracy4.modules import models as module_models\nfrom adhocracy4.phases import models as phase_models\nfrom adhocracy4.projects import models as project_models\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom meinberlin.apps.dashboard.forms import DashboardProjectCreateForm\n\n\nclass ModuleBlueprintListView(ProjectMixin,\n mixins.DashboardBaseMixin,\n mixins.BlueprintMixin,\n generic.DetailView):\n template_name = 'meinberlin_dashboard/module_blueprint_list_dashboard.html'\n permission_required = 'a4projects.add_project'\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n menu_item = 'project'\n\n @property\n def blueprints(self):\n return get_blueprints()\n\n def get_permission_object(self):\n return self.organisation\n\n\nclass ModuleCreateView(ProjectMixin,\n mixins.DashboardBaseMixin,\n mixins.BlueprintMixin,\n SingleObjectMixin,\n generic.View):\n permission_required = 'a4projects.add_project'\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n\n def post(self, request, *args, **kwargs):\n project = self.get_object()\n weight = 1\n if project.modules:\n weight = max(\n project.modules.values_list('weight', flat=True)\n ) + 1\n module = module_models.Module(\n name=self.blueprint.title,\n weight=weight,\n project=project,\n 
is_draft=True,\n )\n module.save()\n signals.module_created.send(sender=None,\n module=module,\n user=self.request.user)\n\n self._create_module_settings(module)\n self._create_phases(module, self.blueprint.content)\n\n return HttpResponseRedirect(self.get_next(module))\n\n def _create_module_settings(self, module):\n if self.blueprint.settings_model:\n settings_model = apps.get_model(*self.blueprint.settings_model)\n module_settings = settings_model(module=module)\n module_settings.save()\n\n def _create_phases(self, module, blueprint_phases):\n for index, phase_content in enumerate(blueprint_phases):\n phase = phase_models.Phase(\n type=phase_content.identifier,\n name=phase_content.name,\n description=phase_content.description,\n weight=index,\n module=module,\n )\n phase.save()\n\n def get_next(self, module):\n return reverse('a4dashboard:dashboard-module_basic-edit', kwargs={\n 'module_slug': module.slug\n })\n\n def get_permission_object(self):\n return self.organisation\n\n\nclass ModulePublishView(SingleObjectMixin,\n generic.View):\n permission_required = 'a4projects.change_project'\n model = module_models.Module\n slug_url_kwarg = 'module_slug'\n\n def get_permission_object(self):\n return self.get_object().project\n\n def post(self, request, *args, **kwargs):\n action = request.POST.get('action', None)\n if action == 'publish':\n self.publish_module()\n elif action == 'unpublish':\n self.unpublish_module()\n else:\n messages.warning(self.request, _('Invalid action'))\n\n return HttpResponseRedirect(self.get_next())\n\n def get_next(self):\n if 'referrer' in self.request.POST:\n return self.request.POST['referrer']\n elif 'HTTP_REFERER' in self.request.META:\n return self.request.META['HTTP_REFERER']\n\n return reverse('a4dashboard:project-edit', kwargs={\n 'project_slug': self.project.slug\n })\n\n def publish_module(self):\n module = self.get_object()\n if not module.is_draft:\n messages.info(self.request, _('Module is already added'))\n return\n\n module.is_draft = False\n module.save()\n\n signals.module_published.send(sender=None,\n module=module,\n user=self.request.user)\n\n messages.success(self.request,\n _('Module successfully added.'))\n\n def unpublish_module(self):\n module = self.get_object()\n if module.is_draft:\n messages.info(self.request, _('Module is already removed'))\n return\n\n module.is_draft = True\n module.save()\n\n signals.module_unpublished.send(sender=None,\n module=module,\n user=self.request.user)\n\n messages.success(self.request,\n _('Module successfully removed.'))\n\n\nclass ModuleDeleteView(generic.DeleteView):\n permission_required = 'a4projects.change_project'\n model = module_models.Module\n success_message = _('The module has been deleted')\n\n def delete(self, request, *args, **kwargs):\n messages.success(self.request, self.success_message)\n return super().delete(request, *args, **kwargs)\n\n def get_permission_object(self):\n return self.get_object().project\n\n def get_success_url(self):\n referrer = self.request.POST.get('referrer', None) \\\n or self.request.META.get('HTTP_REFERER', None)\n if referrer:\n view, args, kwargs = resolve(referrer)\n if 'module_slug' not in kwargs \\\n or not kwargs['module_slug'] == self.get_object().slug:\n return referrer\n\n return reverse('a4dashboard:project-edit', kwargs={\n 'project_slug': self.get_object().project.slug\n })\n\n\nclass DashboardProjectListView(a4dashboard_views.ProjectListView):\n def get_queryset(self):\n return super().get_queryset().filter(\n projectcontainer=None,\n 
externalproject=None\n )\n\n\nclass ProjectCreateView(mixins.DashboardBaseMixin,\n SuccessMessageMixin,\n generic.CreateView):\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n form_class = DashboardProjectCreateForm\n template_name = 'a4dashboard/project_create_form.html'\n permission_required = 'a4projects.add_project'\n menu_item = 'project'\n success_message = _('Project successfully created.')\n\n def get_permission_object(self):\n return self.organisation\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['organisation'] = self.organisation\n kwargs['creator'] = self.request.user\n return kwargs\n\n def get_success_url(self):\n return reverse('a4dashboard:project-edit',\n kwargs={'project_slug': self.object.slug})\n\n def form_valid(self, form):\n response = super().form_valid(form)\n signals.project_created.send(sender=None,\n project=self.object,\n user=self.request.user)\n\n return response\n", "path": "meinberlin/apps/dashboard/views.py"}], "after_files": [{"content": "from django.apps import apps\nfrom django.contrib import messages\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.http import HttpResponseRedirect\nfrom django.urls import resolve\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom django.views.generic.detail import SingleObjectMixin\n\nfrom adhocracy4.dashboard import mixins\nfrom adhocracy4.dashboard import signals\nfrom adhocracy4.dashboard import views as a4dashboard_views\nfrom adhocracy4.dashboard.blueprints import get_blueprints\nfrom adhocracy4.modules import models as module_models\nfrom adhocracy4.phases import models as phase_models\nfrom adhocracy4.projects import models as project_models\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom meinberlin.apps.dashboard.forms import DashboardProjectCreateForm\n\n\nclass ModuleBlueprintListView(ProjectMixin,\n mixins.DashboardBaseMixin,\n mixins.BlueprintMixin,\n generic.DetailView):\n template_name = 'meinberlin_dashboard/module_blueprint_list_dashboard.html'\n permission_required = 'a4projects.change_project'\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n menu_item = 'project'\n\n @property\n def blueprints(self):\n return get_blueprints()\n\n def get_permission_object(self):\n return self.project\n\n\nclass ModuleCreateView(ProjectMixin,\n mixins.DashboardBaseMixin,\n mixins.BlueprintMixin,\n SingleObjectMixin,\n generic.View):\n permission_required = 'a4projects.change_project'\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n\n def post(self, request, *args, **kwargs):\n project = self.get_object()\n weight = 1\n if project.modules:\n weight = max(\n project.modules.values_list('weight', flat=True)\n ) + 1\n module = module_models.Module(\n name=self.blueprint.title,\n weight=weight,\n project=project,\n is_draft=True,\n )\n module.save()\n signals.module_created.send(sender=None,\n module=module,\n user=self.request.user)\n\n self._create_module_settings(module)\n self._create_phases(module, self.blueprint.content)\n\n return HttpResponseRedirect(self.get_next(module))\n\n def _create_module_settings(self, module):\n if self.blueprint.settings_model:\n settings_model = apps.get_model(*self.blueprint.settings_model)\n module_settings = settings_model(module=module)\n module_settings.save()\n\n def _create_phases(self, module, blueprint_phases):\n for index, phase_content in enumerate(blueprint_phases):\n 
phase = phase_models.Phase(\n type=phase_content.identifier,\n name=phase_content.name,\n description=phase_content.description,\n weight=index,\n module=module,\n )\n phase.save()\n\n def get_next(self, module):\n return reverse('a4dashboard:dashboard-module_basic-edit', kwargs={\n 'module_slug': module.slug\n })\n\n def get_permission_object(self):\n return self.project\n\n\nclass ModulePublishView(SingleObjectMixin,\n generic.View):\n permission_required = 'a4projects.change_project'\n model = module_models.Module\n slug_url_kwarg = 'module_slug'\n\n def get_permission_object(self):\n return self.get_object().project\n\n def post(self, request, *args, **kwargs):\n action = request.POST.get('action', None)\n if action == 'publish':\n self.publish_module()\n elif action == 'unpublish':\n self.unpublish_module()\n else:\n messages.warning(self.request, _('Invalid action'))\n\n return HttpResponseRedirect(self.get_next())\n\n def get_next(self):\n if 'referrer' in self.request.POST:\n return self.request.POST['referrer']\n elif 'HTTP_REFERER' in self.request.META:\n return self.request.META['HTTP_REFERER']\n\n return reverse('a4dashboard:project-edit', kwargs={\n 'project_slug': self.project.slug\n })\n\n def publish_module(self):\n module = self.get_object()\n if not module.is_draft:\n messages.info(self.request, _('Module is already added'))\n return\n\n module.is_draft = False\n module.save()\n\n signals.module_published.send(sender=None,\n module=module,\n user=self.request.user)\n\n messages.success(self.request,\n _('Module successfully added.'))\n\n def unpublish_module(self):\n module = self.get_object()\n if module.is_draft:\n messages.info(self.request, _('Module is already removed'))\n return\n\n module.is_draft = True\n module.save()\n\n signals.module_unpublished.send(sender=None,\n module=module,\n user=self.request.user)\n\n messages.success(self.request,\n _('Module successfully removed.'))\n\n\nclass ModuleDeleteView(generic.DeleteView):\n permission_required = 'a4projects.change_project'\n model = module_models.Module\n success_message = _('The module has been deleted')\n\n def delete(self, request, *args, **kwargs):\n messages.success(self.request, self.success_message)\n return super().delete(request, *args, **kwargs)\n\n def get_permission_object(self):\n return self.get_object().project\n\n def get_success_url(self):\n referrer = self.request.POST.get('referrer', None) \\\n or self.request.META.get('HTTP_REFERER', None)\n if referrer:\n view, args, kwargs = resolve(referrer)\n if 'module_slug' not in kwargs \\\n or not kwargs['module_slug'] == self.get_object().slug:\n return referrer\n\n return reverse('a4dashboard:project-edit', kwargs={\n 'project_slug': self.get_object().project.slug\n })\n\n\nclass DashboardProjectListView(a4dashboard_views.ProjectListView):\n def get_queryset(self):\n return super().get_queryset().filter(\n projectcontainer=None,\n externalproject=None\n )\n\n\nclass ProjectCreateView(mixins.DashboardBaseMixin,\n SuccessMessageMixin,\n generic.CreateView):\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n form_class = DashboardProjectCreateForm\n template_name = 'a4dashboard/project_create_form.html'\n permission_required = 'a4projects.add_project'\n menu_item = 'project'\n success_message = _('Project successfully created.')\n\n def get_permission_object(self):\n return self.organisation\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['organisation'] = self.organisation\n kwargs['creator'] = 
self.request.user\n return kwargs\n\n def get_success_url(self):\n return reverse('a4dashboard:project-edit',\n kwargs={'project_slug': self.object.slug})\n\n def form_valid(self, form):\n response = super().form_valid(form)\n signals.project_created.send(sender=None,\n project=self.object,\n user=self.request.user)\n\n return response\n", "path": "meinberlin/apps/dashboard/views.py"}]} | 2,414 | 292 |
gh_patches_debug_48394 | rasdani/github-patches | git_diff | DDMAL__CantusDB-274 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Assign a specific user to multiple sources in Django admin
In the user-edit page in the Django admin interface, we already have a selector that allows for multi-selecting sources and assigning the user to them. We need to make the selector box wider so that the source titles are not clipped.
This issue is related to issue #216; the relationship between the User model and the Source model should go both ways. 
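
For illustration, a minimal sketch of one way to widen that selector; the form, field, and import names here are assumptions rather than CantusDB's actual admin code:

```python
# Hypothetical sketch: declare the reverse many-to-many explicitly on the user
# admin form and give its widget an explicit width. The real CantusDB admin
# form and field names may differ.
from django import forms
from django.contrib.auth import get_user_model

from main_app.models import Source  # assumed import path


class UserAdminForm(forms.ModelForm):
    sources_user_can_edit = forms.ModelMultipleChoiceField(
        queryset=Source.objects.all(),
        required=False,
        widget=forms.SelectMultiple(attrs={"style": "width: 600px;"}),
    )

    class Meta:
        model = get_user_model()
        fields = "__all__"
```

Hooking this form into the registered user admin (and saving the reverse relation) would still follow whatever conventions the existing selector already uses.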
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/models/source.py`
Content:
```
1 from django.db import models
2 from main_app.models import BaseModel, Segment
3 from django.contrib.auth import get_user_model
4
5
6 class Source(BaseModel):
7 cursus_choices = [("Monastic", "Monastic"), ("Secular", "Secular")]
8 source_status_choices = [
9 (
10 "Editing process (not all the fields have been proofread)",
11 "Editing process (not all the fields have been proofread)",
12 ),
13 ("Published / Complete", "Published / Complete"),
14 ("Published / Proofread pending", "Published / Proofread pending"),
15 ("Unpublished / Editing process", "Unpublished / Editing process"),
16 ("Unpublished / Indexing process", "Unpublished / Indexing process"),
17 ("Unpublished / Proofread pending", "Unpublished / Proofread pending"),
18 ("Unpublished / Proofreading process", "Unpublished / Proofreading process"),
19 ]
20
21 # sources with public=False cannot be accessed by its url (access denied) and do not appear in source list
22 public = models.BooleanField(blank=True, null=True)
23 # sources with visible=False can be accessed by typing in the url, but do not appear in source list
24 visible = models.BooleanField(blank=True, null=True)
25 title = models.CharField(
26 max_length=255,
27 help_text="Full Manuscript Identification (City, Archive, Shelf-mark)",
28 )
29 # the siglum field as implemented on the old Cantus is composed of both the RISM siglum and the shelfmark
30 # it is a human-readable ID for a source
31 siglum = models.CharField(
32 max_length=63,
33 null=True,
34 blank=True,
35 help_text="RISM-style siglum + Shelf-mark (e.g. GB-Ob 202).",
36 )
37 # the RISM siglum uniquely identifies a library or holding institution
38 rism_siglum = models.ForeignKey(
39 "RismSiglum", on_delete=models.PROTECT, null=True, blank=True,
40 )
41 provenance = models.ForeignKey(
42 "Provenance",
43 on_delete=models.PROTECT,
44 help_text="If the origin is unknown, select a location where the source was "
45 "used later in its lifetime and provide details in the "
46 '"Provenance notes" field.',
47 null=True,
48 blank=True,
49 )
50 provenance_notes = models.TextField(
51 blank=True,
52 null=True,
53 help_text="More exact indication of the provenance (if necessary)",
54 )
55 full_source = models.BooleanField(blank=True, null=True)
56 date = models.CharField(
57 blank=True,
58 null=True,
59 max_length=63,
60 help_text='Date of the manuscript (e.g. "1200s", "1300-1350", etc.)',
61 )
62 century = models.ManyToManyField("Century", related_name="sources")
63 notation = models.ManyToManyField("Notation", related_name="sources")
64 cursus = models.CharField(
65 blank=True, null=True, choices=cursus_choices, max_length=63
66 )
67 # TODO: Fill this field up with JSON info when I have access to the Users
68 current_editors = models.ManyToManyField(get_user_model(), related_name="sources_user_can_edit")
69 inventoried_by = models.ManyToManyField(
70 "Indexer", related_name="sources_inventoried"
71 )
72 full_text_entered_by = models.ManyToManyField(
73 "Indexer", related_name="entered_full_text_for_sources"
74 )
75 melodies_entered_by = models.ManyToManyField(
76 "Indexer", related_name="entered_melody_for_sources"
77 )
78 proofreaders = models.ManyToManyField("Indexer", related_name="proofread_sources")
79 other_editors = models.ManyToManyField("Indexer", related_name="edited_sources")
80 segment = models.ForeignKey(
81 "Segment", on_delete=models.PROTECT, blank=True, null=True
82 )
83 source_status = models.CharField(blank=True, null=True, max_length=255)
84 complete_inventory = models.BooleanField(blank=True, null=True)
85 summary = models.TextField(blank=True, null=True)
86 liturgical_occasions = models.TextField(blank=True, null=True)
87 description = models.TextField(blank=True, null=True)
88 selected_bibliography = models.TextField(blank=True, null=True)
89 image_link = models.URLField(
90 blank=True,
91 null=True,
92 help_text='HTTP link to the image gallery of the source.',
93 )
94 indexing_notes = models.TextField(blank=True, null=True)
95 indexing_date = models.TextField(blank=True, null=True)
96 json_info = models.JSONField(blank=True, null=True)
97 fragmentarium_id = models.CharField(max_length=15, blank=True, null=True)
98 dact_id = models.CharField(max_length=15, blank=True, null=True)
99
100 def number_of_chants(self) -> int:
101 """Returns the number of Chants and Sequences in this Source."""
102 return self.chant_set.count() + self.sequence_set.count()
103
104 def number_of_melodies(self) -> int:
105 """Returns the number of Chants in this Source that have melodies."""
106 return self.chant_set.filter(volpiano__isnull=False).count()
107
108 def __str__(self):
109 string = '{t} ({i})'.format(t=self.title, i=self.id)
110 return string
111
112 def save(self, *args, **kwargs):
113 # when creating a source, assign it to "Cantus Database" by default
114 cantus_db_segment = Segment.objects.get(name="CANTUS Database")
115 self.segment = cantus_db_segment
116 super().save(*args, **kwargs)
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django/cantusdb_project/main_app/models/source.py b/django/cantusdb_project/main_app/models/source.py
--- a/django/cantusdb_project/main_app/models/source.py
+++ b/django/cantusdb_project/main_app/models/source.py
@@ -106,7 +106,7 @@
return self.chant_set.filter(volpiano__isnull=False).count()
def __str__(self):
- string = '{t} ({i})'.format(t=self.title, i=self.id)
+ string = '[{s}] {t} ({i})'.format(s=self.rism_siglum, t=self.title, i=self.id)
return string
def save(self, *args, **kwargs):
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/models/source.py b/django/cantusdb_project/main_app/models/source.py\n--- a/django/cantusdb_project/main_app/models/source.py\n+++ b/django/cantusdb_project/main_app/models/source.py\n@@ -106,7 +106,7 @@\n return self.chant_set.filter(volpiano__isnull=False).count()\n \n def __str__(self):\n- string = '{t} ({i})'.format(t=self.title, i=self.id)\n+ string = '[{s}] {t} ({i})'.format(s=self.rism_siglum, t=self.title, i=self.id)\n return string\n \n def save(self, *args, **kwargs):\n", "issue": "Assign a specific user to multiple sources in Django admin\nIn the user-edit page in the Django admin interface, we already have a selector that allows for multi-selecting sources and assigning the user to them. We need to make the selector box wider so that the source titles are not clipped. \r\n\r\nThis issue is related to issue #216 , the relationship between the User model and Source model should go both ways. \n", "before_files": [{"content": "from django.db import models\nfrom main_app.models import BaseModel, Segment\nfrom django.contrib.auth import get_user_model\n\n\nclass Source(BaseModel):\n cursus_choices = [(\"Monastic\", \"Monastic\"), (\"Secular\", \"Secular\")]\n source_status_choices = [\n (\n \"Editing process (not all the fields have been proofread)\",\n \"Editing process (not all the fields have been proofread)\",\n ),\n (\"Published / Complete\", \"Published / Complete\"),\n (\"Published / Proofread pending\", \"Published / Proofread pending\"),\n (\"Unpublished / Editing process\", \"Unpublished / Editing process\"),\n (\"Unpublished / Indexing process\", \"Unpublished / Indexing process\"),\n (\"Unpublished / Proofread pending\", \"Unpublished / Proofread pending\"),\n (\"Unpublished / Proofreading process\", \"Unpublished / Proofreading process\"),\n ]\n\n # sources with public=False cannot be accessed by its url (access denied) and do not appear in source list\n public = models.BooleanField(blank=True, null=True)\n # sources with visible=False can be accessed by typing in the url, but do not appear in source list\n visible = models.BooleanField(blank=True, null=True)\n title = models.CharField(\n max_length=255,\n help_text=\"Full Manuscript Identification (City, Archive, Shelf-mark)\",\n )\n # the siglum field as implemented on the old Cantus is composed of both the RISM siglum and the shelfmark\n # it is a human-readable ID for a source\n siglum = models.CharField(\n max_length=63, \n null=True, \n blank=True,\n help_text=\"RISM-style siglum + Shelf-mark (e.g. GB-Ob 202).\",\n )\n # the RISM siglum uniquely identifies a library or holding institution\n rism_siglum = models.ForeignKey(\n \"RismSiglum\", on_delete=models.PROTECT, null=True, blank=True,\n )\n provenance = models.ForeignKey(\n \"Provenance\",\n on_delete=models.PROTECT,\n help_text=\"If the origin is unknown, select a location where the source was \"\n \"used later in its lifetime and provide details in the \"\n '\"Provenance notes\" field.',\n null=True,\n blank=True,\n )\n provenance_notes = models.TextField(\n blank=True,\n null=True,\n help_text=\"More exact indication of the provenance (if necessary)\",\n )\n full_source = models.BooleanField(blank=True, null=True)\n date = models.CharField(\n blank=True,\n null=True,\n max_length=63,\n help_text='Date of the manuscript (e.g. 
\"1200s\", \"1300-1350\", etc.)',\n )\n century = models.ManyToManyField(\"Century\", related_name=\"sources\")\n notation = models.ManyToManyField(\"Notation\", related_name=\"sources\")\n cursus = models.CharField(\n blank=True, null=True, choices=cursus_choices, max_length=63\n )\n # TODO: Fill this field up with JSON info when I have access to the Users\n current_editors = models.ManyToManyField(get_user_model(), related_name=\"sources_user_can_edit\")\n inventoried_by = models.ManyToManyField(\n \"Indexer\", related_name=\"sources_inventoried\"\n )\n full_text_entered_by = models.ManyToManyField(\n \"Indexer\", related_name=\"entered_full_text_for_sources\"\n )\n melodies_entered_by = models.ManyToManyField(\n \"Indexer\", related_name=\"entered_melody_for_sources\"\n )\n proofreaders = models.ManyToManyField(\"Indexer\", related_name=\"proofread_sources\")\n other_editors = models.ManyToManyField(\"Indexer\", related_name=\"edited_sources\")\n segment = models.ForeignKey(\n \"Segment\", on_delete=models.PROTECT, blank=True, null=True\n )\n source_status = models.CharField(blank=True, null=True, max_length=255)\n complete_inventory = models.BooleanField(blank=True, null=True)\n summary = models.TextField(blank=True, null=True)\n liturgical_occasions = models.TextField(blank=True, null=True)\n description = models.TextField(blank=True, null=True)\n selected_bibliography = models.TextField(blank=True, null=True)\n image_link = models.URLField(\n blank=True, \n null=True,\n help_text='HTTP link to the image gallery of the source.',\n )\n indexing_notes = models.TextField(blank=True, null=True)\n indexing_date = models.TextField(blank=True, null=True)\n json_info = models.JSONField(blank=True, null=True)\n fragmentarium_id = models.CharField(max_length=15, blank=True, null=True)\n dact_id = models.CharField(max_length=15, blank=True, null=True)\n\n def number_of_chants(self) -> int:\n \"\"\"Returns the number of Chants and Sequences in this Source.\"\"\"\n return self.chant_set.count() + self.sequence_set.count()\n\n def number_of_melodies(self) -> int:\n \"\"\"Returns the number of Chants in this Source that have melodies.\"\"\"\n return self.chant_set.filter(volpiano__isnull=False).count()\n\n def __str__(self):\n string = '{t} ({i})'.format(t=self.title, i=self.id)\n return string\n\n def save(self, *args, **kwargs):\n # when creating a source, assign it to \"Cantus Database\" by default\n cantus_db_segment = Segment.objects.get(name=\"CANTUS Database\")\n self.segment = cantus_db_segment\n super().save(*args, **kwargs)\n", "path": "django/cantusdb_project/main_app/models/source.py"}], "after_files": [{"content": "from django.db import models\nfrom main_app.models import BaseModel, Segment\nfrom django.contrib.auth import get_user_model\n\n\nclass Source(BaseModel):\n cursus_choices = [(\"Monastic\", \"Monastic\"), (\"Secular\", \"Secular\")]\n source_status_choices = [\n (\n \"Editing process (not all the fields have been proofread)\",\n \"Editing process (not all the fields have been proofread)\",\n ),\n (\"Published / Complete\", \"Published / Complete\"),\n (\"Published / Proofread pending\", \"Published / Proofread pending\"),\n (\"Unpublished / Editing process\", \"Unpublished / Editing process\"),\n (\"Unpublished / Indexing process\", \"Unpublished / Indexing process\"),\n (\"Unpublished / Proofread pending\", \"Unpublished / Proofread pending\"),\n (\"Unpublished / Proofreading process\", \"Unpublished / Proofreading process\"),\n ]\n\n # sources with public=False cannot be 
accessed by its url (access denied) and do not appear in source list\n public = models.BooleanField(blank=True, null=True)\n # sources with visible=False can be accessed by typing in the url, but do not appear in source list\n visible = models.BooleanField(blank=True, null=True)\n title = models.CharField(\n max_length=255,\n help_text=\"Full Manuscript Identification (City, Archive, Shelf-mark)\",\n )\n # the siglum field as implemented on the old Cantus is composed of both the RISM siglum and the shelfmark\n # it is a human-readable ID for a source\n siglum = models.CharField(\n max_length=63, \n null=True, \n blank=True,\n help_text=\"RISM-style siglum + Shelf-mark (e.g. GB-Ob 202).\",\n )\n # the RISM siglum uniquely identifies a library or holding institution\n rism_siglum = models.ForeignKey(\n \"RismSiglum\", on_delete=models.PROTECT, null=True, blank=True,\n )\n provenance = models.ForeignKey(\n \"Provenance\",\n on_delete=models.PROTECT,\n help_text=\"If the origin is unknown, select a location where the source was \"\n \"used later in its lifetime and provide details in the \"\n '\"Provenance notes\" field.',\n null=True,\n blank=True,\n )\n provenance_notes = models.TextField(\n blank=True,\n null=True,\n help_text=\"More exact indication of the provenance (if necessary)\",\n )\n full_source = models.BooleanField(blank=True, null=True)\n date = models.CharField(\n blank=True,\n null=True,\n max_length=63,\n help_text='Date of the manuscript (e.g. \"1200s\", \"1300-1350\", etc.)',\n )\n century = models.ManyToManyField(\"Century\", related_name=\"sources\")\n notation = models.ManyToManyField(\"Notation\", related_name=\"sources\")\n cursus = models.CharField(\n blank=True, null=True, choices=cursus_choices, max_length=63\n )\n # TODO: Fill this field up with JSON info when I have access to the Users\n current_editors = models.ManyToManyField(get_user_model(), related_name=\"sources_user_can_edit\")\n inventoried_by = models.ManyToManyField(\n \"Indexer\", related_name=\"sources_inventoried\"\n )\n full_text_entered_by = models.ManyToManyField(\n \"Indexer\", related_name=\"entered_full_text_for_sources\"\n )\n melodies_entered_by = models.ManyToManyField(\n \"Indexer\", related_name=\"entered_melody_for_sources\"\n )\n proofreaders = models.ManyToManyField(\"Indexer\", related_name=\"proofread_sources\")\n other_editors = models.ManyToManyField(\"Indexer\", related_name=\"edited_sources\")\n segment = models.ForeignKey(\n \"Segment\", on_delete=models.PROTECT, blank=True, null=True\n )\n source_status = models.CharField(blank=True, null=True, max_length=255)\n complete_inventory = models.BooleanField(blank=True, null=True)\n summary = models.TextField(blank=True, null=True)\n liturgical_occasions = models.TextField(blank=True, null=True)\n description = models.TextField(blank=True, null=True)\n selected_bibliography = models.TextField(blank=True, null=True)\n image_link = models.URLField(\n blank=True, \n null=True,\n help_text='HTTP link to the image gallery of the source.',\n )\n indexing_notes = models.TextField(blank=True, null=True)\n indexing_date = models.TextField(blank=True, null=True)\n json_info = models.JSONField(blank=True, null=True)\n fragmentarium_id = models.CharField(max_length=15, blank=True, null=True)\n dact_id = models.CharField(max_length=15, blank=True, null=True)\n\n def number_of_chants(self) -> int:\n \"\"\"Returns the number of Chants and Sequences in this Source.\"\"\"\n return self.chant_set.count() + self.sequence_set.count()\n\n def 
number_of_melodies(self) -> int:\n \"\"\"Returns the number of Chants in this Source that have melodies.\"\"\"\n return self.chant_set.filter(volpiano__isnull=False).count()\n\n def __str__(self):\n string = '[{s}] {t} ({i})'.format(s=self.rism_siglum, t=self.title, i=self.id)\n return string\n\n def save(self, *args, **kwargs):\n # when creating a source, assign it to \"Cantus Database\" by default\n cantus_db_segment = Segment.objects.get(name=\"CANTUS Database\")\n self.segment = cantus_db_segment\n super().save(*args, **kwargs)\n", "path": "django/cantusdb_project/main_app/models/source.py"}]} | 1,792 | 166 |
gh_patches_debug_6027 | rasdani/github-patches | git_diff | twisted__twisted-12103 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Avoid encode/decode in chat.py for better readability
As discussed in [this comment](https://github.com/twisted/twisted/pull/12070#discussion_r1442784443), it's better to use byte concatenation, as suggested there, for better readability in [docs/core/howto/listings/servers/chat.py:35](https://github.com/twisted/twisted/pull/12070/files/c59c93ec644a17e0f3a1752ca9ceca31a27a9f5e#diff-0923ff3db530a2e5d28ea8cc2b3a8f91f399792786772c541bf9edf7a0c50126):
```python
message = b'<' + self.name + b'> ' + message
```
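
Since `LineReceiver.lineReceived` hands the handler raw `bytes`, both `self.name` and `message` are already byte strings here, so plain concatenation needs no intermediate decode/encode. A minimal sketch of the resulting handler, mirroring the chat.py listing in this example:

```python
def handle_CHAT(self, message):
    # name and message arrive as bytes; bytes concatenation keeps them that way.
    message = b"<" + self.name + b"> " + message
    for name, protocol in self.users.items():
        if protocol != self:
            protocol.sendLine(message)
```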
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/core/howto/listings/servers/chat.py`
Content:
```
1 from twisted.internet import reactor
2 from twisted.internet.protocol import Factory
3 from twisted.protocols.basic import LineReceiver
4
5
6 class Chat(LineReceiver):
7 def __init__(self, users):
8 self.users = users
9 self.name = None
10 self.state = "GETNAME"
11
12 def connectionMade(self):
13 self.sendLine(b"What's your name?")
14
15 def connectionLost(self, reason):
16 if self.name in self.users:
17 del self.users[self.name]
18
19 def lineReceived(self, line):
20 if self.state == "GETNAME":
21 self.handle_GETNAME(line)
22 else:
23 self.handle_CHAT(line)
24
25 def handle_GETNAME(self, name):
26 if name in self.users:
27 self.sendLine(b"Name taken, please choose another.")
28 return
29 self.sendLine(f"Welcome, {name.decode('utf-8')}!".encode("utf-8"))
30 self.name = name
31 self.users[name] = self
32 self.state = "CHAT"
33
34 def handle_CHAT(self, message):
35 message = f"<{self.name.decode('utf-8')}> {message.decode('utf-8')}".encode(
36 "utf-8"
37 )
38 for name, protocol in self.users.items():
39 if protocol != self:
40 protocol.sendLine(message)
41
42
43 class ChatFactory(Factory):
44 def __init__(self):
45 self.users = {} # maps user names to Chat instances
46
47 def buildProtocol(self, addr):
48 return Chat(self.users)
49
50
51 reactor.listenTCP(8123, ChatFactory())
52 reactor.run()
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/core/howto/listings/servers/chat.py b/docs/core/howto/listings/servers/chat.py
--- a/docs/core/howto/listings/servers/chat.py
+++ b/docs/core/howto/listings/servers/chat.py
@@ -32,9 +32,7 @@
self.state = "CHAT"
def handle_CHAT(self, message):
- message = f"<{self.name.decode('utf-8')}> {message.decode('utf-8')}".encode(
- "utf-8"
- )
+ message = b"<" + self.name + b"> " + message
for name, protocol in self.users.items():
if protocol != self:
protocol.sendLine(message)
| {"golden_diff": "diff --git a/docs/core/howto/listings/servers/chat.py b/docs/core/howto/listings/servers/chat.py\n--- a/docs/core/howto/listings/servers/chat.py\n+++ b/docs/core/howto/listings/servers/chat.py\n@@ -32,9 +32,7 @@\n self.state = \"CHAT\"\n \n def handle_CHAT(self, message):\n- message = f\"<{self.name.decode('utf-8')}> {message.decode('utf-8')}\".encode(\n- \"utf-8\"\n- )\n+ message = b\"<\" + self.name + b\"> \" + message\n for name, protocol in self.users.items():\n if protocol != self:\n protocol.sendLine(message)\n", "issue": "Avoid encode/decode in chat.py for better readablity\nAs discussed in [this comment](https://github.com/twisted/twisted/pull/12070#discussion_r1442784443), it's better to use byte concat as mentioned in order of better readability in [docs/core/howto/listings/servers/chat.py:35](https://github.com/twisted/twisted/pull/12070/files/c59c93ec644a17e0f3a1752ca9ceca31a27a9f5e#diff-0923ff3db530a2e5d28ea8cc2b3a8f91f399792786772c541bf9edf7a0c50126)\r\n```python\r\nmessage = b'<' + self.name + b'> ' + message\r\n```\n", "before_files": [{"content": "from twisted.internet import reactor\nfrom twisted.internet.protocol import Factory\nfrom twisted.protocols.basic import LineReceiver\n\n\nclass Chat(LineReceiver):\n def __init__(self, users):\n self.users = users\n self.name = None\n self.state = \"GETNAME\"\n\n def connectionMade(self):\n self.sendLine(b\"What's your name?\")\n\n def connectionLost(self, reason):\n if self.name in self.users:\n del self.users[self.name]\n\n def lineReceived(self, line):\n if self.state == \"GETNAME\":\n self.handle_GETNAME(line)\n else:\n self.handle_CHAT(line)\n\n def handle_GETNAME(self, name):\n if name in self.users:\n self.sendLine(b\"Name taken, please choose another.\")\n return\n self.sendLine(f\"Welcome, {name.decode('utf-8')}!\".encode(\"utf-8\"))\n self.name = name\n self.users[name] = self\n self.state = \"CHAT\"\n\n def handle_CHAT(self, message):\n message = f\"<{self.name.decode('utf-8')}> {message.decode('utf-8')}\".encode(\n \"utf-8\"\n )\n for name, protocol in self.users.items():\n if protocol != self:\n protocol.sendLine(message)\n\n\nclass ChatFactory(Factory):\n def __init__(self):\n self.users = {} # maps user names to Chat instances\n\n def buildProtocol(self, addr):\n return Chat(self.users)\n\n\nreactor.listenTCP(8123, ChatFactory())\nreactor.run()\n", "path": "docs/core/howto/listings/servers/chat.py"}], "after_files": [{"content": "from twisted.internet import reactor\nfrom twisted.internet.protocol import Factory\nfrom twisted.protocols.basic import LineReceiver\n\n\nclass Chat(LineReceiver):\n def __init__(self, users):\n self.users = users\n self.name = None\n self.state = \"GETNAME\"\n\n def connectionMade(self):\n self.sendLine(b\"What's your name?\")\n\n def connectionLost(self, reason):\n if self.name in self.users:\n del self.users[self.name]\n\n def lineReceived(self, line):\n if self.state == \"GETNAME\":\n self.handle_GETNAME(line)\n else:\n self.handle_CHAT(line)\n\n def handle_GETNAME(self, name):\n if name in self.users:\n self.sendLine(b\"Name taken, please choose another.\")\n return\n self.sendLine(f\"Welcome, {name.decode('utf-8')}!\".encode(\"utf-8\"))\n self.name = name\n self.users[name] = self\n self.state = \"CHAT\"\n\n def handle_CHAT(self, message):\n message = b\"<\" + self.name + b\"> \" + message\n for name, protocol in self.users.items():\n if protocol != self:\n protocol.sendLine(message)\n\n\nclass ChatFactory(Factory):\n def __init__(self):\n self.users = {} # maps 
user names to Chat instances\n\n def buildProtocol(self, addr):\n return Chat(self.users)\n\n\nreactor.listenTCP(8123, ChatFactory())\nreactor.run()\n", "path": "docs/core/howto/listings/servers/chat.py"}]} | 916 | 156 |
gh_patches_debug_43541 | rasdani/github-patches | git_diff | Qiskit__qiskit-6869 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Warning statement upon compiling `cz` gate into `['h', 'cx', 'rz', 'sx', 'x']` gate set
### Information
- **Qiskit Terra version**: `0.18.0`
- **Python version**: `3.9.6`
- **Operating system**: `Pop!_OS 21.04 x86_64`
### What is the current behavior?
Upon updating from version `0.17.4`, there is a substantial amount of additional warning output when compiling cz gates to the basis gate set `['h', 'cx', 'rz', 'sx', 'x']`:
```
/home/username/anaconda3/envs/qiskit-new/lib/python3.9/site-packages/qiskit/transpiler/runningpassmanager.py:166: UserWarning: Resynthesized [<qiskit.dagcircuit.dagnode.DAGNode object at 0x7f70be79e040>] and got global phase: π/4
┌─────────┐┌────┐┌─────────┐
qr_0: ┤ Rz(π/2) ├┤ √X ├┤ Rz(π/2) ├
└─────────┘└────┘└─────────┘, but the original was native and the new value is longer. This indicates an efficiency bug in synthesis. Please report it by opening an issue here: https://github.com/Qiskit/qiskit-terra/issues/new/choose
new_dag = pass_.run(dag)
```
This doesn't appear to affect the circuits' ability to run correctly; it just fills the terminal with a lot of unneeded statements, especially when running large algorithms that require many cz gates.
### Steps to reproduce the problem
```
from qiskit import QuantumCircuit, QuantumRegister
from qiskit import transpile
qr = QuantumRegister(2)
qc_ = QuantumCircuit(qr)
qc_.cz(qr[0], qr[1])
qc = transpile(qc_, basis_gates=['h', 'cx', 'rz', 'sx', 'x'])
```
### What is the expected behavior?
Not printing a ton of warning statements upon compiling to this basis gate set
### Suggested solutions
Remove the printing? The basis gates we are using form a pretty common basis gate set, and compiling away from the cz gate is the intended behavior.
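
As a stop-gap on the caller's side (not a fix for the transpiler pass itself), the standard `warnings` filter can silence just this message; the pattern below is an assumption based on the output shown above:

```python
import warnings

# Ignore only the "Resynthesized ..." efficiency warning raised during transpile.
warnings.filterwarnings(
    "ignore",
    message="Resynthesized",  # regex matched against the start of the warning text
    category=UserWarning,
)
```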
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py`
Content:
```
1 # This code is part of Qiskit.
2 #
3 # (C) Copyright IBM 2017, 2018.
4 #
5 # This code is licensed under the Apache License, Version 2.0. You may
6 # obtain a copy of this license in the LICENSE.txt file in the root directory
7 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
8 #
9 # Any modifications or derivative works of this code must retain this
10 # copyright notice, and modified files need to carry a notice indicating
11 # that they have been altered from the originals.
12
13 """Optimize chains of single-qubit gates using Euler 1q decomposer"""
14
15 import copy
16 import logging
17 import warnings
18
19 import numpy as np
20
21 from qiskit.circuit.library.standard_gates import U3Gate
22 from qiskit.transpiler.basepasses import TransformationPass
23 from qiskit.quantum_info.synthesis import one_qubit_decompose
24 from qiskit.converters import circuit_to_dag
25
26 logger = logging.getLogger(__name__)
27
28
29 class Optimize1qGatesDecomposition(TransformationPass):
30 """Optimize chains of single-qubit gates by combining them into a single gate."""
31
32 def __init__(self, basis=None):
33 """Optimize1qGatesDecomposition initializer.
34
35 Args:
36 basis (list[str]): Basis gates to consider, e.g. `['u3', 'cx']`. For the effects
37 of this pass, the basis is the set intersection between the `basis` parameter
38 and the Euler basis.
39 """
40 super().__init__()
41 self._target_basis = basis
42 self._decomposers = None
43 if basis:
44 self._decomposers = []
45 basis_set = set(basis)
46 euler_basis_gates = one_qubit_decompose.ONE_QUBIT_EULER_BASIS_GATES
47 for euler_basis_name, gates in euler_basis_gates.items():
48 if set(gates).issubset(basis_set):
49 basis_copy = copy.copy(self._decomposers)
50 for base in basis_copy:
51 # check if gates are a superset of another basis
52 # and if so, remove that basis
53 if set(euler_basis_gates[base.basis]).issubset(set(gates)):
54 self._decomposers.remove(base)
55 # check if the gates are a subset of another basis
56 elif set(gates).issubset(set(euler_basis_gates[base.basis])):
57 break
58 # if not a subset, add it to the list
59 else:
60 self._decomposers.append(
61 one_qubit_decompose.OneQubitEulerDecomposer(euler_basis_name)
62 )
63
64 def run(self, dag):
65 """Run the Optimize1qGatesDecomposition pass on `dag`.
66
67 Args:
68 dag (DAGCircuit): the DAG to be optimized.
69
70 Returns:
71 DAGCircuit: the optimized DAG.
72 """
73 if not self._decomposers:
74 logger.info("Skipping pass because no basis is set")
75 return dag
76 runs = dag.collect_1q_runs()
77 for run in runs:
78 # SPECIAL CASE: Don't bother to optimize single U3 gates which are in the basis set.
79 # The U3 decomposer is only going to emit a sequence of length 1 anyhow.
80 if "u3" in self._target_basis and len(run) == 1 and isinstance(run[0].op, U3Gate):
81 # Toss U3 gates equivalent to the identity; there we get off easy.
82 if np.allclose(run[0].op.to_matrix(), np.eye(2), 1e-15, 0):
83 dag.remove_op_node(run[0])
84 continue
85 # We might rewrite into lower `u`s if they're available.
86 if "u2" not in self._target_basis and "u1" not in self._target_basis:
87 continue
88
89 new_circs = []
90 operator = run[0].op.to_matrix()
91 for gate in run[1:]:
92 operator = gate.op.to_matrix().dot(operator)
93 for decomposer in self._decomposers:
94 new_circs.append(decomposer._decompose(operator))
95 if new_circs:
96 new_circ = min(new_circs, key=len)
97
98 # do we even have calibrations?
99 has_cals_p = dag.calibrations is not None and len(dag.calibrations) > 0
100 # is this run all in the target set and also uncalibrated?
101 rewriteable_and_in_basis_p = all(
102 g.name in self._target_basis
103 and (not has_cals_p or not dag.has_calibration_for(g))
104 for g in run
105 )
106 # does this run have uncalibrated gates?
107 uncalibrated_p = not has_cals_p or any(not dag.has_calibration_for(g) for g in run)
108 # does this run have gates not in the image of ._decomposers _and_ uncalibrated?
109 uncalibrated_and_not_basis_p = any(
110 g.name not in self._target_basis
111 and (not has_cals_p or not dag.has_calibration_for(g))
112 for g in run
113 )
114
115 if rewriteable_and_in_basis_p and len(run) < len(new_circ):
116 # NOTE: This is short-circuited on calibrated gates, which we're timid about
117 # reducing.
118 warnings.warn(
119 f"Resynthesized {run} and got {new_circ}, "
120 f"but the original was native and the new value is longer. This "
121 f"indicates an efficiency bug in synthesis. Please report it by "
122 f"opening an issue here: "
123 f"https://github.com/Qiskit/qiskit-terra/issues/new/choose",
124 stacklevel=2,
125 )
126 # if we're outside of the basis set, we're obligated to logically decompose.
127 # if we're outside of the set of gates for which we have physical definitions,
128 # then we _try_ to decompose, using the results if we see improvement.
129 # NOTE: Here we use circuit length as a weak proxy for "improvement"; in reality,
130 # we care about something more like fidelity at runtime, which would mean,
131 # e.g., a preference for `RZGate`s over `RXGate`s. In fact, users sometimes
132 # express a preference for a "canonical form" of a circuit, which may come in
133 # the form of some parameter values, also not visible at the level of circuit
134 # length. Since we don't have a framework for the caller to programmatically
135 # express what they want here, we include some special casing for particular
136 # gates which we've promised to normalize --- but this is fragile and should
137 # ultimately be done away with.
138 if (
139 uncalibrated_and_not_basis_p
140 or (uncalibrated_p and len(run) > len(new_circ))
141 or isinstance(run[0].op, U3Gate)
142 ):
143 new_dag = circuit_to_dag(new_circ)
144 dag.substitute_node_with_dag(run[0], new_dag)
145 # Delete the other nodes in the run
146 for current_node in run[1:]:
147 dag.remove_op_node(current_node)
148 return dag
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py b/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py
--- a/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py
+++ b/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py
@@ -38,28 +38,31 @@
and the Euler basis.
"""
super().__init__()
+
self._target_basis = basis
self._decomposers = None
+
if basis:
- self._decomposers = []
+ self._decomposers = {}
basis_set = set(basis)
euler_basis_gates = one_qubit_decompose.ONE_QUBIT_EULER_BASIS_GATES
for euler_basis_name, gates in euler_basis_gates.items():
if set(gates).issubset(basis_set):
basis_copy = copy.copy(self._decomposers)
- for base in basis_copy:
+ for base in basis_copy.keys():
# check if gates are a superset of another basis
- # and if so, remove that basis
- if set(euler_basis_gates[base.basis]).issubset(set(gates)):
- self._decomposers.remove(base)
+ if set(base).issubset(set(gates)):
+ # if so, remove that basis
+ del self._decomposers[base]
# check if the gates are a subset of another basis
- elif set(gates).issubset(set(euler_basis_gates[base.basis])):
+ elif set(gates).issubset(set(base)):
+ # if so, don't bother
break
# if not a subset, add it to the list
else:
- self._decomposers.append(
- one_qubit_decompose.OneQubitEulerDecomposer(euler_basis_name)
- )
+ self._decomposers[
+ tuple(gates)
+ ] = one_qubit_decompose.OneQubitEulerDecomposer(euler_basis_name)
def run(self, dag):
"""Run the Optimize1qGatesDecomposition pass on `dag`.
@@ -70,7 +73,7 @@
Returns:
DAGCircuit: the optimized DAG.
"""
- if not self._decomposers:
+ if self._decomposers is None:
logger.info("Skipping pass because no basis is set")
return dag
runs = dag.collect_1q_runs()
@@ -86,21 +89,20 @@
if "u2" not in self._target_basis and "u1" not in self._target_basis:
continue
- new_circs = []
operator = run[0].op.to_matrix()
for gate in run[1:]:
operator = gate.op.to_matrix().dot(operator)
- for decomposer in self._decomposers:
- new_circs.append(decomposer._decompose(operator))
- if new_circs:
- new_circ = min(new_circs, key=len)
+
+ new_circs = {k: v._decompose(operator) for k, v in self._decomposers.items()}
+
+ if len(new_circs) > 0:
+ new_basis, new_circ = min(new_circs.items(), key=lambda x: len(x[1]))
# do we even have calibrations?
has_cals_p = dag.calibrations is not None and len(dag.calibrations) > 0
- # is this run all in the target set and also uncalibrated?
+ # is this run in the target set of this particular decomposer and also uncalibrated?
rewriteable_and_in_basis_p = all(
- g.name in self._target_basis
- and (not has_cals_p or not dag.has_calibration_for(g))
+ g.name in new_basis and (not has_cals_p or not dag.has_calibration_for(g))
for g in run
)
# does this run have uncalibrated gates?
| {"golden_diff": "diff --git a/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py b/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py\n--- a/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py\n+++ b/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py\n@@ -38,28 +38,31 @@\n and the Euler basis.\n \"\"\"\n super().__init__()\n+\n self._target_basis = basis\n self._decomposers = None\n+\n if basis:\n- self._decomposers = []\n+ self._decomposers = {}\n basis_set = set(basis)\n euler_basis_gates = one_qubit_decompose.ONE_QUBIT_EULER_BASIS_GATES\n for euler_basis_name, gates in euler_basis_gates.items():\n if set(gates).issubset(basis_set):\n basis_copy = copy.copy(self._decomposers)\n- for base in basis_copy:\n+ for base in basis_copy.keys():\n # check if gates are a superset of another basis\n- # and if so, remove that basis\n- if set(euler_basis_gates[base.basis]).issubset(set(gates)):\n- self._decomposers.remove(base)\n+ if set(base).issubset(set(gates)):\n+ # if so, remove that basis\n+ del self._decomposers[base]\n # check if the gates are a subset of another basis\n- elif set(gates).issubset(set(euler_basis_gates[base.basis])):\n+ elif set(gates).issubset(set(base)):\n+ # if so, don't bother\n break\n # if not a subset, add it to the list\n else:\n- self._decomposers.append(\n- one_qubit_decompose.OneQubitEulerDecomposer(euler_basis_name)\n- )\n+ self._decomposers[\n+ tuple(gates)\n+ ] = one_qubit_decompose.OneQubitEulerDecomposer(euler_basis_name)\n \n def run(self, dag):\n \"\"\"Run the Optimize1qGatesDecomposition pass on `dag`.\n@@ -70,7 +73,7 @@\n Returns:\n DAGCircuit: the optimized DAG.\n \"\"\"\n- if not self._decomposers:\n+ if self._decomposers is None:\n logger.info(\"Skipping pass because no basis is set\")\n return dag\n runs = dag.collect_1q_runs()\n@@ -86,21 +89,20 @@\n if \"u2\" not in self._target_basis and \"u1\" not in self._target_basis:\n continue\n \n- new_circs = []\n operator = run[0].op.to_matrix()\n for gate in run[1:]:\n operator = gate.op.to_matrix().dot(operator)\n- for decomposer in self._decomposers:\n- new_circs.append(decomposer._decompose(operator))\n- if new_circs:\n- new_circ = min(new_circs, key=len)\n+\n+ new_circs = {k: v._decompose(operator) for k, v in self._decomposers.items()}\n+\n+ if len(new_circs) > 0:\n+ new_basis, new_circ = min(new_circs.items(), key=lambda x: len(x[1]))\n \n # do we even have calibrations?\n has_cals_p = dag.calibrations is not None and len(dag.calibrations) > 0\n- # is this run all in the target set and also uncalibrated?\n+ # is this run in the target set of this particular decomposer and also uncalibrated?\n rewriteable_and_in_basis_p = all(\n- g.name in self._target_basis\n- and (not has_cals_p or not dag.has_calibration_for(g))\n+ g.name in new_basis and (not has_cals_p or not dag.has_calibration_for(g))\n for g in run\n )\n # does this run have uncalibrated gates?\n", "issue": "Warning statement upon compiling `cz` gate into `['h', 'cx', 'rz', 'sx', 'x']` gate set\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Information\r\n\r\n- **Qiskit Terra version**: `0.18.0`\r\n- **Python version**: `3.9.6`\r\n- **Operating system**: `Pop!_OS 21.04 x86_64`\r\n\r\n### What is the current behavior?\r\nUpon updating from version `0.17.4`, there is a substantially amount of additional warning printing upon compiling cz gates to the basis gate 
set: `['h', 'cx', 'rz', 'sx', 'x']`:\r\n\r\n```\r\n/home/username/anaconda3/envs/qiskit-new/lib/python3.9/site-packages/qiskit/transpiler/runningpassmanager.py:166: UserWarning: Resynthesized [<qiskit.dagcircuit.dagnode.DAGNode object at 0x7f70be79e040>] and got global phase: \u03c0/4\r\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\u250c\u2500\u2500\u2500\u2500\u2510\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\r\nqr_0: \u2524 Rz(\u03c0/2) \u251c\u2524 \u221aX \u251c\u2524 Rz(\u03c0/2) \u251c\r\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\u2514\u2500\u2500\u2500\u2500\u2518\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518, but the original was native and the new value is longer. This indicates an efficiency bug in synthesis. Please report it by opening an issue here: https://github.com/Qiskit/qiskit-terra/issues/new/choose\r\n new_dag = pass_.run(dag)\r\n```\r\n\r\nThis doesn't appear to change the ability for the circuits to run correctly, it just fills up the terminal with a lot of non-needed statements, especially when running large algorithms requiring many cz gates.\r\n\r\n\r\n### Steps to reproduce the problem\r\n```\r\nfrom qiskit import QuantumCircuit, QuantumRegister\r\nfrom qiskit import transpile\r\n\r\nqr = QuantumRegister(2)\r\nqc_ = QuantumCircuit(qr)\r\n\r\nqc_.cz(qr[0], qr[1])\r\n\r\nqc = transpile(qc_, basis_gates=['h', 'cx', 'rz', 'sx', 'x'])\r\n```\r\n\r\n\r\n### What is the expected behavior?\r\nNot printing a ton of warning statements upon compiling to this basis gate set\r\n\r\n\r\n### Suggested solutions\r\nRemove the printing? The basis gates we are using is a pretty common basis gate set, and compiling away from the cz gate is the intended behavior.\r\n\r\n\n", "before_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Optimize chains of single-qubit gates using Euler 1q decomposer\"\"\"\n\nimport copy\nimport logging\nimport warnings\n\nimport numpy as np\n\nfrom qiskit.circuit.library.standard_gates import U3Gate\nfrom qiskit.transpiler.basepasses import TransformationPass\nfrom qiskit.quantum_info.synthesis import one_qubit_decompose\nfrom qiskit.converters import circuit_to_dag\n\nlogger = logging.getLogger(__name__)\n\n\nclass Optimize1qGatesDecomposition(TransformationPass):\n \"\"\"Optimize chains of single-qubit gates by combining them into a single gate.\"\"\"\n\n def __init__(self, basis=None):\n \"\"\"Optimize1qGatesDecomposition initializer.\n\n Args:\n basis (list[str]): Basis gates to consider, e.g. `['u3', 'cx']`. 
For the effects\n of this pass, the basis is the set intersection between the `basis` parameter\n and the Euler basis.\n \"\"\"\n super().__init__()\n self._target_basis = basis\n self._decomposers = None\n if basis:\n self._decomposers = []\n basis_set = set(basis)\n euler_basis_gates = one_qubit_decompose.ONE_QUBIT_EULER_BASIS_GATES\n for euler_basis_name, gates in euler_basis_gates.items():\n if set(gates).issubset(basis_set):\n basis_copy = copy.copy(self._decomposers)\n for base in basis_copy:\n # check if gates are a superset of another basis\n # and if so, remove that basis\n if set(euler_basis_gates[base.basis]).issubset(set(gates)):\n self._decomposers.remove(base)\n # check if the gates are a subset of another basis\n elif set(gates).issubset(set(euler_basis_gates[base.basis])):\n break\n # if not a subset, add it to the list\n else:\n self._decomposers.append(\n one_qubit_decompose.OneQubitEulerDecomposer(euler_basis_name)\n )\n\n def run(self, dag):\n \"\"\"Run the Optimize1qGatesDecomposition pass on `dag`.\n\n Args:\n dag (DAGCircuit): the DAG to be optimized.\n\n Returns:\n DAGCircuit: the optimized DAG.\n \"\"\"\n if not self._decomposers:\n logger.info(\"Skipping pass because no basis is set\")\n return dag\n runs = dag.collect_1q_runs()\n for run in runs:\n # SPECIAL CASE: Don't bother to optimize single U3 gates which are in the basis set.\n # The U3 decomposer is only going to emit a sequence of length 1 anyhow.\n if \"u3\" in self._target_basis and len(run) == 1 and isinstance(run[0].op, U3Gate):\n # Toss U3 gates equivalent to the identity; there we get off easy.\n if np.allclose(run[0].op.to_matrix(), np.eye(2), 1e-15, 0):\n dag.remove_op_node(run[0])\n continue\n # We might rewrite into lower `u`s if they're available.\n if \"u2\" not in self._target_basis and \"u1\" not in self._target_basis:\n continue\n\n new_circs = []\n operator = run[0].op.to_matrix()\n for gate in run[1:]:\n operator = gate.op.to_matrix().dot(operator)\n for decomposer in self._decomposers:\n new_circs.append(decomposer._decompose(operator))\n if new_circs:\n new_circ = min(new_circs, key=len)\n\n # do we even have calibrations?\n has_cals_p = dag.calibrations is not None and len(dag.calibrations) > 0\n # is this run all in the target set and also uncalibrated?\n rewriteable_and_in_basis_p = all(\n g.name in self._target_basis\n and (not has_cals_p or not dag.has_calibration_for(g))\n for g in run\n )\n # does this run have uncalibrated gates?\n uncalibrated_p = not has_cals_p or any(not dag.has_calibration_for(g) for g in run)\n # does this run have gates not in the image of ._decomposers _and_ uncalibrated?\n uncalibrated_and_not_basis_p = any(\n g.name not in self._target_basis\n and (not has_cals_p or not dag.has_calibration_for(g))\n for g in run\n )\n\n if rewriteable_and_in_basis_p and len(run) < len(new_circ):\n # NOTE: This is short-circuited on calibrated gates, which we're timid about\n # reducing.\n warnings.warn(\n f\"Resynthesized {run} and got {new_circ}, \"\n f\"but the original was native and the new value is longer. This \"\n f\"indicates an efficiency bug in synthesis. 
Please report it by \"\n f\"opening an issue here: \"\n f\"https://github.com/Qiskit/qiskit-terra/issues/new/choose\",\n stacklevel=2,\n )\n # if we're outside of the basis set, we're obligated to logically decompose.\n # if we're outside of the set of gates for which we have physical definitions,\n # then we _try_ to decompose, using the results if we see improvement.\n # NOTE: Here we use circuit length as a weak proxy for \"improvement\"; in reality,\n # we care about something more like fidelity at runtime, which would mean,\n # e.g., a preference for `RZGate`s over `RXGate`s. In fact, users sometimes\n # express a preference for a \"canonical form\" of a circuit, which may come in\n # the form of some parameter values, also not visible at the level of circuit\n # length. Since we don't have a framework for the caller to programmatically\n # express what they want here, we include some special casing for particular\n # gates which we've promised to normalize --- but this is fragile and should\n # ultimately be done away with.\n if (\n uncalibrated_and_not_basis_p\n or (uncalibrated_p and len(run) > len(new_circ))\n or isinstance(run[0].op, U3Gate)\n ):\n new_dag = circuit_to_dag(new_circ)\n dag.substitute_node_with_dag(run[0], new_dag)\n # Delete the other nodes in the run\n for current_node in run[1:]:\n dag.remove_op_node(current_node)\n return dag\n", "path": "qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py"}], "after_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Optimize chains of single-qubit gates using Euler 1q decomposer\"\"\"\n\nimport copy\nimport logging\nimport warnings\n\nimport numpy as np\n\nfrom qiskit.circuit.library.standard_gates import U3Gate\nfrom qiskit.transpiler.basepasses import TransformationPass\nfrom qiskit.quantum_info.synthesis import one_qubit_decompose\nfrom qiskit.converters import circuit_to_dag\n\nlogger = logging.getLogger(__name__)\n\n\nclass Optimize1qGatesDecomposition(TransformationPass):\n \"\"\"Optimize chains of single-qubit gates by combining them into a single gate.\"\"\"\n\n def __init__(self, basis=None):\n \"\"\"Optimize1qGatesDecomposition initializer.\n\n Args:\n basis (list[str]): Basis gates to consider, e.g. `['u3', 'cx']`. 
For the effects\n of this pass, the basis is the set intersection between the `basis` parameter\n and the Euler basis.\n \"\"\"\n super().__init__()\n\n self._target_basis = basis\n self._decomposers = None\n\n if basis:\n self._decomposers = {}\n basis_set = set(basis)\n euler_basis_gates = one_qubit_decompose.ONE_QUBIT_EULER_BASIS_GATES\n for euler_basis_name, gates in euler_basis_gates.items():\n if set(gates).issubset(basis_set):\n basis_copy = copy.copy(self._decomposers)\n for base in basis_copy.keys():\n # check if gates are a superset of another basis\n if set(base).issubset(set(gates)):\n # if so, remove that basis\n del self._decomposers[base]\n # check if the gates are a subset of another basis\n elif set(gates).issubset(set(base)):\n # if so, don't bother\n break\n # if not a subset, add it to the list\n else:\n self._decomposers[\n tuple(gates)\n ] = one_qubit_decompose.OneQubitEulerDecomposer(euler_basis_name)\n\n def run(self, dag):\n \"\"\"Run the Optimize1qGatesDecomposition pass on `dag`.\n\n Args:\n dag (DAGCircuit): the DAG to be optimized.\n\n Returns:\n DAGCircuit: the optimized DAG.\n \"\"\"\n if self._decomposers is None:\n logger.info(\"Skipping pass because no basis is set\")\n return dag\n runs = dag.collect_1q_runs()\n for run in runs:\n # SPECIAL CASE: Don't bother to optimize single U3 gates which are in the basis set.\n # The U3 decomposer is only going to emit a sequence of length 1 anyhow.\n if \"u3\" in self._target_basis and len(run) == 1 and isinstance(run[0].op, U3Gate):\n # Toss U3 gates equivalent to the identity; there we get off easy.\n if np.allclose(run[0].op.to_matrix(), np.eye(2), 1e-15, 0):\n dag.remove_op_node(run[0])\n continue\n # We might rewrite into lower `u`s if they're available.\n if \"u2\" not in self._target_basis and \"u1\" not in self._target_basis:\n continue\n\n operator = run[0].op.to_matrix()\n for gate in run[1:]:\n operator = gate.op.to_matrix().dot(operator)\n\n new_circs = {k: v._decompose(operator) for k, v in self._decomposers.items()}\n\n if len(new_circs) > 0:\n new_basis, new_circ = min(new_circs.items(), key=lambda x: len(x[1]))\n\n # do we even have calibrations?\n has_cals_p = dag.calibrations is not None and len(dag.calibrations) > 0\n # is this run in the target set of this particular decomposer and also uncalibrated?\n rewriteable_and_in_basis_p = all(\n g.name in new_basis and (not has_cals_p or not dag.has_calibration_for(g))\n for g in run\n )\n # does this run have uncalibrated gates?\n uncalibrated_p = not has_cals_p or any(not dag.has_calibration_for(g) for g in run)\n # does this run have gates not in the image of ._decomposers _and_ uncalibrated?\n uncalibrated_and_not_basis_p = any(\n g.name not in self._target_basis\n and (not has_cals_p or not dag.has_calibration_for(g))\n for g in run\n )\n\n if rewriteable_and_in_basis_p and len(run) < len(new_circ):\n # NOTE: This is short-circuited on calibrated gates, which we're timid about\n # reducing.\n warnings.warn(\n f\"Resynthesized {run} and got {new_circ}, \"\n f\"but the original was native and the new value is longer. This \"\n f\"indicates an efficiency bug in synthesis. 
Please report it by \"\n f\"opening an issue here: \"\n f\"https://github.com/Qiskit/qiskit-terra/issues/new/choose\",\n stacklevel=2,\n )\n # if we're outside of the basis set, we're obligated to logically decompose.\n # if we're outside of the set of gates for which we have physical definitions,\n # then we _try_ to decompose, using the results if we see improvement.\n # NOTE: Here we use circuit length as a weak proxy for \"improvement\"; in reality,\n # we care about something more like fidelity at runtime, which would mean,\n # e.g., a preference for `RZGate`s over `RXGate`s. In fact, users sometimes\n # express a preference for a \"canonical form\" of a circuit, which may come in\n # the form of some parameter values, also not visible at the level of circuit\n # length. Since we don't have a framework for the caller to programmatically\n # express what they want here, we include some special casing for particular\n # gates which we've promised to normalize --- but this is fragile and should\n # ultimately be done away with.\n if (\n uncalibrated_and_not_basis_p\n or (uncalibrated_p and len(run) > len(new_circ))\n or isinstance(run[0].op, U3Gate)\n ):\n new_dag = circuit_to_dag(new_circ)\n dag.substitute_node_with_dag(run[0], new_dag)\n # Delete the other nodes in the run\n for current_node in run[1:]:\n dag.remove_op_node(current_node)\n return dag\n", "path": "qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py"}]} | 2,779 | 931 |
gh_patches_debug_548 | rasdani/github-patches | git_diff | Gallopsled__pwntools-532 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bpython
Hi,
Unfortunately pwntools doesn't seem to work with bpython 0.12 in conjunction with Python 2.7.9.
from pwn import *
results in:
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/pwn/__init__.py", line 2, in <module>
from .toplevel import *
File "/usr/local/lib/python2.7/dist-packages/pwn/toplevel.py", line 2, in <module>
from pwnlib import *
File "/usr/local/lib/python2.7/dist-packages/pwnlib/__init__.py", line 10, in <module>
from . import \
File "/usr/local/lib/python2.7/dist-packages/pwnlib/asm.py", line 45, in <module>
from . import log
File "/usr/local/lib/python2.7/dist-packages/pwnlib/log.py", line 69, in <module>
from .term import spinners, text
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/__init__.py", line 1, in <module>
from . import key, readline, text, termcap, keymap, term
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/readline.py", line 2, in <module>
from . import term, text
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py", line 111, in <module>
sys.modules[__name__] = Module()
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py", line 22, in __init__
self.num_colors = termcap.get('colors', default = 8)
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/termcap.py", line 15, in get
init()
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/termcap.py", line 39, in init
curses.setupterm()
TypeError: argument must be an int, or have a fileno() method.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/term/termcap.py`
Content:
```
1 __all__ = ['get']
2 import os, curses
3
4 cache = None
5 def get(cap, *args, **kwargs):
6 default = kwargs.pop('default', '')
7
8 if 'PWNLIB_NOTERM' in os.environ:
9 return ''
10
11 if kwargs != {}:
12 raise TypeError("get(): No such argument %r" % kwargs.popitem()[0])
13
14 if cache == None:
15 init()
16 s = cache.get(cap)
17 if not s:
18 s = curses.tigetstr(cap)
19 if s == None:
20 s = curses.tigetnum(cap)
21 if s == -2:
22 s = curses.tigetflag(cap)
23 if s == -1:
24 # default to empty string so tparm doesn't fail
25 s = ''
26 else:
27 s = bool(s)
28 cache[cap] = s
29 # if `s' is not set `curses.tparm' will throw an error if given arguments
30 if args and s:
31 return curses.tparm(s, *args)
32 else:
33 return s
34
35 def init():
36 global cache
37
38 if 'PWNLIB_NOTERM' not in os.environ:
39 curses.setupterm()
40
41 cache = {}
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwnlib/term/termcap.py b/pwnlib/term/termcap.py
--- a/pwnlib/term/termcap.py
+++ b/pwnlib/term/termcap.py
@@ -36,6 +36,10 @@
global cache
if 'PWNLIB_NOTERM' not in os.environ:
- curses.setupterm()
+ # Fix for BPython
+ try:
+ curses.setupterm()
+ except:
+ pass
cache = {}
| {"golden_diff": "diff --git a/pwnlib/term/termcap.py b/pwnlib/term/termcap.py\n--- a/pwnlib/term/termcap.py\n+++ b/pwnlib/term/termcap.py\n@@ -36,6 +36,10 @@\n global cache\n \n if 'PWNLIB_NOTERM' not in os.environ:\n- curses.setupterm()\n+ # Fix for BPython\n+ try:\n+ curses.setupterm()\n+ except:\n+ pass\n \n cache = {}\n", "issue": "Bpython\nHi,\n\nUnfortunately pwntools doesn't seem to work with bpython 0.12 in conjunction of python 2.7.9.\n\nfrom pwn import *\n\nresults in:\n\nTraceback (most recent call last):\n File \"<input>\", line 1, in <module>\n File \"/usr/local/lib/python2.7/dist-packages/pwn/**init**.py\", line 2, in <module>\n from .toplevel import *\n File \"/usr/local/lib/python2.7/dist-packages/pwn/toplevel.py\", line 2, in <module>\n from pwnlib import *\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/**init**.py\", line 10, in <module>\n from . import \\\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/asm.py\", line 45, in <module>\n from . import log\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/log.py\", line 69, in <module>\n from .term import spinners, text\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/**init**.py\", line 1, in <module>\n from . import key, readline, text, termcap, keymap, term\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/readline.py\", line 2, in <module>\n from . import term, text\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py\", line 111, in <module>\n sys.modules[**name**] = Module()\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py\", line 22, in **init**\n self.num_colors = termcap.get('colors', default = 8)\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/termcap.py\", line 15, in get\n init()\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/termcap.py\", line 39, in init\n curses.setupterm()\nTypeError: argument must be an int, or have a fileno() method.\n\n", "before_files": [{"content": "__all__ = ['get']\nimport os, curses\n\ncache = None\ndef get(cap, *args, **kwargs):\n default = kwargs.pop('default', '')\n\n if 'PWNLIB_NOTERM' in os.environ:\n return ''\n\n if kwargs != {}:\n raise TypeError(\"get(): No such argument %r\" % kwargs.popitem()[0])\n\n if cache == None:\n init()\n s = cache.get(cap)\n if not s:\n s = curses.tigetstr(cap)\n if s == None:\n s = curses.tigetnum(cap)\n if s == -2:\n s = curses.tigetflag(cap)\n if s == -1:\n # default to empty string so tparm doesn't fail\n s = ''\n else:\n s = bool(s)\n cache[cap] = s\n # if `s' is not set `curses.tparm' will throw an error if given arguments\n if args and s:\n return curses.tparm(s, *args)\n else:\n return s\n\ndef init():\n global cache\n\n if 'PWNLIB_NOTERM' not in os.environ:\n curses.setupterm()\n\n cache = {}\n", "path": "pwnlib/term/termcap.py"}], "after_files": [{"content": "__all__ = ['get']\nimport os, curses\n\ncache = None\ndef get(cap, *args, **kwargs):\n default = kwargs.pop('default', '')\n\n if 'PWNLIB_NOTERM' in os.environ:\n return ''\n\n if kwargs != {}:\n raise TypeError(\"get(): No such argument %r\" % kwargs.popitem()[0])\n\n if cache == None:\n init()\n s = cache.get(cap)\n if not s:\n s = curses.tigetstr(cap)\n if s == None:\n s = curses.tigetnum(cap)\n if s == -2:\n s = curses.tigetflag(cap)\n if s == -1:\n # default to empty string so tparm doesn't fail\n s = ''\n else:\n s = bool(s)\n cache[cap] = s\n # if `s' is not set `curses.tparm' will throw an error if given arguments\n if args and s:\n return curses.tparm(s, *args)\n else:\n 
return s\n\ndef init():\n global cache\n\n if 'PWNLIB_NOTERM' not in os.environ:\n # Fix for BPython\n try:\n curses.setupterm()\n except:\n pass\n\n cache = {}\n", "path": "pwnlib/term/termcap.py"}]} | 1,098 | 117 |
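For the pwntools record above, a minimal sketch of the guard the golden diff adds, assuming the root cause is that bpython replaces `sys.stdout` with an object lacking a usable `fileno()`, which is what makes `curses.setupterm()` raise the `TypeError` shown in the traceback:

```python
import os
import curses

def init():
    """Terminal-capability setup that tolerates a replaced stdout (sketch of the patch)."""
    if 'PWNLIB_NOTERM' in os.environ:
        return
    try:
        # curses.setupterm() defaults to sys.stdout's file descriptor; under
        # bpython stdout is a wrapper without a real fileno(), so this raises
        # TypeError -- the patch simply swallows that failure.
        curses.setupterm()
    except Exception:
        pass
```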
gh_patches_debug_1990 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2137 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug in project document category API
## Test plan
The project_document_category should not give an error. E.g. `http://rsr.localdev.akvo.org/rest/v1/project_document_category/` should load.
## Issue description
The project document category API gives an error. See http://sentry.support.akvo-ops.org/rsr/test/group/879/, or on the Test server: http://rsr.test.akvo.org/rest/v1/project_document_category/.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/project_document.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import ProjectDocument, ProjectDocumentCategory
9
10 from ..serializers import ProjectDocumentSerializer, ProjectDocumentCategorySerializer
11 from ..viewsets import PublicProjectViewSet
12
13
14 class ProjectDocumentViewSet(PublicProjectViewSet):
15 """
16 """
17 queryset = ProjectDocument.objects.all()
18 serializer_class = ProjectDocumentSerializer
19
20
21 class ProjectDocumentCategoryViewSet(PublicProjectViewSet):
22 """
23 """
24 queryset = ProjectDocumentCategory.objects.all()
25 serializer_class = ProjectDocumentCategorySerializer
26 filter_fields = ('document__project', 'document', 'category', )
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/project_document.py b/akvo/rest/views/project_document.py
--- a/akvo/rest/views/project_document.py
+++ b/akvo/rest/views/project_document.py
@@ -24,3 +24,4 @@
queryset = ProjectDocumentCategory.objects.all()
serializer_class = ProjectDocumentCategorySerializer
filter_fields = ('document__project', 'document', 'category', )
+ project_relation = 'document__project__'
| {"golden_diff": "diff --git a/akvo/rest/views/project_document.py b/akvo/rest/views/project_document.py\n--- a/akvo/rest/views/project_document.py\n+++ b/akvo/rest/views/project_document.py\n@@ -24,3 +24,4 @@\n queryset = ProjectDocumentCategory.objects.all()\n serializer_class = ProjectDocumentCategorySerializer\n filter_fields = ('document__project', 'document', 'category', )\n+ project_relation = 'document__project__'\n", "issue": "Bug in project document category API\n## Test plan\n\nThe project_document_category should not give an error. E.g. `http://rsr.localdev.akvo.org/rest/v1/project_document_category/` should load.\n## Issue description\n\nThe project document category API gives an error. See http://sentry.support.akvo-ops.org/rsr/test/group/879/, or on the Test server: http://rsr.test.akvo.org/rest/v1/project_document_category/.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import ProjectDocument, ProjectDocumentCategory\n\nfrom ..serializers import ProjectDocumentSerializer, ProjectDocumentCategorySerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass ProjectDocumentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = ProjectDocument.objects.all()\n serializer_class = ProjectDocumentSerializer\n\n\nclass ProjectDocumentCategoryViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = ProjectDocumentCategory.objects.all()\n serializer_class = ProjectDocumentCategorySerializer\n filter_fields = ('document__project', 'document', 'category', )\n", "path": "akvo/rest/views/project_document.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import ProjectDocument, ProjectDocumentCategory\n\nfrom ..serializers import ProjectDocumentSerializer, ProjectDocumentCategorySerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass ProjectDocumentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = ProjectDocument.objects.all()\n serializer_class = ProjectDocumentSerializer\n\n\nclass ProjectDocumentCategoryViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = ProjectDocumentCategory.objects.all()\n serializer_class = ProjectDocumentCategorySerializer\n filter_fields = ('document__project', 'document', 'category', )\n project_relation = 'document__project__'\n", "path": "akvo/rest/views/project_document.py"}]} | 595 | 101 |
gh_patches_debug_7593 | rasdani/github-patches | git_diff | python-pillow__Pillow-1230 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot identify XBM file created with filename including underscore
Pillow 2.8.1, Python 2.7.6 (Anaconda 2.2.0), Windows 7 64bit
When I create git_hub.xbm (with ImageMagick), the created file's header contains lines like this.
``` C
#define git_hub_width 32
#define git_hub_height 32
```
In XbmImagePlugin.py, the regular expression used to extract the XBM header doesn't match a defined macro with two or more underscores, like the one above. This causes an IOError.
``` python
# XBM header
xbm_head = re.compile(
b"\s*#define[ \t]+[^_]*_width[ \t]+(?P<width>[0-9]+)[\r\n]+"
b"#define[ \t]+[^_]*_height[ \t]+(?P<height>[0-9]+)[\r\n]+"
b"(?P<hotspot>"
b"#define[ \t]+[^_]*_x_hot[ \t]+(?P<xhot>[0-9]+)[\r\n]+"
b"#define[ \t]+[^_]*_y_hot[ \t]+(?P<yhot>[0-9]+)[\r\n]+"
b")?"
b"[\\000-\\377]*_bits\\[\\]"
)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PIL/XbmImagePlugin.py`
Content:
```
1 #
2 # The Python Imaging Library.
3 # $Id$
4 #
5 # XBM File handling
6 #
7 # History:
8 # 1995-09-08 fl Created
9 # 1996-11-01 fl Added save support
10 # 1997-07-07 fl Made header parser more tolerant
11 # 1997-07-22 fl Fixed yet another parser bug
12 # 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
13 # 2001-05-13 fl Added hotspot handling (based on code from Bernhard Herzog)
14 # 2004-02-24 fl Allow some whitespace before first #define
15 #
16 # Copyright (c) 1997-2004 by Secret Labs AB
17 # Copyright (c) 1996-1997 by Fredrik Lundh
18 #
19 # See the README file for information on usage and redistribution.
20 #
21
22 __version__ = "0.6"
23
24 import re
25 from PIL import Image, ImageFile
26
27 # XBM header
28 xbm_head = re.compile(
29 b"\s*#define[ \t]+[^_]*_width[ \t]+(?P<width>[0-9]+)[\r\n]+"
30 b"#define[ \t]+[^_]*_height[ \t]+(?P<height>[0-9]+)[\r\n]+"
31 b"(?P<hotspot>"
32 b"#define[ \t]+[^_]*_x_hot[ \t]+(?P<xhot>[0-9]+)[\r\n]+"
33 b"#define[ \t]+[^_]*_y_hot[ \t]+(?P<yhot>[0-9]+)[\r\n]+"
34 b")?"
35 b"[\\000-\\377]*_bits\\[\\]"
36 )
37
38
39 def _accept(prefix):
40 return prefix.lstrip()[:7] == b"#define"
41
42
43 ##
44 # Image plugin for X11 bitmaps.
45
46 class XbmImageFile(ImageFile.ImageFile):
47
48 format = "XBM"
49 format_description = "X11 Bitmap"
50
51 def _open(self):
52
53 m = xbm_head.match(self.fp.read(512))
54
55 if m:
56
57 xsize = int(m.group("width"))
58 ysize = int(m.group("height"))
59
60 if m.group("hotspot"):
61 self.info["hotspot"] = (
62 int(m.group("xhot")), int(m.group("yhot"))
63 )
64
65 self.mode = "1"
66 self.size = xsize, ysize
67
68 self.tile = [("xbm", (0, 0)+self.size, m.end(), None)]
69
70
71 def _save(im, fp, filename):
72
73 if im.mode != "1":
74 raise IOError("cannot write mode %s as XBM" % im.mode)
75
76 fp.write(("#define im_width %d\n" % im.size[0]).encode('ascii'))
77 fp.write(("#define im_height %d\n" % im.size[1]).encode('ascii'))
78
79 hotspot = im.encoderinfo.get("hotspot")
80 if hotspot:
81 fp.write(("#define im_x_hot %d\n" % hotspot[0]).encode('ascii'))
82 fp.write(("#define im_y_hot %d\n" % hotspot[1]).encode('ascii'))
83
84 fp.write(b"static char im_bits[] = {\n")
85
86 ImageFile._save(im, fp, [("xbm", (0, 0)+im.size, 0, None)])
87
88 fp.write(b"};\n")
89
90
91 Image.register_open("XBM", XbmImageFile, _accept)
92 Image.register_save("XBM", _save)
93
94 Image.register_extension("XBM", ".xbm")
95
96 Image.register_mime("XBM", "image/xbm")
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PIL/XbmImagePlugin.py b/PIL/XbmImagePlugin.py
--- a/PIL/XbmImagePlugin.py
+++ b/PIL/XbmImagePlugin.py
@@ -26,8 +26,8 @@
# XBM header
xbm_head = re.compile(
- b"\s*#define[ \t]+[^_]*_width[ \t]+(?P<width>[0-9]+)[\r\n]+"
- b"#define[ \t]+[^_]*_height[ \t]+(?P<height>[0-9]+)[\r\n]+"
+ b"\s*#define[ \t]+.*_width[ \t]+(?P<width>[0-9]+)[\r\n]+"
+ b"#define[ \t]+.*_height[ \t]+(?P<height>[0-9]+)[\r\n]+"
b"(?P<hotspot>"
b"#define[ \t]+[^_]*_x_hot[ \t]+(?P<xhot>[0-9]+)[\r\n]+"
b"#define[ \t]+[^_]*_y_hot[ \t]+(?P<yhot>[0-9]+)[\r\n]+"
| {"golden_diff": "diff --git a/PIL/XbmImagePlugin.py b/PIL/XbmImagePlugin.py\n--- a/PIL/XbmImagePlugin.py\n+++ b/PIL/XbmImagePlugin.py\n@@ -26,8 +26,8 @@\n \n # XBM header\n xbm_head = re.compile(\n- b\"\\s*#define[ \\t]+[^_]*_width[ \\t]+(?P<width>[0-9]+)[\\r\\n]+\"\n- b\"#define[ \\t]+[^_]*_height[ \\t]+(?P<height>[0-9]+)[\\r\\n]+\"\n+ b\"\\s*#define[ \\t]+.*_width[ \\t]+(?P<width>[0-9]+)[\\r\\n]+\"\n+ b\"#define[ \\t]+.*_height[ \\t]+(?P<height>[0-9]+)[\\r\\n]+\"\n b\"(?P<hotspot>\"\n b\"#define[ \\t]+[^_]*_x_hot[ \\t]+(?P<xhot>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_y_hot[ \\t]+(?P<yhot>[0-9]+)[\\r\\n]+\"\n", "issue": "Cannot identify XBM file created with filename including underscore\nPillow 2.8.1, Python 2.7.6 (Anaconda 2.2.0), Windows 7 64bit\n\nWhen I create git_hub.xbm (with ImageMagick), created file's header contains lines like this.\n\n``` C\n#define git_hub_width 32\n#define git_hub_height 32\n```\n\nIn XbmImagePlugin.py, regular expression to extract XBM header doesn't match defined macro with more than two underscores like above.This causes an IOError.\n\n``` python\n# XBM header\nxbm_head = re.compile(\n b\"\\s*#define[ \\t]+[^_]*_width[ \\t]+(?P<width>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_height[ \\t]+(?P<height>[0-9]+)[\\r\\n]+\"\n b\"(?P<hotspot>\"\n b\"#define[ \\t]+[^_]*_x_hot[ \\t]+(?P<xhot>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_y_hot[ \\t]+(?P<yhot>[0-9]+)[\\r\\n]+\"\n b\")?\"\n b\"[\\\\000-\\\\377]*_bits\\\\[\\\\]\"\n)\n```\n\n", "before_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# XBM File handling\n#\n# History:\n# 1995-09-08 fl Created\n# 1996-11-01 fl Added save support\n# 1997-07-07 fl Made header parser more tolerant\n# 1997-07-22 fl Fixed yet another parser bug\n# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)\n# 2001-05-13 fl Added hotspot handling (based on code from Bernhard Herzog)\n# 2004-02-24 fl Allow some whitespace before first #define\n#\n# Copyright (c) 1997-2004 by Secret Labs AB\n# Copyright (c) 1996-1997 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\n__version__ = \"0.6\"\n\nimport re\nfrom PIL import Image, ImageFile\n\n# XBM header\nxbm_head = re.compile(\n b\"\\s*#define[ \\t]+[^_]*_width[ \\t]+(?P<width>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_height[ \\t]+(?P<height>[0-9]+)[\\r\\n]+\"\n b\"(?P<hotspot>\"\n b\"#define[ \\t]+[^_]*_x_hot[ \\t]+(?P<xhot>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_y_hot[ \\t]+(?P<yhot>[0-9]+)[\\r\\n]+\"\n b\")?\"\n b\"[\\\\000-\\\\377]*_bits\\\\[\\\\]\"\n)\n\n\ndef _accept(prefix):\n return prefix.lstrip()[:7] == b\"#define\"\n\n\n##\n# Image plugin for X11 bitmaps.\n\nclass XbmImageFile(ImageFile.ImageFile):\n\n format = \"XBM\"\n format_description = \"X11 Bitmap\"\n\n def _open(self):\n\n m = xbm_head.match(self.fp.read(512))\n\n if m:\n\n xsize = int(m.group(\"width\"))\n ysize = int(m.group(\"height\"))\n\n if m.group(\"hotspot\"):\n self.info[\"hotspot\"] = (\n int(m.group(\"xhot\")), int(m.group(\"yhot\"))\n )\n\n self.mode = \"1\"\n self.size = xsize, ysize\n\n self.tile = [(\"xbm\", (0, 0)+self.size, m.end(), None)]\n\n\ndef _save(im, fp, filename):\n\n if im.mode != \"1\":\n raise IOError(\"cannot write mode %s as XBM\" % im.mode)\n\n fp.write((\"#define im_width %d\\n\" % im.size[0]).encode('ascii'))\n fp.write((\"#define im_height %d\\n\" % im.size[1]).encode('ascii'))\n\n hotspot = im.encoderinfo.get(\"hotspot\")\n if hotspot:\n fp.write((\"#define im_x_hot %d\\n\" % 
hotspot[0]).encode('ascii'))\n fp.write((\"#define im_y_hot %d\\n\" % hotspot[1]).encode('ascii'))\n\n fp.write(b\"static char im_bits[] = {\\n\")\n\n ImageFile._save(im, fp, [(\"xbm\", (0, 0)+im.size, 0, None)])\n\n fp.write(b\"};\\n\")\n\n\nImage.register_open(\"XBM\", XbmImageFile, _accept)\nImage.register_save(\"XBM\", _save)\n\nImage.register_extension(\"XBM\", \".xbm\")\n\nImage.register_mime(\"XBM\", \"image/xbm\")\n", "path": "PIL/XbmImagePlugin.py"}], "after_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# XBM File handling\n#\n# History:\n# 1995-09-08 fl Created\n# 1996-11-01 fl Added save support\n# 1997-07-07 fl Made header parser more tolerant\n# 1997-07-22 fl Fixed yet another parser bug\n# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)\n# 2001-05-13 fl Added hotspot handling (based on code from Bernhard Herzog)\n# 2004-02-24 fl Allow some whitespace before first #define\n#\n# Copyright (c) 1997-2004 by Secret Labs AB\n# Copyright (c) 1996-1997 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\n__version__ = \"0.6\"\n\nimport re\nfrom PIL import Image, ImageFile\n\n# XBM header\nxbm_head = re.compile(\n b\"\\s*#define[ \\t]+.*_width[ \\t]+(?P<width>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+.*_height[ \\t]+(?P<height>[0-9]+)[\\r\\n]+\"\n b\"(?P<hotspot>\"\n b\"#define[ \\t]+[^_]*_x_hot[ \\t]+(?P<xhot>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_y_hot[ \\t]+(?P<yhot>[0-9]+)[\\r\\n]+\"\n b\")?\"\n b\"[\\\\000-\\\\377]*_bits\\\\[\\\\]\"\n)\n\n\ndef _accept(prefix):\n return prefix.lstrip()[:7] == b\"#define\"\n\n\n##\n# Image plugin for X11 bitmaps.\n\nclass XbmImageFile(ImageFile.ImageFile):\n\n format = \"XBM\"\n format_description = \"X11 Bitmap\"\n\n def _open(self):\n\n m = xbm_head.match(self.fp.read(512))\n\n if m:\n\n xsize = int(m.group(\"width\"))\n ysize = int(m.group(\"height\"))\n\n if m.group(\"hotspot\"):\n self.info[\"hotspot\"] = (\n int(m.group(\"xhot\")), int(m.group(\"yhot\"))\n )\n\n self.mode = \"1\"\n self.size = xsize, ysize\n\n self.tile = [(\"xbm\", (0, 0)+self.size, m.end(), None)]\n\n\ndef _save(im, fp, filename):\n\n if im.mode != \"1\":\n raise IOError(\"cannot write mode %s as XBM\" % im.mode)\n\n fp.write((\"#define im_width %d\\n\" % im.size[0]).encode('ascii'))\n fp.write((\"#define im_height %d\\n\" % im.size[1]).encode('ascii'))\n\n hotspot = im.encoderinfo.get(\"hotspot\")\n if hotspot:\n fp.write((\"#define im_x_hot %d\\n\" % hotspot[0]).encode('ascii'))\n fp.write((\"#define im_y_hot %d\\n\" % hotspot[1]).encode('ascii'))\n\n fp.write(b\"static char im_bits[] = {\\n\")\n\n ImageFile._save(im, fp, [(\"xbm\", (0, 0)+im.size, 0, None)])\n\n fp.write(b\"};\\n\")\n\n\nImage.register_open(\"XBM\", XbmImageFile, _accept)\nImage.register_save(\"XBM\", _save)\n\nImage.register_extension(\"XBM\", \".xbm\")\n\nImage.register_mime(\"XBM\", \"image/xbm\")\n", "path": "PIL/XbmImagePlugin.py"}]} | 1,620 | 274 |
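As a quick illustration of the Pillow record above, a standalone snippet (the header text is taken from the issue) showing why the original `[^_]*_width` pattern rejects names whose prefix itself contains an underscore, while the patched `.*_width` accepts them:

```python
import re

header = b"#define git_hub_width 32\n#define git_hub_height 32\n"

# Original pattern: [^_]* cannot cross the underscore inside "git_hub",
# so "_width" is never reached and the match fails.
old = re.compile(rb"\s*#define[ \t]+[^_]*_width[ \t]+(?P<width>[0-9]+)")
# Patched pattern: .* accepts any prefix, underscores included.
new = re.compile(rb"\s*#define[ \t]+.*_width[ \t]+(?P<width>[0-9]+)")

print(old.match(header))                 # None -> Pillow cannot identify the file
print(new.match(header).group("width"))  # b'32'
```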
gh_patches_debug_18857 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-5807 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Codecommit and Codepipeline missing tag filters and actions in 0.9.2.0
**Describe the bug**
Codecommit and codepipeline tagging was added per the release notes, but it doesn't come up in the schema when you run `custodian schema codecommit` or `custodian schema codepipeline`
**To Reproduce**
Steps to reproduce the behavior:
custodian schema codecommit or codepipeline
**Expected behavior**
Expecting to see the marked-for-op filter and the tag and mark-for-op actions available for the 2 resources
**Background (please complete the following information):**
- OS: [e.g. OSX 10.15] Ubuntu v20
- Python Version: [e.g. python 3.8.1] 3.8
- Custodian Version: [e.g. 0.8.46.1] 0.9.2.0
- Tool Version: [if applicable]
- Cloud Provider: [e.g. gcp, aws, azure] aws
- Policy: [please exclude any account/sensitive information]
```yaml
policies:
- name: check-buckets
resource: aws.s3
```
- Traceback: [if applicable, please exclude sensitive/account information]
- `custodian version --debug` output
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/resources/code.py`
Content:
```
1 # Copyright 2017 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from botocore.exceptions import ClientError
15 import jmespath
16
17 from c7n.actions import BaseAction
18 from c7n.filters.vpc import SubnetFilter, SecurityGroupFilter, VpcFilter
19 from c7n.manager import resources
20 from c7n.query import QueryResourceManager, DescribeSource, ConfigSource, TypeInfo
21 from c7n.tags import universal_augment
22 from c7n.utils import local_session, type_schema
23
24 from .securityhub import OtherResourcePostFinding
25
26
27 @resources.register('codecommit')
28 class CodeRepository(QueryResourceManager):
29
30 class resource_type(TypeInfo):
31 service = 'codecommit'
32 enum_spec = ('list_repositories', 'repositories', None)
33 batch_detail_spec = (
34 'batch_get_repositories', 'repositoryNames', 'repositoryName',
35 'repositories', None)
36 name = id = 'repositoryName'
37 arn = "Arn"
38 date = 'creationDate'
39 cfn_type = 'AWS::CodeCommit::Repository'
40 universal_tagging = object()
41
42 def get_resources(self, ids, cache=True):
43 return universal_augment(self, self.augment([{'repositoryName': i} for i in ids]))
44
45
46 @CodeRepository.action_registry.register('delete')
47 class DeleteRepository(BaseAction):
48 """Action to delete code commit
49
50 It is recommended to use a filter to avoid unwanted deletion of repos
51
52 :example:
53
54 .. code-block:: yaml
55
56 policies:
57 - name: codecommit-delete
58 resource: codecommit
59 actions:
60 - delete
61 """
62
63 schema = type_schema('delete')
64 permissions = ("codecommit:DeleteRepository",)
65
66 def process(self, repositories):
67 client = local_session(
68 self.manager.session_factory).client('codecommit')
69 for r in repositories:
70 self.process_repository(client, r)
71
72 def process_repository(self, client, repository):
73 try:
74 client.delete_repository(repositoryName=repository['repositoryName'])
75 except ClientError as e:
76 self.log.exception(
77 "Exception deleting repo:\n %s" % e)
78
79
80 class DescribeBuild(DescribeSource):
81
82 def augment(self, resources):
83 return universal_augment(
84 self.manager,
85 super(DescribeBuild, self).augment(resources))
86
87
88 @resources.register('codebuild')
89 class CodeBuildProject(QueryResourceManager):
90
91 class resource_type(TypeInfo):
92 service = 'codebuild'
93 enum_spec = ('list_projects', 'projects', None)
94 batch_detail_spec = (
95 'batch_get_projects', 'names', None, 'projects', None)
96 name = id = 'name'
97 arn = 'arn'
98 date = 'created'
99 dimension = 'ProjectName'
100 cfn_type = config_type = "AWS::CodeBuild::Project"
101 arn_type = 'project'
102 universal_taggable = object()
103
104 source_mapping = {
105 'describe': DescribeBuild,
106 'config': ConfigSource
107 }
108
109
110 @CodeBuildProject.filter_registry.register('subnet')
111 class BuildSubnetFilter(SubnetFilter):
112
113 RelatedIdsExpression = "vpcConfig.subnets[]"
114
115
116 @CodeBuildProject.filter_registry.register('security-group')
117 class BuildSecurityGroupFilter(SecurityGroupFilter):
118
119 RelatedIdsExpression = "vpcConfig.securityGroupIds[]"
120
121
122 @CodeBuildProject.filter_registry.register('vpc')
123 class BuildVpcFilter(VpcFilter):
124
125 RelatedIdsExpression = "vpcConfig.vpcId"
126
127
128 @CodeBuildProject.action_registry.register('post-finding')
129 class BuildPostFinding(OtherResourcePostFinding):
130
131 resource_type = 'AwsCodeBuildProject'
132
133 def format_resource(self, r):
134 envelope, payload = self.format_envelope(r)
135 payload.update(self.filter_empty({
136 'Name': r['name'],
137 'EncryptionKey': r['encryptionKey'],
138 'Environment': self.filter_empty({
139 'Type': r['environment']['type'],
140 'Certificate': r['environment'].get('certificate'),
141 'RegistryCredential': self.filter_empty({
142 'Credential': jmespath.search(
143 'environment.registryCredential.credential', r),
144 'CredentialProvider': jmespath.search(
145 'environment.registryCredential.credentialProvider', r)
146 }),
147 'ImagePullCredentialsType': r['environment'].get(
148 'imagePullCredentialsType')
149 }),
150 'ServiceRole': r['serviceRole'],
151 'VpcConfig': self.filter_empty({
152 'VpcId': jmespath.search('vpcConfig.vpcId', r),
153 'Subnets': jmespath.search('vpcConfig.subnets', r),
154 'SecurityGroupIds': jmespath.search('vpcConfig.securityGroupIds', r)
155 }),
156 'Source': self.filter_empty({
157 'Type': jmespath.search('source.type', r),
158 'Location': jmespath.search('source.location', r),
159 'GitCloneDepth': jmespath.search('source.gitCloneDepth', r)
160 }),
161 }))
162 return envelope
163
164
165 @CodeBuildProject.action_registry.register('delete')
166 class DeleteProject(BaseAction):
167 """Action to delete code build
168
169 It is recommended to use a filter to avoid unwanted deletion of builds
170
171 :example:
172
173 .. code-block:: yaml
174
175 policies:
176 - name: codebuild-delete
177 resource: codebuild
178 actions:
179 - delete
180 """
181
182 schema = type_schema('delete')
183 permissions = ("codebuild:DeleteProject",)
184
185 def process(self, projects):
186 client = local_session(self.manager.session_factory).client('codebuild')
187 for p in projects:
188 self.process_project(client, p)
189
190 def process_project(self, client, project):
191
192 try:
193 client.delete_project(name=project['name'])
194 except ClientError as e:
195 self.log.exception(
196 "Exception deleting project:\n %s" % e)
197
198
199 class DescribePipeline(DescribeSource):
200
201 def augment(self, resources):
202 resources = super().augment(resources)
203 return universal_augment(self.manager, resources)
204
205
206 @resources.register('codepipeline')
207 class CodeDeployPipeline(QueryResourceManager):
208
209 class resource_type(TypeInfo):
210 service = 'codepipeline'
211 enum_spec = ('list_pipelines', 'pipelines', None)
212 detail_spec = ('get_pipeline', 'name', 'name', 'pipeline')
213 name = id = 'name'
214 date = 'created'
215 # Note this is purposeful, codepipeline don't have a separate type specifier.
216 arn_type = ""
217 cfn_type = config_type = "AWS::CodePipeline::Pipeline"
218 universal_tagging = object()
219
220 source_mapping = {
221 'describe': DescribePipeline,
222 'config': ConfigSource
223 }
224
225
226 @CodeDeployPipeline.action_registry.register('delete')
227 class DeletePipeline(BaseAction):
228
229 schema = type_schema('delete')
230 permissions = ('codepipeline:DeletePipeline',)
231
232 def process(self, resources):
233 client = local_session(self.manager.session_factory).client('codepipeline')
234 for r in resources:
235 try:
236 self.manager.retry(client.delete_pipeline, name=r['name'])
237 except client.exceptions.PipelineNotFoundException:
238 continue
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/c7n/resources/code.py b/c7n/resources/code.py
--- a/c7n/resources/code.py
+++ b/c7n/resources/code.py
@@ -37,7 +37,7 @@
arn = "Arn"
date = 'creationDate'
cfn_type = 'AWS::CodeCommit::Repository'
- universal_tagging = object()
+ universal_taggable = object()
def get_resources(self, ids, cache=True):
return universal_augment(self, self.augment([{'repositoryName': i} for i in ids]))
@@ -215,7 +215,7 @@
# Note this is purposeful, codepipeline don't have a separate type specifier.
arn_type = ""
cfn_type = config_type = "AWS::CodePipeline::Pipeline"
- universal_tagging = object()
+ universal_taggable = object()
source_mapping = {
'describe': DescribePipeline,
| {"golden_diff": "diff --git a/c7n/resources/code.py b/c7n/resources/code.py\n--- a/c7n/resources/code.py\n+++ b/c7n/resources/code.py\n@@ -37,7 +37,7 @@\n arn = \"Arn\"\n date = 'creationDate'\n cfn_type = 'AWS::CodeCommit::Repository'\n- universal_tagging = object()\n+ universal_taggable = object()\n \n def get_resources(self, ids, cache=True):\n return universal_augment(self, self.augment([{'repositoryName': i} for i in ids]))\n@@ -215,7 +215,7 @@\n # Note this is purposeful, codepipeline don't have a separate type specifier.\n arn_type = \"\"\n cfn_type = config_type = \"AWS::CodePipeline::Pipeline\"\n- universal_tagging = object()\n+ universal_taggable = object()\n \n source_mapping = {\n 'describe': DescribePipeline,\n", "issue": "Codecommit and Codepipeline missing tag filters and actions in 0.9.2.0\n**Describe the bug**\r\nCodecommit and codepipeline tagging was added per the release notes but it doesn't come up in the schema when you do a custodian schema codecommit or codepipeline\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n custodian schema codecommit or codepipeline\r\n**Expected behavior**\r\n Expecting to see the marked-for-op filter and tag and mark-for-op actions avaialble for the 2 resources\r\n\r\n\r\n**Background (please complete the following information):**\r\n - OS: [e.g. OSX 10.15] Ubuntu v20\r\n - Python Version: [e.g. python 3.8.1] 3.8\r\n - Custodian Version: [e.g. 0.8.46.1] 0.9.2.0\r\n - Tool Version: [if applicable]\r\n - Cloud Provider: [e.g. gcp, aws, azure] aws\r\n - Policy: [please exclude any account/sensitive information]\r\n```yaml\r\npolicies: \r\n - name: check-buckets\r\n resource: aws.s3\r\n```\r\n - Traceback: [if applicable, please exclude sensitive/account information]\r\n - `custodian version --debug` output\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "# Copyright 2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom botocore.exceptions import ClientError\nimport jmespath\n\nfrom c7n.actions import BaseAction\nfrom c7n.filters.vpc import SubnetFilter, SecurityGroupFilter, VpcFilter\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager, DescribeSource, ConfigSource, TypeInfo\nfrom c7n.tags import universal_augment\nfrom c7n.utils import local_session, type_schema\n\nfrom .securityhub import OtherResourcePostFinding\n\n\[email protected]('codecommit')\nclass CodeRepository(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'codecommit'\n enum_spec = ('list_repositories', 'repositories', None)\n batch_detail_spec = (\n 'batch_get_repositories', 'repositoryNames', 'repositoryName',\n 'repositories', None)\n name = id = 'repositoryName'\n arn = \"Arn\"\n date = 'creationDate'\n cfn_type = 'AWS::CodeCommit::Repository'\n universal_tagging = object()\n\n def get_resources(self, ids, cache=True):\n return universal_augment(self, self.augment([{'repositoryName': i} for i in ids]))\n\n\[email 
protected]_registry.register('delete')\nclass DeleteRepository(BaseAction):\n \"\"\"Action to delete code commit\n\n It is recommended to use a filter to avoid unwanted deletion of repos\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: codecommit-delete\n resource: codecommit\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"codecommit:DeleteRepository\",)\n\n def process(self, repositories):\n client = local_session(\n self.manager.session_factory).client('codecommit')\n for r in repositories:\n self.process_repository(client, r)\n\n def process_repository(self, client, repository):\n try:\n client.delete_repository(repositoryName=repository['repositoryName'])\n except ClientError as e:\n self.log.exception(\n \"Exception deleting repo:\\n %s\" % e)\n\n\nclass DescribeBuild(DescribeSource):\n\n def augment(self, resources):\n return universal_augment(\n self.manager,\n super(DescribeBuild, self).augment(resources))\n\n\[email protected]('codebuild')\nclass CodeBuildProject(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'codebuild'\n enum_spec = ('list_projects', 'projects', None)\n batch_detail_spec = (\n 'batch_get_projects', 'names', None, 'projects', None)\n name = id = 'name'\n arn = 'arn'\n date = 'created'\n dimension = 'ProjectName'\n cfn_type = config_type = \"AWS::CodeBuild::Project\"\n arn_type = 'project'\n universal_taggable = object()\n\n source_mapping = {\n 'describe': DescribeBuild,\n 'config': ConfigSource\n }\n\n\[email protected]_registry.register('subnet')\nclass BuildSubnetFilter(SubnetFilter):\n\n RelatedIdsExpression = \"vpcConfig.subnets[]\"\n\n\[email protected]_registry.register('security-group')\nclass BuildSecurityGroupFilter(SecurityGroupFilter):\n\n RelatedIdsExpression = \"vpcConfig.securityGroupIds[]\"\n\n\[email protected]_registry.register('vpc')\nclass BuildVpcFilter(VpcFilter):\n\n RelatedIdsExpression = \"vpcConfig.vpcId\"\n\n\[email protected]_registry.register('post-finding')\nclass BuildPostFinding(OtherResourcePostFinding):\n\n resource_type = 'AwsCodeBuildProject'\n\n def format_resource(self, r):\n envelope, payload = self.format_envelope(r)\n payload.update(self.filter_empty({\n 'Name': r['name'],\n 'EncryptionKey': r['encryptionKey'],\n 'Environment': self.filter_empty({\n 'Type': r['environment']['type'],\n 'Certificate': r['environment'].get('certificate'),\n 'RegistryCredential': self.filter_empty({\n 'Credential': jmespath.search(\n 'environment.registryCredential.credential', r),\n 'CredentialProvider': jmespath.search(\n 'environment.registryCredential.credentialProvider', r)\n }),\n 'ImagePullCredentialsType': r['environment'].get(\n 'imagePullCredentialsType')\n }),\n 'ServiceRole': r['serviceRole'],\n 'VpcConfig': self.filter_empty({\n 'VpcId': jmespath.search('vpcConfig.vpcId', r),\n 'Subnets': jmespath.search('vpcConfig.subnets', r),\n 'SecurityGroupIds': jmespath.search('vpcConfig.securityGroupIds', r)\n }),\n 'Source': self.filter_empty({\n 'Type': jmespath.search('source.type', r),\n 'Location': jmespath.search('source.location', r),\n 'GitCloneDepth': jmespath.search('source.gitCloneDepth', r)\n }),\n }))\n return envelope\n\n\[email protected]_registry.register('delete')\nclass DeleteProject(BaseAction):\n \"\"\"Action to delete code build\n\n It is recommended to use a filter to avoid unwanted deletion of builds\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: codebuild-delete\n resource: codebuild\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"codebuild:DeleteProject\",)\n\n def process(self, projects):\n client = local_session(self.manager.session_factory).client('codebuild')\n for p in projects:\n self.process_project(client, p)\n\n def process_project(self, client, project):\n\n try:\n client.delete_project(name=project['name'])\n except ClientError as e:\n self.log.exception(\n \"Exception deleting project:\\n %s\" % e)\n\n\nclass DescribePipeline(DescribeSource):\n\n def augment(self, resources):\n resources = super().augment(resources)\n return universal_augment(self.manager, resources)\n\n\[email protected]('codepipeline')\nclass CodeDeployPipeline(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'codepipeline'\n enum_spec = ('list_pipelines', 'pipelines', None)\n detail_spec = ('get_pipeline', 'name', 'name', 'pipeline')\n name = id = 'name'\n date = 'created'\n # Note this is purposeful, codepipeline don't have a separate type specifier.\n arn_type = \"\"\n cfn_type = config_type = \"AWS::CodePipeline::Pipeline\"\n universal_tagging = object()\n\n source_mapping = {\n 'describe': DescribePipeline,\n 'config': ConfigSource\n }\n\n\[email protected]_registry.register('delete')\nclass DeletePipeline(BaseAction):\n\n schema = type_schema('delete')\n permissions = ('codepipeline:DeletePipeline',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('codepipeline')\n for r in resources:\n try:\n self.manager.retry(client.delete_pipeline, name=r['name'])\n except client.exceptions.PipelineNotFoundException:\n continue\n", "path": "c7n/resources/code.py"}], "after_files": [{"content": "# Copyright 2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom botocore.exceptions import ClientError\nimport jmespath\n\nfrom c7n.actions import BaseAction\nfrom c7n.filters.vpc import SubnetFilter, SecurityGroupFilter, VpcFilter\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager, DescribeSource, ConfigSource, TypeInfo\nfrom c7n.tags import universal_augment\nfrom c7n.utils import local_session, type_schema\n\nfrom .securityhub import OtherResourcePostFinding\n\n\[email protected]('codecommit')\nclass CodeRepository(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'codecommit'\n enum_spec = ('list_repositories', 'repositories', None)\n batch_detail_spec = (\n 'batch_get_repositories', 'repositoryNames', 'repositoryName',\n 'repositories', None)\n name = id = 'repositoryName'\n arn = \"Arn\"\n date = 'creationDate'\n cfn_type = 'AWS::CodeCommit::Repository'\n universal_taggable = object()\n\n def get_resources(self, ids, cache=True):\n return universal_augment(self, self.augment([{'repositoryName': i} for i in ids]))\n\n\[email protected]_registry.register('delete')\nclass DeleteRepository(BaseAction):\n \"\"\"Action to delete code 
commit\n\n It is recommended to use a filter to avoid unwanted deletion of repos\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: codecommit-delete\n resource: codecommit\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"codecommit:DeleteRepository\",)\n\n def process(self, repositories):\n client = local_session(\n self.manager.session_factory).client('codecommit')\n for r in repositories:\n self.process_repository(client, r)\n\n def process_repository(self, client, repository):\n try:\n client.delete_repository(repositoryName=repository['repositoryName'])\n except ClientError as e:\n self.log.exception(\n \"Exception deleting repo:\\n %s\" % e)\n\n\nclass DescribeBuild(DescribeSource):\n\n def augment(self, resources):\n return universal_augment(\n self.manager,\n super(DescribeBuild, self).augment(resources))\n\n\[email protected]('codebuild')\nclass CodeBuildProject(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'codebuild'\n enum_spec = ('list_projects', 'projects', None)\n batch_detail_spec = (\n 'batch_get_projects', 'names', None, 'projects', None)\n name = id = 'name'\n arn = 'arn'\n date = 'created'\n dimension = 'ProjectName'\n cfn_type = config_type = \"AWS::CodeBuild::Project\"\n arn_type = 'project'\n universal_taggable = object()\n\n source_mapping = {\n 'describe': DescribeBuild,\n 'config': ConfigSource\n }\n\n\[email protected]_registry.register('subnet')\nclass BuildSubnetFilter(SubnetFilter):\n\n RelatedIdsExpression = \"vpcConfig.subnets[]\"\n\n\[email protected]_registry.register('security-group')\nclass BuildSecurityGroupFilter(SecurityGroupFilter):\n\n RelatedIdsExpression = \"vpcConfig.securityGroupIds[]\"\n\n\[email protected]_registry.register('vpc')\nclass BuildVpcFilter(VpcFilter):\n\n RelatedIdsExpression = \"vpcConfig.vpcId\"\n\n\[email protected]_registry.register('post-finding')\nclass BuildPostFinding(OtherResourcePostFinding):\n\n resource_type = 'AwsCodeBuildProject'\n\n def format_resource(self, r):\n envelope, payload = self.format_envelope(r)\n payload.update(self.filter_empty({\n 'Name': r['name'],\n 'EncryptionKey': r['encryptionKey'],\n 'Environment': self.filter_empty({\n 'Type': r['environment']['type'],\n 'Certificate': r['environment'].get('certificate'),\n 'RegistryCredential': self.filter_empty({\n 'Credential': jmespath.search(\n 'environment.registryCredential.credential', r),\n 'CredentialProvider': jmespath.search(\n 'environment.registryCredential.credentialProvider', r)\n }),\n 'ImagePullCredentialsType': r['environment'].get(\n 'imagePullCredentialsType')\n }),\n 'ServiceRole': r['serviceRole'],\n 'VpcConfig': self.filter_empty({\n 'VpcId': jmespath.search('vpcConfig.vpcId', r),\n 'Subnets': jmespath.search('vpcConfig.subnets', r),\n 'SecurityGroupIds': jmespath.search('vpcConfig.securityGroupIds', r)\n }),\n 'Source': self.filter_empty({\n 'Type': jmespath.search('source.type', r),\n 'Location': jmespath.search('source.location', r),\n 'GitCloneDepth': jmespath.search('source.gitCloneDepth', r)\n }),\n }))\n return envelope\n\n\[email protected]_registry.register('delete')\nclass DeleteProject(BaseAction):\n \"\"\"Action to delete code build\n\n It is recommended to use a filter to avoid unwanted deletion of builds\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: codebuild-delete\n resource: codebuild\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"codebuild:DeleteProject\",)\n\n def process(self, projects):\n client = local_session(self.manager.session_factory).client('codebuild')\n for p in projects:\n self.process_project(client, p)\n\n def process_project(self, client, project):\n\n try:\n client.delete_project(name=project['name'])\n except ClientError as e:\n self.log.exception(\n \"Exception deleting project:\\n %s\" % e)\n\n\nclass DescribePipeline(DescribeSource):\n\n def augment(self, resources):\n resources = super().augment(resources)\n return universal_augment(self.manager, resources)\n\n\[email protected]('codepipeline')\nclass CodeDeployPipeline(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'codepipeline'\n enum_spec = ('list_pipelines', 'pipelines', None)\n detail_spec = ('get_pipeline', 'name', 'name', 'pipeline')\n name = id = 'name'\n date = 'created'\n # Note this is purposeful, codepipeline don't have a separate type specifier.\n arn_type = \"\"\n cfn_type = config_type = \"AWS::CodePipeline::Pipeline\"\n universal_taggable = object()\n\n source_mapping = {\n 'describe': DescribePipeline,\n 'config': ConfigSource\n }\n\n\[email protected]_registry.register('delete')\nclass DeletePipeline(BaseAction):\n\n schema = type_schema('delete')\n permissions = ('codepipeline:DeletePipeline',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('codepipeline')\n for r in resources:\n try:\n self.manager.retry(client.delete_pipeline, name=r['name'])\n except client.exceptions.PipelineNotFoundException:\n continue\n", "path": "c7n/resources/code.py"}]} | 2,818 | 209 |
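For the cloud-custodian record above, the entire fix is an attribute rename: the universal tag filters and actions are only attached to resource types that expose `universal_taggable`, so the misspelled `universal_tagging` flag is silently ignored. A rough sketch of that registration pattern, where `register_universal_tags` is a hypothetical stand-in for c7n's real hook, used only to show the getattr-style check:

```python
def register_universal_tags(resource_class):
    # Hypothetical illustration: the flag is looked up by name, so a
    # misspelled attribute simply reads as "absent" and no tag/mark-for-op
    # actions or marked-for-op filters get registered for the resource.
    if getattr(resource_class.resource_type, 'universal_taggable', None) is None:
        return
    # ... register tag, mark-for-op actions and marked-for-op filters ...
```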
gh_patches_debug_6237 | rasdani/github-patches | git_diff | akvo__akvo-rsr-3988 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add result type to /project/:id/results/
We want to display whether the result is an Outcome, Impact, etc...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/project_overview.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Akvo RSR is covered by the GNU Affero General Public License.
3
4 See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6 """
7
8 from akvo.rest.models import TastyTokenAuthentication
9 from akvo.rsr.models import Project, Result, IndicatorPeriod, IndicatorPeriodData
10 from akvo.rsr.models.result.utils import QUANTITATIVE
11 from decimal import Decimal, InvalidOperation
12 from django.http import Http404
13 from django.shortcuts import get_object_or_404
14 from rest_framework.authentication import SessionAuthentication
15 from rest_framework.decorators import api_view, authentication_classes
16 from rest_framework.response import Response
17
18
19 @api_view(['GET'])
20 @authentication_classes([SessionAuthentication, TastyTokenAuthentication])
21 def project_results(request, pk):
22 queryset = Project.objects.prefetch_related('results')
23 project = get_object_or_404(queryset, pk=pk)
24 if not request.user.has_perm('rsr.view_project', project):
25 raise Http404
26 data = {
27 'id': project.id,
28 'title': project.title,
29 'results': [
30 {'id': r.id, 'title': r.title, 'indicator_count': r.indicators.count()}
31 for r in project.results.all()
32 ],
33 }
34 return Response(data)
35
36
37 @api_view(['GET'])
38 @authentication_classes([SessionAuthentication, TastyTokenAuthentication])
39 def project_result_overview(request, project_pk, result_pk):
40 queryset = Result.objects.prefetch_related(
41 'indicators', 'indicators__periods').select_related('project')
42 result = get_object_or_404(queryset, pk=result_pk)
43 project = result.project
44 if project.id != int(project_pk) or not request.user.has_perm('rsr.view_project', project):
45 raise Http404
46
47 data = {
48 'id': result.id,
49 'title': result.title,
50 'indicators': [
51 {
52 'id': i.id,
53 'title': i.title,
54 'description': i.description,
55 'period_count': len(i.periods.all()),
56 'type': 'quantitative' if i.type == QUANTITATIVE else 'qualitative',
57 'measure': (
58 'unit' if i.measure == '1' else 'percentage' if i.measure == '2' else None),
59 'periods': _drilldown_indicator_periods_contributions(i)
60 }
61 for i in result.indicators.all()
62 ]
63 }
64 return Response(data)
65
66
67 def _drilldown_indicator_periods_contributions(indicator):
68 periods = _get_indicator_periods_hierarchy_flatlist(indicator)
69 periods_tree = _make_periods_hierarchy_tree(periods)
70
71 return [_transform_period_contributions_node(n) for n in periods_tree]
72
73
74 def _get_indicator_periods_hierarchy_flatlist(indicator):
75 family = {period.id for period in indicator.periods.all()}
76 while True:
77 children = set(
78 IndicatorPeriod.objects.filter(parent_period__in=family).values_list('pk', flat=True))
79 if family.union(children) == family:
80 break
81
82 family = family.union(children)
83
84 periods = IndicatorPeriod.objects.select_related(
85 'indicator__result__project',
86 'indicator__result__project__primary_location__country',
87 'parent_period',
88 ).prefetch_related(
89 'data',
90 'data__user',
91 'data__approved_by',
92 'data__comments',
93 'data__comments__user',
94 'data__disaggregations',
95 'data__disaggregations__dimension_value',
96 'data__disaggregations__dimension_value__name',
97 'disaggregation_targets',
98 'disaggregation_targets__dimension_value',
99 'disaggregation_targets__dimension_value__name'
100 ).filter(pk__in=family)
101
102 return periods
103
104
105 def _make_periods_hierarchy_tree(qs):
106 tree = []
107 lookup = {}
108 ids = [p.id for p in qs]
109
110 for period in qs:
111 item_id = period.id
112 parent_id = period.parent_period.id if period.parent_period else None
113
114 if item_id not in lookup:
115 lookup[item_id] = {'children': []}
116
117 lookup[item_id]['item'] = period
118 node = lookup[item_id]
119
120 if not parent_id or parent_id not in ids:
121 tree.append(node)
122 else:
123 if parent_id not in lookup:
124 lookup[parent_id] = {'children': []}
125
126 lookup[parent_id]['children'].append(node)
127
128 return tree
129
130
131 def _transform_period_contributions_node(node):
132 period = node['item']
133 contributors, countries, aggregated_value, disaggregations = _transform_contributions_hierarchy(node['children'])
134 updates = _transform_updates(period)
135
136 result = {
137 'period_id': period.id,
138 'period_start': period.period_start,
139 'period_end': period.period_end,
140 'actual_comment': period.actual_comment.split(' | ') if period.actual_comment else None,
141 'actual_value': _force_decimal(period.actual_value),
142 'aggregated_value': aggregated_value,
143 'target_value': _force_decimal(period.target_value),
144 'countries': countries,
145 'updates': updates,
146 'contributors': contributors,
147 'disaggregation_contributions': list(disaggregations.values()),
148 'disaggregation_targets': _transform_disaggregation_targets(period),
149 }
150
151 return result
152
153
154 def _transform_contributions_hierarchy(tree):
155 contributors = []
156 contributor_countries = []
157 aggregated_value = 0
158 disaggregations = {}
159 for node in tree:
160 contributor, countries = _transform_contributor_node(node)
161 if contributor:
162 contributors.append(contributor)
163 contributor_countries = _merge_unique(contributor_countries, countries)
164 aggregated_value += contributor['actual_value']
165 disaggregation_contributions = _extract_disaggregation_contributions(contributor)
166 for key in disaggregation_contributions:
167 if key not in disaggregations:
168 disaggregations[key] = disaggregation_contributions[key].copy()
169 else:
170 disaggregations[key]['value'] += disaggregation_contributions[key]['value']
171
172 return contributors, contributor_countries, aggregated_value, disaggregations
173
174
175 def _extract_disaggregation_contributions(contributor):
176 disaggregations = {}
177 for update in contributor['updates']:
178 if update['status']['code'] == 'A':
179 for d in update['disaggregations']:
180 key = (d['category'], d['type'])
181 if key not in disaggregations:
182 disaggregations[key] = d.copy()
183 else:
184 disaggregations[key]['value'] += d['value']
185
186 return disaggregations
187
188
189 def _transform_contributor_node(node):
190 contributor = _transform_contributor(node['item'])
191 contributor_countries = []
192 if contributor:
193 if contributor['country']:
194 contributor_countries.append(contributor['country'])
195 contributors, countries, aggregated_value, disaggregations = _transform_contributions_hierarchy(node['children'])
196 contributors_count = len(contributors)
197 if contributors_count:
198 contributor['aggregated_value'] = aggregated_value
199 contributor['contributors'] = contributors
200 contributor['disaggregation_contributions'] = list(disaggregations.values())
201 contributor_countries = _merge_unique(contributor_countries, countries)
202
203 return contributor, contributor_countries
204
205
206 def _transform_contributor(period):
207 value = _force_decimal(period.actual_value)
208
209 if value < 1 and period.data.count() < 1:
210 return None
211
212 project = period.indicator.result.project
213 country = project.primary_location.country if project.primary_location else None
214 updates = _transform_updates(period)
215
216 return {
217 'project_id': project.id,
218 'project_title': project.title,
219 'period_id': period.id,
220 'country': {'iso_code': country.iso_code} if country else None,
221 'actual_comment': period.actual_comment.split(' | ') if period.actual_comment else None,
222 'actual_value': value,
223 'aggregated_value': None,
224 'updates': updates,
225 'contributors': [],
226 'disaggregation_contributions': [],
227 'disaggregation_targets': _transform_disaggregation_targets(period),
228 }
229
230
231 def _transform_updates(period):
232 return [
233 {
234 'update_id': u.id,
235 'status': {'code': u.status, 'name': dict(IndicatorPeriodData.STATUSES)[u.status]},
236 'user': {
237 'user_id': u.user.id,
238 'email': u.user.email,
239 'name': u.user.get_full_name(),
240 } if u.user else None,
241 'approved_by': {
242 'user_id': u.approved_by.id,
243 'email': u.approved_by.email,
244 'name': u.user.get_full_name(),
245 } if u.approved_by else None,
246 'value': u.value,
247 'numerator': u.numerator,
248 'denominator': u.denominator,
249 'text': u.text,
250 'comments': [
251 {
252 'comment_id': c.id,
253 'user': {
254 'user_id': c.user.id,
255 'email': c.user.email,
256 'name': u.user.get_full_name(),
257 },
258 'comment': c.comment,
259 'created_at': c.created_at,
260 }
261 for c
262 in u.comments.all()
263 ],
264 'disaggregations': [
265 {
266 'category': d.dimension_value.name.name,
267 'type': d.dimension_value.value,
268 'value': d.value,
269 'numerator': d.numerator,
270 'denominator': d.denominator,
271 }
272 for d
273 in u.disaggregations.all()
274 ],
275 'created_at': u.created_at,
276 'last_modified_at': u.last_modified_at,
277
278 }
279 for u
280 in period.data.all()
281 ]
282
283
284 def _transform_disaggregation_targets(period):
285 return [
286 {
287 'category': t.dimension_value.name.name,
288 'type': t.dimension_value.value,
289 'value': t.value,
290 }
291 for t
292 in period.disaggregation_targets.all()
293 ]
294
295
296 def _force_decimal(value):
297 try:
298 return Decimal(value)
299 except (InvalidOperation, TypeError):
300 return Decimal(0)
301
302
303 def _merge_unique(l1, l2):
304 out = list(l1)
305 for i in l2:
306 if i not in out:
307 out.append(i)
308
309 return out
310
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/project_overview.py b/akvo/rest/views/project_overview.py
--- a/akvo/rest/views/project_overview.py
+++ b/akvo/rest/views/project_overview.py
@@ -27,7 +27,12 @@
'id': project.id,
'title': project.title,
'results': [
- {'id': r.id, 'title': r.title, 'indicator_count': r.indicators.count()}
+ {
+ 'id': r.id,
+ 'title': r.title,
+ 'indicator_count': r.indicators.count(),
+ 'type': r.iati_type().name if r.type else None
+ }
for r in project.results.all()
],
}
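
A sketch of the payload shape this hunk produces, for quick reference; only the keys come from the diff above, while the concrete values and the 'Outcome' label are illustrative assumptions.

```python
# Hypothetical /project/<id>/results/ response after the patch (values invented for illustration):
sample_response = {
    'id': 42,
    'title': 'Example project',
    'results': [
        {
            'id': 1,
            'title': 'Improved access to clean water',
            'indicator_count': 3,
            'type': 'Outcome',  # r.iati_type().name when r.type is set
        },
        {
            'id': 2,
            'title': 'Result without a type',
            'indicator_count': 1,
            'type': None,       # the new field falls back to None when r.type is empty
        },
    ],
}
```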
| {"golden_diff": "diff --git a/akvo/rest/views/project_overview.py b/akvo/rest/views/project_overview.py\n--- a/akvo/rest/views/project_overview.py\n+++ b/akvo/rest/views/project_overview.py\n@@ -27,7 +27,12 @@\n 'id': project.id,\n 'title': project.title,\n 'results': [\n- {'id': r.id, 'title': r.title, 'indicator_count': r.indicators.count()}\n+ {\n+ 'id': r.id,\n+ 'title': r.title,\n+ 'indicator_count': r.indicators.count(),\n+ 'type': r.iati_type().name if r.type else None\n+ }\n for r in project.results.all()\n ],\n }\n", "issue": "Add result type to /project/:id/results/\nWe want to display whether the result is an Outcome, Impact, etc...\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rest.models import TastyTokenAuthentication\nfrom akvo.rsr.models import Project, Result, IndicatorPeriod, IndicatorPeriodData\nfrom akvo.rsr.models.result.utils import QUANTITATIVE\nfrom decimal import Decimal, InvalidOperation\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.decorators import api_view, authentication_classes\nfrom rest_framework.response import Response\n\n\n@api_view(['GET'])\n@authentication_classes([SessionAuthentication, TastyTokenAuthentication])\ndef project_results(request, pk):\n queryset = Project.objects.prefetch_related('results')\n project = get_object_or_404(queryset, pk=pk)\n if not request.user.has_perm('rsr.view_project', project):\n raise Http404\n data = {\n 'id': project.id,\n 'title': project.title,\n 'results': [\n {'id': r.id, 'title': r.title, 'indicator_count': r.indicators.count()}\n for r in project.results.all()\n ],\n }\n return Response(data)\n\n\n@api_view(['GET'])\n@authentication_classes([SessionAuthentication, TastyTokenAuthentication])\ndef project_result_overview(request, project_pk, result_pk):\n queryset = Result.objects.prefetch_related(\n 'indicators', 'indicators__periods').select_related('project')\n result = get_object_or_404(queryset, pk=result_pk)\n project = result.project\n if project.id != int(project_pk) or not request.user.has_perm('rsr.view_project', project):\n raise Http404\n\n data = {\n 'id': result.id,\n 'title': result.title,\n 'indicators': [\n {\n 'id': i.id,\n 'title': i.title,\n 'description': i.description,\n 'period_count': len(i.periods.all()),\n 'type': 'quantitative' if i.type == QUANTITATIVE else 'qualitative',\n 'measure': (\n 'unit' if i.measure == '1' else 'percentage' if i.measure == '2' else None),\n 'periods': _drilldown_indicator_periods_contributions(i)\n }\n for i in result.indicators.all()\n ]\n }\n return Response(data)\n\n\ndef _drilldown_indicator_periods_contributions(indicator):\n periods = _get_indicator_periods_hierarchy_flatlist(indicator)\n periods_tree = _make_periods_hierarchy_tree(periods)\n\n return [_transform_period_contributions_node(n) for n in periods_tree]\n\n\ndef _get_indicator_periods_hierarchy_flatlist(indicator):\n family = {period.id for period in indicator.periods.all()}\n while True:\n children = set(\n IndicatorPeriod.objects.filter(parent_period__in=family).values_list('pk', flat=True))\n if family.union(children) == family:\n break\n\n family = family.union(children)\n\n 
periods = IndicatorPeriod.objects.select_related(\n 'indicator__result__project',\n 'indicator__result__project__primary_location__country',\n 'parent_period',\n ).prefetch_related(\n 'data',\n 'data__user',\n 'data__approved_by',\n 'data__comments',\n 'data__comments__user',\n 'data__disaggregations',\n 'data__disaggregations__dimension_value',\n 'data__disaggregations__dimension_value__name',\n 'disaggregation_targets',\n 'disaggregation_targets__dimension_value',\n 'disaggregation_targets__dimension_value__name'\n ).filter(pk__in=family)\n\n return periods\n\n\ndef _make_periods_hierarchy_tree(qs):\n tree = []\n lookup = {}\n ids = [p.id for p in qs]\n\n for period in qs:\n item_id = period.id\n parent_id = period.parent_period.id if period.parent_period else None\n\n if item_id not in lookup:\n lookup[item_id] = {'children': []}\n\n lookup[item_id]['item'] = period\n node = lookup[item_id]\n\n if not parent_id or parent_id not in ids:\n tree.append(node)\n else:\n if parent_id not in lookup:\n lookup[parent_id] = {'children': []}\n\n lookup[parent_id]['children'].append(node)\n\n return tree\n\n\ndef _transform_period_contributions_node(node):\n period = node['item']\n contributors, countries, aggregated_value, disaggregations = _transform_contributions_hierarchy(node['children'])\n updates = _transform_updates(period)\n\n result = {\n 'period_id': period.id,\n 'period_start': period.period_start,\n 'period_end': period.period_end,\n 'actual_comment': period.actual_comment.split(' | ') if period.actual_comment else None,\n 'actual_value': _force_decimal(period.actual_value),\n 'aggregated_value': aggregated_value,\n 'target_value': _force_decimal(period.target_value),\n 'countries': countries,\n 'updates': updates,\n 'contributors': contributors,\n 'disaggregation_contributions': list(disaggregations.values()),\n 'disaggregation_targets': _transform_disaggregation_targets(period),\n }\n\n return result\n\n\ndef _transform_contributions_hierarchy(tree):\n contributors = []\n contributor_countries = []\n aggregated_value = 0\n disaggregations = {}\n for node in tree:\n contributor, countries = _transform_contributor_node(node)\n if contributor:\n contributors.append(contributor)\n contributor_countries = _merge_unique(contributor_countries, countries)\n aggregated_value += contributor['actual_value']\n disaggregation_contributions = _extract_disaggregation_contributions(contributor)\n for key in disaggregation_contributions:\n if key not in disaggregations:\n disaggregations[key] = disaggregation_contributions[key].copy()\n else:\n disaggregations[key]['value'] += disaggregation_contributions[key]['value']\n\n return contributors, contributor_countries, aggregated_value, disaggregations\n\n\ndef _extract_disaggregation_contributions(contributor):\n disaggregations = {}\n for update in contributor['updates']:\n if update['status']['code'] == 'A':\n for d in update['disaggregations']:\n key = (d['category'], d['type'])\n if key not in disaggregations:\n disaggregations[key] = d.copy()\n else:\n disaggregations[key]['value'] += d['value']\n\n return disaggregations\n\n\ndef _transform_contributor_node(node):\n contributor = _transform_contributor(node['item'])\n contributor_countries = []\n if contributor:\n if contributor['country']:\n contributor_countries.append(contributor['country'])\n contributors, countries, aggregated_value, disaggregations = _transform_contributions_hierarchy(node['children'])\n contributors_count = len(contributors)\n if contributors_count:\n 
contributor['aggregated_value'] = aggregated_value\n contributor['contributors'] = contributors\n contributor['disaggregation_contributions'] = list(disaggregations.values())\n contributor_countries = _merge_unique(contributor_countries, countries)\n\n return contributor, contributor_countries\n\n\ndef _transform_contributor(period):\n value = _force_decimal(period.actual_value)\n\n if value < 1 and period.data.count() < 1:\n return None\n\n project = period.indicator.result.project\n country = project.primary_location.country if project.primary_location else None\n updates = _transform_updates(period)\n\n return {\n 'project_id': project.id,\n 'project_title': project.title,\n 'period_id': period.id,\n 'country': {'iso_code': country.iso_code} if country else None,\n 'actual_comment': period.actual_comment.split(' | ') if period.actual_comment else None,\n 'actual_value': value,\n 'aggregated_value': None,\n 'updates': updates,\n 'contributors': [],\n 'disaggregation_contributions': [],\n 'disaggregation_targets': _transform_disaggregation_targets(period),\n }\n\n\ndef _transform_updates(period):\n return [\n {\n 'update_id': u.id,\n 'status': {'code': u.status, 'name': dict(IndicatorPeriodData.STATUSES)[u.status]},\n 'user': {\n 'user_id': u.user.id,\n 'email': u.user.email,\n 'name': u.user.get_full_name(),\n } if u.user else None,\n 'approved_by': {\n 'user_id': u.approved_by.id,\n 'email': u.approved_by.email,\n 'name': u.user.get_full_name(),\n } if u.approved_by else None,\n 'value': u.value,\n 'numerator': u.numerator,\n 'denominator': u.denominator,\n 'text': u.text,\n 'comments': [\n {\n 'comment_id': c.id,\n 'user': {\n 'user_id': c.user.id,\n 'email': c.user.email,\n 'name': u.user.get_full_name(),\n },\n 'comment': c.comment,\n 'created_at': c.created_at,\n }\n for c\n in u.comments.all()\n ],\n 'disaggregations': [\n {\n 'category': d.dimension_value.name.name,\n 'type': d.dimension_value.value,\n 'value': d.value,\n 'numerator': d.numerator,\n 'denominator': d.denominator,\n }\n for d\n in u.disaggregations.all()\n ],\n 'created_at': u.created_at,\n 'last_modified_at': u.last_modified_at,\n\n }\n for u\n in period.data.all()\n ]\n\n\ndef _transform_disaggregation_targets(period):\n return [\n {\n 'category': t.dimension_value.name.name,\n 'type': t.dimension_value.value,\n 'value': t.value,\n }\n for t\n in period.disaggregation_targets.all()\n ]\n\n\ndef _force_decimal(value):\n try:\n return Decimal(value)\n except (InvalidOperation, TypeError):\n return Decimal(0)\n\n\ndef _merge_unique(l1, l2):\n out = list(l1)\n for i in l2:\n if i not in out:\n out.append(i)\n\n return out\n", "path": "akvo/rest/views/project_overview.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rest.models import TastyTokenAuthentication\nfrom akvo.rsr.models import Project, Result, IndicatorPeriod, IndicatorPeriodData\nfrom akvo.rsr.models.result.utils import QUANTITATIVE\nfrom decimal import Decimal, InvalidOperation\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.decorators import api_view, authentication_classes\nfrom rest_framework.response import 
Response\n\n\n@api_view(['GET'])\n@authentication_classes([SessionAuthentication, TastyTokenAuthentication])\ndef project_results(request, pk):\n queryset = Project.objects.prefetch_related('results')\n project = get_object_or_404(queryset, pk=pk)\n if not request.user.has_perm('rsr.view_project', project):\n raise Http404\n data = {\n 'id': project.id,\n 'title': project.title,\n 'results': [\n {\n 'id': r.id,\n 'title': r.title,\n 'indicator_count': r.indicators.count(),\n 'type': r.iati_type().name if r.type else None\n }\n for r in project.results.all()\n ],\n }\n return Response(data)\n\n\n@api_view(['GET'])\n@authentication_classes([SessionAuthentication, TastyTokenAuthentication])\ndef project_result_overview(request, project_pk, result_pk):\n queryset = Result.objects.prefetch_related(\n 'indicators', 'indicators__periods').select_related('project')\n result = get_object_or_404(queryset, pk=result_pk)\n project = result.project\n if project.id != int(project_pk) or not request.user.has_perm('rsr.view_project', project):\n raise Http404\n\n data = {\n 'id': result.id,\n 'title': result.title,\n 'indicators': [\n {\n 'id': i.id,\n 'title': i.title,\n 'description': i.description,\n 'period_count': len(i.periods.all()),\n 'type': 'quantitative' if i.type == QUANTITATIVE else 'qualitative',\n 'measure': (\n 'unit' if i.measure == '1' else 'percentage' if i.measure == '2' else None),\n 'periods': _drilldown_indicator_periods_contributions(i)\n }\n for i in result.indicators.all()\n ]\n }\n return Response(data)\n\n\ndef _drilldown_indicator_periods_contributions(indicator):\n periods = _get_indicator_periods_hierarchy_flatlist(indicator)\n periods_tree = _make_periods_hierarchy_tree(periods)\n\n return [_transform_period_contributions_node(n) for n in periods_tree]\n\n\ndef _get_indicator_periods_hierarchy_flatlist(indicator):\n family = {period.id for period in indicator.periods.all()}\n while True:\n children = set(\n IndicatorPeriod.objects.filter(parent_period__in=family).values_list('pk', flat=True))\n if family.union(children) == family:\n break\n\n family = family.union(children)\n\n periods = IndicatorPeriod.objects.select_related(\n 'indicator__result__project',\n 'indicator__result__project__primary_location__country',\n 'parent_period',\n ).prefetch_related(\n 'data',\n 'data__user',\n 'data__approved_by',\n 'data__comments',\n 'data__comments__user',\n 'data__disaggregations',\n 'data__disaggregations__dimension_value',\n 'data__disaggregations__dimension_value__name',\n 'disaggregation_targets',\n 'disaggregation_targets__dimension_value',\n 'disaggregation_targets__dimension_value__name'\n ).filter(pk__in=family)\n\n return periods\n\n\ndef _make_periods_hierarchy_tree(qs):\n tree = []\n lookup = {}\n ids = [p.id for p in qs]\n\n for period in qs:\n item_id = period.id\n parent_id = period.parent_period.id if period.parent_period else None\n\n if item_id not in lookup:\n lookup[item_id] = {'children': []}\n\n lookup[item_id]['item'] = period\n node = lookup[item_id]\n\n if not parent_id or parent_id not in ids:\n tree.append(node)\n else:\n if parent_id not in lookup:\n lookup[parent_id] = {'children': []}\n\n lookup[parent_id]['children'].append(node)\n\n return tree\n\n\ndef _transform_period_contributions_node(node):\n period = node['item']\n contributors, countries, aggregated_value, disaggregations = _transform_contributions_hierarchy(node['children'])\n updates = _transform_updates(period)\n\n result = {\n 'period_id': period.id,\n 'period_start': 
period.period_start,\n 'period_end': period.period_end,\n 'actual_comment': period.actual_comment.split(' | ') if period.actual_comment else None,\n 'actual_value': _force_decimal(period.actual_value),\n 'aggregated_value': aggregated_value,\n 'target_value': _force_decimal(period.target_value),\n 'countries': countries,\n 'updates': updates,\n 'contributors': contributors,\n 'disaggregation_contributions': list(disaggregations.values()),\n 'disaggregation_targets': _transform_disaggregation_targets(period),\n }\n\n return result\n\n\ndef _transform_contributions_hierarchy(tree):\n contributors = []\n contributor_countries = []\n aggregated_value = 0\n disaggregations = {}\n for node in tree:\n contributor, countries = _transform_contributor_node(node)\n if contributor:\n contributors.append(contributor)\n contributor_countries = _merge_unique(contributor_countries, countries)\n aggregated_value += contributor['actual_value']\n disaggregation_contributions = _extract_disaggregation_contributions(contributor)\n for key in disaggregation_contributions:\n if key not in disaggregations:\n disaggregations[key] = disaggregation_contributions[key].copy()\n else:\n disaggregations[key]['value'] += disaggregation_contributions[key]['value']\n\n return contributors, contributor_countries, aggregated_value, disaggregations\n\n\ndef _extract_disaggregation_contributions(contributor):\n disaggregations = {}\n for update in contributor['updates']:\n if update['status']['code'] == 'A':\n for d in update['disaggregations']:\n key = (d['category'], d['type'])\n if key not in disaggregations:\n disaggregations[key] = d.copy()\n else:\n disaggregations[key]['value'] += d['value']\n\n return disaggregations\n\n\ndef _transform_contributor_node(node):\n contributor = _transform_contributor(node['item'])\n contributor_countries = []\n if contributor:\n if contributor['country']:\n contributor_countries.append(contributor['country'])\n contributors, countries, aggregated_value, disaggregations = _transform_contributions_hierarchy(node['children'])\n contributors_count = len(contributors)\n if contributors_count:\n contributor['aggregated_value'] = aggregated_value\n contributor['contributors'] = contributors\n contributor['disaggregation_contributions'] = list(disaggregations.values())\n contributor_countries = _merge_unique(contributor_countries, countries)\n\n return contributor, contributor_countries\n\n\ndef _transform_contributor(period):\n value = _force_decimal(period.actual_value)\n\n if value < 1 and period.data.count() < 1:\n return None\n\n project = period.indicator.result.project\n country = project.primary_location.country if project.primary_location else None\n updates = _transform_updates(period)\n\n return {\n 'project_id': project.id,\n 'project_title': project.title,\n 'period_id': period.id,\n 'country': {'iso_code': country.iso_code} if country else None,\n 'actual_comment': period.actual_comment.split(' | ') if period.actual_comment else None,\n 'actual_value': value,\n 'aggregated_value': None,\n 'updates': updates,\n 'contributors': [],\n 'disaggregation_contributions': [],\n 'disaggregation_targets': _transform_disaggregation_targets(period),\n }\n\n\ndef _transform_updates(period):\n return [\n {\n 'update_id': u.id,\n 'status': {'code': u.status, 'name': dict(IndicatorPeriodData.STATUSES)[u.status]},\n 'user': {\n 'user_id': u.user.id,\n 'email': u.user.email,\n 'name': u.user.get_full_name(),\n } if u.user else None,\n 'approved_by': {\n 'user_id': u.approved_by.id,\n 'email': 
u.approved_by.email,\n 'name': u.user.get_full_name(),\n } if u.approved_by else None,\n 'value': u.value,\n 'numerator': u.numerator,\n 'denominator': u.denominator,\n 'text': u.text,\n 'comments': [\n {\n 'comment_id': c.id,\n 'user': {\n 'user_id': c.user.id,\n 'email': c.user.email,\n 'name': u.user.get_full_name(),\n },\n 'comment': c.comment,\n 'created_at': c.created_at,\n }\n for c\n in u.comments.all()\n ],\n 'disaggregations': [\n {\n 'category': d.dimension_value.name.name,\n 'type': d.dimension_value.value,\n 'value': d.value,\n 'numerator': d.numerator,\n 'denominator': d.denominator,\n }\n for d\n in u.disaggregations.all()\n ],\n 'created_at': u.created_at,\n 'last_modified_at': u.last_modified_at,\n\n }\n for u\n in period.data.all()\n ]\n\n\ndef _transform_disaggregation_targets(period):\n return [\n {\n 'category': t.dimension_value.name.name,\n 'type': t.dimension_value.value,\n 'value': t.value,\n }\n for t\n in period.disaggregation_targets.all()\n ]\n\n\ndef _force_decimal(value):\n try:\n return Decimal(value)\n except (InvalidOperation, TypeError):\n return Decimal(0)\n\n\ndef _merge_unique(l1, l2):\n out = list(l1)\n for i in l2:\n if i not in out:\n out.append(i)\n\n return out\n", "path": "akvo/rest/views/project_overview.py"}]} | 3,376 | 165 |
gh_patches_debug_28793 | rasdani/github-patches | git_diff | PaddlePaddle__models-123 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Modify inference script
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hsigmoid/infer.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 import os
4 import logging
5 import gzip
6
7 import paddle.v2 as paddle
8 from network_conf import ngram_lm
9
10 logger = logging.getLogger("paddle")
11 logger.setLevel(logging.WARNING)
12
13
14 def decode_res(infer_res, dict_size):
15 """
16     Inferring probabilities are organized as a complete binary tree.
17 The actual labels are leaves (indices are counted from class number).
18     This function traverses paths decoded from inferring results.
19 If the probability >0.5 then go to right child, otherwise go to left child.
20
21 param infer_res: inferring result
22 param dict_size: class number
23 return predict_lbls: actual class
24 """
25 predict_lbls = []
26 infer_res = infer_res > 0.5
27 for i, probs in enumerate(infer_res):
28 idx = 0
29 result = 1
30 while idx < len(probs):
31 result <<= 1
32 if probs[idx]:
33 result |= 1
34 if probs[idx]:
35 idx = idx * 2 + 2 # right child
36 else:
37 idx = idx * 2 + 1 # left child
38
39 predict_lbl = result - dict_size
40 predict_lbls.append(predict_lbl)
41 return predict_lbls
42
43
44 def predict(batch_ins, idx_word_dict, dict_size, prediction_layer, parameters):
45 infer_res = paddle.infer(
46 output_layer=prediction_layer, parameters=parameters, input=batch_ins)
47
48 predict_lbls = decode_res(infer_res, dict_size)
49 predict_words = [idx_word_dict[lbl] for lbl in predict_lbls] # map to word
50
51     # Output format: word1 word2 word3 word4 -> predict label
52 for i, ins in enumerate(batch_ins):
53 print(" ".join([idx_word_dict[w]
54 for w in ins]) + " -> " + predict_words[i])
55
56
57 def main(model_path):
58 assert os.path.exists(model_path), "trained model does not exist."
59
60 paddle.init(use_gpu=False, trainer_count=1)
61 word_dict = paddle.dataset.imikolov.build_dict(min_word_freq=2)
62 dict_size = len(word_dict)
63 prediction_layer = ngram_lm(
64 is_train=False, hidden_size=256, embed_size=32, dict_size=dict_size)
65
66 with gzip.open(model_path, "r") as f:
67 parameters = paddle.parameters.Parameters.from_tar(f)
68
69 idx_word_dict = dict((v, k) for k, v in word_dict.items())
70 batch_size = 64
71 batch_ins = []
72 ins_iter = paddle.dataset.imikolov.test(word_dict, 5)
73
74 for ins in ins_iter():
75 batch_ins.append(ins[:-1])
76 if len(batch_ins) == batch_size:
77 predict(batch_ins, idx_word_dict, dict_size, prediction_layer,
78 parameters)
79 batch_ins = []
80
81 if len(batch_ins) > 0:
82 predict(batch_ins, idx_word_dict, dict_size, prediction_layer,
83 parameters)
84
85
86 if __name__ == "__main__":
87 main("models/hsigmoid_batch_00010.tar.gz")
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hsigmoid/infer.py b/hsigmoid/infer.py
--- a/hsigmoid/infer.py
+++ b/hsigmoid/infer.py
@@ -41,9 +41,8 @@
return predict_lbls
-def predict(batch_ins, idx_word_dict, dict_size, prediction_layer, parameters):
- infer_res = paddle.infer(
- output_layer=prediction_layer, parameters=parameters, input=batch_ins)
+def predict(batch_ins, idx_word_dict, dict_size, inferer):
+ infer_res = inferer.infer(input=batch_ins)
predict_lbls = decode_res(infer_res, dict_size)
predict_words = [idx_word_dict[lbl] for lbl in predict_lbls] # map to word
@@ -66,6 +65,8 @@
with gzip.open(model_path, "r") as f:
parameters = paddle.parameters.Parameters.from_tar(f)
+ inferer = paddle.inference.Inference(
+ output_layer=prediction_layer, parameters=parameters)
idx_word_dict = dict((v, k) for k, v in word_dict.items())
batch_size = 64
batch_ins = []
@@ -74,13 +75,11 @@
for ins in ins_iter():
batch_ins.append(ins[:-1])
if len(batch_ins) == batch_size:
- predict(batch_ins, idx_word_dict, dict_size, prediction_layer,
- parameters)
+ predict(batch_ins, idx_word_dict, dict_size, inferer)
batch_ins = []
if len(batch_ins) > 0:
- predict(batch_ins, idx_word_dict, dict_size, prediction_layer,
- parameters)
+ predict(batch_ins, idx_word_dict, dict_size, inferer)
if __name__ == "__main__":
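
Condensed to its essence, the patch swaps a per-batch paddle.infer() call for a single cached inference object, presumably so the inference machinery is built once rather than rebuilt for every batch. A minimal sketch of that pattern, assembled only from calls already present in infer.py and the diff, is below; `batches` stands in for the script's batching loop, and a trained model archive is assumed to exist.

```python
import gzip

import paddle.v2 as paddle
from network_conf import ngram_lm

# Build the prediction network and load parameters once.
paddle.init(use_gpu=False, trainer_count=1)
word_dict = paddle.dataset.imikolov.build_dict(min_word_freq=2)
prediction_layer = ngram_lm(
    is_train=False, hidden_size=256, embed_size=32, dict_size=len(word_dict))
with gzip.open("models/hsigmoid_batch_00010.tar.gz", "r") as f:
    parameters = paddle.parameters.Parameters.from_tar(f)
inferer = paddle.inference.Inference(
    output_layer=prediction_layer, parameters=parameters)

# Reuse the same inferer for every batch instead of calling paddle.infer() each time.
for batch_ins in batches:  # `batches`: iterable of input batches (assumed)
    infer_res = inferer.infer(input=batch_ins)
```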
| {"golden_diff": "diff --git a/hsigmoid/infer.py b/hsigmoid/infer.py\n--- a/hsigmoid/infer.py\n+++ b/hsigmoid/infer.py\n@@ -41,9 +41,8 @@\n return predict_lbls\n \n \n-def predict(batch_ins, idx_word_dict, dict_size, prediction_layer, parameters):\n- infer_res = paddle.infer(\n- output_layer=prediction_layer, parameters=parameters, input=batch_ins)\n+def predict(batch_ins, idx_word_dict, dict_size, inferer):\n+ infer_res = inferer.infer(input=batch_ins)\n \n predict_lbls = decode_res(infer_res, dict_size)\n predict_words = [idx_word_dict[lbl] for lbl in predict_lbls] # map to word\n@@ -66,6 +65,8 @@\n with gzip.open(model_path, \"r\") as f:\n parameters = paddle.parameters.Parameters.from_tar(f)\n \n+ inferer = paddle.inference.Inference(\n+ output_layer=prediction_layer, parameters=parameters)\n idx_word_dict = dict((v, k) for k, v in word_dict.items())\n batch_size = 64\n batch_ins = []\n@@ -74,13 +75,11 @@\n for ins in ins_iter():\n batch_ins.append(ins[:-1])\n if len(batch_ins) == batch_size:\n- predict(batch_ins, idx_word_dict, dict_size, prediction_layer,\n- parameters)\n+ predict(batch_ins, idx_word_dict, dict_size, inferer)\n batch_ins = []\n \n if len(batch_ins) > 0:\n- predict(batch_ins, idx_word_dict, dict_size, prediction_layer,\n- parameters)\n+ predict(batch_ins, idx_word_dict, dict_size, inferer)\n \n \n if __name__ == \"__main__\":\n", "issue": "Modify inference script\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport logging\nimport gzip\n\nimport paddle.v2 as paddle\nfrom network_conf import ngram_lm\n\nlogger = logging.getLogger(\"paddle\")\nlogger.setLevel(logging.WARNING)\n\n\ndef decode_res(infer_res, dict_size):\n \"\"\"\n Inferring probabilities are orginized as a complete binary tree.\n The actual labels are leaves (indices are counted from class number).\n This function travels paths decoded from inferring results.\n If the probability >0.5 then go to right child, otherwise go to left child.\n\n param infer_res: inferring result\n param dict_size: class number\n return predict_lbls: actual class\n \"\"\"\n predict_lbls = []\n infer_res = infer_res > 0.5\n for i, probs in enumerate(infer_res):\n idx = 0\n result = 1\n while idx < len(probs):\n result <<= 1\n if probs[idx]:\n result |= 1\n if probs[idx]:\n idx = idx * 2 + 2 # right child\n else:\n idx = idx * 2 + 1 # left child\n\n predict_lbl = result - dict_size\n predict_lbls.append(predict_lbl)\n return predict_lbls\n\n\ndef predict(batch_ins, idx_word_dict, dict_size, prediction_layer, parameters):\n infer_res = paddle.infer(\n output_layer=prediction_layer, parameters=parameters, input=batch_ins)\n\n predict_lbls = decode_res(infer_res, dict_size)\n predict_words = [idx_word_dict[lbl] for lbl in predict_lbls] # map to word\n\n # Ouput format: word1 word2 word3 word4 -> predict label\n for i, ins in enumerate(batch_ins):\n print(\" \".join([idx_word_dict[w]\n for w in ins]) + \" -> \" + predict_words[i])\n\n\ndef main(model_path):\n assert os.path.exists(model_path), \"trained model does not exist.\"\n\n paddle.init(use_gpu=False, trainer_count=1)\n word_dict = paddle.dataset.imikolov.build_dict(min_word_freq=2)\n dict_size = len(word_dict)\n prediction_layer = ngram_lm(\n is_train=False, hidden_size=256, embed_size=32, dict_size=dict_size)\n\n with gzip.open(model_path, \"r\") as f:\n parameters = paddle.parameters.Parameters.from_tar(f)\n\n idx_word_dict = dict((v, k) for k, v in word_dict.items())\n batch_size = 64\n batch_ins = []\n ins_iter = 
paddle.dataset.imikolov.test(word_dict, 5)\n\n for ins in ins_iter():\n batch_ins.append(ins[:-1])\n if len(batch_ins) == batch_size:\n predict(batch_ins, idx_word_dict, dict_size, prediction_layer,\n parameters)\n batch_ins = []\n\n if len(batch_ins) > 0:\n predict(batch_ins, idx_word_dict, dict_size, prediction_layer,\n parameters)\n\n\nif __name__ == \"__main__\":\n main(\"models/hsigmoid_batch_00010.tar.gz\")\n", "path": "hsigmoid/infer.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport logging\nimport gzip\n\nimport paddle.v2 as paddle\nfrom network_conf import ngram_lm\n\nlogger = logging.getLogger(\"paddle\")\nlogger.setLevel(logging.WARNING)\n\n\ndef decode_res(infer_res, dict_size):\n \"\"\"\n Inferring probabilities are orginized as a complete binary tree.\n The actual labels are leaves (indices are counted from class number).\n This function travels paths decoded from inferring results.\n If the probability >0.5 then go to right child, otherwise go to left child.\n\n param infer_res: inferring result\n param dict_size: class number\n return predict_lbls: actual class\n \"\"\"\n predict_lbls = []\n infer_res = infer_res > 0.5\n for i, probs in enumerate(infer_res):\n idx = 0\n result = 1\n while idx < len(probs):\n result <<= 1\n if probs[idx]:\n result |= 1\n if probs[idx]:\n idx = idx * 2 + 2 # right child\n else:\n idx = idx * 2 + 1 # left child\n\n predict_lbl = result - dict_size\n predict_lbls.append(predict_lbl)\n return predict_lbls\n\n\ndef predict(batch_ins, idx_word_dict, dict_size, inferer):\n infer_res = inferer.infer(input=batch_ins)\n\n predict_lbls = decode_res(infer_res, dict_size)\n predict_words = [idx_word_dict[lbl] for lbl in predict_lbls] # map to word\n\n # Ouput format: word1 word2 word3 word4 -> predict label\n for i, ins in enumerate(batch_ins):\n print(\" \".join([idx_word_dict[w]\n for w in ins]) + \" -> \" + predict_words[i])\n\n\ndef main(model_path):\n assert os.path.exists(model_path), \"trained model does not exist.\"\n\n paddle.init(use_gpu=False, trainer_count=1)\n word_dict = paddle.dataset.imikolov.build_dict(min_word_freq=2)\n dict_size = len(word_dict)\n prediction_layer = ngram_lm(\n is_train=False, hidden_size=256, embed_size=32, dict_size=dict_size)\n\n with gzip.open(model_path, \"r\") as f:\n parameters = paddle.parameters.Parameters.from_tar(f)\n\n inferer = paddle.inference.Inference(\n output_layer=prediction_layer, parameters=parameters)\n idx_word_dict = dict((v, k) for k, v in word_dict.items())\n batch_size = 64\n batch_ins = []\n ins_iter = paddle.dataset.imikolov.test(word_dict, 5)\n\n for ins in ins_iter():\n batch_ins.append(ins[:-1])\n if len(batch_ins) == batch_size:\n predict(batch_ins, idx_word_dict, dict_size, inferer)\n batch_ins = []\n\n if len(batch_ins) > 0:\n predict(batch_ins, idx_word_dict, dict_size, inferer)\n\n\nif __name__ == \"__main__\":\n main(\"models/hsigmoid_batch_00010.tar.gz\")\n", "path": "hsigmoid/infer.py"}]} | 1,124 | 395 |
gh_patches_debug_14264 | rasdani/github-patches | git_diff | CTFd__CTFd-1233 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Theme reset from CLI
There needs to be a way to reset the theme and recover from other kinds of unrecoverable errors from the CLI. 
Context: https://community.majorleaguecyber.org/t/ctfd-templatenotfound/51
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manage.py`
Content:
```
1 from flask import Flask
2 from flask_sqlalchemy import SQLAlchemy
3 from flask_script import Manager
4 from flask_migrate import Migrate, MigrateCommand
5 from CTFd import create_app
6 from CTFd.models import *
7
8 app = create_app()
9
10 manager = Manager(app)
11 manager.add_command('db', MigrateCommand)
12
13 if __name__ == '__main__':
14 manager.run()
15
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/manage.py b/manage.py
--- a/manage.py
+++ b/manage.py
@@ -3,6 +3,7 @@
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from CTFd import create_app
+from CTFd.utils import get_config as get_config_util, set_config as set_config_util
from CTFd.models import *
app = create_app()
@@ -10,5 +11,18 @@
manager = Manager(app)
manager.add_command('db', MigrateCommand)
-if __name__ == '__main__':
+
[email protected]
+def get_config(key):
+ with app.app_context():
+ print(get_config_util(key))
+
+
[email protected]
+def set_config(key, value):
+ with app.app_context():
+ print(set_config_util(key, value).value)
+
+
+if __name__ == "__main__":
manager.run()
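
With these commands registered, the recovery path the issue asks for becomes a one-liner from the shell, something like `python manage.py set_config ctf_theme core` (the key name and theme value here are assumptions, not part of the patch). Inside the app context that is roughly equivalent to:

```python
# Rough equivalent of `python manage.py set_config ctf_theme core`;
# "ctf_theme" and "core" are assumed defaults, not taken from the patch.
from CTFd import create_app
from CTFd.utils import set_config

app = create_app()
with app.app_context():
    set_config("ctf_theme", "core")
```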
| {"golden_diff": "diff --git a/manage.py b/manage.py\n--- a/manage.py\n+++ b/manage.py\n@@ -3,6 +3,7 @@\n from flask_script import Manager\n from flask_migrate import Migrate, MigrateCommand\n from CTFd import create_app\n+from CTFd.utils import get_config as get_config_util, set_config as set_config_util\n from CTFd.models import *\n \n app = create_app()\n@@ -10,5 +11,18 @@\n manager = Manager(app)\n manager.add_command('db', MigrateCommand)\n \n-if __name__ == '__main__':\n+\[email protected]\n+def get_config(key):\n+ with app.app_context():\n+ print(get_config_util(key))\n+\n+\[email protected]\n+def set_config(key, value):\n+ with app.app_context():\n+ print(set_config_util(key, value).value)\n+\n+\n+if __name__ == \"__main__\":\n manager.run()\n", "issue": "Theme reset from CLI\nThere needs to be a way to reset the theme and other kinds of unrecoverable errors from the CLI. \r\n\r\nContext: https://community.majorleaguecyber.org/t/ctfd-templatenotfound/51\n", "before_files": [{"content": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_script import Manager\nfrom flask_migrate import Migrate, MigrateCommand\nfrom CTFd import create_app\nfrom CTFd.models import *\n\napp = create_app()\n\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\n\nif __name__ == '__main__':\n manager.run()\n", "path": "manage.py"}], "after_files": [{"content": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_script import Manager\nfrom flask_migrate import Migrate, MigrateCommand\nfrom CTFd import create_app\nfrom CTFd.utils import get_config as get_config_util, set_config as set_config_util\nfrom CTFd.models import *\n\napp = create_app()\n\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\n\n\[email protected]\ndef get_config(key):\n with app.app_context():\n print(get_config_util(key))\n\n\[email protected]\ndef set_config(key, value):\n with app.app_context():\n print(set_config_util(key, value).value)\n\n\nif __name__ == \"__main__\":\n manager.run()\n", "path": "manage.py"}]} | 408 | 203 |
gh_patches_debug_22940 | rasdani/github-patches | git_diff | conan-io__conan-center-index-3266 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] poco/1.10.1: CMake error Could NOT find ODBC with gcc9 and poco:enable_data_odbc=True
<!--
Please don't forget to update the issue title.
Include all applicable information to help us reproduce your problem.
-->
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **poco/1.10.1**
* Operating System+version: **Linux Ubuntu 20.04.1**
* Compiler+version: **GCC 9**
* Conan version: **conan 1.30.2**
* Python version: **Python 3.8.5**
### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)
```
[settings]
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=9
compiler.libcxx=libstdc++
build_type=Release
[options]
[build_requires]
[env]
```
### Steps to reproduce (Include if Applicable)
Build `poco/1.10.1` with gcc9 and `-o poco:enable_data_odbc=True`.
The problem is a missing requirement. I will submit a PR to fix.
### Logs (Include/Attach if Applicable)
<details><summary>Click to expand log</summary>
```
CMake Error at /usr/share/cmake-3.16/Modules/FindPackageHandleStandardArgs.cmake:146 (message):
Could NOT find ODBC (missing: ODBC_LIBRARY ODBC_INCLUDE_DIR)
Call Stack (most recent call first):
/usr/share/cmake-3.16/Modules/FindPackageHandleStandardArgs.cmake:393 (_FPHSA_FAILURE_MESSAGE)
source_subfolder/cmake/FindODBC.cmake:123 (find_package_handle_standard_args)
source_subfolder/CMakeLists.txt:145 (find_package)
```
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/poco/all/conanfile.py`
Content:
```
1 from conans import ConanFile, CMake, tools
2 from conans.errors import ConanException, ConanInvalidConfiguration
3 from collections import namedtuple, OrderedDict
4 import os
5
6
7 class PocoConan(ConanFile):
8 name = "poco"
9 url = "https://github.com/conan-io/conan-center-index"
10 homepage = "https://pocoproject.org"
11 topics = ("conan", "poco", "building", "networking", "server", "mobile", "embedded")
12 exports_sources = "CMakeLists.txt", "patches/**"
13 generators = "cmake", "cmake_find_package"
14 settings = "os", "arch", "compiler", "build_type"
15 license = "BSL-1.0"
16 description = "Modern, powerful open source C++ class libraries for building network- and internet-based " \
17 "applications that run on desktop, server, mobile and embedded systems."
18 options = {
19 "shared": [True, False],
20 "fPIC": [True, False],
21 }
22 default_options = {
23 "shared": False,
24 "fPIC": True,
25 }
26
27 _PocoComponent = namedtuple("_PocoComponent", ("option", "default_option", "dependencies", "is_lib"))
28 _poco_component_tree = {
29 "mod_poco": _PocoComponent("enable_apacheconnector", False, ("PocoUtil", "PocoNet", ), False), # also external apr and apr-util
30 "PocoCppParser": _PocoComponent("enable_cppparser", False, ("PocoFoundation", ), False),
31 # "PocoCppUnit": _PocoComponent("enable_cppunit", False, ("PocoFoundation", ), False)),
32 "PocoCrypto": _PocoComponent("enable_crypto", True, ("PocoFoundation", ), True), # also external openssl
33 "PocoData": _PocoComponent("enable_data", True, ("PocoFoundation", ), True),
34 "PocoDataMySQL": _PocoComponent("enable_data_mysql", False, ("PocoData", ), True),
35 "PocoDataODBC": _PocoComponent("enable_data_odbc", False, ("PocoData", ), True),
36 "PocoDataPostgreSQL": _PocoComponent("enable_data_postgresql", False, ("PocoData", ), True), # also external postgresql
37 "PocoDataSQLite": _PocoComponent("enable_data_sqlite", True, ("PocoData", ), True), # also external sqlite3
38 "PocoEncodings": _PocoComponent("enable_encodings", True, ("PocoFoundation", ), True),
39 # "PocoEncodingsCompiler": _PocoComponent("enable_encodingscompiler", False, ("PocoNet", "PocoUtil", ), False),
40 "PocoFoundation": _PocoComponent(None, "PocoFoundation", (), True),
41 "PocoJSON": _PocoComponent("enable_json", True, ("PocoFoundation", ), True),
42 "PocoJWT": _PocoComponent("enable_jwt", True, ("PocoJSON", "PocoCrypto", ), True),
43 "PocoMongoDB": _PocoComponent("enable_mongodb", True, ("PocoNet", ), True),
44 "PocoNet": _PocoComponent("enable_net", True, ("PocoFoundation", ), True),
45 "PocoNetSSL": _PocoComponent("enable_netssl", True, ("PocoCrypto", "PocoUtil", "PocoNet", ), True), # also external openssl
46 "PocoNetSSLWin": _PocoComponent("enable_netssl_win", True, ("PocoNet", "PocoUtil", ), True),
47 "PocoPDF": _PocoComponent("enable_pdf", False, ("PocoXML", "PocoUtil", ), True),
48 "PocoPageCompiler": _PocoComponent("enable_pagecompiler", False, ("PocoNet", "PocoUtil", ), False),
49 "PocoFile2Page": _PocoComponent("enable_pagecompiler_file2page", False, ("PocoNet", "PocoUtil", "PocoXML", "PocoJSON", ), False),
50 "PocoPocoDoc": _PocoComponent("enable_pocodoc", False, ("PocoUtil", "PocoXML", "PocoCppParser", ), False),
51 "PocoRedis": _PocoComponent("enable_redis", True, ("PocoNet", ), True),
52 "PocoSevenZip": _PocoComponent("enable_sevenzip", False, ("PocoUtil", "PocoXML", ), True),
53 "PocoUtil": _PocoComponent("enable_util", True, ("PocoFoundation", "PocoXML", "PocoJSON", ), True),
54 "PocoXML": _PocoComponent("enable_xml", True, ("PocoFoundation", ), True),
55 "PocoZip": _PocoComponent("enable_zip", True, ("PocoUtil", "PocoXML", ), True),
56 }
57
58 for comp in _poco_component_tree.values():
59 if comp.option:
60 options[comp.option] = [True, False]
61 default_options[comp.option] = comp.default_option
62 del comp
63
64 @property
65 def _poco_ordered_components(self):
66 remaining_components = dict((compname, set(compopts.dependencies)) for compname, compopts in self._poco_component_tree.items())
67 ordered_components = []
68 while remaining_components:
69 components_no_deps = set(compname for compname, compopts in remaining_components.items() if not compopts)
70 if not components_no_deps:
71 raise ConanException("The poco dependency tree is invalid and contains a cycle")
72 for c in components_no_deps:
73 remaining_components.pop(c)
74 ordered_components.extend(components_no_deps)
75 for rname in remaining_components.keys():
76 remaining_components[rname] = remaining_components[rname].difference(components_no_deps)
77 ordered_components.reverse()
78 return ordered_components
79
80 _cmake = None
81
82 @property
83 def _source_subfolder(self):
84 return "source_subfolder"
85
86 @property
87 def _build_subfolder(self):
88 return "build_subfolder"
89
90 def source(self):
91 tools.get(**self.conan_data["sources"][self.version])
92 extracted_folder = "poco-poco-{}-release".format(self.version)
93 os.rename(extracted_folder, self._source_subfolder)
94
95 def config_options(self):
96 if self.settings.os == "Windows":
97 del self.options.fPIC
98 else:
99 del self.options.enable_netssl_win
100 if tools.Version(self.version) < "1.9":
101 del self.options.enable_encodings
102 if tools.Version(self.version) < "1.10":
103 del self.options.enable_data_postgresql
104 del self.options.enable_jwt
105
106 def configure(self):
107 if self.options.enable_apacheconnector:
108 raise ConanInvalidConfiguration("Apache connector not supported: https://github.com/pocoproject/poco/issues/1764")
109 if self.options.enable_data_mysql:
110 raise ConanInvalidConfiguration("MySQL not supported yet, open an issue here please: %s" % self.url)
111 if self.settings.compiler == "Visual Studio":
112 if self.options.shared and "MT" in str(self.settings.compiler.runtime):
113 raise ConanInvalidConfiguration("Cannot build shared poco libraries with MT(d) runtime")
114 if self.options.get_safe("enable_data_postgresql", False):
115 raise ConanInvalidConfiguration("PostgreSQL not supported yet, open an issue here please: %s" % self.url)
116 for compopt in self._poco_component_tree.values():
117 if not compopt.option:
118 continue
119 if self.options.get_safe(compopt.option, False):
120 for compdep in compopt.dependencies:
121 if not self._poco_component_tree[compdep].option:
122 continue
123 if not self.options.get_safe(self._poco_component_tree[compdep].option, False):
124 raise ConanInvalidConfiguration("option {} requires also option {}".format(compopt.option, self._poco_component_tree[compdep].option))
125
126 def requirements(self):
127 self.requires("pcre/8.41")
128 self.requires("zlib/1.2.11")
129 if self.options.enable_xml:
130 self.requires("expat/2.2.10")
131 if self.options.enable_data_sqlite:
132 self.requires("sqlite3/3.33.0")
133 if self.options.enable_apacheconnector:
134 self.requires("apr/1.7.0")
135 self.requires("apr-util/1.6.1")
136 # FIXME: missing apache2 recipe
137 raise ConanInvalidConfiguration("apache2 is not (yet) available on CCI")
138 if self.options.enable_netssl or \
139 self.options.enable_crypto or \
140 self.options.get_safe("enable_jwt", False):
141 self.requires("openssl/1.1.1h")
142
143 def _patch_sources(self):
144 for patch in self.conan_data.get("patches", {}).get(self.version, []):
145 tools.patch(**patch)
146
147 def _configure_cmake(self):
148 if self._cmake:
149 return self._cmake
150 self._cmake = CMake(self)
151 if tools.Version(self.version) < "1.10.1":
152 self._cmake.definitions["POCO_STATIC"] = not self.options.shared
153 for comp in self._poco_component_tree.values():
154 if not comp.option:
155 continue
156 self._cmake.definitions[comp.option.upper()] = self.options.get_safe(comp.option, False)
157 self._cmake.definitions["POCO_UNBUNDLED"] = True
158 self._cmake.definitions["CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP"] = True
159 if self.settings.os == "Windows" and self.settings.compiler == "Visual Studio": # MT or MTd
160 self._cmake.definitions["POCO_MT"] = "ON" if "MT" in str(self.settings.compiler.runtime) else "OFF"
161 self.output.info(self._cmake.definitions)
162 # On Windows, Poco needs a message (MC) compiler.
163 with tools.vcvars(self.settings) if self.settings.compiler == "Visual Studio" else tools.no_op():
164 self._cmake.configure(build_dir=self._build_subfolder)
165 return self._cmake
166
167 def build(self):
168 if self.options.enable_data_sqlite:
169 if self.options["sqlite3"].threadsafe == 0:
170 raise ConanInvalidConfiguration("sqlite3 must be built with threadsafe enabled")
171 self._patch_sources()
172 cmake = self._configure_cmake()
173 cmake.build()
174
175 def package(self):
176 self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
177 cmake = self._configure_cmake()
178 cmake.install()
179 tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
180 tools.rmdir(os.path.join(self.package_folder, "cmake"))
181
182 @property
183 def _ordered_libs(self):
184 libs = []
185 for compname in self._poco_ordered_components:
186 comp_options = self._poco_component_tree[compname]
187 if comp_options.is_lib:
188 if not comp_options.option:
189 libs.append(compname)
190 elif self.options.get_safe(comp_options.option, False):
191 libs.append(compname)
192 return libs
193
194 def package_info(self):
195 suffix = str(self.settings.compiler.runtime).lower() \
196 if self.settings.compiler == "Visual Studio" and not self.options.shared \
197 else ("d" if self.settings.build_type == "Debug" else "")
198
199 self.cpp_info.libs = list("{}{}".format(lib, suffix) for lib in self._ordered_libs)
200
201 if self.settings.os == "Linux":
202 self.cpp_info.system_libs.extend(["pthread", "dl", "rt"])
203
204 if self.settings.compiler == "Visual Studio":
205 self.cpp_info.defines.append("POCO_NO_AUTOMATIC_LIBS")
206 if not self.options.shared:
207 self.cpp_info.defines.append("POCO_STATIC=ON")
208 if self.settings.os == "Windows":
209 self.cpp_info.system_libs.extend(["ws2_32", "iphlpapi", "crypt32"])
210 self.cpp_info.names["cmake_find_package"] = "Poco"
211 self.cpp_info.names["cmake_find_package_multi"] = "Poco"
212
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/poco/all/conanfile.py b/recipes/poco/all/conanfile.py
--- a/recipes/poco/all/conanfile.py
+++ b/recipes/poco/all/conanfile.py
@@ -139,6 +139,8 @@
self.options.enable_crypto or \
self.options.get_safe("enable_jwt", False):
self.requires("openssl/1.1.1h")
+ if self.options.enable_data_odbc and self.settings.os != "Windows":
+ self.requires("odbc/2.3.7")
def _patch_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
@@ -207,5 +209,7 @@
self.cpp_info.defines.append("POCO_STATIC=ON")
if self.settings.os == "Windows":
self.cpp_info.system_libs.extend(["ws2_32", "iphlpapi", "crypt32"])
+ if self.options.enable_data_odbc:
+ self.cpp_info.system_libs.extend(["odbc32", "odbccp32"])
self.cpp_info.names["cmake_find_package"] = "Poco"
self.cpp_info.names["cmake_find_package_multi"] = "Poco"
| {"golden_diff": "diff --git a/recipes/poco/all/conanfile.py b/recipes/poco/all/conanfile.py\n--- a/recipes/poco/all/conanfile.py\n+++ b/recipes/poco/all/conanfile.py\n@@ -139,6 +139,8 @@\n self.options.enable_crypto or \\\n self.options.get_safe(\"enable_jwt\", False):\n self.requires(\"openssl/1.1.1h\")\n+ if self.options.enable_data_odbc and self.settings.os != \"Windows\":\n+ self.requires(\"odbc/2.3.7\")\n \n def _patch_sources(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n@@ -207,5 +209,7 @@\n self.cpp_info.defines.append(\"POCO_STATIC=ON\")\n if self.settings.os == \"Windows\":\n self.cpp_info.system_libs.extend([\"ws2_32\", \"iphlpapi\", \"crypt32\"])\n+ if self.options.enable_data_odbc:\n+ self.cpp_info.system_libs.extend([\"odbc32\", \"odbccp32\"])\n self.cpp_info.names[\"cmake_find_package\"] = \"Poco\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Poco\"\n", "issue": "[package] poco/1.10.1: CMake error Could NOT find ODBC with gcc9 and poco:enable_data_odbc=True\n<!-- \r\n Please don't forget to update the issue title.\r\n Include all applicable information to help us reproduce your problem.\r\n-->\r\n\r\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **poco/1.10.1**\r\n * Operating System+version: **Linux Ubuntu 20.04.1**\r\n * Compiler+version: **GCC 9**\r\n * Conan version: **conan 1.30.2**\r\n * Python version: **Python 3.8.5**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\nos=Linux\r\nos_build=Linux\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=gcc\r\ncompiler.version=9\r\ncompiler.libcxx=libstdc++\r\nbuild_type=Release\r\n[options]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n\r\nBuild `poco/1.10.1` with gcc9 and `-o poco:enable_data_odbc=True`. \r\n\r\nThe problem is a missing requirement. 
I will submit a PR to fix.\r\n\r\n### Logs (Include/Attach if Applicable)\r\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nCMake Error at /usr/share/cmake-3.16/Modules/FindPackageHandleStandardArgs.cmake:146 (message):\r\n Could NOT find ODBC (missing: ODBC_LIBRARY ODBC_INCLUDE_DIR)\r\nCall Stack (most recent call first):\r\n /usr/share/cmake-3.16/Modules/FindPackageHandleStandardArgs.cmake:393 (_FPHSA_FAILURE_MESSAGE)\r\n source_subfolder/cmake/FindODBC.cmake:123 (find_package_handle_standard_args)\r\n source_subfolder/CMakeLists.txt:145 (find_package)\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanException, ConanInvalidConfiguration\nfrom collections import namedtuple, OrderedDict\nimport os\n\n\nclass PocoConan(ConanFile):\n name = \"poco\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://pocoproject.org\"\n topics = (\"conan\", \"poco\", \"building\", \"networking\", \"server\", \"mobile\", \"embedded\")\n exports_sources = \"CMakeLists.txt\", \"patches/**\"\n generators = \"cmake\", \"cmake_find_package\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n license = \"BSL-1.0\"\n description = \"Modern, powerful open source C++ class libraries for building network- and internet-based \" \\\n \"applications that run on desktop, server, mobile and embedded systems.\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n _PocoComponent = namedtuple(\"_PocoComponent\", (\"option\", \"default_option\", \"dependencies\", \"is_lib\"))\n _poco_component_tree = {\n \"mod_poco\": _PocoComponent(\"enable_apacheconnector\", False, (\"PocoUtil\", \"PocoNet\", ), False), # also external apr and apr-util\n \"PocoCppParser\": _PocoComponent(\"enable_cppparser\", False, (\"PocoFoundation\", ), False),\n # \"PocoCppUnit\": _PocoComponent(\"enable_cppunit\", False, (\"PocoFoundation\", ), False)),\n \"PocoCrypto\": _PocoComponent(\"enable_crypto\", True, (\"PocoFoundation\", ), True), # also external openssl\n \"PocoData\": _PocoComponent(\"enable_data\", True, (\"PocoFoundation\", ), True),\n \"PocoDataMySQL\": _PocoComponent(\"enable_data_mysql\", False, (\"PocoData\", ), True),\n \"PocoDataODBC\": _PocoComponent(\"enable_data_odbc\", False, (\"PocoData\", ), True),\n \"PocoDataPostgreSQL\": _PocoComponent(\"enable_data_postgresql\", False, (\"PocoData\", ), True), # also external postgresql\n \"PocoDataSQLite\": _PocoComponent(\"enable_data_sqlite\", True, (\"PocoData\", ), True), # also external sqlite3\n \"PocoEncodings\": _PocoComponent(\"enable_encodings\", True, (\"PocoFoundation\", ), True),\n # \"PocoEncodingsCompiler\": _PocoComponent(\"enable_encodingscompiler\", False, (\"PocoNet\", \"PocoUtil\", ), False),\n \"PocoFoundation\": _PocoComponent(None, \"PocoFoundation\", (), True),\n \"PocoJSON\": _PocoComponent(\"enable_json\", True, (\"PocoFoundation\", ), True),\n \"PocoJWT\": _PocoComponent(\"enable_jwt\", True, (\"PocoJSON\", \"PocoCrypto\", ), True),\n \"PocoMongoDB\": _PocoComponent(\"enable_mongodb\", True, (\"PocoNet\", ), True),\n \"PocoNet\": _PocoComponent(\"enable_net\", True, (\"PocoFoundation\", ), True),\n \"PocoNetSSL\": _PocoComponent(\"enable_netssl\", True, (\"PocoCrypto\", \"PocoUtil\", \"PocoNet\", ), True), # also external openssl\n \"PocoNetSSLWin\": _PocoComponent(\"enable_netssl_win\", True, (\"PocoNet\", \"PocoUtil\", ), True),\n 
\"PocoPDF\": _PocoComponent(\"enable_pdf\", False, (\"PocoXML\", \"PocoUtil\", ), True),\n \"PocoPageCompiler\": _PocoComponent(\"enable_pagecompiler\", False, (\"PocoNet\", \"PocoUtil\", ), False),\n \"PocoFile2Page\": _PocoComponent(\"enable_pagecompiler_file2page\", False, (\"PocoNet\", \"PocoUtil\", \"PocoXML\", \"PocoJSON\", ), False),\n \"PocoPocoDoc\": _PocoComponent(\"enable_pocodoc\", False, (\"PocoUtil\", \"PocoXML\", \"PocoCppParser\", ), False),\n \"PocoRedis\": _PocoComponent(\"enable_redis\", True, (\"PocoNet\", ), True),\n \"PocoSevenZip\": _PocoComponent(\"enable_sevenzip\", False, (\"PocoUtil\", \"PocoXML\", ), True),\n \"PocoUtil\": _PocoComponent(\"enable_util\", True, (\"PocoFoundation\", \"PocoXML\", \"PocoJSON\", ), True),\n \"PocoXML\": _PocoComponent(\"enable_xml\", True, (\"PocoFoundation\", ), True),\n \"PocoZip\": _PocoComponent(\"enable_zip\", True, (\"PocoUtil\", \"PocoXML\", ), True),\n }\n\n for comp in _poco_component_tree.values():\n if comp.option:\n options[comp.option] = [True, False]\n default_options[comp.option] = comp.default_option\n del comp\n\n @property\n def _poco_ordered_components(self):\n remaining_components = dict((compname, set(compopts.dependencies)) for compname, compopts in self._poco_component_tree.items())\n ordered_components = []\n while remaining_components:\n components_no_deps = set(compname for compname, compopts in remaining_components.items() if not compopts)\n if not components_no_deps:\n raise ConanException(\"The poco dependency tree is invalid and contains a cycle\")\n for c in components_no_deps:\n remaining_components.pop(c)\n ordered_components.extend(components_no_deps)\n for rname in remaining_components.keys():\n remaining_components[rname] = remaining_components[rname].difference(components_no_deps)\n ordered_components.reverse()\n return ordered_components\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_folder = \"poco-poco-{}-release\".format(self.version)\n os.rename(extracted_folder, self._source_subfolder)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n else:\n del self.options.enable_netssl_win\n if tools.Version(self.version) < \"1.9\":\n del self.options.enable_encodings\n if tools.Version(self.version) < \"1.10\":\n del self.options.enable_data_postgresql\n del self.options.enable_jwt\n\n def configure(self):\n if self.options.enable_apacheconnector:\n raise ConanInvalidConfiguration(\"Apache connector not supported: https://github.com/pocoproject/poco/issues/1764\")\n if self.options.enable_data_mysql:\n raise ConanInvalidConfiguration(\"MySQL not supported yet, open an issue here please: %s\" % self.url)\n if self.settings.compiler == \"Visual Studio\":\n if self.options.shared and \"MT\" in str(self.settings.compiler.runtime):\n raise ConanInvalidConfiguration(\"Cannot build shared poco libraries with MT(d) runtime\")\n if self.options.get_safe(\"enable_data_postgresql\", False):\n raise ConanInvalidConfiguration(\"PostgreSQL not supported yet, open an issue here please: %s\" % self.url)\n for compopt in self._poco_component_tree.values():\n if not compopt.option:\n continue\n if self.options.get_safe(compopt.option, False):\n for compdep in compopt.dependencies:\n if not self._poco_component_tree[compdep].option:\n continue\n if not 
self.options.get_safe(self._poco_component_tree[compdep].option, False):\n raise ConanInvalidConfiguration(\"option {} requires also option {}\".format(compopt.option, self._poco_component_tree[compdep].option))\n\n def requirements(self):\n self.requires(\"pcre/8.41\")\n self.requires(\"zlib/1.2.11\")\n if self.options.enable_xml:\n self.requires(\"expat/2.2.10\")\n if self.options.enable_data_sqlite:\n self.requires(\"sqlite3/3.33.0\")\n if self.options.enable_apacheconnector:\n self.requires(\"apr/1.7.0\")\n self.requires(\"apr-util/1.6.1\")\n # FIXME: missing apache2 recipe\n raise ConanInvalidConfiguration(\"apache2 is not (yet) available on CCI\")\n if self.options.enable_netssl or \\\n self.options.enable_crypto or \\\n self.options.get_safe(\"enable_jwt\", False):\n self.requires(\"openssl/1.1.1h\")\n\n def _patch_sources(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n if tools.Version(self.version) < \"1.10.1\":\n self._cmake.definitions[\"POCO_STATIC\"] = not self.options.shared\n for comp in self._poco_component_tree.values():\n if not comp.option:\n continue\n self._cmake.definitions[comp.option.upper()] = self.options.get_safe(comp.option, False)\n self._cmake.definitions[\"POCO_UNBUNDLED\"] = True\n self._cmake.definitions[\"CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP\"] = True\n if self.settings.os == \"Windows\" and self.settings.compiler == \"Visual Studio\": # MT or MTd\n self._cmake.definitions[\"POCO_MT\"] = \"ON\" if \"MT\" in str(self.settings.compiler.runtime) else \"OFF\"\n self.output.info(self._cmake.definitions)\n # On Windows, Poco needs a message (MC) compiler.\n with tools.vcvars(self.settings) if self.settings.compiler == \"Visual Studio\" else tools.no_op():\n self._cmake.configure(build_dir=self._build_subfolder)\n return self._cmake\n\n def build(self):\n if self.options.enable_data_sqlite:\n if self.options[\"sqlite3\"].threadsafe == 0:\n raise ConanInvalidConfiguration(\"sqlite3 must be built with threadsafe enabled\")\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"cmake\"))\n\n @property\n def _ordered_libs(self):\n libs = []\n for compname in self._poco_ordered_components:\n comp_options = self._poco_component_tree[compname]\n if comp_options.is_lib:\n if not comp_options.option:\n libs.append(compname)\n elif self.options.get_safe(comp_options.option, False):\n libs.append(compname)\n return libs\n\n def package_info(self):\n suffix = str(self.settings.compiler.runtime).lower() \\\n if self.settings.compiler == \"Visual Studio\" and not self.options.shared \\\n else (\"d\" if self.settings.build_type == \"Debug\" else \"\")\n\n self.cpp_info.libs = list(\"{}{}\".format(lib, suffix) for lib in self._ordered_libs)\n\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend([\"pthread\", \"dl\", \"rt\"])\n\n if self.settings.compiler == \"Visual Studio\":\n self.cpp_info.defines.append(\"POCO_NO_AUTOMATIC_LIBS\")\n if not self.options.shared:\n self.cpp_info.defines.append(\"POCO_STATIC=ON\")\n if self.settings.os == \"Windows\":\n self.cpp_info.system_libs.extend([\"ws2_32\", \"iphlpapi\", 
\"crypt32\"])\n self.cpp_info.names[\"cmake_find_package\"] = \"Poco\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Poco\"\n", "path": "recipes/poco/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanException, ConanInvalidConfiguration\nfrom collections import namedtuple, OrderedDict\nimport os\n\n\nclass PocoConan(ConanFile):\n name = \"poco\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://pocoproject.org\"\n topics = (\"conan\", \"poco\", \"building\", \"networking\", \"server\", \"mobile\", \"embedded\")\n exports_sources = \"CMakeLists.txt\", \"patches/**\"\n generators = \"cmake\", \"cmake_find_package\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n license = \"BSL-1.0\"\n description = \"Modern, powerful open source C++ class libraries for building network- and internet-based \" \\\n \"applications that run on desktop, server, mobile and embedded systems.\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n _PocoComponent = namedtuple(\"_PocoComponent\", (\"option\", \"default_option\", \"dependencies\", \"is_lib\"))\n _poco_component_tree = {\n \"mod_poco\": _PocoComponent(\"enable_apacheconnector\", False, (\"PocoUtil\", \"PocoNet\", ), False), # also external apr and apr-util\n \"PocoCppParser\": _PocoComponent(\"enable_cppparser\", False, (\"PocoFoundation\", ), False),\n # \"PocoCppUnit\": _PocoComponent(\"enable_cppunit\", False, (\"PocoFoundation\", ), False)),\n \"PocoCrypto\": _PocoComponent(\"enable_crypto\", True, (\"PocoFoundation\", ), True), # also external openssl\n \"PocoData\": _PocoComponent(\"enable_data\", True, (\"PocoFoundation\", ), True),\n \"PocoDataMySQL\": _PocoComponent(\"enable_data_mysql\", False, (\"PocoData\", ), True),\n \"PocoDataODBC\": _PocoComponent(\"enable_data_odbc\", False, (\"PocoData\", ), True),\n \"PocoDataPostgreSQL\": _PocoComponent(\"enable_data_postgresql\", False, (\"PocoData\", ), True), # also external postgresql\n \"PocoDataSQLite\": _PocoComponent(\"enable_data_sqlite\", True, (\"PocoData\", ), True), # also external sqlite3\n \"PocoEncodings\": _PocoComponent(\"enable_encodings\", True, (\"PocoFoundation\", ), True),\n # \"PocoEncodingsCompiler\": _PocoComponent(\"enable_encodingscompiler\", False, (\"PocoNet\", \"PocoUtil\", ), False),\n \"PocoFoundation\": _PocoComponent(None, \"PocoFoundation\", (), True),\n \"PocoJSON\": _PocoComponent(\"enable_json\", True, (\"PocoFoundation\", ), True),\n \"PocoJWT\": _PocoComponent(\"enable_jwt\", True, (\"PocoJSON\", \"PocoCrypto\", ), True),\n \"PocoMongoDB\": _PocoComponent(\"enable_mongodb\", True, (\"PocoNet\", ), True),\n \"PocoNet\": _PocoComponent(\"enable_net\", True, (\"PocoFoundation\", ), True),\n \"PocoNetSSL\": _PocoComponent(\"enable_netssl\", True, (\"PocoCrypto\", \"PocoUtil\", \"PocoNet\", ), True), # also external openssl\n \"PocoNetSSLWin\": _PocoComponent(\"enable_netssl_win\", True, (\"PocoNet\", \"PocoUtil\", ), True),\n \"PocoPDF\": _PocoComponent(\"enable_pdf\", False, (\"PocoXML\", \"PocoUtil\", ), True),\n \"PocoPageCompiler\": _PocoComponent(\"enable_pagecompiler\", False, (\"PocoNet\", \"PocoUtil\", ), False),\n \"PocoFile2Page\": _PocoComponent(\"enable_pagecompiler_file2page\", False, (\"PocoNet\", \"PocoUtil\", \"PocoXML\", \"PocoJSON\", ), False),\n \"PocoPocoDoc\": _PocoComponent(\"enable_pocodoc\", False, (\"PocoUtil\", 
\"PocoXML\", \"PocoCppParser\", ), False),\n \"PocoRedis\": _PocoComponent(\"enable_redis\", True, (\"PocoNet\", ), True),\n \"PocoSevenZip\": _PocoComponent(\"enable_sevenzip\", False, (\"PocoUtil\", \"PocoXML\", ), True),\n \"PocoUtil\": _PocoComponent(\"enable_util\", True, (\"PocoFoundation\", \"PocoXML\", \"PocoJSON\", ), True),\n \"PocoXML\": _PocoComponent(\"enable_xml\", True, (\"PocoFoundation\", ), True),\n \"PocoZip\": _PocoComponent(\"enable_zip\", True, (\"PocoUtil\", \"PocoXML\", ), True),\n }\n\n for comp in _poco_component_tree.values():\n if comp.option:\n options[comp.option] = [True, False]\n default_options[comp.option] = comp.default_option\n del comp\n\n @property\n def _poco_ordered_components(self):\n remaining_components = dict((compname, set(compopts.dependencies)) for compname, compopts in self._poco_component_tree.items())\n ordered_components = []\n while remaining_components:\n components_no_deps = set(compname for compname, compopts in remaining_components.items() if not compopts)\n if not components_no_deps:\n raise ConanException(\"The poco dependency tree is invalid and contains a cycle\")\n for c in components_no_deps:\n remaining_components.pop(c)\n ordered_components.extend(components_no_deps)\n for rname in remaining_components.keys():\n remaining_components[rname] = remaining_components[rname].difference(components_no_deps)\n ordered_components.reverse()\n return ordered_components\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_folder = \"poco-poco-{}-release\".format(self.version)\n os.rename(extracted_folder, self._source_subfolder)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n else:\n del self.options.enable_netssl_win\n if tools.Version(self.version) < \"1.9\":\n del self.options.enable_encodings\n if tools.Version(self.version) < \"1.10\":\n del self.options.enable_data_postgresql\n del self.options.enable_jwt\n\n def configure(self):\n if self.options.enable_apacheconnector:\n raise ConanInvalidConfiguration(\"Apache connector not supported: https://github.com/pocoproject/poco/issues/1764\")\n if self.options.enable_data_mysql:\n raise ConanInvalidConfiguration(\"MySQL not supported yet, open an issue here please: %s\" % self.url)\n if self.settings.compiler == \"Visual Studio\":\n if self.options.shared and \"MT\" in str(self.settings.compiler.runtime):\n raise ConanInvalidConfiguration(\"Cannot build shared poco libraries with MT(d) runtime\")\n if self.options.get_safe(\"enable_data_postgresql\", False):\n raise ConanInvalidConfiguration(\"PostgreSQL not supported yet, open an issue here please: %s\" % self.url)\n for compopt in self._poco_component_tree.values():\n if not compopt.option:\n continue\n if self.options.get_safe(compopt.option, False):\n for compdep in compopt.dependencies:\n if not self._poco_component_tree[compdep].option:\n continue\n if not self.options.get_safe(self._poco_component_tree[compdep].option, False):\n raise ConanInvalidConfiguration(\"option {} requires also option {}\".format(compopt.option, self._poco_component_tree[compdep].option))\n\n def requirements(self):\n self.requires(\"pcre/8.41\")\n self.requires(\"zlib/1.2.11\")\n if self.options.enable_xml:\n self.requires(\"expat/2.2.10\")\n if self.options.enable_data_sqlite:\n 
self.requires(\"sqlite3/3.33.0\")\n if self.options.enable_apacheconnector:\n self.requires(\"apr/1.7.0\")\n self.requires(\"apr-util/1.6.1\")\n # FIXME: missing apache2 recipe\n raise ConanInvalidConfiguration(\"apache2 is not (yet) available on CCI\")\n if self.options.enable_netssl or \\\n self.options.enable_crypto or \\\n self.options.get_safe(\"enable_jwt\", False):\n self.requires(\"openssl/1.1.1h\")\n if self.options.enable_data_odbc and self.settings.os != \"Windows\":\n self.requires(\"odbc/2.3.7\")\n\n def _patch_sources(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n if tools.Version(self.version) < \"1.10.1\":\n self._cmake.definitions[\"POCO_STATIC\"] = not self.options.shared\n for comp in self._poco_component_tree.values():\n if not comp.option:\n continue\n self._cmake.definitions[comp.option.upper()] = self.options.get_safe(comp.option, False)\n self._cmake.definitions[\"POCO_UNBUNDLED\"] = True\n self._cmake.definitions[\"CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP\"] = True\n if self.settings.os == \"Windows\" and self.settings.compiler == \"Visual Studio\": # MT or MTd\n self._cmake.definitions[\"POCO_MT\"] = \"ON\" if \"MT\" in str(self.settings.compiler.runtime) else \"OFF\"\n self.output.info(self._cmake.definitions)\n # On Windows, Poco needs a message (MC) compiler.\n with tools.vcvars(self.settings) if self.settings.compiler == \"Visual Studio\" else tools.no_op():\n self._cmake.configure(build_dir=self._build_subfolder)\n return self._cmake\n\n def build(self):\n if self.options.enable_data_sqlite:\n if self.options[\"sqlite3\"].threadsafe == 0:\n raise ConanInvalidConfiguration(\"sqlite3 must be built with threadsafe enabled\")\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"cmake\"))\n\n @property\n def _ordered_libs(self):\n libs = []\n for compname in self._poco_ordered_components:\n comp_options = self._poco_component_tree[compname]\n if comp_options.is_lib:\n if not comp_options.option:\n libs.append(compname)\n elif self.options.get_safe(comp_options.option, False):\n libs.append(compname)\n return libs\n\n def package_info(self):\n suffix = str(self.settings.compiler.runtime).lower() \\\n if self.settings.compiler == \"Visual Studio\" and not self.options.shared \\\n else (\"d\" if self.settings.build_type == \"Debug\" else \"\")\n\n self.cpp_info.libs = list(\"{}{}\".format(lib, suffix) for lib in self._ordered_libs)\n\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend([\"pthread\", \"dl\", \"rt\"])\n\n if self.settings.compiler == \"Visual Studio\":\n self.cpp_info.defines.append(\"POCO_NO_AUTOMATIC_LIBS\")\n if not self.options.shared:\n self.cpp_info.defines.append(\"POCO_STATIC=ON\")\n if self.settings.os == \"Windows\":\n self.cpp_info.system_libs.extend([\"ws2_32\", \"iphlpapi\", \"crypt32\"])\n if self.options.enable_data_odbc:\n self.cpp_info.system_libs.extend([\"odbc32\", \"odbccp32\"])\n self.cpp_info.names[\"cmake_find_package\"] = \"Poco\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Poco\"\n", "path": "recipes/poco/all/conanfile.py"}]} | 3,917 | 278 |
gh_patches_debug_24829 | rasdani/github-patches | git_diff | qtile__qtile-4348 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logs pollution - keyboardlayout.py
### The issue:
Bunch of these:
```
2023-07-15 15:28:06,304 ERROR libqtile keyboardlayout.py:set_keyboard():L102 Can not load ~/.Xmodmap:
```
In my humble opinion, if I don't use an optional feature, I shouldn't get an error about it in the logs. Also, it's supposed to be **"Cannot"**, not "Can not". I don't have a better idea of how this should be handled at the moment.
### Required:
- [X] I have searched past issues to see if this bug has already been reported.
--- END ISSUE ---
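For orientation, here is a minimal sketch (not the repository's actual patch) of the kind of guard the report implies: only attempt to load `~/.Xmodmap` when the file exists, so its absence produces no log entry at all. The helper name `maybe_load_xmodmap` is a hypothetical placeholder.
```python
# Hypothetical sketch, not qtile code: skip the xmodmap step entirely when the
# user has no ~/.Xmodmap, so nothing is logged in the common case.
from pathlib import Path
from subprocess import CalledProcessError, check_output


def maybe_load_xmodmap() -> None:
    xmodmap = Path("~/.Xmodmap").expanduser()
    if not xmodmap.is_file():
        return  # no file, nothing to do and nothing to log
    try:
        check_output(["xmodmap", str(xmodmap)])
    except CalledProcessError:
        # failing to load a file that does exist is still worth reporting
        print(f"Could not load {xmodmap}")
```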
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/widget/keyboardlayout.py`
Content:
```
1 # Copyright (c) 2013 Jacob Mourelos
2 # Copyright (c) 2014 Shepilov Vladislav
3 # Copyright (c) 2014-2015 Sean Vig
4 # Copyright (c) 2014 Tycho Andersen
5 # Copyright (c) 2019 zordsdavini
6 #
7 # Permission is hereby granted, free of charge, to any person obtaining a copy
8 # of this software and associated documentation files (the "Software"), to deal
9 # in the Software without restriction, including without limitation the rights
10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 # copies of the Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice shall be included in
15 # all copies or substantial portions of the Software.
16 #
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 # SOFTWARE.
24
25 from __future__ import annotations
26
27 import re
28 from abc import ABCMeta, abstractmethod
29 from subprocess import CalledProcessError, check_output
30 from typing import TYPE_CHECKING
31
32 from libqtile.command.base import expose_command
33 from libqtile.confreader import ConfigError
34 from libqtile.log_utils import logger
35 from libqtile.widget import base
36
37 if TYPE_CHECKING:
38 from libqtile.core.manager import Qtile
39
40
41 class _BaseLayoutBackend(metaclass=ABCMeta):
42 def __init__(self, qtile: Qtile):
43 """
44 This handles getting and setter the keyboard layout with the appropriate
45 backend.
46 """
47
48 @abstractmethod
49 def get_keyboard(self) -> str:
50 """
51 Return the currently used keyboard layout as a string
52
53 Examples: "us", "us dvorak". In case of error returns "unknown".
54 """
55
56 def set_keyboard(self, layout: str, options: str | None) -> None:
57 """
58 Set the keyboard layout with specified options.
59 """
60
61
62 class _X11LayoutBackend(_BaseLayoutBackend):
63 kb_layout_regex = re.compile(r"layout:\s+(?P<layout>[\w-]+)")
64 kb_variant_regex = re.compile(r"variant:\s+(?P<variant>\w+)")
65
66 def get_keyboard(self) -> str:
67 try:
68 command = "setxkbmap -verbose 10 -query"
69 setxkbmap_output = check_output(command.split(" ")).decode()
70 except CalledProcessError:
71 logger.exception("Can not get the keyboard layout:")
72 return "unknown"
73 except OSError:
74 logger.exception("Please, check that xset is available:")
75 return "unknown"
76
77 match_layout = self.kb_layout_regex.search(setxkbmap_output)
78 if match_layout is None:
79 return "ERR"
80 keyboard = match_layout.group("layout")
81
82 match_variant = self.kb_variant_regex.search(setxkbmap_output)
83 if match_variant:
84 keyboard += " " + match_variant.group("variant")
85 return keyboard
86
87 def set_keyboard(self, layout: str, options: str | None) -> None:
88 command = ["setxkbmap"]
89 command.extend(layout.split(" "))
90 if options:
91 command.extend(["-option", options])
92 try:
93 check_output(command)
94 except CalledProcessError:
95 logger.error("Can not change the keyboard layout:")
96 except OSError:
97 logger.error("Please, check that setxkbmap is available:")
98 else:
99 try:
100 check_output("xmodmap $HOME/.Xmodmap", shell=True)
101 except CalledProcessError:
102 logger.error("Can not load ~/.Xmodmap:")
103 except OSError:
104 logger.error("Please, check that xmodmap is available:")
105
106
107 class _WaylandLayoutBackend(_BaseLayoutBackend):
108 def __init__(self, qtile: Qtile) -> None:
109 self.set_keymap = qtile.core.set_keymap
110 self._layout: str = ""
111
112 def get_keyboard(self) -> str:
113 return self._layout
114
115 def set_keyboard(self, layout: str, options: str | None) -> None:
116 maybe_variant: str | None = None
117 if " " in layout:
118 layout_name, maybe_variant = layout.split(" ", maxsplit=1)
119 else:
120 layout_name = layout
121 self.set_keymap(layout_name, options, maybe_variant)
122 self._layout = layout
123
124
125 layout_backends = {
126 "x11": _X11LayoutBackend,
127 "wayland": _WaylandLayoutBackend,
128 }
129
130
131 class KeyboardLayout(base.InLoopPollText):
132 """Widget for changing and displaying the current keyboard layout
133
134 To use this widget effectively you need to specify keyboard layouts you want to use
135 (using "configured_keyboards") and bind function "next_keyboard" to specific keys in
136 order to change layouts.
137
138 For example:
139
140 Key([mod], "space", lazy.widget["keyboardlayout"].next_keyboard(), desc="Next keyboard layout."),
141
142 When running Qtile with the X11 backend, this widget requires setxkbmap to be available.
143 Xmodmap will also be used if .Xmodmap file is available.
144 """
145
146 defaults = [
147 ("update_interval", 1, "Update time in seconds."),
148 (
149 "configured_keyboards",
150 ["us"],
151 "A list of predefined keyboard layouts "
152 "represented as strings. For example: "
153 "['us', 'us colemak', 'es', 'fr'].",
154 ),
155 (
156 "display_map",
157 {},
158 "Custom display of layout. Key should be in format "
159 "'layout variant'. For example: "
160 "{'us': 'us', 'lt sgs': 'sgs', 'ru phonetic': 'ru'}",
161 ),
162 ("option", None, "string of setxkbmap option. Ex., 'compose:menu,grp_led:scroll'"),
163 ]
164
165 def __init__(self, **config):
166 base.InLoopPollText.__init__(self, **config)
167 self.add_defaults(KeyboardLayout.defaults)
168 self.add_callbacks({"Button1": self.next_keyboard})
169
170 def _configure(self, qtile, bar):
171 base.InLoopPollText._configure(self, qtile, bar)
172
173 if qtile.core.name not in layout_backends:
174 raise ConfigError("KeyboardLayout does not support backend: " + qtile.core.name)
175
176 self.backend = layout_backends[qtile.core.name](qtile)
177 self.backend.set_keyboard(self.configured_keyboards[0], self.option)
178
179 @expose_command()
180 def next_keyboard(self):
181 """set the next layout in the list of configured keyboard layouts as
182 new current layout in use
183
184 If the current keyboard layout is not in the list, it will set as new
185 layout the first one in the list.
186 """
187
188 current_keyboard = self.backend.get_keyboard()
189 if current_keyboard in self.configured_keyboards:
190 # iterate the list circularly
191 next_keyboard = self.configured_keyboards[
192 (self.configured_keyboards.index(current_keyboard) + 1)
193 % len(self.configured_keyboards)
194 ]
195 else:
196 next_keyboard = self.configured_keyboards[0]
197
198 self.backend.set_keyboard(next_keyboard, self.option)
199
200 self.tick()
201
202 def poll(self):
203 keyboard = self.backend.get_keyboard()
204 if keyboard in self.display_map.keys():
205 return self.display_map[keyboard]
206 return keyboard.upper()
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libqtile/widget/keyboardlayout.py b/libqtile/widget/keyboardlayout.py
--- a/libqtile/widget/keyboardlayout.py
+++ b/libqtile/widget/keyboardlayout.py
@@ -26,6 +26,7 @@
import re
from abc import ABCMeta, abstractmethod
+from pathlib import Path
from subprocess import CalledProcessError, check_output
from typing import TYPE_CHECKING
@@ -92,16 +93,16 @@
try:
check_output(command)
except CalledProcessError:
- logger.error("Can not change the keyboard layout:")
+ logger.error("Cannot change the keyboard layout.")
except OSError:
- logger.error("Please, check that setxkbmap is available:")
+ logger.error("Please, check that setxkbmap is available.")
else:
- try:
- check_output("xmodmap $HOME/.Xmodmap", shell=True)
- except CalledProcessError:
- logger.error("Can not load ~/.Xmodmap:")
- except OSError:
- logger.error("Please, check that xmodmap is available:")
+ # Load Xmodmap if it's available
+ if Path("~/.Xmodmap").expanduser().is_file():
+ try:
+ check_output("xmodmap $HOME/.Xmodmap", shell=True)
+ except CalledProcessError:
+ logger.error("Could not load ~/.Xmodmap.")
class _WaylandLayoutBackend(_BaseLayoutBackend):
| {"golden_diff": "diff --git a/libqtile/widget/keyboardlayout.py b/libqtile/widget/keyboardlayout.py\n--- a/libqtile/widget/keyboardlayout.py\n+++ b/libqtile/widget/keyboardlayout.py\n@@ -26,6 +26,7 @@\n \n import re\n from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n from subprocess import CalledProcessError, check_output\n from typing import TYPE_CHECKING\n \n@@ -92,16 +93,16 @@\n try:\n check_output(command)\n except CalledProcessError:\n- logger.error(\"Can not change the keyboard layout:\")\n+ logger.error(\"Cannot change the keyboard layout.\")\n except OSError:\n- logger.error(\"Please, check that setxkbmap is available:\")\n+ logger.error(\"Please, check that setxkbmap is available.\")\n else:\n- try:\n- check_output(\"xmodmap $HOME/.Xmodmap\", shell=True)\n- except CalledProcessError:\n- logger.error(\"Can not load ~/.Xmodmap:\")\n- except OSError:\n- logger.error(\"Please, check that xmodmap is available:\")\n+ # Load Xmodmap if it's available\n+ if Path(\"~/.Xmodmap\").expanduser().is_file():\n+ try:\n+ check_output(\"xmodmap $HOME/.Xmodmap\", shell=True)\n+ except CalledProcessError:\n+ logger.error(\"Could not load ~/.Xmodmap.\")\n \n \n class _WaylandLayoutBackend(_BaseLayoutBackend):\n", "issue": "Logs pollution - keyboardlayout.py\n### The issue:\r\n\r\nBunch of these:\r\n```\r\n2023-07-15 15:28:06,304 ERROR libqtile keyboardlayout.py:set_keyboard():L102 Can not load ~/.Xmodmap:\r\n```\r\nMy humble opinion is that if I don't use something optional, I shouldn't have an error about it in the logs. Oh and it's supposed to be **\"Cannot\"**. I don't have another idea on how this should be done atm.\r\n\r\n### Required:\r\n\r\n- [X] I have searched past issues to see if this bug has already been reported.\n", "before_files": [{"content": "# Copyright (c) 2013 Jacob Mourelos\n# Copyright (c) 2014 Shepilov Vladislav\n# Copyright (c) 2014-2015 Sean Vig\n# Copyright (c) 2014 Tycho Andersen\n# Copyright (c) 2019 zordsdavini\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import annotations\n\nimport re\nfrom abc import ABCMeta, abstractmethod\nfrom subprocess import CalledProcessError, check_output\nfrom typing import TYPE_CHECKING\n\nfrom libqtile.command.base import expose_command\nfrom libqtile.confreader import ConfigError\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\nif TYPE_CHECKING:\n from libqtile.core.manager import Qtile\n\n\nclass _BaseLayoutBackend(metaclass=ABCMeta):\n def __init__(self, qtile: Qtile):\n \"\"\"\n This handles getting and setter the keyboard layout with the appropriate\n backend.\n \"\"\"\n\n @abstractmethod\n def get_keyboard(self) -> str:\n \"\"\"\n Return the currently used keyboard layout as a string\n\n Examples: \"us\", \"us dvorak\". In case of error returns \"unknown\".\n \"\"\"\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n \"\"\"\n Set the keyboard layout with specified options.\n \"\"\"\n\n\nclass _X11LayoutBackend(_BaseLayoutBackend):\n kb_layout_regex = re.compile(r\"layout:\\s+(?P<layout>[\\w-]+)\")\n kb_variant_regex = re.compile(r\"variant:\\s+(?P<variant>\\w+)\")\n\n def get_keyboard(self) -> str:\n try:\n command = \"setxkbmap -verbose 10 -query\"\n setxkbmap_output = check_output(command.split(\" \")).decode()\n except CalledProcessError:\n logger.exception(\"Can not get the keyboard layout:\")\n return \"unknown\"\n except OSError:\n logger.exception(\"Please, check that xset is available:\")\n return \"unknown\"\n\n match_layout = self.kb_layout_regex.search(setxkbmap_output)\n if match_layout is None:\n return \"ERR\"\n keyboard = match_layout.group(\"layout\")\n\n match_variant = self.kb_variant_regex.search(setxkbmap_output)\n if match_variant:\n keyboard += \" \" + match_variant.group(\"variant\")\n return keyboard\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n command = [\"setxkbmap\"]\n command.extend(layout.split(\" \"))\n if options:\n command.extend([\"-option\", options])\n try:\n check_output(command)\n except CalledProcessError:\n logger.error(\"Can not change the keyboard layout:\")\n except OSError:\n logger.error(\"Please, check that setxkbmap is available:\")\n else:\n try:\n check_output(\"xmodmap $HOME/.Xmodmap\", shell=True)\n except CalledProcessError:\n logger.error(\"Can not load ~/.Xmodmap:\")\n except OSError:\n logger.error(\"Please, check that xmodmap is available:\")\n\n\nclass _WaylandLayoutBackend(_BaseLayoutBackend):\n def __init__(self, qtile: Qtile) -> None:\n self.set_keymap = qtile.core.set_keymap\n self._layout: str = \"\"\n\n def get_keyboard(self) -> str:\n return self._layout\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n maybe_variant: str | None = None\n if \" \" in layout:\n layout_name, maybe_variant = layout.split(\" \", maxsplit=1)\n else:\n layout_name = layout\n self.set_keymap(layout_name, options, maybe_variant)\n self._layout = layout\n\n\nlayout_backends = {\n \"x11\": _X11LayoutBackend,\n \"wayland\": _WaylandLayoutBackend,\n}\n\n\nclass KeyboardLayout(base.InLoopPollText):\n \"\"\"Widget for changing and displaying the current keyboard layout\n\n To use this widget effectively you need to specify keyboard layouts you want to use\n (using \"configured_keyboards\") and 
bind function \"next_keyboard\" to specific keys in\n order to change layouts.\n\n For example:\n\n Key([mod], \"space\", lazy.widget[\"keyboardlayout\"].next_keyboard(), desc=\"Next keyboard layout.\"),\n\n When running Qtile with the X11 backend, this widget requires setxkbmap to be available.\n Xmodmap will also be used if .Xmodmap file is available.\n \"\"\"\n\n defaults = [\n (\"update_interval\", 1, \"Update time in seconds.\"),\n (\n \"configured_keyboards\",\n [\"us\"],\n \"A list of predefined keyboard layouts \"\n \"represented as strings. For example: \"\n \"['us', 'us colemak', 'es', 'fr'].\",\n ),\n (\n \"display_map\",\n {},\n \"Custom display of layout. Key should be in format \"\n \"'layout variant'. For example: \"\n \"{'us': 'us', 'lt sgs': 'sgs', 'ru phonetic': 'ru'}\",\n ),\n (\"option\", None, \"string of setxkbmap option. Ex., 'compose:menu,grp_led:scroll'\"),\n ]\n\n def __init__(self, **config):\n base.InLoopPollText.__init__(self, **config)\n self.add_defaults(KeyboardLayout.defaults)\n self.add_callbacks({\"Button1\": self.next_keyboard})\n\n def _configure(self, qtile, bar):\n base.InLoopPollText._configure(self, qtile, bar)\n\n if qtile.core.name not in layout_backends:\n raise ConfigError(\"KeyboardLayout does not support backend: \" + qtile.core.name)\n\n self.backend = layout_backends[qtile.core.name](qtile)\n self.backend.set_keyboard(self.configured_keyboards[0], self.option)\n\n @expose_command()\n def next_keyboard(self):\n \"\"\"set the next layout in the list of configured keyboard layouts as\n new current layout in use\n\n If the current keyboard layout is not in the list, it will set as new\n layout the first one in the list.\n \"\"\"\n\n current_keyboard = self.backend.get_keyboard()\n if current_keyboard in self.configured_keyboards:\n # iterate the list circularly\n next_keyboard = self.configured_keyboards[\n (self.configured_keyboards.index(current_keyboard) + 1)\n % len(self.configured_keyboards)\n ]\n else:\n next_keyboard = self.configured_keyboards[0]\n\n self.backend.set_keyboard(next_keyboard, self.option)\n\n self.tick()\n\n def poll(self):\n keyboard = self.backend.get_keyboard()\n if keyboard in self.display_map.keys():\n return self.display_map[keyboard]\n return keyboard.upper()\n", "path": "libqtile/widget/keyboardlayout.py"}], "after_files": [{"content": "# Copyright (c) 2013 Jacob Mourelos\n# Copyright (c) 2014 Shepilov Vladislav\n# Copyright (c) 2014-2015 Sean Vig\n# Copyright (c) 2014 Tycho Andersen\n# Copyright (c) 2019 zordsdavini\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import annotations\n\nimport re\nfrom abc import ABCMeta, abstractmethod\nfrom pathlib import Path\nfrom subprocess import CalledProcessError, check_output\nfrom typing import TYPE_CHECKING\n\nfrom libqtile.command.base import expose_command\nfrom libqtile.confreader import ConfigError\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\nif TYPE_CHECKING:\n from libqtile.core.manager import Qtile\n\n\nclass _BaseLayoutBackend(metaclass=ABCMeta):\n def __init__(self, qtile: Qtile):\n \"\"\"\n This handles getting and setter the keyboard layout with the appropriate\n backend.\n \"\"\"\n\n @abstractmethod\n def get_keyboard(self) -> str:\n \"\"\"\n Return the currently used keyboard layout as a string\n\n Examples: \"us\", \"us dvorak\". In case of error returns \"unknown\".\n \"\"\"\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n \"\"\"\n Set the keyboard layout with specified options.\n \"\"\"\n\n\nclass _X11LayoutBackend(_BaseLayoutBackend):\n kb_layout_regex = re.compile(r\"layout:\\s+(?P<layout>[\\w-]+)\")\n kb_variant_regex = re.compile(r\"variant:\\s+(?P<variant>\\w+)\")\n\n def get_keyboard(self) -> str:\n try:\n command = \"setxkbmap -verbose 10 -query\"\n setxkbmap_output = check_output(command.split(\" \")).decode()\n except CalledProcessError:\n logger.exception(\"Can not get the keyboard layout:\")\n return \"unknown\"\n except OSError:\n logger.exception(\"Please, check that xset is available:\")\n return \"unknown\"\n\n match_layout = self.kb_layout_regex.search(setxkbmap_output)\n if match_layout is None:\n return \"ERR\"\n keyboard = match_layout.group(\"layout\")\n\n match_variant = self.kb_variant_regex.search(setxkbmap_output)\n if match_variant:\n keyboard += \" \" + match_variant.group(\"variant\")\n return keyboard\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n command = [\"setxkbmap\"]\n command.extend(layout.split(\" \"))\n if options:\n command.extend([\"-option\", options])\n try:\n check_output(command)\n except CalledProcessError:\n logger.error(\"Cannot change the keyboard layout.\")\n except OSError:\n logger.error(\"Please, check that setxkbmap is available.\")\n else:\n # Load Xmodmap if it's available\n if Path(\"~/.Xmodmap\").expanduser().is_file():\n try:\n check_output(\"xmodmap $HOME/.Xmodmap\", shell=True)\n except CalledProcessError:\n logger.error(\"Could not load ~/.Xmodmap.\")\n\n\nclass _WaylandLayoutBackend(_BaseLayoutBackend):\n def __init__(self, qtile: Qtile) -> None:\n self.set_keymap = qtile.core.set_keymap\n self._layout: str = \"\"\n\n def get_keyboard(self) -> str:\n return self._layout\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n maybe_variant: str | None = None\n if \" \" in layout:\n layout_name, maybe_variant = layout.split(\" \", maxsplit=1)\n else:\n layout_name = layout\n self.set_keymap(layout_name, options, maybe_variant)\n self._layout = layout\n\n\nlayout_backends = {\n \"x11\": _X11LayoutBackend,\n \"wayland\": _WaylandLayoutBackend,\n}\n\n\nclass KeyboardLayout(base.InLoopPollText):\n \"\"\"Widget for changing and displaying the current keyboard layout\n\n To use this widget effectively you need to specify keyboard layouts you want to use\n 
(using \"configured_keyboards\") and bind function \"next_keyboard\" to specific keys in\n order to change layouts.\n\n For example:\n\n Key([mod], \"space\", lazy.widget[\"keyboardlayout\"].next_keyboard(), desc=\"Next keyboard layout.\"),\n\n When running Qtile with the X11 backend, this widget requires setxkbmap to be available.\n Xmodmap will also be used if .Xmodmap file is available.\n \"\"\"\n\n defaults = [\n (\"update_interval\", 1, \"Update time in seconds.\"),\n (\n \"configured_keyboards\",\n [\"us\"],\n \"A list of predefined keyboard layouts \"\n \"represented as strings. For example: \"\n \"['us', 'us colemak', 'es', 'fr'].\",\n ),\n (\n \"display_map\",\n {},\n \"Custom display of layout. Key should be in format \"\n \"'layout variant'. For example: \"\n \"{'us': 'us', 'lt sgs': 'sgs', 'ru phonetic': 'ru'}\",\n ),\n (\"option\", None, \"string of setxkbmap option. Ex., 'compose:menu,grp_led:scroll'\"),\n ]\n\n def __init__(self, **config):\n base.InLoopPollText.__init__(self, **config)\n self.add_defaults(KeyboardLayout.defaults)\n self.add_callbacks({\"Button1\": self.next_keyboard})\n\n def _configure(self, qtile, bar):\n base.InLoopPollText._configure(self, qtile, bar)\n\n if qtile.core.name not in layout_backends:\n raise ConfigError(\"KeyboardLayout does not support backend: \" + qtile.core.name)\n\n self.backend = layout_backends[qtile.core.name](qtile)\n self.backend.set_keyboard(self.configured_keyboards[0], self.option)\n\n @expose_command()\n def next_keyboard(self):\n \"\"\"set the next layout in the list of configured keyboard layouts as\n new current layout in use\n\n If the current keyboard layout is not in the list, it will set as new\n layout the first one in the list.\n \"\"\"\n\n current_keyboard = self.backend.get_keyboard()\n if current_keyboard in self.configured_keyboards:\n # iterate the list circularly\n next_keyboard = self.configured_keyboards[\n (self.configured_keyboards.index(current_keyboard) + 1)\n % len(self.configured_keyboards)\n ]\n else:\n next_keyboard = self.configured_keyboards[0]\n\n self.backend.set_keyboard(next_keyboard, self.option)\n\n self.tick()\n\n def poll(self):\n keyboard = self.backend.get_keyboard()\n if keyboard in self.display_map.keys():\n return self.display_map[keyboard]\n return keyboard.upper()\n", "path": "libqtile/widget/keyboardlayout.py"}]} | 2,628 | 326 |
gh_patches_debug_31699 | rasdani/github-patches | git_diff | facebookresearch__Mephisto-494 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Running static_test_script.py: Usage of deprecated keyword in package header
Seems like this is a compatibility issue with hydra 1.1.0? I've changed static_test_script.py like so:
`@hydra.main(config_path=".", config_name="scriptconfig")`
and deleted the first line of config/example.yaml as instructed by hydra.cc. After running the script I'm still getting the following error:
```
/home/ninacdu/.local/lib/python3.8/site-packages/hydra/core/default_element.py:122: UserWarning: In 'mephisto/provider/mock': Usage of deprecated keyword in package header '# @package _group_'.
See https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_package_header for more information
warnings.warn(
/home/ninacdu/.local/lib/python3.8/site-packages/hydra/core/default_element.py:122: UserWarning: In 'mephisto/architect/local': Usage of deprecated keyword in package header '# @package _group_'.
See https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_package_header for more information
warnings.warn(
/home/ninacdu/.local/lib/python3.8/site-packages/hydra/core/default_element.py:122: UserWarning: In 'mephisto/blueprint/static_task': Usage of deprecated keyword in package header '# @package _group_'.
See https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_package_header for more information
warnings.warn(
In 'scriptconfig': Validation error while composing config:
Merge error: BlueprintArgs is not a subclass of StaticHTMLBlueprintArgs. value: {'_blueprint_type': '???', 'onboarding_qualification': '???', 'block_qualification': '???'}
full_key:
object_type=dict
Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.
```
Is there something I'm doing wrong here?
--- END ISSUE ---
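As a point of reference, a minimal Hydra 1.1-style entry point looks roughly like the sketch below; the `hydra_configs` directory name and the `scriptconfig` config name are illustrative assumptions, not necessarily what this repository ships.
```python
# Hypothetical sketch of a Hydra 1.1 entry point; directory and config names are
# assumptions for illustration only.
import hydra
from omegaconf import DictConfig, OmegaConf


@hydra.main(config_path="hydra_configs", config_name="scriptconfig")
def main(cfg: DictConfig) -> None:
    # Print the fully composed config to verify that the defaults list resolved.
    print(OmegaConf.to_yaml(cfg))


if __name__ == "__main__":
    main()
```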
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/simple_static_task/static_run_with_onboarding.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 import os
8 from mephisto.operations.operator import Operator
9 from mephisto.operations.utils import get_root_dir
10 from mephisto.tools.scripts import load_db_and_process_config
11 from mephisto.abstractions.blueprints.static_html_task.static_html_blueprint import (
12 BLUEPRINT_TYPE,
13 )
14 from mephisto.abstractions.blueprints.abstract.static_task.static_blueprint import (
15 SharedStaticTaskState,
16 )
17
18 import hydra
19 from omegaconf import DictConfig
20 from dataclasses import dataclass, field
21 from typing import List, Any
22
23 TASK_DIRECTORY = os.path.join(get_root_dir(), "examples/simple_static_task")
24 CORRECT_ANSWER = "apple"
25
26 defaults = [
27 {"mephisto/blueprint": BLUEPRINT_TYPE},
28 {"mephisto/architect": "local"},
29 {"mephisto/provider": "mock"},
30 {"conf": "onboarding_example"},
31 ]
32
33 from mephisto.operations.hydra_config import RunScriptConfig, register_script_config
34
35
36 @dataclass
37 class TestScriptConfig(RunScriptConfig):
38 defaults: List[Any] = field(default_factory=lambda: defaults)
39 task_dir: str = TASK_DIRECTORY
40 correct_answer: str = CORRECT_ANSWER
41
42
43 register_script_config(name="scriptconfig", module=TestScriptConfig)
44
45
46 @hydra.main(config_name="scriptconfig")
47 def main(cfg: DictConfig) -> None:
48 correct_config_answer = cfg.correct_answer
49
50 def onboarding_is_valid(onboarding_data):
51 inputs = onboarding_data["inputs"]
52 outputs = onboarding_data["outputs"]
53 return outputs.get("answer") == correct_config_answer
54
55 shared_state = SharedStaticTaskState(
56 onboarding_data={"correct_answer": correct_config_answer},
57 validate_onboarding=onboarding_is_valid,
58 )
59
60 db, cfg = load_db_and_process_config(cfg)
61 operator = Operator(db)
62
63 operator.validate_and_run_config(cfg.mephisto, shared_state)
64 operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30)
65
66
67 if __name__ == "__main__":
68 main()
69
```
Path: `mephisto/operations/hydra_config.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 from hydra.core.config_store import ConfigStoreWithProvider
8 from mephisto.abstractions.blueprint import BlueprintArgs
9 from mephisto.abstractions.architect import ArchitectArgs
10 from mephisto.abstractions.crowd_provider import ProviderArgs
11 from mephisto.data_model.task_config import TaskConfigArgs
12 from dataclasses import dataclass, field
13 from omegaconf import MISSING
14 from typing import List, Any
15
16 config = ConfigStoreWithProvider("mephisto")
17
18
19 @dataclass
20 class DatabaseArgs:
21 _database_type: str = "local" # default DB is local
22
23
24 @dataclass
25 class MephistoConfig:
26 blueprint: BlueprintArgs = BlueprintArgs()
27 provider: ProviderArgs = ProviderArgs()
28 architect: ArchitectArgs = ArchitectArgs()
29 task: TaskConfigArgs = TaskConfigArgs()
30 database: DatabaseArgs = DatabaseArgs()
31 log_level: str = "info"
32
33
34 @dataclass
35 class RunScriptConfig:
36 mephisto: MephistoConfig = MephistoConfig()
37
38
39 def register_abstraction_config(name: str, node: Any, abstraction_type: str):
40 config.store(
41 name=name, node=node, group=f"mephisto/{abstraction_type}", package="_group_"
42 )
43
44
45 def initialize_named_configs():
46 """
47 Functionality to register the core mephisto configuration structure. Must be done in __init__
48 """
49 config.store(
50 name="base_mephisto_config",
51 node=MephistoConfig,
52 group="mephisto",
53 package="_group_",
54 )
55
56
57 def register_script_config(name: str, module: Any):
58 config.store(name=name, node=module)
59
```
Path: `examples/static_react_task/run_task.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 import os
8 import shutil
9 import subprocess
10 from mephisto.operations.operator import Operator
11 from mephisto.operations.utils import get_root_dir
12 from mephisto.tools.scripts import load_db_and_process_config
13 from mephisto.abstractions.blueprints.static_react_task.static_react_blueprint import (
14 BLUEPRINT_TYPE,
15 )
16 from mephisto.abstractions.blueprints.abstract.static_task.static_blueprint import (
17 SharedStaticTaskState,
18 )
19
20 import hydra
21 from omegaconf import DictConfig
22 from dataclasses import dataclass, field
23 from typing import List, Any
24
25 TASK_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
26
27 defaults = [
28 {"mephisto/blueprint": BLUEPRINT_TYPE},
29 {"mephisto/architect": "local"},
30 {"mephisto/provider": "mock"},
31 {"conf": "example"},
32 ]
33
34 from mephisto.operations.hydra_config import RunScriptConfig, register_script_config
35
36
37 @dataclass
38 class TestScriptConfig(RunScriptConfig):
39 defaults: List[Any] = field(default_factory=lambda: defaults)
40 task_dir: str = TASK_DIRECTORY
41
42
43 register_script_config(name="scriptconfig", module=TestScriptConfig)
44
45
46 # TODO it would be nice if this was automated in the way that it
47 # is for ParlAI custom frontend tasks
48 def build_task(task_dir):
49 """Rebuild the frontend for this task"""
50
51 frontend_source_dir = os.path.join(task_dir, "webapp")
52 frontend_build_dir = os.path.join(frontend_source_dir, "build")
53
54 return_dir = os.getcwd()
55 os.chdir(frontend_source_dir)
56 if os.path.exists(frontend_build_dir):
57 shutil.rmtree(frontend_build_dir)
58 packages_installed = subprocess.call(["npm", "install"])
59 if packages_installed != 0:
60 raise Exception(
61 "please make sure npm is installed, otherwise view "
62 "the above error for more info."
63 )
64
65 webpack_complete = subprocess.call(["npm", "run", "dev"])
66 if webpack_complete != 0:
67 raise Exception(
68 "Webpack appears to have failed to build your "
69 "frontend. See the above error for more information."
70 )
71 os.chdir(return_dir)
72
73
74 @hydra.main(config_name="scriptconfig")
75 def main(cfg: DictConfig) -> None:
76 task_dir = cfg.task_dir
77
78 def onboarding_always_valid(onboarding_data):
79 return True
80
81 shared_state = SharedStaticTaskState(
82 static_task_data=[
83 {"text": "This text is good text!"},
84 {"text": "This text is bad text!"},
85 ],
86 validate_onboarding=onboarding_always_valid,
87 )
88
89 build_task(task_dir)
90
91 db, cfg = load_db_and_process_config(cfg)
92 operator = Operator(db)
93
94 operator.validate_and_run_config(cfg.mephisto, shared_state)
95 operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30)
96
97
98 if __name__ == "__main__":
99 main()
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/simple_static_task/static_run_with_onboarding.py b/examples/simple_static_task/static_run_with_onboarding.py
--- a/examples/simple_static_task/static_run_with_onboarding.py
+++ b/examples/simple_static_task/static_run_with_onboarding.py
@@ -43,7 +43,7 @@
register_script_config(name="scriptconfig", module=TestScriptConfig)
[email protected](config_name="scriptconfig")
[email protected](config_path="hydra_configs", config_name="scriptconfig")
def main(cfg: DictConfig) -> None:
correct_config_answer = cfg.correct_answer
diff --git a/examples/static_react_task/run_task.py b/examples/static_react_task/run_task.py
--- a/examples/static_react_task/run_task.py
+++ b/examples/static_react_task/run_task.py
@@ -71,7 +71,7 @@
os.chdir(return_dir)
[email protected](config_name="scriptconfig")
[email protected](config_path="hydra_configs", config_name="scriptconfig")
def main(cfg: DictConfig) -> None:
task_dir = cfg.task_dir
diff --git a/mephisto/operations/hydra_config.py b/mephisto/operations/hydra_config.py
--- a/mephisto/operations/hydra_config.py
+++ b/mephisto/operations/hydra_config.py
@@ -23,9 +23,9 @@
@dataclass
class MephistoConfig:
- blueprint: BlueprintArgs = BlueprintArgs()
- provider: ProviderArgs = ProviderArgs()
- architect: ArchitectArgs = ArchitectArgs()
+ blueprint: BlueprintArgs = MISSING
+ provider: ProviderArgs = MISSING
+ architect: ArchitectArgs = MISSING
task: TaskConfigArgs = TaskConfigArgs()
database: DatabaseArgs = DatabaseArgs()
log_level: str = "info"
@@ -38,7 +38,9 @@
def register_abstraction_config(name: str, node: Any, abstraction_type: str):
config.store(
- name=name, node=node, group=f"mephisto/{abstraction_type}", package="_group_"
+ name=name,
+ node=node,
+ group=f"mephisto/{abstraction_type}",
)
@@ -50,7 +52,6 @@
name="base_mephisto_config",
node=MephistoConfig,
group="mephisto",
- package="_group_",
)
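The patch above drops the deprecated `package="_group_"` argument when storing group nodes and declares the abstraction fields as `MISSING`, which is the Hydra 1.1 idiom for "this slot is provided by the selected config group option", so a subclass such as `StaticHTMLBlueprintArgs` can take the field's place without a merge error. For readers unfamiliar with that idiom, a generic sketch follows; the `db`/`mysql` names are illustrative assumptions unrelated to Mephisto.
```python
# Hypothetical, generic Hydra 1.1 sketch: group options are stored without a
# package="_group_" override, and the field a group fills is declared MISSING so
# the selected option's (sub)class can replace it cleanly.
from dataclasses import dataclass, field
from typing import Any, List

from hydra.core.config_store import ConfigStore
from omegaconf import MISSING


@dataclass
class DBConfig:
    driver: str = MISSING


@dataclass
class MySQLConfig(DBConfig):
    driver: str = "mysql"
    host: str = "localhost"


@dataclass
class AppConfig:
    # Listing "_self_" after the group entry lets the primary config's own
    # values take precedence over the group default.
    defaults: List[Any] = field(default_factory=lambda: [{"db": "mysql"}, "_self_"])
    db: DBConfig = MISSING  # filled in by the selected "db" group option


cs = ConfigStore.instance()
cs.store(group="db", name="mysql", node=MySQLConfig)
cs.store(name="app_config", node=AppConfig)
```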
| {"golden_diff": "diff --git a/examples/simple_static_task/static_run_with_onboarding.py b/examples/simple_static_task/static_run_with_onboarding.py\n--- a/examples/simple_static_task/static_run_with_onboarding.py\n+++ b/examples/simple_static_task/static_run_with_onboarding.py\n@@ -43,7 +43,7 @@\n register_script_config(name=\"scriptconfig\", module=TestScriptConfig)\n \n \[email protected](config_name=\"scriptconfig\")\[email protected](config_path=\"hydra_configs\", config_name=\"scriptconfig\")\n def main(cfg: DictConfig) -> None:\n correct_config_answer = cfg.correct_answer\n \ndiff --git a/examples/static_react_task/run_task.py b/examples/static_react_task/run_task.py\n--- a/examples/static_react_task/run_task.py\n+++ b/examples/static_react_task/run_task.py\n@@ -71,7 +71,7 @@\n os.chdir(return_dir)\n \n \[email protected](config_name=\"scriptconfig\")\[email protected](config_path=\"hydra_configs\", config_name=\"scriptconfig\")\n def main(cfg: DictConfig) -> None:\n task_dir = cfg.task_dir\n \ndiff --git a/mephisto/operations/hydra_config.py b/mephisto/operations/hydra_config.py\n--- a/mephisto/operations/hydra_config.py\n+++ b/mephisto/operations/hydra_config.py\n@@ -23,9 +23,9 @@\n \n @dataclass\n class MephistoConfig:\n- blueprint: BlueprintArgs = BlueprintArgs()\n- provider: ProviderArgs = ProviderArgs()\n- architect: ArchitectArgs = ArchitectArgs()\n+ blueprint: BlueprintArgs = MISSING\n+ provider: ProviderArgs = MISSING\n+ architect: ArchitectArgs = MISSING\n task: TaskConfigArgs = TaskConfigArgs()\n database: DatabaseArgs = DatabaseArgs()\n log_level: str = \"info\"\n@@ -38,7 +38,9 @@\n \n def register_abstraction_config(name: str, node: Any, abstraction_type: str):\n config.store(\n- name=name, node=node, group=f\"mephisto/{abstraction_type}\", package=\"_group_\"\n+ name=name,\n+ node=node,\n+ group=f\"mephisto/{abstraction_type}\",\n )\n \n \n@@ -50,7 +52,6 @@\n name=\"base_mephisto_config\",\n node=MephistoConfig,\n group=\"mephisto\",\n- package=\"_group_\",\n )\n", "issue": "Running static_test_script.py: Usage of deprecated keyword in package header\nSeems like this is a compatibility issue with hydra 1.1.0? I've changed static_test_script.py like so:\r\n`@hydra.main(config_path=\".\", config_name=\"scriptconfig\")`\r\nand deleted the first line config/example.yaml as instructed by hydra.cc, after running the script I'm still getting the following error:\r\n`/home/ninacdu/.local/lib/python3.8/site-packages/hydra/core/default_element.py:122: UserWarning: In 'mephisto/provider/mock': Usage of deprecated keyword in package header '# @package _group_'.\r\nSee https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_package_header for more information\r\n warnings.warn(\r\n/home/ninacdu/.local/lib/python3.8/site-packages/hydra/core/default_element.py:122: UserWarning: In 'mephisto/architect/local': Usage of deprecated keyword in package header '# @package _group_'.\r\nSee https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_package_header for more information\r\n warnings.warn(\r\n/home/ninacdu/.local/lib/python3.8/site-packages/hydra/core/default_element.py:122: UserWarning: In 'mephisto/blueprint/static_task': Usage of deprecated keyword in package header '# @package _group_'.\r\nSee https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_package_header for more information\r\n warnings.warn(\r\nIn 'scriptconfig': Validation error while composing config:\r\nMerge error: BlueprintArgs is not a subclass of StaticHTMLBlueprintArgs. 
value: {'_blueprint_type': '???', 'onboarding_qualification': '???', 'block_qualification': '???'}\r\n full_key: \r\n object_type=dict\r\n\r\nSet the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.\r\n`\r\nIs there something I'm doing wrong here? \n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nfrom mephisto.operations.operator import Operator\nfrom mephisto.operations.utils import get_root_dir\nfrom mephisto.tools.scripts import load_db_and_process_config\nfrom mephisto.abstractions.blueprints.static_html_task.static_html_blueprint import (\n BLUEPRINT_TYPE,\n)\nfrom mephisto.abstractions.blueprints.abstract.static_task.static_blueprint import (\n SharedStaticTaskState,\n)\n\nimport hydra\nfrom omegaconf import DictConfig\nfrom dataclasses import dataclass, field\nfrom typing import List, Any\n\nTASK_DIRECTORY = os.path.join(get_root_dir(), \"examples/simple_static_task\")\nCORRECT_ANSWER = \"apple\"\n\ndefaults = [\n {\"mephisto/blueprint\": BLUEPRINT_TYPE},\n {\"mephisto/architect\": \"local\"},\n {\"mephisto/provider\": \"mock\"},\n {\"conf\": \"onboarding_example\"},\n]\n\nfrom mephisto.operations.hydra_config import RunScriptConfig, register_script_config\n\n\n@dataclass\nclass TestScriptConfig(RunScriptConfig):\n defaults: List[Any] = field(default_factory=lambda: defaults)\n task_dir: str = TASK_DIRECTORY\n correct_answer: str = CORRECT_ANSWER\n\n\nregister_script_config(name=\"scriptconfig\", module=TestScriptConfig)\n\n\[email protected](config_name=\"scriptconfig\")\ndef main(cfg: DictConfig) -> None:\n correct_config_answer = cfg.correct_answer\n\n def onboarding_is_valid(onboarding_data):\n inputs = onboarding_data[\"inputs\"]\n outputs = onboarding_data[\"outputs\"]\n return outputs.get(\"answer\") == correct_config_answer\n\n shared_state = SharedStaticTaskState(\n onboarding_data={\"correct_answer\": correct_config_answer},\n validate_onboarding=onboarding_is_valid,\n )\n\n db, cfg = load_db_and_process_config(cfg)\n operator = Operator(db)\n\n operator.validate_and_run_config(cfg.mephisto, shared_state)\n operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/simple_static_task/static_run_with_onboarding.py"}, {"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom hydra.core.config_store import ConfigStoreWithProvider\nfrom mephisto.abstractions.blueprint import BlueprintArgs\nfrom mephisto.abstractions.architect import ArchitectArgs\nfrom mephisto.abstractions.crowd_provider import ProviderArgs\nfrom mephisto.data_model.task_config import TaskConfigArgs\nfrom dataclasses import dataclass, field\nfrom omegaconf import MISSING\nfrom typing import List, Any\n\nconfig = ConfigStoreWithProvider(\"mephisto\")\n\n\n@dataclass\nclass DatabaseArgs:\n _database_type: str = \"local\" # default DB is local\n\n\n@dataclass\nclass MephistoConfig:\n blueprint: BlueprintArgs = BlueprintArgs()\n provider: ProviderArgs = ProviderArgs()\n architect: ArchitectArgs = ArchitectArgs()\n task: TaskConfigArgs = TaskConfigArgs()\n database: DatabaseArgs = DatabaseArgs()\n log_level: str = \"info\"\n\n\n@dataclass\nclass RunScriptConfig:\n mephisto: MephistoConfig = MephistoConfig()\n\n\ndef register_abstraction_config(name: str, node: Any, abstraction_type: str):\n config.store(\n name=name, node=node, group=f\"mephisto/{abstraction_type}\", package=\"_group_\"\n )\n\n\ndef initialize_named_configs():\n \"\"\"\n Functionality to register the core mephisto configuration structure. Must be done in __init__\n \"\"\"\n config.store(\n name=\"base_mephisto_config\",\n node=MephistoConfig,\n group=\"mephisto\",\n package=\"_group_\",\n )\n\n\ndef register_script_config(name: str, module: Any):\n config.store(name=name, node=module)\n", "path": "mephisto/operations/hydra_config.py"}, {"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport shutil\nimport subprocess\nfrom mephisto.operations.operator import Operator\nfrom mephisto.operations.utils import get_root_dir\nfrom mephisto.tools.scripts import load_db_and_process_config\nfrom mephisto.abstractions.blueprints.static_react_task.static_react_blueprint import (\n BLUEPRINT_TYPE,\n)\nfrom mephisto.abstractions.blueprints.abstract.static_task.static_blueprint import (\n SharedStaticTaskState,\n)\n\nimport hydra\nfrom omegaconf import DictConfig\nfrom dataclasses import dataclass, field\nfrom typing import List, Any\n\nTASK_DIRECTORY = os.path.dirname(os.path.abspath(__file__))\n\ndefaults = [\n {\"mephisto/blueprint\": BLUEPRINT_TYPE},\n {\"mephisto/architect\": \"local\"},\n {\"mephisto/provider\": \"mock\"},\n {\"conf\": \"example\"},\n]\n\nfrom mephisto.operations.hydra_config import RunScriptConfig, register_script_config\n\n\n@dataclass\nclass TestScriptConfig(RunScriptConfig):\n defaults: List[Any] = field(default_factory=lambda: defaults)\n task_dir: str = TASK_DIRECTORY\n\n\nregister_script_config(name=\"scriptconfig\", module=TestScriptConfig)\n\n\n# TODO it would be nice if this was automated in the way that it\n# is for ParlAI custom frontend tasks\ndef build_task(task_dir):\n \"\"\"Rebuild the frontend for this task\"\"\"\n\n frontend_source_dir = os.path.join(task_dir, \"webapp\")\n frontend_build_dir = os.path.join(frontend_source_dir, \"build\")\n\n return_dir = os.getcwd()\n os.chdir(frontend_source_dir)\n if os.path.exists(frontend_build_dir):\n shutil.rmtree(frontend_build_dir)\n packages_installed = subprocess.call([\"npm\", \"install\"])\n if packages_installed != 0:\n raise 
Exception(\n \"please make sure npm is installed, otherwise view \"\n \"the above error for more info.\"\n )\n\n webpack_complete = subprocess.call([\"npm\", \"run\", \"dev\"])\n if webpack_complete != 0:\n raise Exception(\n \"Webpack appears to have failed to build your \"\n \"frontend. See the above error for more information.\"\n )\n os.chdir(return_dir)\n\n\[email protected](config_name=\"scriptconfig\")\ndef main(cfg: DictConfig) -> None:\n task_dir = cfg.task_dir\n\n def onboarding_always_valid(onboarding_data):\n return True\n\n shared_state = SharedStaticTaskState(\n static_task_data=[\n {\"text\": \"This text is good text!\"},\n {\"text\": \"This text is bad text!\"},\n ],\n validate_onboarding=onboarding_always_valid,\n )\n\n build_task(task_dir)\n\n db, cfg = load_db_and_process_config(cfg)\n operator = Operator(db)\n\n operator.validate_and_run_config(cfg.mephisto, shared_state)\n operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/static_react_task/run_task.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nfrom mephisto.operations.operator import Operator\nfrom mephisto.operations.utils import get_root_dir\nfrom mephisto.tools.scripts import load_db_and_process_config\nfrom mephisto.abstractions.blueprints.static_html_task.static_html_blueprint import (\n BLUEPRINT_TYPE,\n)\nfrom mephisto.abstractions.blueprints.abstract.static_task.static_blueprint import (\n SharedStaticTaskState,\n)\n\nimport hydra\nfrom omegaconf import DictConfig\nfrom dataclasses import dataclass, field\nfrom typing import List, Any\n\nTASK_DIRECTORY = os.path.join(get_root_dir(), \"examples/simple_static_task\")\nCORRECT_ANSWER = \"apple\"\n\ndefaults = [\n {\"mephisto/blueprint\": BLUEPRINT_TYPE},\n {\"mephisto/architect\": \"local\"},\n {\"mephisto/provider\": \"mock\"},\n {\"conf\": \"onboarding_example\"},\n]\n\nfrom mephisto.operations.hydra_config import RunScriptConfig, register_script_config\n\n\n@dataclass\nclass TestScriptConfig(RunScriptConfig):\n defaults: List[Any] = field(default_factory=lambda: defaults)\n task_dir: str = TASK_DIRECTORY\n correct_answer: str = CORRECT_ANSWER\n\n\nregister_script_config(name=\"scriptconfig\", module=TestScriptConfig)\n\n\[email protected](config_path=\"hydra_configs\", config_name=\"scriptconfig\")\ndef main(cfg: DictConfig) -> None:\n correct_config_answer = cfg.correct_answer\n\n def onboarding_is_valid(onboarding_data):\n inputs = onboarding_data[\"inputs\"]\n outputs = onboarding_data[\"outputs\"]\n return outputs.get(\"answer\") == correct_config_answer\n\n shared_state = SharedStaticTaskState(\n onboarding_data={\"correct_answer\": correct_config_answer},\n validate_onboarding=onboarding_is_valid,\n )\n\n db, cfg = load_db_and_process_config(cfg)\n operator = Operator(db)\n\n operator.validate_and_run_config(cfg.mephisto, shared_state)\n operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/simple_static_task/static_run_with_onboarding.py"}, {"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom hydra.core.config_store import ConfigStoreWithProvider\nfrom mephisto.abstractions.blueprint import BlueprintArgs\nfrom mephisto.abstractions.architect import ArchitectArgs\nfrom mephisto.abstractions.crowd_provider import ProviderArgs\nfrom mephisto.data_model.task_config import TaskConfigArgs\nfrom dataclasses import dataclass, field\nfrom omegaconf import MISSING\nfrom typing import List, Any\n\nconfig = ConfigStoreWithProvider(\"mephisto\")\n\n\n@dataclass\nclass DatabaseArgs:\n _database_type: str = \"local\" # default DB is local\n\n\n@dataclass\nclass MephistoConfig:\n blueprint: BlueprintArgs = MISSING\n provider: ProviderArgs = MISSING\n architect: ArchitectArgs = MISSING\n task: TaskConfigArgs = TaskConfigArgs()\n database: DatabaseArgs = DatabaseArgs()\n log_level: str = \"info\"\n\n\n@dataclass\nclass RunScriptConfig:\n mephisto: MephistoConfig = MephistoConfig()\n\n\ndef register_abstraction_config(name: str, node: Any, abstraction_type: str):\n config.store(\n name=name,\n node=node,\n group=f\"mephisto/{abstraction_type}\",\n )\n\n\ndef initialize_named_configs():\n \"\"\"\n Functionality to register the core mephisto configuration structure. Must be done in __init__\n \"\"\"\n config.store(\n name=\"base_mephisto_config\",\n node=MephistoConfig,\n group=\"mephisto\",\n )\n\n\ndef register_script_config(name: str, module: Any):\n config.store(name=name, node=module)\n", "path": "mephisto/operations/hydra_config.py"}, {"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport shutil\nimport subprocess\nfrom mephisto.operations.operator import Operator\nfrom mephisto.operations.utils import get_root_dir\nfrom mephisto.tools.scripts import load_db_and_process_config\nfrom mephisto.abstractions.blueprints.static_react_task.static_react_blueprint import (\n BLUEPRINT_TYPE,\n)\nfrom mephisto.abstractions.blueprints.abstract.static_task.static_blueprint import (\n SharedStaticTaskState,\n)\n\nimport hydra\nfrom omegaconf import DictConfig\nfrom dataclasses import dataclass, field\nfrom typing import List, Any\n\nTASK_DIRECTORY = os.path.dirname(os.path.abspath(__file__))\n\ndefaults = [\n {\"mephisto/blueprint\": BLUEPRINT_TYPE},\n {\"mephisto/architect\": \"local\"},\n {\"mephisto/provider\": \"mock\"},\n {\"conf\": \"example\"},\n]\n\nfrom mephisto.operations.hydra_config import RunScriptConfig, register_script_config\n\n\n@dataclass\nclass TestScriptConfig(RunScriptConfig):\n defaults: List[Any] = field(default_factory=lambda: defaults)\n task_dir: str = TASK_DIRECTORY\n\n\nregister_script_config(name=\"scriptconfig\", module=TestScriptConfig)\n\n\n# TODO it would be nice if this was automated in the way that it\n# is for ParlAI custom frontend tasks\ndef build_task(task_dir):\n \"\"\"Rebuild the frontend for this task\"\"\"\n\n frontend_source_dir = os.path.join(task_dir, \"webapp\")\n frontend_build_dir = os.path.join(frontend_source_dir, \"build\")\n\n return_dir = os.getcwd()\n os.chdir(frontend_source_dir)\n if os.path.exists(frontend_build_dir):\n shutil.rmtree(frontend_build_dir)\n packages_installed = subprocess.call([\"npm\", \"install\"])\n if packages_installed != 0:\n raise Exception(\n \"please make sure npm is installed, otherwise view 
\"\n \"the above error for more info.\"\n )\n\n webpack_complete = subprocess.call([\"npm\", \"run\", \"dev\"])\n if webpack_complete != 0:\n raise Exception(\n \"Webpack appears to have failed to build your \"\n \"frontend. See the above error for more information.\"\n )\n os.chdir(return_dir)\n\n\[email protected](config_path=\"hydra_configs\", config_name=\"scriptconfig\")\ndef main(cfg: DictConfig) -> None:\n task_dir = cfg.task_dir\n\n def onboarding_always_valid(onboarding_data):\n return True\n\n shared_state = SharedStaticTaskState(\n static_task_data=[\n {\"text\": \"This text is good text!\"},\n {\"text\": \"This text is bad text!\"},\n ],\n validate_onboarding=onboarding_always_valid,\n )\n\n build_task(task_dir)\n\n db, cfg = load_db_and_process_config(cfg)\n operator = Operator(db)\n\n operator.validate_and_run_config(cfg.mephisto, shared_state)\n operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/static_react_task/run_task.py"}]} | 2,778 | 537 |
gh_patches_debug_40764 | rasdani/github-patches | git_diff | svthalia__concrexit-3115 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Members API doesn't list all members
### Describe the bug
Luko, pk=23 on staging, is listed as a member on the website, but is not a member according to the API
### How to reproduce
No idea.
Check https://staging.thalia.nu/members/profile/23 vs https://staging.thalia.nu/api/v2/members/23/
### Expected behaviour
These URLs should agree on whether Luko is a member.
--- END ISSUE ---
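(For context, a quick way to check the discrepancy from a Django shell might look like the sketch below. It is an assumption-laden illustration, not part of the issue report; it relies only on the `current_members` manager used by the API views shown further down and on Django's default `objects` manager.)

```python
# Hypothetical reproduction sketch for `python manage.py shell` on staging.
# pk=23 comes from the issue report and may differ on other environments.
from members.models import Member

pk = 23
in_objects = Member.objects.filter(pk=pk).exists()           # default manager: all members
in_current = Member.current_members.filter(pk=pk).exists()   # manager used by the API queryset

print(f"objects={in_objects} current_members={in_current}")
# If the two disagree, the API list/detail views (which query current_members)
# will omit or 404 the member even though the website still shows a profile page.
```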
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/members/api/v2/views.py`
Content:
```
1 """API views of the activemembers app."""
2
3 from django.shortcuts import get_object_or_404
4
5 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
6 from rest_framework import filters as framework_filters
7 from rest_framework.generics import ListAPIView, RetrieveAPIView, UpdateAPIView
8
9 from members.api.v2 import filters
10 from members.api.v2.serializers.member import (
11 MemberCurrentSerializer,
12 MemberListSerializer,
13 MemberSerializer,
14 )
15 from members.models import Member
16 from thaliawebsite.api.openapi import OAuthAutoSchema
17 from thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod
18 from utils.media.services import fetch_thumbnails_db
19
20
21 class MemberListView(ListAPIView):
22 """Returns an overview of all members."""
23
24 serializer_class = MemberListSerializer
25 queryset = (
26 Member.current_members.all()
27 .select_related("profile")
28 .prefetch_related("membership_set")
29 )
30
31 def get_serializer(self, *args, **kwargs):
32 if len(args) > 0:
33 members = args[0]
34 fetch_thumbnails_db([member.profile.photo for member in members])
35 return super().get_serializer(*args, **kwargs)
36
37 permission_classes = [
38 IsAuthenticatedOrTokenHasScope,
39 ]
40 required_scopes = ["members:read"]
41 filter_backends = (
42 framework_filters.OrderingFilter,
43 framework_filters.SearchFilter,
44 filters.MembershipTypeFilter,
45 filters.StartingYearFilter,
46 )
47 ordering_fields = ("first_name", "last_name", "username")
48 search_fields = (
49 "profile__nickname",
50 "profile__starting_year",
51 "first_name",
52 "last_name",
53 "username",
54 )
55
56
57 class MemberDetailView(RetrieveAPIView):
58 """Returns details of a member."""
59
60 serializer_class = MemberSerializer
61 queryset = Member.current_members.all()
62 permission_classes = [
63 IsAuthenticatedOrTokenHasScope,
64 ]
65 required_scopes = ["members:read"]
66
67
68 class MemberCurrentView(MemberDetailView, UpdateAPIView):
69 """Returns details of the authenticated member."""
70
71 serializer_class = MemberCurrentSerializer
72 schema = OAuthAutoSchema(operation_id_base="CurrentMember")
73 permission_classes = [
74 IsAuthenticatedOrTokenHasScopeForMethod,
75 ]
76 required_scopes_per_method = {
77 "GET": ["profile:read"],
78 "PATCH": ["profile:write"],
79 "PUT": ["profile:write"],
80 }
81
82 def get_object(self):
83 return get_object_or_404(Member, pk=self.request.user.pk)
84
```
Path: `website/members/api/v2/filters.py`
Content:
```
1 from rest_framework import filters
2
3 from members.models import Membership
4
5
6 class StartingYearFilter(filters.BaseFilterBackend):
7 """Allows you to filter by starting year."""
8
9 def filter_queryset(self, request, queryset, view):
10 starting_year = request.query_params.get("starting_year", None)
11
12 if starting_year:
13 queryset = queryset.filter(profile__starting_year=starting_year)
14
15 return queryset
16
17 def get_schema_operation_parameters(self, view):
18 return [
19 {
20 "name": "starting_year",
21 "required": False,
22 "in": "query",
23 "description": "Filter by starting year",
24 "schema": {
25 "type": "number",
26 },
27 }
28 ]
29
30
31 class MembershipTypeFilter(filters.BaseFilterBackend):
32 """Allows you to filter by membership type."""
33
34 def filter_queryset(self, request, queryset, view):
35 membership_type = request.query_params.get("membership_type", None)
36
37 if membership_type:
38 memberships = Membership.objects.filter(type=membership_type)
39 queryset = queryset.filter(pk__in=memberships.values("user__pk"))
40
41 return queryset
42
43 def get_schema_operation_parameters(self, view):
44 return [
45 {
46 "name": "membership_type",
47 "required": False,
48 "in": "query",
49 "description": "Filter by membership type",
50 "schema": {
51 "type": "string",
52 },
53 }
54 ]
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/members/api/v2/filters.py b/website/members/api/v2/filters.py
--- a/website/members/api/v2/filters.py
+++ b/website/members/api/v2/filters.py
@@ -1,4 +1,9 @@
-from rest_framework import filters
+from datetime import datetime
+
+from django.db.models import Q
+from django.utils import timezone
+
+from rest_framework import filters, serializers
from members.models import Membership
@@ -28,6 +33,62 @@
]
+class FormerMemberFilter(filters.BaseFilterBackend):
+ def filter_queryset(self, request, queryset, view):
+ former = request.query_params.get("former", "false")
+
+ if former == "false":
+ # Filter out former members
+ return (
+ queryset.exclude(membership=None)
+ .filter(
+ Q(membership__until__isnull=True)
+ | Q(membership__until__gt=timezone.now().date())
+ )
+ .distinct()
+ )
+ elif former == "true":
+ # Filter out current members
+
+ memberships_query = Q(until__gt=datetime.now()) | Q(until=None)
+ members_query = ~Q(id=None)
+
+ # Filter out all current active memberships
+ memberships_query &= Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)
+ memberships = Membership.objects.filter(memberships_query)
+ members_query &= ~Q(pk__in=memberships.values("user__pk"))
+
+ memberships_query = Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)
+ memberships = Membership.objects.filter(memberships_query)
+ all_memberships = Membership.objects.all()
+ # Only keep members that were once members, or are legacy users
+ # that do not have any memberships at all
+ members_query &= Q(pk__in=memberships.values("user__pk")) | ~Q(
+ pk__in=all_memberships.values("user__pk")
+ )
+
+ return queryset.filter(members_query)
+ elif former == "any":
+ # Include both former and current members
+ return queryset
+ else:
+ raise serializers.ValidationError("invalid former parameter")
+
+ def get_schema_operation_parameters(self, view):
+ return [
+ {
+ "name": "former",
+ "required": False,
+ "in": "query",
+ "description": "Include former members or only former members",
+ "schema": {
+ "type": "string",
+ "enum": ["true", "false", "any"],
+ },
+ }
+ ]
+
+
class MembershipTypeFilter(filters.BaseFilterBackend):
"""Allows you to filter by membership type."""
diff --git a/website/members/api/v2/views.py b/website/members/api/v2/views.py
--- a/website/members/api/v2/views.py
+++ b/website/members/api/v2/views.py
@@ -23,7 +23,7 @@
serializer_class = MemberListSerializer
queryset = (
- Member.current_members.all()
+ Member.objects.all()
.select_related("profile")
.prefetch_related("membership_set")
)
@@ -43,6 +43,7 @@
framework_filters.SearchFilter,
filters.MembershipTypeFilter,
filters.StartingYearFilter,
+ filters.FormerMemberFilter,
)
ordering_fields = ("first_name", "last_name", "username")
search_fields = (
@@ -58,7 +59,7 @@
"""Returns details of a member."""
serializer_class = MemberSerializer
- queryset = Member.current_members.all()
+ queryset = Member.objects.all()
permission_classes = [
IsAuthenticatedOrTokenHasScope,
]
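As a usage note for the filter added above (a hedged sketch, not part of the patch): the `former` query parameter accepts `false` (the default), `true`, or `any`, so a client could exercise it roughly as below, assuming an OAuth token with the `members:read` scope and the staging host mentioned in the issue.

```python
# Hypothetical client-side check of the new `former` filter; host and token are placeholders.
import requests

BASE = "https://staging.thalia.nu/api/v2/members/"
HEADERS = {"Authorization": "Bearer <access-token>"}  # assumed token with members:read scope

for former in ("false", "true", "any"):
    resp = requests.get(BASE, params={"former": former}, headers=HEADERS)
    print(former, resp.status_code)
```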
| {"golden_diff": "diff --git a/website/members/api/v2/filters.py b/website/members/api/v2/filters.py\n--- a/website/members/api/v2/filters.py\n+++ b/website/members/api/v2/filters.py\n@@ -1,4 +1,9 @@\n-from rest_framework import filters\n+from datetime import datetime\n+\n+from django.db.models import Q\n+from django.utils import timezone\n+\n+from rest_framework import filters, serializers\n \n from members.models import Membership\n \n@@ -28,6 +33,62 @@\n ]\n \n \n+class FormerMemberFilter(filters.BaseFilterBackend):\n+ def filter_queryset(self, request, queryset, view):\n+ former = request.query_params.get(\"former\", \"false\")\n+\n+ if former == \"false\":\n+ # Filter out former members\n+ return (\n+ queryset.exclude(membership=None)\n+ .filter(\n+ Q(membership__until__isnull=True)\n+ | Q(membership__until__gt=timezone.now().date())\n+ )\n+ .distinct()\n+ )\n+ elif former == \"true\":\n+ # Filter out current members\n+\n+ memberships_query = Q(until__gt=datetime.now()) | Q(until=None)\n+ members_query = ~Q(id=None)\n+\n+ # Filter out all current active memberships\n+ memberships_query &= Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n+ memberships = Membership.objects.filter(memberships_query)\n+ members_query &= ~Q(pk__in=memberships.values(\"user__pk\"))\n+\n+ memberships_query = Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n+ memberships = Membership.objects.filter(memberships_query)\n+ all_memberships = Membership.objects.all()\n+ # Only keep members that were once members, or are legacy users\n+ # that do not have any memberships at all\n+ members_query &= Q(pk__in=memberships.values(\"user__pk\")) | ~Q(\n+ pk__in=all_memberships.values(\"user__pk\")\n+ )\n+\n+ return queryset.filter(members_query)\n+ elif former == \"any\":\n+ # Include both former and current members\n+ return queryset\n+ else:\n+ raise serializers.ValidationError(\"invalid former parameter\")\n+\n+ def get_schema_operation_parameters(self, view):\n+ return [\n+ {\n+ \"name\": \"former\",\n+ \"required\": False,\n+ \"in\": \"query\",\n+ \"description\": \"Include former members or only former members\",\n+ \"schema\": {\n+ \"type\": \"string\",\n+ \"enum\": [\"true\", \"false\", \"any\"],\n+ },\n+ }\n+ ]\n+\n+\n class MembershipTypeFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by membership type.\"\"\"\n \ndiff --git a/website/members/api/v2/views.py b/website/members/api/v2/views.py\n--- a/website/members/api/v2/views.py\n+++ b/website/members/api/v2/views.py\n@@ -23,7 +23,7 @@\n \n serializer_class = MemberListSerializer\n queryset = (\n- Member.current_members.all()\n+ Member.objects.all()\n .select_related(\"profile\")\n .prefetch_related(\"membership_set\")\n )\n@@ -43,6 +43,7 @@\n framework_filters.SearchFilter,\n filters.MembershipTypeFilter,\n filters.StartingYearFilter,\n+ filters.FormerMemberFilter,\n )\n ordering_fields = (\"first_name\", \"last_name\", \"username\")\n search_fields = (\n@@ -58,7 +59,7 @@\n \"\"\"Returns details of a member.\"\"\"\n \n serializer_class = MemberSerializer\n- queryset = Member.current_members.all()\n+ queryset = Member.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n", "issue": "Members API doesn't list all members\n### Describe the bug\r\nLuko, pk=23 on staging, is listed as member on the website, but is not a member according to the API\r\n\r\n### How to reproduce\r\nNo idea.\r\ncheck https://staging.thalia.nu/members/profile/23 vs https://staging.thalia.nu/api/v2/members/23/\r\n\r\n### Expected 
behaviour\r\nThese urls should agree on the memberness of Luko\r\n\r\n\n", "before_files": [{"content": "\"\"\"API views of the activemembers app.\"\"\"\n\nfrom django.shortcuts import get_object_or_404\n\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import filters as framework_filters\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView, UpdateAPIView\n\nfrom members.api.v2 import filters\nfrom members.api.v2.serializers.member import (\n MemberCurrentSerializer,\n MemberListSerializer,\n MemberSerializer,\n)\nfrom members.models import Member\nfrom thaliawebsite.api.openapi import OAuthAutoSchema\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\nfrom utils.media.services import fetch_thumbnails_db\n\n\nclass MemberListView(ListAPIView):\n \"\"\"Returns an overview of all members.\"\"\"\n\n serializer_class = MemberListSerializer\n queryset = (\n Member.current_members.all()\n .select_related(\"profile\")\n .prefetch_related(\"membership_set\")\n )\n\n def get_serializer(self, *args, **kwargs):\n if len(args) > 0:\n members = args[0]\n fetch_thumbnails_db([member.profile.photo for member in members])\n return super().get_serializer(*args, **kwargs)\n\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"members:read\"]\n filter_backends = (\n framework_filters.OrderingFilter,\n framework_filters.SearchFilter,\n filters.MembershipTypeFilter,\n filters.StartingYearFilter,\n )\n ordering_fields = (\"first_name\", \"last_name\", \"username\")\n search_fields = (\n \"profile__nickname\",\n \"profile__starting_year\",\n \"first_name\",\n \"last_name\",\n \"username\",\n )\n\n\nclass MemberDetailView(RetrieveAPIView):\n \"\"\"Returns details of a member.\"\"\"\n\n serializer_class = MemberSerializer\n queryset = Member.current_members.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"members:read\"]\n\n\nclass MemberCurrentView(MemberDetailView, UpdateAPIView):\n \"\"\"Returns details of the authenticated member.\"\"\"\n\n serializer_class = MemberCurrentSerializer\n schema = OAuthAutoSchema(operation_id_base=\"CurrentMember\")\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n ]\n required_scopes_per_method = {\n \"GET\": [\"profile:read\"],\n \"PATCH\": [\"profile:write\"],\n \"PUT\": [\"profile:write\"],\n }\n\n def get_object(self):\n return get_object_or_404(Member, pk=self.request.user.pk)\n", "path": "website/members/api/v2/views.py"}, {"content": "from rest_framework import filters\n\nfrom members.models import Membership\n\n\nclass StartingYearFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by starting year.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n starting_year = request.query_params.get(\"starting_year\", None)\n\n if starting_year:\n queryset = queryset.filter(profile__starting_year=starting_year)\n\n return queryset\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"starting_year\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by starting year\",\n \"schema\": {\n \"type\": \"number\",\n },\n }\n ]\n\n\nclass MembershipTypeFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by membership type.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n membership_type = request.query_params.get(\"membership_type\", None)\n\n if membership_type:\n memberships = 
Membership.objects.filter(type=membership_type)\n queryset = queryset.filter(pk__in=memberships.values(\"user__pk\"))\n\n return queryset\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"membership_type\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by membership type\",\n \"schema\": {\n \"type\": \"string\",\n },\n }\n ]\n", "path": "website/members/api/v2/filters.py"}], "after_files": [{"content": "\"\"\"API views of the activemembers app.\"\"\"\n\nfrom django.shortcuts import get_object_or_404\n\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import filters as framework_filters\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView, UpdateAPIView\n\nfrom members.api.v2 import filters\nfrom members.api.v2.serializers.member import (\n MemberCurrentSerializer,\n MemberListSerializer,\n MemberSerializer,\n)\nfrom members.models import Member\nfrom thaliawebsite.api.openapi import OAuthAutoSchema\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\nfrom utils.media.services import fetch_thumbnails_db\n\n\nclass MemberListView(ListAPIView):\n \"\"\"Returns an overview of all members.\"\"\"\n\n serializer_class = MemberListSerializer\n queryset = (\n Member.objects.all()\n .select_related(\"profile\")\n .prefetch_related(\"membership_set\")\n )\n\n def get_serializer(self, *args, **kwargs):\n if len(args) > 0:\n members = args[0]\n fetch_thumbnails_db([member.profile.photo for member in members])\n return super().get_serializer(*args, **kwargs)\n\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"members:read\"]\n filter_backends = (\n framework_filters.OrderingFilter,\n framework_filters.SearchFilter,\n filters.MembershipTypeFilter,\n filters.StartingYearFilter,\n filters.FormerMemberFilter,\n )\n ordering_fields = (\"first_name\", \"last_name\", \"username\")\n search_fields = (\n \"profile__nickname\",\n \"profile__starting_year\",\n \"first_name\",\n \"last_name\",\n \"username\",\n )\n\n\nclass MemberDetailView(RetrieveAPIView):\n \"\"\"Returns details of a member.\"\"\"\n\n serializer_class = MemberSerializer\n queryset = Member.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"members:read\"]\n\n\nclass MemberCurrentView(MemberDetailView, UpdateAPIView):\n \"\"\"Returns details of the authenticated member.\"\"\"\n\n serializer_class = MemberCurrentSerializer\n schema = OAuthAutoSchema(operation_id_base=\"CurrentMember\")\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n ]\n required_scopes_per_method = {\n \"GET\": [\"profile:read\"],\n \"PATCH\": [\"profile:write\"],\n \"PUT\": [\"profile:write\"],\n }\n\n def get_object(self):\n return get_object_or_404(Member, pk=self.request.user.pk)\n", "path": "website/members/api/v2/views.py"}, {"content": "from datetime import datetime\n\nfrom django.db.models import Q\nfrom django.utils import timezone\n\nfrom rest_framework import filters, serializers\n\nfrom members.models import Membership\n\n\nclass StartingYearFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by starting year.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n starting_year = request.query_params.get(\"starting_year\", None)\n\n if starting_year:\n queryset = queryset.filter(profile__starting_year=starting_year)\n\n return queryset\n\n def get_schema_operation_parameters(self, 
view):\n return [\n {\n \"name\": \"starting_year\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by starting year\",\n \"schema\": {\n \"type\": \"number\",\n },\n }\n ]\n\n\nclass FormerMemberFilter(filters.BaseFilterBackend):\n def filter_queryset(self, request, queryset, view):\n former = request.query_params.get(\"former\", \"false\")\n\n if former == \"false\":\n # Filter out former members\n return (\n queryset.exclude(membership=None)\n .filter(\n Q(membership__until__isnull=True)\n | Q(membership__until__gt=timezone.now().date())\n )\n .distinct()\n )\n elif former == \"true\":\n # Filter out current members\n\n memberships_query = Q(until__gt=datetime.now()) | Q(until=None)\n members_query = ~Q(id=None)\n\n # Filter out all current active memberships\n memberships_query &= Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n memberships = Membership.objects.filter(memberships_query)\n members_query &= ~Q(pk__in=memberships.values(\"user__pk\"))\n\n memberships_query = Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n memberships = Membership.objects.filter(memberships_query)\n all_memberships = Membership.objects.all()\n # Only keep members that were once members, or are legacy users\n # that do not have any memberships at all\n members_query &= Q(pk__in=memberships.values(\"user__pk\")) | ~Q(\n pk__in=all_memberships.values(\"user__pk\")\n )\n\n return queryset.filter(members_query)\n elif former == \"any\":\n # Include both former and current members\n return queryset\n else:\n raise serializers.ValidationError(\"invalid former parameter\")\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"former\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Include former members or only former members\",\n \"schema\": {\n \"type\": \"string\",\n \"enum\": [\"true\", \"false\", \"any\"],\n },\n }\n ]\n\n\nclass MembershipTypeFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by membership type.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n membership_type = request.query_params.get(\"membership_type\", None)\n\n if membership_type:\n memberships = Membership.objects.filter(type=membership_type)\n queryset = queryset.filter(pk__in=memberships.values(\"user__pk\"))\n\n return queryset\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"membership_type\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by membership type\",\n \"schema\": {\n \"type\": \"string\",\n },\n }\n ]\n", "path": "website/members/api/v2/filters.py"}]} | 1,487 | 845 |
gh_patches_debug_4725 | rasdani/github-patches | git_diff | getnikola__nikola-2010 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Author pages show stories
example: https://getnikola.com/authors/roberto-alsina.html → anything between 7.5.0 and 7.6.1 (most of which were written by me, but that’s not important)
cc @jjconti
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/task/authors.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2015 Juanjo Conti.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Render the author pages and feeds."""
28
29 from __future__ import unicode_literals
30 import os
31 import natsort
32 try:
33 from urlparse import urljoin
34 except ImportError:
35 from urllib.parse import urljoin # NOQA
36 from collections import defaultdict
37
38 from nikola.plugin_categories import Task
39 from nikola import utils
40
41
42 class RenderAuthors(Task):
43
44 """Render the author pages and feeds."""
45
46 name = "render_authors"
47 posts_per_author = None
48
49 def set_site(self, site):
50 """Set Nikola site."""
51 if site.config["ENABLE_AUTHOR_PAGES"]:
52 site.register_path_handler('author_index', self.author_index_path)
53 site.register_path_handler('author', self.author_path)
54 site.register_path_handler('author_atom', self.author_atom_path)
55 site.register_path_handler('author_rss', self.author_rss_path)
56 return super(RenderAuthors, self).set_site(site)
57
58 def gen_tasks(self):
59 """Render the author pages and feeds."""
60 kw = {
61 "translations": self.site.config["TRANSLATIONS"],
62 "blog_title": self.site.config["BLOG_TITLE"],
63 "site_url": self.site.config["SITE_URL"],
64 "base_url": self.site.config["BASE_URL"],
65 "messages": self.site.MESSAGES,
66 "output_folder": self.site.config['OUTPUT_FOLDER'],
67 "filters": self.site.config['FILTERS'],
68 'author_path': self.site.config['AUTHOR_PATH'],
69 "author_pages_are_indexes": self.site.config['AUTHOR_PAGES_ARE_INDEXES'],
70 "generate_rss": self.site.config['GENERATE_RSS'],
71 "rss_teasers": self.site.config["RSS_TEASERS"],
72 "rss_plain": self.site.config["RSS_PLAIN"],
73 "rss_link_append_query": self.site.config["RSS_LINKS_APPEND_QUERY"],
74 "show_untranslated_posts": self.site.config['SHOW_UNTRANSLATED_POSTS'],
75 "feed_length": self.site.config['FEED_LENGTH'],
76 "tzinfo": self.site.tzinfo,
77 "pretty_urls": self.site.config['PRETTY_URLS'],
78 "strip_indexes": self.site.config['STRIP_INDEXES'],
79 "index_file": self.site.config['INDEX_FILE'],
80 }
81
82 yield self.group_task()
83 self.site.scan_posts()
84
85 generate_author_pages = self.site.config["ENABLE_AUTHOR_PAGES"] and len(self._posts_per_author()) > 1
86 self.site.GLOBAL_CONTEXT["author_pages_generated"] = generate_author_pages
87 if generate_author_pages:
88 yield self.list_authors_page(kw)
89
90 if not self._posts_per_author(): # this may be self.site.posts_per_author
91 return
92
93 author_list = list(self._posts_per_author().items())
94
95 def render_lists(author, posts):
96 """Render author pages as RSS files and lists/indexes."""
97 post_list = sorted(posts, key=lambda a: a.date)
98 post_list.reverse()
99 for lang in kw["translations"]:
100 if kw["show_untranslated_posts"]:
101 filtered_posts = post_list
102 else:
103 filtered_posts = [x for x in post_list if x.is_translation_available(lang)]
104 if kw["generate_rss"]:
105 yield self.author_rss(author, lang, filtered_posts, kw)
106 # Render HTML
107 if kw['author_pages_are_indexes']:
108 yield self.author_page_as_index(author, lang, filtered_posts, kw)
109 else:
110 yield self.author_page_as_list(author, lang, filtered_posts, kw)
111
112 for author, posts in author_list:
113 for task in render_lists(author, posts):
114 yield task
115
116 def _create_authors_page(self, kw):
117 """Create a global "all authors" page for each language."""
118 template_name = "authors.tmpl"
119 kw = kw.copy()
120 for lang in kw["translations"]:
121 authors = natsort.natsorted([author for author in self._posts_per_author().keys()],
122 alg=natsort.ns.F | natsort.ns.IC)
123 has_authors = (authors != [])
124 kw['authors'] = authors
125 output_name = os.path.join(
126 kw['output_folder'], self.site.path('author_index', None, lang))
127 context = {}
128 if has_authors:
129 context["title"] = kw["messages"][lang]["Authors"]
130 context["items"] = [(author, self.site.link("author", author, lang)) for author
131 in authors]
132 context["description"] = context["title"]
133 else:
134 context["items"] = None
135 context["permalink"] = self.site.link("author_index", None, lang)
136 context["pagekind"] = ["list", "authors_page"]
137 task = self.site.generic_post_list_renderer(
138 lang,
139 [],
140 output_name,
141 template_name,
142 kw['filters'],
143 context,
144 )
145 task['uptodate'] = task['uptodate'] + [utils.config_changed(kw, 'nikola.plugins.task.authors:page')]
146 task['basename'] = str(self.name)
147 yield task
148
149 def list_authors_page(self, kw):
150 """Create a global "all authors" page for each language."""
151 yield self._create_authors_page(kw)
152
153 def _get_title(self, author):
154 return author
155
156 def _get_description(self, author, lang):
157 descriptions = self.site.config['AUTHOR_PAGES_DESCRIPTIONS']
158 return descriptions[lang][author] if lang in descriptions and author in descriptions[lang] else None
159
160 def author_page_as_index(self, author, lang, post_list, kw):
161 """Render a sort of index page collection using only this author's posts."""
162 kind = "author"
163
164 def page_link(i, displayed_i, num_pages, force_addition, extension=None):
165 feed = "_atom" if extension == ".atom" else ""
166 return utils.adjust_name_for_index_link(self.site.link(kind + feed, author, lang), i, displayed_i, lang, self.site, force_addition, extension)
167
168 def page_path(i, displayed_i, num_pages, force_addition, extension=None):
169 feed = "_atom" if extension == ".atom" else ""
170 return utils.adjust_name_for_index_path(self.site.path(kind + feed, author, lang), i, displayed_i, lang, self.site, force_addition, extension)
171
172 context_source = {}
173 title = self._get_title(author)
174 if kw["generate_rss"]:
175 # On a author page, the feeds include the author's feeds
176 rss_link = ("""<link rel="alternate" type="application/rss+xml" """
177 """title="RSS for author """
178 """{0} ({1})" href="{2}">""".format(
179 title, lang, self.site.link(kind + "_rss", author, lang)))
180 context_source['rss_link'] = rss_link
181 context_source["author"] = title
182 indexes_title = kw["messages"][lang]["Posts by %s"] % title
183 context_source["description"] = self._get_description(author, lang)
184 context_source["pagekind"] = ["index", "author_page"]
185 template_name = "authorindex.tmpl"
186
187 yield self.site.generic_index_renderer(lang, post_list, indexes_title, template_name, context_source, kw, str(self.name), page_link, page_path)
188
189 def author_page_as_list(self, author, lang, post_list, kw):
190 """Render a single flat link list with this author's posts."""
191 kind = "author"
192 template_name = "author.tmpl"
193 output_name = os.path.join(kw['output_folder'], self.site.path(
194 kind, author, lang))
195 context = {}
196 context["lang"] = lang
197 title = self._get_title(author)
198 context["author"] = title
199 context["title"] = kw["messages"][lang]["Posts by %s"] % title
200 context["posts"] = post_list
201 context["permalink"] = self.site.link(kind, author, lang)
202 context["kind"] = kind
203 context["description"] = self._get_description(author, lang)
204 context["pagekind"] = ["list", "author_page"]
205 task = self.site.generic_post_list_renderer(
206 lang,
207 post_list,
208 output_name,
209 template_name,
210 kw['filters'],
211 context,
212 )
213 task['uptodate'] = task['uptodate'] + [utils.config_changed(kw, 'nikola.plugins.task.authors:list')]
214 task['basename'] = str(self.name)
215 yield task
216
217 def author_rss(self, author, lang, posts, kw):
218 """Create a RSS feed for a single author in a given language."""
219 kind = "author"
220 # Render RSS
221 output_name = os.path.normpath(
222 os.path.join(kw['output_folder'],
223 self.site.path(kind + "_rss", author, lang)))
224 feed_url = urljoin(self.site.config['BASE_URL'], self.site.link(kind + "_rss", author, lang).lstrip('/'))
225 deps = []
226 deps_uptodate = []
227 post_list = sorted(posts, key=lambda a: a.date)
228 post_list.reverse()
229 for post in post_list:
230 deps += post.deps(lang)
231 deps_uptodate += post.deps_uptodate(lang)
232 task = {
233 'basename': str(self.name),
234 'name': output_name,
235 'file_dep': deps,
236 'targets': [output_name],
237 'actions': [(utils.generic_rss_renderer,
238 (lang, "{0} ({1})".format(kw["blog_title"](lang), self._get_title(author)),
239 kw["site_url"], None, post_list,
240 output_name, kw["rss_teasers"], kw["rss_plain"], kw['feed_length'],
241 feed_url, None, kw["rss_link_append_query"]))],
242 'clean': True,
243 'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.authors:rss')] + deps_uptodate,
244 'task_dep': ['render_posts'],
245 }
246 return utils.apply_filters(task, kw['filters'])
247
248 def slugify_author_name(self, name):
249 """Slugify an author name."""
250 if self.site.config['SLUG_AUTHOR_PATH']:
251 name = utils.slugify(name)
252 return name
253
254 def author_index_path(self, name, lang):
255 """Return path to the author index."""
256 return [_f for _f in [self.site.config['TRANSLATIONS'][lang],
257 self.site.config['AUTHOR_PATH'],
258 self.site.config['INDEX_FILE']] if _f]
259
260 def author_path(self, name, lang):
261 """Return path to an author."""
262 if self.site.config['PRETTY_URLS']:
263 return [_f for _f in [
264 self.site.config['TRANSLATIONS'][lang],
265 self.site.config['AUTHOR_PATH'],
266 self.slugify_author_name(name),
267 self.site.config['INDEX_FILE']] if _f]
268 else:
269 return [_f for _f in [
270 self.site.config['TRANSLATIONS'][lang],
271 self.site.config['AUTHOR_PATH'],
272 self.slugify_author_name(name) + ".html"] if _f]
273
274 def author_atom_path(self, name, lang):
275 """Return path to an author Atom feed."""
276 return [_f for _f in [self.site.config['TRANSLATIONS'][lang],
277 self.site.config['AUTHOR_PATH'], self.slugify_author_name(name) + ".atom"] if
278 _f]
279
280 def author_rss_path(self, name, lang):
281 """Return path to an author RSS feed."""
282 return [_f for _f in [self.site.config['TRANSLATIONS'][lang],
283 self.site.config['AUTHOR_PATH'], self.slugify_author_name(name) + ".xml"] if
284 _f]
285
286 def _add_extension(self, path, extension):
287 path[-1] += extension
288 return path
289
290 def _posts_per_author(self):
291 """Return a dict of posts per author."""
292 if self.posts_per_author is None:
293 self.posts_per_author = defaultdict(list)
294 for post in self.site.timeline:
295 self.posts_per_author[post.author()].append(post)
296 return self.posts_per_author
297
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nikola/plugins/task/authors.py b/nikola/plugins/task/authors.py
--- a/nikola/plugins/task/authors.py
+++ b/nikola/plugins/task/authors.py
@@ -292,5 +292,6 @@
if self.posts_per_author is None:
self.posts_per_author = defaultdict(list)
for post in self.site.timeline:
- self.posts_per_author[post.author()].append(post)
+ if post.is_post:
+ self.posts_per_author[post.author()].append(post)
return self.posts_per_author
| {"golden_diff": "diff --git a/nikola/plugins/task/authors.py b/nikola/plugins/task/authors.py\n--- a/nikola/plugins/task/authors.py\n+++ b/nikola/plugins/task/authors.py\n@@ -292,5 +292,6 @@\n if self.posts_per_author is None:\n self.posts_per_author = defaultdict(list)\n for post in self.site.timeline:\n- self.posts_per_author[post.author()].append(post)\n+ if post.is_post:\n+ self.posts_per_author[post.author()].append(post)\n return self.posts_per_author\n", "issue": "Author pages show stories\nexample: https://getnikola.com/authors/roberto-alsina.html \u2192 anything between 7.5.0 and 7.6.1 (most of which were written by me, but that\u2019s not important)\n\ncc @jjconti\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2015 Juanjo Conti.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Render the author pages and feeds.\"\"\"\n\nfrom __future__ import unicode_literals\nimport os\nimport natsort\ntry:\n from urlparse import urljoin\nexcept ImportError:\n from urllib.parse import urljoin # NOQA\nfrom collections import defaultdict\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass RenderAuthors(Task):\n\n \"\"\"Render the author pages and feeds.\"\"\"\n\n name = \"render_authors\"\n posts_per_author = None\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n if site.config[\"ENABLE_AUTHOR_PAGES\"]:\n site.register_path_handler('author_index', self.author_index_path)\n site.register_path_handler('author', self.author_path)\n site.register_path_handler('author_atom', self.author_atom_path)\n site.register_path_handler('author_rss', self.author_rss_path)\n return super(RenderAuthors, self).set_site(site)\n\n def gen_tasks(self):\n \"\"\"Render the author pages and feeds.\"\"\"\n kw = {\n \"translations\": self.site.config[\"TRANSLATIONS\"],\n \"blog_title\": self.site.config[\"BLOG_TITLE\"],\n \"site_url\": self.site.config[\"SITE_URL\"],\n \"base_url\": self.site.config[\"BASE_URL\"],\n \"messages\": self.site.MESSAGES,\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n 'author_path': self.site.config['AUTHOR_PATH'],\n \"author_pages_are_indexes\": self.site.config['AUTHOR_PAGES_ARE_INDEXES'],\n \"generate_rss\": self.site.config['GENERATE_RSS'],\n \"rss_teasers\": self.site.config[\"RSS_TEASERS\"],\n \"rss_plain\": self.site.config[\"RSS_PLAIN\"],\n \"rss_link_append_query\": self.site.config[\"RSS_LINKS_APPEND_QUERY\"],\n 
\"show_untranslated_posts\": self.site.config['SHOW_UNTRANSLATED_POSTS'],\n \"feed_length\": self.site.config['FEED_LENGTH'],\n \"tzinfo\": self.site.tzinfo,\n \"pretty_urls\": self.site.config['PRETTY_URLS'],\n \"strip_indexes\": self.site.config['STRIP_INDEXES'],\n \"index_file\": self.site.config['INDEX_FILE'],\n }\n\n yield self.group_task()\n self.site.scan_posts()\n\n generate_author_pages = self.site.config[\"ENABLE_AUTHOR_PAGES\"] and len(self._posts_per_author()) > 1\n self.site.GLOBAL_CONTEXT[\"author_pages_generated\"] = generate_author_pages\n if generate_author_pages:\n yield self.list_authors_page(kw)\n\n if not self._posts_per_author(): # this may be self.site.posts_per_author\n return\n\n author_list = list(self._posts_per_author().items())\n\n def render_lists(author, posts):\n \"\"\"Render author pages as RSS files and lists/indexes.\"\"\"\n post_list = sorted(posts, key=lambda a: a.date)\n post_list.reverse()\n for lang in kw[\"translations\"]:\n if kw[\"show_untranslated_posts\"]:\n filtered_posts = post_list\n else:\n filtered_posts = [x for x in post_list if x.is_translation_available(lang)]\n if kw[\"generate_rss\"]:\n yield self.author_rss(author, lang, filtered_posts, kw)\n # Render HTML\n if kw['author_pages_are_indexes']:\n yield self.author_page_as_index(author, lang, filtered_posts, kw)\n else:\n yield self.author_page_as_list(author, lang, filtered_posts, kw)\n\n for author, posts in author_list:\n for task in render_lists(author, posts):\n yield task\n\n def _create_authors_page(self, kw):\n \"\"\"Create a global \"all authors\" page for each language.\"\"\"\n template_name = \"authors.tmpl\"\n kw = kw.copy()\n for lang in kw[\"translations\"]:\n authors = natsort.natsorted([author for author in self._posts_per_author().keys()],\n alg=natsort.ns.F | natsort.ns.IC)\n has_authors = (authors != [])\n kw['authors'] = authors\n output_name = os.path.join(\n kw['output_folder'], self.site.path('author_index', None, lang))\n context = {}\n if has_authors:\n context[\"title\"] = kw[\"messages\"][lang][\"Authors\"]\n context[\"items\"] = [(author, self.site.link(\"author\", author, lang)) for author\n in authors]\n context[\"description\"] = context[\"title\"]\n else:\n context[\"items\"] = None\n context[\"permalink\"] = self.site.link(\"author_index\", None, lang)\n context[\"pagekind\"] = [\"list\", \"authors_page\"]\n task = self.site.generic_post_list_renderer(\n lang,\n [],\n output_name,\n template_name,\n kw['filters'],\n context,\n )\n task['uptodate'] = task['uptodate'] + [utils.config_changed(kw, 'nikola.plugins.task.authors:page')]\n task['basename'] = str(self.name)\n yield task\n\n def list_authors_page(self, kw):\n \"\"\"Create a global \"all authors\" page for each language.\"\"\"\n yield self._create_authors_page(kw)\n\n def _get_title(self, author):\n return author\n\n def _get_description(self, author, lang):\n descriptions = self.site.config['AUTHOR_PAGES_DESCRIPTIONS']\n return descriptions[lang][author] if lang in descriptions and author in descriptions[lang] else None\n\n def author_page_as_index(self, author, lang, post_list, kw):\n \"\"\"Render a sort of index page collection using only this author's posts.\"\"\"\n kind = \"author\"\n\n def page_link(i, displayed_i, num_pages, force_addition, extension=None):\n feed = \"_atom\" if extension == \".atom\" else \"\"\n return utils.adjust_name_for_index_link(self.site.link(kind + feed, author, lang), i, displayed_i, lang, self.site, force_addition, extension)\n\n def page_path(i, displayed_i, 
num_pages, force_addition, extension=None):\n feed = \"_atom\" if extension == \".atom\" else \"\"\n return utils.adjust_name_for_index_path(self.site.path(kind + feed, author, lang), i, displayed_i, lang, self.site, force_addition, extension)\n\n context_source = {}\n title = self._get_title(author)\n if kw[\"generate_rss\"]:\n # On a author page, the feeds include the author's feeds\n rss_link = (\"\"\"<link rel=\"alternate\" type=\"application/rss+xml\" \"\"\"\n \"\"\"title=\"RSS for author \"\"\"\n \"\"\"{0} ({1})\" href=\"{2}\">\"\"\".format(\n title, lang, self.site.link(kind + \"_rss\", author, lang)))\n context_source['rss_link'] = rss_link\n context_source[\"author\"] = title\n indexes_title = kw[\"messages\"][lang][\"Posts by %s\"] % title\n context_source[\"description\"] = self._get_description(author, lang)\n context_source[\"pagekind\"] = [\"index\", \"author_page\"]\n template_name = \"authorindex.tmpl\"\n\n yield self.site.generic_index_renderer(lang, post_list, indexes_title, template_name, context_source, kw, str(self.name), page_link, page_path)\n\n def author_page_as_list(self, author, lang, post_list, kw):\n \"\"\"Render a single flat link list with this author's posts.\"\"\"\n kind = \"author\"\n template_name = \"author.tmpl\"\n output_name = os.path.join(kw['output_folder'], self.site.path(\n kind, author, lang))\n context = {}\n context[\"lang\"] = lang\n title = self._get_title(author)\n context[\"author\"] = title\n context[\"title\"] = kw[\"messages\"][lang][\"Posts by %s\"] % title\n context[\"posts\"] = post_list\n context[\"permalink\"] = self.site.link(kind, author, lang)\n context[\"kind\"] = kind\n context[\"description\"] = self._get_description(author, lang)\n context[\"pagekind\"] = [\"list\", \"author_page\"]\n task = self.site.generic_post_list_renderer(\n lang,\n post_list,\n output_name,\n template_name,\n kw['filters'],\n context,\n )\n task['uptodate'] = task['uptodate'] + [utils.config_changed(kw, 'nikola.plugins.task.authors:list')]\n task['basename'] = str(self.name)\n yield task\n\n def author_rss(self, author, lang, posts, kw):\n \"\"\"Create a RSS feed for a single author in a given language.\"\"\"\n kind = \"author\"\n # Render RSS\n output_name = os.path.normpath(\n os.path.join(kw['output_folder'],\n self.site.path(kind + \"_rss\", author, lang)))\n feed_url = urljoin(self.site.config['BASE_URL'], self.site.link(kind + \"_rss\", author, lang).lstrip('/'))\n deps = []\n deps_uptodate = []\n post_list = sorted(posts, key=lambda a: a.date)\n post_list.reverse()\n for post in post_list:\n deps += post.deps(lang)\n deps_uptodate += post.deps_uptodate(lang)\n task = {\n 'basename': str(self.name),\n 'name': output_name,\n 'file_dep': deps,\n 'targets': [output_name],\n 'actions': [(utils.generic_rss_renderer,\n (lang, \"{0} ({1})\".format(kw[\"blog_title\"](lang), self._get_title(author)),\n kw[\"site_url\"], None, post_list,\n output_name, kw[\"rss_teasers\"], kw[\"rss_plain\"], kw['feed_length'],\n feed_url, None, kw[\"rss_link_append_query\"]))],\n 'clean': True,\n 'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.authors:rss')] + deps_uptodate,\n 'task_dep': ['render_posts'],\n }\n return utils.apply_filters(task, kw['filters'])\n\n def slugify_author_name(self, name):\n \"\"\"Slugify an author name.\"\"\"\n if self.site.config['SLUG_AUTHOR_PATH']:\n name = utils.slugify(name)\n return name\n\n def author_index_path(self, name, lang):\n \"\"\"Return path to the author index.\"\"\"\n return [_f for _f in 
[self.site.config['TRANSLATIONS'][lang],\n self.site.config['AUTHOR_PATH'],\n self.site.config['INDEX_FILE']] if _f]\n\n def author_path(self, name, lang):\n \"\"\"Return path to an author.\"\"\"\n if self.site.config['PRETTY_URLS']:\n return [_f for _f in [\n self.site.config['TRANSLATIONS'][lang],\n self.site.config['AUTHOR_PATH'],\n self.slugify_author_name(name),\n self.site.config['INDEX_FILE']] if _f]\n else:\n return [_f for _f in [\n self.site.config['TRANSLATIONS'][lang],\n self.site.config['AUTHOR_PATH'],\n self.slugify_author_name(name) + \".html\"] if _f]\n\n def author_atom_path(self, name, lang):\n \"\"\"Return path to an author Atom feed.\"\"\"\n return [_f for _f in [self.site.config['TRANSLATIONS'][lang],\n self.site.config['AUTHOR_PATH'], self.slugify_author_name(name) + \".atom\"] if\n _f]\n\n def author_rss_path(self, name, lang):\n \"\"\"Return path to an author RSS feed.\"\"\"\n return [_f for _f in [self.site.config['TRANSLATIONS'][lang],\n self.site.config['AUTHOR_PATH'], self.slugify_author_name(name) + \".xml\"] if\n _f]\n\n def _add_extension(self, path, extension):\n path[-1] += extension\n return path\n\n def _posts_per_author(self):\n \"\"\"Return a dict of posts per author.\"\"\"\n if self.posts_per_author is None:\n self.posts_per_author = defaultdict(list)\n for post in self.site.timeline:\n self.posts_per_author[post.author()].append(post)\n return self.posts_per_author\n", "path": "nikola/plugins/task/authors.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2015 Juanjo Conti.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Render the author pages and feeds.\"\"\"\n\nfrom __future__ import unicode_literals\nimport os\nimport natsort\ntry:\n from urlparse import urljoin\nexcept ImportError:\n from urllib.parse import urljoin # NOQA\nfrom collections import defaultdict\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass RenderAuthors(Task):\n\n \"\"\"Render the author pages and feeds.\"\"\"\n\n name = \"render_authors\"\n posts_per_author = None\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n if site.config[\"ENABLE_AUTHOR_PAGES\"]:\n site.register_path_handler('author_index', self.author_index_path)\n site.register_path_handler('author', self.author_path)\n site.register_path_handler('author_atom', self.author_atom_path)\n site.register_path_handler('author_rss', self.author_rss_path)\n return super(RenderAuthors, self).set_site(site)\n\n def gen_tasks(self):\n \"\"\"Render the author pages and feeds.\"\"\"\n kw = {\n \"translations\": self.site.config[\"TRANSLATIONS\"],\n \"blog_title\": self.site.config[\"BLOG_TITLE\"],\n \"site_url\": self.site.config[\"SITE_URL\"],\n \"base_url\": self.site.config[\"BASE_URL\"],\n \"messages\": self.site.MESSAGES,\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n 'author_path': self.site.config['AUTHOR_PATH'],\n \"author_pages_are_indexes\": self.site.config['AUTHOR_PAGES_ARE_INDEXES'],\n \"generate_rss\": self.site.config['GENERATE_RSS'],\n \"rss_teasers\": self.site.config[\"RSS_TEASERS\"],\n \"rss_plain\": self.site.config[\"RSS_PLAIN\"],\n \"rss_link_append_query\": self.site.config[\"RSS_LINKS_APPEND_QUERY\"],\n \"show_untranslated_posts\": self.site.config['SHOW_UNTRANSLATED_POSTS'],\n \"feed_length\": self.site.config['FEED_LENGTH'],\n \"tzinfo\": self.site.tzinfo,\n \"pretty_urls\": self.site.config['PRETTY_URLS'],\n \"strip_indexes\": self.site.config['STRIP_INDEXES'],\n \"index_file\": self.site.config['INDEX_FILE'],\n }\n\n yield self.group_task()\n self.site.scan_posts()\n\n generate_author_pages = self.site.config[\"ENABLE_AUTHOR_PAGES\"] and len(self._posts_per_author()) > 1\n self.site.GLOBAL_CONTEXT[\"author_pages_generated\"] = generate_author_pages\n if generate_author_pages:\n yield self.list_authors_page(kw)\n\n if not self._posts_per_author(): # this may be self.site.posts_per_author\n return\n\n author_list = list(self._posts_per_author().items())\n\n def render_lists(author, posts):\n \"\"\"Render author pages as RSS files and lists/indexes.\"\"\"\n post_list = sorted(posts, key=lambda a: a.date)\n post_list.reverse()\n for lang in kw[\"translations\"]:\n if kw[\"show_untranslated_posts\"]:\n filtered_posts = post_list\n else:\n filtered_posts = [x for x in post_list if x.is_translation_available(lang)]\n if kw[\"generate_rss\"]:\n yield self.author_rss(author, lang, filtered_posts, kw)\n # Render HTML\n if kw['author_pages_are_indexes']:\n yield self.author_page_as_index(author, lang, filtered_posts, kw)\n else:\n yield self.author_page_as_list(author, lang, filtered_posts, kw)\n\n for author, posts in author_list:\n for task in render_lists(author, posts):\n yield task\n\n def _create_authors_page(self, kw):\n \"\"\"Create a global \"all authors\" page for each language.\"\"\"\n 
template_name = \"authors.tmpl\"\n kw = kw.copy()\n for lang in kw[\"translations\"]:\n authors = natsort.natsorted([author for author in self._posts_per_author().keys()],\n alg=natsort.ns.F | natsort.ns.IC)\n has_authors = (authors != [])\n kw['authors'] = authors\n output_name = os.path.join(\n kw['output_folder'], self.site.path('author_index', None, lang))\n context = {}\n if has_authors:\n context[\"title\"] = kw[\"messages\"][lang][\"Authors\"]\n context[\"items\"] = [(author, self.site.link(\"author\", author, lang)) for author\n in authors]\n context[\"description\"] = context[\"title\"]\n else:\n context[\"items\"] = None\n context[\"permalink\"] = self.site.link(\"author_index\", None, lang)\n context[\"pagekind\"] = [\"list\", \"authors_page\"]\n task = self.site.generic_post_list_renderer(\n lang,\n [],\n output_name,\n template_name,\n kw['filters'],\n context,\n )\n task['uptodate'] = task['uptodate'] + [utils.config_changed(kw, 'nikola.plugins.task.authors:page')]\n task['basename'] = str(self.name)\n yield task\n\n def list_authors_page(self, kw):\n \"\"\"Create a global \"all authors\" page for each language.\"\"\"\n yield self._create_authors_page(kw)\n\n def _get_title(self, author):\n return author\n\n def _get_description(self, author, lang):\n descriptions = self.site.config['AUTHOR_PAGES_DESCRIPTIONS']\n return descriptions[lang][author] if lang in descriptions and author in descriptions[lang] else None\n\n def author_page_as_index(self, author, lang, post_list, kw):\n \"\"\"Render a sort of index page collection using only this author's posts.\"\"\"\n kind = \"author\"\n\n def page_link(i, displayed_i, num_pages, force_addition, extension=None):\n feed = \"_atom\" if extension == \".atom\" else \"\"\n return utils.adjust_name_for_index_link(self.site.link(kind + feed, author, lang), i, displayed_i, lang, self.site, force_addition, extension)\n\n def page_path(i, displayed_i, num_pages, force_addition, extension=None):\n feed = \"_atom\" if extension == \".atom\" else \"\"\n return utils.adjust_name_for_index_path(self.site.path(kind + feed, author, lang), i, displayed_i, lang, self.site, force_addition, extension)\n\n context_source = {}\n title = self._get_title(author)\n if kw[\"generate_rss\"]:\n # On a author page, the feeds include the author's feeds\n rss_link = (\"\"\"<link rel=\"alternate\" type=\"application/rss+xml\" \"\"\"\n \"\"\"title=\"RSS for author \"\"\"\n \"\"\"{0} ({1})\" href=\"{2}\">\"\"\".format(\n title, lang, self.site.link(kind + \"_rss\", author, lang)))\n context_source['rss_link'] = rss_link\n context_source[\"author\"] = title\n indexes_title = kw[\"messages\"][lang][\"Posts by %s\"] % title\n context_source[\"description\"] = self._get_description(author, lang)\n context_source[\"pagekind\"] = [\"index\", \"author_page\"]\n template_name = \"authorindex.tmpl\"\n\n yield self.site.generic_index_renderer(lang, post_list, indexes_title, template_name, context_source, kw, str(self.name), page_link, page_path)\n\n def author_page_as_list(self, author, lang, post_list, kw):\n \"\"\"Render a single flat link list with this author's posts.\"\"\"\n kind = \"author\"\n template_name = \"author.tmpl\"\n output_name = os.path.join(kw['output_folder'], self.site.path(\n kind, author, lang))\n context = {}\n context[\"lang\"] = lang\n title = self._get_title(author)\n context[\"author\"] = title\n context[\"title\"] = kw[\"messages\"][lang][\"Posts by %s\"] % title\n context[\"posts\"] = post_list\n context[\"permalink\"] = self.site.link(kind, 
author, lang)\n context[\"kind\"] = kind\n context[\"description\"] = self._get_description(author, lang)\n context[\"pagekind\"] = [\"list\", \"author_page\"]\n task = self.site.generic_post_list_renderer(\n lang,\n post_list,\n output_name,\n template_name,\n kw['filters'],\n context,\n )\n task['uptodate'] = task['uptodate'] + [utils.config_changed(kw, 'nikola.plugins.task.authors:list')]\n task['basename'] = str(self.name)\n yield task\n\n def author_rss(self, author, lang, posts, kw):\n \"\"\"Create a RSS feed for a single author in a given language.\"\"\"\n kind = \"author\"\n # Render RSS\n output_name = os.path.normpath(\n os.path.join(kw['output_folder'],\n self.site.path(kind + \"_rss\", author, lang)))\n feed_url = urljoin(self.site.config['BASE_URL'], self.site.link(kind + \"_rss\", author, lang).lstrip('/'))\n deps = []\n deps_uptodate = []\n post_list = sorted(posts, key=lambda a: a.date)\n post_list.reverse()\n for post in post_list:\n deps += post.deps(lang)\n deps_uptodate += post.deps_uptodate(lang)\n task = {\n 'basename': str(self.name),\n 'name': output_name,\n 'file_dep': deps,\n 'targets': [output_name],\n 'actions': [(utils.generic_rss_renderer,\n (lang, \"{0} ({1})\".format(kw[\"blog_title\"](lang), self._get_title(author)),\n kw[\"site_url\"], None, post_list,\n output_name, kw[\"rss_teasers\"], kw[\"rss_plain\"], kw['feed_length'],\n feed_url, None, kw[\"rss_link_append_query\"]))],\n 'clean': True,\n 'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.authors:rss')] + deps_uptodate,\n 'task_dep': ['render_posts'],\n }\n return utils.apply_filters(task, kw['filters'])\n\n def slugify_author_name(self, name):\n \"\"\"Slugify an author name.\"\"\"\n if self.site.config['SLUG_AUTHOR_PATH']:\n name = utils.slugify(name)\n return name\n\n def author_index_path(self, name, lang):\n \"\"\"Return path to the author index.\"\"\"\n return [_f for _f in [self.site.config['TRANSLATIONS'][lang],\n self.site.config['AUTHOR_PATH'],\n self.site.config['INDEX_FILE']] if _f]\n\n def author_path(self, name, lang):\n \"\"\"Return path to an author.\"\"\"\n if self.site.config['PRETTY_URLS']:\n return [_f for _f in [\n self.site.config['TRANSLATIONS'][lang],\n self.site.config['AUTHOR_PATH'],\n self.slugify_author_name(name),\n self.site.config['INDEX_FILE']] if _f]\n else:\n return [_f for _f in [\n self.site.config['TRANSLATIONS'][lang],\n self.site.config['AUTHOR_PATH'],\n self.slugify_author_name(name) + \".html\"] if _f]\n\n def author_atom_path(self, name, lang):\n \"\"\"Return path to an author Atom feed.\"\"\"\n return [_f for _f in [self.site.config['TRANSLATIONS'][lang],\n self.site.config['AUTHOR_PATH'], self.slugify_author_name(name) + \".atom\"] if\n _f]\n\n def author_rss_path(self, name, lang):\n \"\"\"Return path to an author RSS feed.\"\"\"\n return [_f for _f in [self.site.config['TRANSLATIONS'][lang],\n self.site.config['AUTHOR_PATH'], self.slugify_author_name(name) + \".xml\"] if\n _f]\n\n def _add_extension(self, path, extension):\n path[-1] += extension\n return path\n\n def _posts_per_author(self):\n \"\"\"Return a dict of posts per author.\"\"\"\n if self.posts_per_author is None:\n self.posts_per_author = defaultdict(list)\n for post in self.site.timeline:\n if post.is_post:\n self.posts_per_author[post.author()].append(post)\n return self.posts_per_author\n", "path": "nikola/plugins/task/authors.py"}]} | 3,953 | 124 |
gh_patches_debug_8184 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-2766 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dbt compile fails against redshift when using multi-threading
### Describe the bug
When I run `dbt compile` against our redshift data warehouse the command fails with the error
```
KeyError: 'endpoint_resolver'
```
The error only occurs when threads > 1 and `method: iam` is used.
From what I can gather, this is happening because the boto session object is not thread-safe and is being accessed from multiple threads without protection; this happens during the call to get_tmp_iam_cluster_credentials.
### Steps To Reproduce
* Create a dbt project containing a significant number of models.
* Configure the target in the profiles.yml file to point to a redshift dwh, with threads > 1 and method iam:
```
type: redshift
method: iam
threads: 8
host: xxxx.redshift.amazonaws.com
cluster_id: xxx
port: 5439
user: xxx
dbname: xxx
schema: xxx
```
* Run `dbt compile`
### Expected behavior
That `dbt compile` succeeds.
### Screenshots and log output
```
2020-09-14 11:15:23.743840 (MainThread): Traceback (most recent call last):
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/postgres/connections.py", line 46, in exception_handler
yield
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/sql/connections.py", line 76, in add_query
cursor = connection.handle.cursor()
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/contracts/connection.py", line 69, in handle
self._handle.resolve(self)
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/contracts/connection.py", line 90, in resolve
return self.opener(connection)
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/postgres/connections.py", line 77, in open
credentials = cls.get_credentials(connection.credentials)
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/redshift/connections.py", line 152, in get_credentials
return cls.get_tmp_iam_cluster_credentials(credentials)
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/redshift/connections.py", line 128, in get_tmp_iam_cluster_credentials
credentials.db_groups,
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/redshift/connections.py", line 93, in fetch_cluster_credentials
boto_client = boto3.client('redshift')
File "/Users/xxx/venv/lib/python3.7/site-packages/boto3/__init__.py", line 91, in client
return _get_default_session().client(*args, **kwargs)
File "/Users/xxx/venv/lib/python3.7/site-packages/boto3/session.py", line 263, in client
aws_session_token=aws_session_token, config=config)
File "/Users/xxx/venv/lib/python3.7/site-packages/botocore/session.py", line 828, in create_client
endpoint_resolver = self._get_internal_component('endpoint_resolver')
File "/Users/xxx/venv/lib/python3.7/site-packages/botocore/session.py", line 695, in _get_internal_component
return self._internal_components.get_component(name)
File "/Users/xxx/venv/lib/python3.7/site-packages/botocore/session.py", line 907, in get_component
del self._deferred[name]
KeyError: 'endpoint_resolver'
```
Sometimes the error returned is
```
KeyError: 'credential_provider'
```
but the stack trace is identical.
### System information
**Which database are you using dbt with?**
- [ ] postgres
- [x] redshift
- [ ] bigquery
- [ ] snowflake
- [ ] other (specify: ____________)
**The output of `dbt --version`:**
```
installed version: 0.17.2
latest version: 0.18.0
Your version of dbt is out of date! You can find instructions for upgrading here:
https://docs.getdbt.com/docs/installation
Plugins:
- bigquery: 0.17.2
- snowflake: 0.17.2
- redshift: 0.17.2
- postgres: 0.17.2
```
**The operating system you're using:**
macOS Catalina
**The output of `python --version`:**
Python 3.7.3
### Additional context
The error surfaced after I bumped dbt from version 0.14.2 to 0.17.2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/redshift/dbt/adapters/redshift/connections.py`
Content:
```
1 from multiprocessing import Lock
2 from contextlib import contextmanager
3 from typing import NewType
4
5 from dbt.adapters.postgres import PostgresConnectionManager
6 from dbt.adapters.postgres import PostgresCredentials
7 from dbt.logger import GLOBAL_LOGGER as logger # noqa
8 import dbt.exceptions
9 import dbt.flags
10
11 import boto3
12
13 from hologram import FieldEncoder, JsonSchemaMixin
14 from hologram.helpers import StrEnum
15
16 from dataclasses import dataclass, field
17 from typing import Optional, List
18
19 drop_lock: Lock = dbt.flags.MP_CONTEXT.Lock()
20
21
22 IAMDuration = NewType('IAMDuration', int)
23
24
25 class IAMDurationEncoder(FieldEncoder):
26 @property
27 def json_schema(self):
28 return {'type': 'integer', 'minimum': 0, 'maximum': 65535}
29
30
31 JsonSchemaMixin.register_field_encoders({IAMDuration: IAMDurationEncoder()})
32
33
34 class RedshiftConnectionMethod(StrEnum):
35 DATABASE = 'database'
36 IAM = 'iam'
37
38
39 @dataclass
40 class RedshiftCredentials(PostgresCredentials):
41 method: RedshiftConnectionMethod = RedshiftConnectionMethod.DATABASE
42 password: Optional[str] = None
43 cluster_id: Optional[str] = field(
44 default=None,
45 metadata={'description': 'If using IAM auth, the name of the cluster'},
46 )
47 iam_profile: Optional[str] = None
48 iam_duration_seconds: int = 900
49 search_path: Optional[str] = None
50 keepalives_idle: int = 240
51 autocreate: bool = False
52 db_groups: List[str] = field(default_factory=list)
53
54 @property
55 def type(self):
56 return 'redshift'
57
58 def _connection_keys(self):
59 keys = super()._connection_keys()
60 return keys + (
61 'method',
62 'cluster_id',
63 'iam_profile',
64 'iam_duration_seconds'
65 )
66
67
68 class RedshiftConnectionManager(PostgresConnectionManager):
69 TYPE = 'redshift'
70
71 @contextmanager
72 def fresh_transaction(self, name=None):
73 """On entrance to this context manager, hold an exclusive lock and
74 create a fresh transaction for redshift, then commit and begin a new
75 one before releasing the lock on exit.
76
77 See drop_relation in RedshiftAdapter for more information.
78
79 :param Optional[str] name: The name of the connection to use, or None
80 to use the default.
81 """
82 with drop_lock:
83 connection = self.get_thread_connection()
84
85 if connection.transaction_open:
86 self.commit()
87
88 self.begin()
89 yield
90
91 self.commit()
92 self.begin()
93
94 @classmethod
95 def fetch_cluster_credentials(cls, db_user, db_name, cluster_id,
96 iam_profile, duration_s, autocreate,
97 db_groups):
98 """Fetches temporary login credentials from AWS. The specified user
99 must already exist in the database, or else an error will occur"""
100
101 if iam_profile is None:
102 boto_client = boto3.client('redshift')
103 else:
104 logger.debug("Connecting to Redshift using 'IAM'" +
105 f"with profile {iam_profile}")
106 boto_session = boto3.Session(
107 profile_name=iam_profile
108 )
109 boto_client = boto_session.client('redshift')
110
111 try:
112 return boto_client.get_cluster_credentials(
113 DbUser=db_user,
114 DbName=db_name,
115 ClusterIdentifier=cluster_id,
116 DurationSeconds=duration_s,
117 AutoCreate=autocreate,
118 DbGroups=db_groups,)
119
120 except boto_client.exceptions.ClientError as e:
121 raise dbt.exceptions.FailedToConnectException(
122 "Unable to get temporary Redshift cluster credentials: {}"
123 .format(e))
124
125 @classmethod
126 def get_tmp_iam_cluster_credentials(cls, credentials):
127 cluster_id = credentials.cluster_id
128
129 # default via:
130 # boto3.readthedocs.io/en/latest/reference/services/redshift.html
131 iam_duration_s = credentials.iam_duration_seconds
132
133 if not cluster_id:
134 raise dbt.exceptions.FailedToConnectException(
135 "'cluster_id' must be provided in profile if IAM "
136 "authentication method selected")
137
138 cluster_creds = cls.fetch_cluster_credentials(
139 credentials.user,
140 credentials.database,
141 credentials.cluster_id,
142 credentials.iam_profile,
143 iam_duration_s,
144 credentials.autocreate,
145 credentials.db_groups,
146 )
147
148 # replace username and password with temporary redshift credentials
149 return credentials.replace(user=cluster_creds.get('DbUser'),
150 password=cluster_creds.get('DbPassword'))
151
152 @classmethod
153 def get_credentials(cls, credentials):
154 method = credentials.method
155
156 # Support missing 'method' for backwards compatibility
157 if method == 'database' or method is None:
158 logger.debug("Connecting to Redshift using 'database' credentials")
159 # this requirement is really annoying to encode into json schema,
160 # so validate it here
161 if credentials.password is None:
162 raise dbt.exceptions.FailedToConnectException(
163 "'password' field is required for 'database' credentials"
164 )
165 return credentials
166
167 elif method == 'iam':
168 logger.debug("Connecting to Redshift using 'IAM' credentials")
169 return cls.get_tmp_iam_cluster_credentials(credentials)
170
171 else:
172 raise dbt.exceptions.FailedToConnectException(
173 "Invalid 'method' in profile: '{}'".format(method))
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/redshift/dbt/adapters/redshift/connections.py b/plugins/redshift/dbt/adapters/redshift/connections.py
--- a/plugins/redshift/dbt/adapters/redshift/connections.py
+++ b/plugins/redshift/dbt/adapters/redshift/connections.py
@@ -99,7 +99,8 @@
must already exist in the database, or else an error will occur"""
if iam_profile is None:
- boto_client = boto3.client('redshift')
+ session = boto3.Session()
+ boto_client = session.client("redshift")
else:
logger.debug("Connecting to Redshift using 'IAM'" +
f"with profile {iam_profile}")
| {"golden_diff": "diff --git a/plugins/redshift/dbt/adapters/redshift/connections.py b/plugins/redshift/dbt/adapters/redshift/connections.py\n--- a/plugins/redshift/dbt/adapters/redshift/connections.py\n+++ b/plugins/redshift/dbt/adapters/redshift/connections.py\n@@ -99,7 +99,8 @@\n must already exist in the database, or else an error will occur\"\"\"\n \n if iam_profile is None:\n- boto_client = boto3.client('redshift')\n+ session = boto3.Session()\n+ boto_client = session.client(\"redshift\")\n else:\n logger.debug(\"Connecting to Redshift using 'IAM'\" +\n f\"with profile {iam_profile}\")\n", "issue": "dbt compile fails against redshift when using multi-threading\n### Describe the bug\r\nWhen I run `dbt compile` against our redshift data warehouse the command fails with the error \r\n```\r\nKeyError: 'endpoint_resolver'\r\n```\r\nThe error only occurs when threads > 1 and `method: iam` is used.\r\n\r\nFrom what I can gather this is happening because the boto session object is not thread safe and it is being accessed from multiple threads without being protected, and this happens during the call to get_tmp_iam_cluster_credentials.\r\n\r\n### Steps To Reproduce\r\n* Create a dbt project containing a significant number of models.\r\n* Configure the target in the profiles.yml file to point to a redshift dwh, with threads > 1 and method iam:\r\n```\r\n type: redshift\r\n method: iam\r\n threads: 8\r\n host: xxxx.redshift.amazonaws.com\r\n cluster_id: xxx\r\n port: 5439\r\n user: xxx\r\n dbname: xxx\r\n schema: xxx\r\n```\r\n* Run `dbt compile`\r\n\r\n### Expected behavior\r\nThat `dbt compile` succeeds.\r\n\r\n### Screenshots and log output\r\n\r\n```\r\n2020-09-14 11:15:23.743840 (MainThread): Traceback (most recent call last):\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/postgres/connections.py\", line 46, in exception_handler\r\n yield\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/sql/connections.py\", line 76, in add_query\r\n cursor = connection.handle.cursor()\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/dbt/contracts/connection.py\", line 69, in handle\r\n self._handle.resolve(self)\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/dbt/contracts/connection.py\", line 90, in resolve\r\n return self.opener(connection)\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/postgres/connections.py\", line 77, in open\r\n credentials = cls.get_credentials(connection.credentials)\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/redshift/connections.py\", line 152, in get_credentials\r\n return cls.get_tmp_iam_cluster_credentials(credentials)\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/redshift/connections.py\", line 128, in get_tmp_iam_cluster_credentials\r\n credentials.db_groups,\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/redshift/connections.py\", line 93, in fetch_cluster_credentials\r\n boto_client = boto3.client('redshift')\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/boto3/__init__.py\", line 91, in client\r\n return _get_default_session().client(*args, **kwargs)\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/boto3/session.py\", line 263, in client\r\n aws_session_token=aws_session_token, config=config)\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/botocore/session.py\", line 828, in create_client\r\n endpoint_resolver = self._get_internal_component('endpoint_resolver')\r\n File 
\"/Users/xxx/venv/lib/python3.7/site-packages/botocore/session.py\", line 695, in _get_internal_component\r\n return self._internal_components.get_component(name)\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/botocore/session.py\", line 907, in get_component\r\n del self._deferred[name]\r\nKeyError: 'endpoint_resolver'\r\n```\r\n\r\nSometimes the error returned is \r\n```\r\nKeyError: 'credential_provider'\r\n```\r\nbut the stack trace is identical.\r\n\r\n### System information\r\n**Which database are you using dbt with?**\r\n- [ ] postgres\r\n- [x] redshift\r\n- [ ] bigquery\r\n- [ ] snowflake\r\n- [ ] other (specify: ____________)\r\n\r\n\r\n**The output of `dbt --version`:**\r\n```\r\ninstalled version: 0.17.2\r\n latest version: 0.18.0\r\n\r\nYour version of dbt is out of date! You can find instructions for upgrading here:\r\nhttps://docs.getdbt.com/docs/installation\r\n\r\nPlugins:\r\n - bigquery: 0.17.2\r\n - snowflake: 0.17.2\r\n - redshift: 0.17.2\r\n - postgres: 0.17.2\r\n```\r\n\r\n**The operating system you're using:**\r\nmacOS Catalina\r\n\r\n**The output of `python --version`:**\r\nPython 3.7.3\r\n\r\n### Additional context\r\nThe error surfaced after I bumped dbt from version 0.14.2 to 0.17.2\r\n\n", "before_files": [{"content": "from multiprocessing import Lock\nfrom contextlib import contextmanager\nfrom typing import NewType\n\nfrom dbt.adapters.postgres import PostgresConnectionManager\nfrom dbt.adapters.postgres import PostgresCredentials\nfrom dbt.logger import GLOBAL_LOGGER as logger # noqa\nimport dbt.exceptions\nimport dbt.flags\n\nimport boto3\n\nfrom hologram import FieldEncoder, JsonSchemaMixin\nfrom hologram.helpers import StrEnum\n\nfrom dataclasses import dataclass, field\nfrom typing import Optional, List\n\ndrop_lock: Lock = dbt.flags.MP_CONTEXT.Lock()\n\n\nIAMDuration = NewType('IAMDuration', int)\n\n\nclass IAMDurationEncoder(FieldEncoder):\n @property\n def json_schema(self):\n return {'type': 'integer', 'minimum': 0, 'maximum': 65535}\n\n\nJsonSchemaMixin.register_field_encoders({IAMDuration: IAMDurationEncoder()})\n\n\nclass RedshiftConnectionMethod(StrEnum):\n DATABASE = 'database'\n IAM = 'iam'\n\n\n@dataclass\nclass RedshiftCredentials(PostgresCredentials):\n method: RedshiftConnectionMethod = RedshiftConnectionMethod.DATABASE\n password: Optional[str] = None\n cluster_id: Optional[str] = field(\n default=None,\n metadata={'description': 'If using IAM auth, the name of the cluster'},\n )\n iam_profile: Optional[str] = None\n iam_duration_seconds: int = 900\n search_path: Optional[str] = None\n keepalives_idle: int = 240\n autocreate: bool = False\n db_groups: List[str] = field(default_factory=list)\n\n @property\n def type(self):\n return 'redshift'\n\n def _connection_keys(self):\n keys = super()._connection_keys()\n return keys + (\n 'method',\n 'cluster_id',\n 'iam_profile',\n 'iam_duration_seconds'\n )\n\n\nclass RedshiftConnectionManager(PostgresConnectionManager):\n TYPE = 'redshift'\n\n @contextmanager\n def fresh_transaction(self, name=None):\n \"\"\"On entrance to this context manager, hold an exclusive lock and\n create a fresh transaction for redshift, then commit and begin a new\n one before releasing the lock on exit.\n\n See drop_relation in RedshiftAdapter for more information.\n\n :param Optional[str] name: The name of the connection to use, or None\n to use the default.\n \"\"\"\n with drop_lock:\n connection = self.get_thread_connection()\n\n if connection.transaction_open:\n self.commit()\n\n self.begin()\n yield\n\n 
self.commit()\n self.begin()\n\n @classmethod\n def fetch_cluster_credentials(cls, db_user, db_name, cluster_id,\n iam_profile, duration_s, autocreate,\n db_groups):\n \"\"\"Fetches temporary login credentials from AWS. The specified user\n must already exist in the database, or else an error will occur\"\"\"\n\n if iam_profile is None:\n boto_client = boto3.client('redshift')\n else:\n logger.debug(\"Connecting to Redshift using 'IAM'\" +\n f\"with profile {iam_profile}\")\n boto_session = boto3.Session(\n profile_name=iam_profile\n )\n boto_client = boto_session.client('redshift')\n\n try:\n return boto_client.get_cluster_credentials(\n DbUser=db_user,\n DbName=db_name,\n ClusterIdentifier=cluster_id,\n DurationSeconds=duration_s,\n AutoCreate=autocreate,\n DbGroups=db_groups,)\n\n except boto_client.exceptions.ClientError as e:\n raise dbt.exceptions.FailedToConnectException(\n \"Unable to get temporary Redshift cluster credentials: {}\"\n .format(e))\n\n @classmethod\n def get_tmp_iam_cluster_credentials(cls, credentials):\n cluster_id = credentials.cluster_id\n\n # default via:\n # boto3.readthedocs.io/en/latest/reference/services/redshift.html\n iam_duration_s = credentials.iam_duration_seconds\n\n if not cluster_id:\n raise dbt.exceptions.FailedToConnectException(\n \"'cluster_id' must be provided in profile if IAM \"\n \"authentication method selected\")\n\n cluster_creds = cls.fetch_cluster_credentials(\n credentials.user,\n credentials.database,\n credentials.cluster_id,\n credentials.iam_profile,\n iam_duration_s,\n credentials.autocreate,\n credentials.db_groups,\n )\n\n # replace username and password with temporary redshift credentials\n return credentials.replace(user=cluster_creds.get('DbUser'),\n password=cluster_creds.get('DbPassword'))\n\n @classmethod\n def get_credentials(cls, credentials):\n method = credentials.method\n\n # Support missing 'method' for backwards compatibility\n if method == 'database' or method is None:\n logger.debug(\"Connecting to Redshift using 'database' credentials\")\n # this requirement is really annoying to encode into json schema,\n # so validate it here\n if credentials.password is None:\n raise dbt.exceptions.FailedToConnectException(\n \"'password' field is required for 'database' credentials\"\n )\n return credentials\n\n elif method == 'iam':\n logger.debug(\"Connecting to Redshift using 'IAM' credentials\")\n return cls.get_tmp_iam_cluster_credentials(credentials)\n\n else:\n raise dbt.exceptions.FailedToConnectException(\n \"Invalid 'method' in profile: '{}'\".format(method))\n", "path": "plugins/redshift/dbt/adapters/redshift/connections.py"}], "after_files": [{"content": "from multiprocessing import Lock\nfrom contextlib import contextmanager\nfrom typing import NewType\n\nfrom dbt.adapters.postgres import PostgresConnectionManager\nfrom dbt.adapters.postgres import PostgresCredentials\nfrom dbt.logger import GLOBAL_LOGGER as logger # noqa\nimport dbt.exceptions\nimport dbt.flags\n\nimport boto3\n\nfrom hologram import FieldEncoder, JsonSchemaMixin\nfrom hologram.helpers import StrEnum\n\nfrom dataclasses import dataclass, field\nfrom typing import Optional, List\n\ndrop_lock: Lock = dbt.flags.MP_CONTEXT.Lock()\n\n\nIAMDuration = NewType('IAMDuration', int)\n\n\nclass IAMDurationEncoder(FieldEncoder):\n @property\n def json_schema(self):\n return {'type': 'integer', 'minimum': 0, 'maximum': 65535}\n\n\nJsonSchemaMixin.register_field_encoders({IAMDuration: IAMDurationEncoder()})\n\n\nclass RedshiftConnectionMethod(StrEnum):\n 
DATABASE = 'database'\n IAM = 'iam'\n\n\n@dataclass\nclass RedshiftCredentials(PostgresCredentials):\n method: RedshiftConnectionMethod = RedshiftConnectionMethod.DATABASE\n password: Optional[str] = None\n cluster_id: Optional[str] = field(\n default=None,\n metadata={'description': 'If using IAM auth, the name of the cluster'},\n )\n iam_profile: Optional[str] = None\n iam_duration_seconds: int = 900\n search_path: Optional[str] = None\n keepalives_idle: int = 240\n autocreate: bool = False\n db_groups: List[str] = field(default_factory=list)\n\n @property\n def type(self):\n return 'redshift'\n\n def _connection_keys(self):\n keys = super()._connection_keys()\n return keys + (\n 'method',\n 'cluster_id',\n 'iam_profile',\n 'iam_duration_seconds'\n )\n\n\nclass RedshiftConnectionManager(PostgresConnectionManager):\n TYPE = 'redshift'\n\n @contextmanager\n def fresh_transaction(self, name=None):\n \"\"\"On entrance to this context manager, hold an exclusive lock and\n create a fresh transaction for redshift, then commit and begin a new\n one before releasing the lock on exit.\n\n See drop_relation in RedshiftAdapter for more information.\n\n :param Optional[str] name: The name of the connection to use, or None\n to use the default.\n \"\"\"\n with drop_lock:\n connection = self.get_thread_connection()\n\n if connection.transaction_open:\n self.commit()\n\n self.begin()\n yield\n\n self.commit()\n self.begin()\n\n @classmethod\n def fetch_cluster_credentials(cls, db_user, db_name, cluster_id,\n iam_profile, duration_s, autocreate,\n db_groups):\n \"\"\"Fetches temporary login credentials from AWS. The specified user\n must already exist in the database, or else an error will occur\"\"\"\n\n if iam_profile is None:\n session = boto3.Session()\n boto_client = session.client(\"redshift\")\n else:\n logger.debug(\"Connecting to Redshift using 'IAM'\" +\n f\"with profile {iam_profile}\")\n boto_session = boto3.Session(\n profile_name=iam_profile\n )\n boto_client = boto_session.client('redshift')\n\n try:\n return boto_client.get_cluster_credentials(\n DbUser=db_user,\n DbName=db_name,\n ClusterIdentifier=cluster_id,\n DurationSeconds=duration_s,\n AutoCreate=autocreate,\n DbGroups=db_groups,)\n\n except boto_client.exceptions.ClientError as e:\n raise dbt.exceptions.FailedToConnectException(\n \"Unable to get temporary Redshift cluster credentials: {}\"\n .format(e))\n\n @classmethod\n def get_tmp_iam_cluster_credentials(cls, credentials):\n cluster_id = credentials.cluster_id\n\n # default via:\n # boto3.readthedocs.io/en/latest/reference/services/redshift.html\n iam_duration_s = credentials.iam_duration_seconds\n\n if not cluster_id:\n raise dbt.exceptions.FailedToConnectException(\n \"'cluster_id' must be provided in profile if IAM \"\n \"authentication method selected\")\n\n cluster_creds = cls.fetch_cluster_credentials(\n credentials.user,\n credentials.database,\n credentials.cluster_id,\n credentials.iam_profile,\n iam_duration_s,\n credentials.autocreate,\n credentials.db_groups,\n )\n\n # replace username and password with temporary redshift credentials\n return credentials.replace(user=cluster_creds.get('DbUser'),\n password=cluster_creds.get('DbPassword'))\n\n @classmethod\n def get_credentials(cls, credentials):\n method = credentials.method\n\n # Support missing 'method' for backwards compatibility\n if method == 'database' or method is None:\n logger.debug(\"Connecting to Redshift using 'database' credentials\")\n # this requirement is really annoying to encode into json 
schema,\n # so validate it here\n if credentials.password is None:\n raise dbt.exceptions.FailedToConnectException(\n \"'password' field is required for 'database' credentials\"\n )\n return credentials\n\n elif method == 'iam':\n logger.debug(\"Connecting to Redshift using 'IAM' credentials\")\n return cls.get_tmp_iam_cluster_credentials(credentials)\n\n else:\n raise dbt.exceptions.FailedToConnectException(\n \"Invalid 'method' in profile: '{}'\".format(method))\n", "path": "plugins/redshift/dbt/adapters/redshift/connections.py"}]} | 2,941 | 151 |
gh_patches_debug_25139 | rasdani/github-patches | git_diff | biolab__orange3-2295 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove Nans from Sparse Data
<!--
This is an issue template. Please fill in the relevant details in the
sections below.
-->
##### Orange version
<!-- From menu _Help→About→Version_ or code `Orange.version.full_version` -->
3.5.dev
##### Expected behavior
Logistic Regression works on sparse data that contains nan values.
##### Actual behavior
It crashes with `Input contains Nans, ...`
##### Steps to reproduce the behavior
Load `Election-2016-Tweets.tab` in Corpus. Use just first 100 rows for speed. Pass corpus through bag of words into test & score and use 10-fold cross validation. Add logistic regression on input and test & score crashes.
##### Additional info (worksheets, data, screenshots, ...)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Orange/preprocess/impute.py`
Content:
```
1 import numpy
2 from scipy.sparse import issparse
3
4 import Orange.data
5 from Orange.statistics import distribution, basic_stats
6 from Orange.util import Reprable
7 from .transformation import Transformation, Lookup
8
9 __all__ = ["ReplaceUnknowns", "Average", "DoNotImpute", "DropInstances",
10 "Model", "AsValue", "Random", "Default"]
11
12
13 class ReplaceUnknowns(Transformation):
14 """
15 A column transformation which replaces unknown values with a fixed `value`.
16
17 Parameters
18 ----------
19 variable : Orange.data.Variable
20 The target variable for imputation.
21 value : int or float
22 The value with which to replace the unknown values
23 """
24 def __init__(self, variable, value=0):
25 super().__init__(variable)
26 self.value = value
27
28 def transform(self, c):
29 if issparse(c): # sparse does not have unknown values
30 return c
31 else:
32 return numpy.where(numpy.isnan(c), self.value, c)
33
34
35 class BaseImputeMethod(Reprable):
36 name = ""
37 short_name = ""
38 description = ""
39 format = "{var.name} -> {self.short_name}"
40 columns_only = False
41
42 def __call__(self, data, variable):
43 """ Imputes table along variable column.
44
45 Args:
46 data (Table): A table to impute.
47 variable (Variable): Variable for completing missing values.
48
49 Returns:
50 A new Variable instance with completed missing values or
51 a array mask of rows to drop out.
52 """
53 raise NotImplementedError
54
55 def format_variable(self, var):
56 return self.format.format(var=var, self=self)
57
58 def __str__(self):
59 return self.name
60
61 def copy(self):
62 return self
63
64 @classmethod
65 def supports_variable(cls, variable):
66 return True
67
68
69 class DoNotImpute(BaseImputeMethod):
70 name = "Don't impute"
71 short_name = "leave"
72 description = ""
73
74 def __call__(self, data, variable):
75 return variable
76
77
78 class DropInstances(BaseImputeMethod):
79 name = "Remove instances with unknown values"
80 short_name = "drop"
81 description = ""
82
83 def __call__(self, data, variable):
84 index = data.domain.index(variable)
85 return numpy.isnan(data[:, index]).reshape(-1)
86
87
88 class Average(BaseImputeMethod):
89 name = "Average/Most frequent"
90 short_name = "average"
91 description = "Replace with average/mode of the column"
92
93 def __call__(self, data, variable, value=None):
94 variable = data.domain[variable]
95 if value is None:
96 if variable.is_continuous:
97 stats = basic_stats.BasicStats(data, variable)
98 value = stats.mean
99 elif variable.is_discrete:
100 dist = distribution.get_distribution(data, variable)
101 value = dist.modus()
102 else:
103 raise TypeError("Variable must be continuous or discrete")
104
105 a = variable.copy(compute_value=ReplaceUnknowns(variable, value))
106 a.to_sql = ImputeSql(variable, value)
107 return a
108
109
110 class ImputeSql(Reprable):
111 def __init__(self, var, default):
112 self.var = var
113 self.default = default
114
115 def __call__(self):
116 return 'coalesce(%s, %s)' % (self.var.to_sql(), str(self.default))
117
118
119 class Default(BaseImputeMethod):
120 name = "Value"
121 short_name = "value"
122 description = ""
123 columns_only = True
124 format = '{var} -> {self.default}'
125
126 def __init__(self, default=0):
127 self.default = default
128
129 def __call__(self, data, variable, *, default=None):
130 variable = data.domain[variable]
131 default = default if default is not None else self.default
132 return variable.copy(compute_value=ReplaceUnknowns(variable, default))
133
134 def copy(self):
135 return Default(self.default)
136
137
138 class ReplaceUnknownsModel(Reprable):
139 """
140 Replace unknown values with predicted values using a `Orange.base.Model`
141
142 Parameters
143 ----------
144 variable : Orange.data.Variable
145 The target variable for the imputation.
146 model : Orange.base.Model
147 A fitted model predicting `variable`.
148 """
149 def __init__(self, variable, model):
150 assert model.domain.class_var == variable
151 self.variable = variable
152 self.model = model
153
154 def __call__(self, data):
155 if isinstance(data, Orange.data.Instance):
156 column = numpy.array([float(data[self.variable])])
157 else:
158 column = numpy.array(data.get_column_view(self.variable)[0],
159 copy=True)
160
161 mask = numpy.isnan(column)
162 if not numpy.any(mask):
163 return column
164
165 if isinstance(data, Orange.data.Instance):
166 predicted = self.model(data)
167 else:
168 predicted = self.model(data[mask])
169 column[mask] = predicted
170 return column
171
172
173 class Model(BaseImputeMethod):
174 _name = "Model-based imputer"
175 short_name = "model"
176 description = ""
177 format = BaseImputeMethod.format + " ({self.learner.name})"
178 @property
179 def name(self):
180 return "{} ({})".format(self._name, getattr(self.learner, 'name', ''))
181
182 def __init__(self, learner):
183 self.learner = learner
184
185 def __call__(self, data, variable):
186 variable = data.domain[variable]
187 domain = domain_with_class_var(data.domain, variable)
188
189 if self.learner.check_learner_adequacy(domain):
190 data = data.transform(domain)
191 model = self.learner(data)
192 assert model.domain.class_var == variable
193 return variable.copy(
194 compute_value=ReplaceUnknownsModel(variable, model))
195 else:
196 raise ValueError("`{}` doesn't support domain type"
197 .format(self.learner.name))
198
199 def copy(self):
200 return Model(self.learner)
201
202 def supports_variable(self, variable):
203 domain = Orange.data.Domain([], class_vars=variable)
204 return self.learner.check_learner_adequacy(domain)
205
206
207 def domain_with_class_var(domain, class_var):
208 """
209 Return a domain with class_var as output domain.class_var.
210
211 If class_var is in the input domain's attributes it is removed from the
212 output's domain.attributes.
213 """
214 if domain.class_var is class_var:
215 return domain
216 elif class_var in domain.attributes:
217 attrs = [var for var in domain.attributes
218 if var is not class_var]
219 else:
220 attrs = domain.attributes
221 return Orange.data.Domain(attrs, class_var)
222
223
224 class IsDefined(Transformation):
225 def transform(self, c):
226 return ~numpy.isnan(c)
227
228
229 class Lookup(Lookup):
230 def __init__(self, variable, lookup_table, unknown=None):
231 super().__init__(variable, lookup_table)
232 self.unknown = unknown
233
234 def transform(self, column):
235 if self.unknown is None:
236 unknown = numpy.nan
237 else:
238 unknown = self.unknown
239
240 mask = numpy.isnan(column)
241 column_valid = numpy.where(mask, 0, column)
242 values = self.lookup_table[numpy.array(column_valid, dtype=int)]
243 return numpy.where(mask, unknown, values)
244
245
246 class AsValue(BaseImputeMethod):
247 name = "As a distinct value"
248 short_name = "new value"
249 description = ""
250
251 def __call__(self, data, variable):
252 variable = data.domain[variable]
253 if variable.is_discrete:
254 fmt = "{var.name}"
255 value = "N/A"
256 var = Orange.data.DiscreteVariable(
257 fmt.format(var=variable),
258 values=variable.values + [value],
259 base_value=variable.base_value,
260 compute_value=Lookup(
261 variable,
262 numpy.arange(len(variable.values), dtype=int),
263 unknown=len(variable.values))
264 )
265 return var
266
267 elif variable.is_continuous:
268 fmt = "{var.name}_def"
269 indicator_var = Orange.data.DiscreteVariable(
270 fmt.format(var=variable),
271 values=("undef", "def"),
272 compute_value=IsDefined(variable))
273 stats = basic_stats.BasicStats(data, variable)
274 return (variable.copy(compute_value=ReplaceUnknowns(variable,
275 stats.mean)),
276 indicator_var)
277 else:
278 raise TypeError(type(variable))
279
280
281 class ReplaceUnknownsRandom(Transformation):
282 """
283 A column transformation replacing unknowns with values drawn randomly from
284 an empirical distribution.
285
286 Parameters
287 ----------
288 variable : Orange.data.Variable
289 The target variable for imputation.
290 distribution : Orange.statistics.distribution.Distribution
291 The corresponding sampling distribution
292 """
293 def __init__(self, variable, distribution):
294 assert distribution.size > 0
295 assert distribution.variable == variable
296 super().__init__(variable)
297 self.distribution = distribution
298
299 if variable.is_discrete:
300 counts = numpy.array(distribution)
301 elif variable.is_continuous:
302 counts = numpy.array(distribution)[1, :]
303 else:
304 raise TypeError("Only discrete and continuous "
305 "variables are supported")
306 csum = numpy.sum(counts)
307 if csum > 0:
308 self.sample_prob = counts / csum
309 else:
310 self.sample_prob = numpy.ones_like(counts) / len(counts)
311
312 def transform(self, c):
313 c = numpy.array(c, copy=True)
314 nanindices = numpy.flatnonzero(numpy.isnan(c))
315
316 if self.variable.is_discrete:
317 sample = numpy.random.choice(
318 len(self.variable.values), size=len(nanindices),
319 replace=True, p=self.sample_prob)
320 else:
321 sample = numpy.random.choice(
322 numpy.asarray(self.distribution)[0, :], size=len(nanindices),
323 replace=True, p=self.sample_prob)
324
325 c[nanindices] = sample
326 return c
327
328
329 class Random(BaseImputeMethod):
330 name = "Random values"
331 short_name = "random"
332 description = "Replace with a random value"
333
334 def __call__(self, data, variable):
335 variable = data.domain[variable]
336 dist = distribution.get_distribution(data, variable)
337 # A distribution is invalid if a continuous variable's column does not
338 # contain any known values or if a discrete variable's .values == []
339 isinvalid = dist.size == 0
340 if isinvalid and variable.is_discrete:
341 assert len(variable.values) == 0
342 raise ValueError("'{}' has no values".format(variable))
343 elif isinvalid and variable.is_continuous:
344 raise ValueError("'{}' has an unknown distribution"
345 .format(variable))
346
347 if variable.is_discrete and numpy.sum(dist) == 0:
348 dist += 1 / len(dist)
349 elif variable.is_continuous and numpy.sum(dist[1, :]) == 0:
350 dist[1, :] += 1 / dist.shape[1]
351 return variable.copy(
352 compute_value=ReplaceUnknownsRandom(variable, dist))
353
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Orange/preprocess/impute.py b/Orange/preprocess/impute.py
--- a/Orange/preprocess/impute.py
+++ b/Orange/preprocess/impute.py
@@ -4,7 +4,7 @@
import Orange.data
from Orange.statistics import distribution, basic_stats
from Orange.util import Reprable
-from .transformation import Transformation, Lookup
+from .transformation import Transformation, Lookup as BaseLookup
__all__ = ["ReplaceUnknowns", "Average", "DoNotImpute", "DropInstances",
"Model", "AsValue", "Random", "Default"]
@@ -26,7 +26,8 @@
self.value = value
def transform(self, c):
- if issparse(c): # sparse does not have unknown values
+ if issparse(c):
+ c.data = numpy.where(numpy.isnan(c.data), self.value, c.data)
return c
else:
return numpy.where(numpy.isnan(c), self.value, c)
@@ -226,7 +227,7 @@
return ~numpy.isnan(c)
-class Lookup(Lookup):
+class Lookup(BaseLookup):
def __init__(self, variable, lookup_table, unknown=None):
super().__init__(variable, lookup_table)
self.unknown = unknown
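For intuition, the patched `transform` only rewrites the values a sparse column explicitly stores (`c.data`); entries that are implicit zeros are never touched, so the sparsity structure is preserved. Below is a toy sketch of that idea using scipy/numpy directly — the column values are made up and this is not Orange's API:

```python
import numpy as np
from scipy import sparse

# Dense -> CSR keeps every entry that is not exactly zero, and NaN != 0,
# so the NaN ends up as an explicitly stored value in col.data.
col = sparse.csr_matrix(np.array([[1.0], [np.nan], [0.0], [2.0]]))

# Same move as the patched ReplaceUnknowns.transform: replace NaNs in the
# stored values only, leaving the implicit zeros alone.
col.data = np.where(np.isnan(col.data), 0.0, col.data)

print(col.toarray().ravel())  # [1. 0. 0. 2.]
```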
| {"golden_diff": "diff --git a/Orange/preprocess/impute.py b/Orange/preprocess/impute.py\n--- a/Orange/preprocess/impute.py\n+++ b/Orange/preprocess/impute.py\n@@ -4,7 +4,7 @@\n import Orange.data\n from Orange.statistics import distribution, basic_stats\n from Orange.util import Reprable\n-from .transformation import Transformation, Lookup\n+from .transformation import Transformation, Lookup as BaseLookup\n \n __all__ = [\"ReplaceUnknowns\", \"Average\", \"DoNotImpute\", \"DropInstances\",\n \"Model\", \"AsValue\", \"Random\", \"Default\"]\n@@ -26,7 +26,8 @@\n self.value = value\n \n def transform(self, c):\n- if issparse(c): # sparse does not have unknown values\n+ if issparse(c):\n+ c.data = numpy.where(numpy.isnan(c.data), self.value, c.data)\n return c\n else:\n return numpy.where(numpy.isnan(c), self.value, c)\n@@ -226,7 +227,7 @@\n return ~numpy.isnan(c)\n \n \n-class Lookup(Lookup):\n+class Lookup(BaseLookup):\n def __init__(self, variable, lookup_table, unknown=None):\n super().__init__(variable, lookup_table)\n self.unknown = unknown\n", "issue": "Remove Nans from Sparse Data\n<!--\r\nThis is an issue template. Please fill in the relevant details in the\r\nsections below.\r\n-->\r\n\r\n##### Orange version\r\n<!-- From menu _Help\u2192About\u2192Version_ or code `Orange.version.full_version` -->\r\n3.5.dev\r\n\r\n##### Expected behavior\r\n\r\nLogistic Regression works on sparse data that contains nan values.\r\n\r\n##### Actual behavior\r\nIt crashes with `Input contains Nans, ...`\r\n\r\n\r\n##### Steps to reproduce the behavior\r\nLoad `Election-2016-Tweets.tab` in Corpus. Use just first 100 rows for speed. Pass corpus through bag of words into test & score and use 10-fold cross validation. Add logistic regression on input and test & score crashes.\r\n\r\n\r\n##### Additional info (worksheets, data, screenshots, ...)\r\n\r\n\r\n\n", "before_files": [{"content": "import numpy\nfrom scipy.sparse import issparse\n\nimport Orange.data\nfrom Orange.statistics import distribution, basic_stats\nfrom Orange.util import Reprable\nfrom .transformation import Transformation, Lookup\n\n__all__ = [\"ReplaceUnknowns\", \"Average\", \"DoNotImpute\", \"DropInstances\",\n \"Model\", \"AsValue\", \"Random\", \"Default\"]\n\n\nclass ReplaceUnknowns(Transformation):\n \"\"\"\n A column transformation which replaces unknown values with a fixed `value`.\n\n Parameters\n ----------\n variable : Orange.data.Variable\n The target variable for imputation.\n value : int or float\n The value with which to replace the unknown values\n \"\"\"\n def __init__(self, variable, value=0):\n super().__init__(variable)\n self.value = value\n\n def transform(self, c):\n if issparse(c): # sparse does not have unknown values\n return c\n else:\n return numpy.where(numpy.isnan(c), self.value, c)\n\n\nclass BaseImputeMethod(Reprable):\n name = \"\"\n short_name = \"\"\n description = \"\"\n format = \"{var.name} -> {self.short_name}\"\n columns_only = False\n\n def __call__(self, data, variable):\n \"\"\" Imputes table along variable column.\n\n Args:\n data (Table): A table to impute.\n variable (Variable): Variable for completing missing values.\n\n Returns:\n A new Variable instance with completed missing values or\n a array mask of rows to drop out.\n \"\"\"\n raise NotImplementedError\n\n def format_variable(self, var):\n return self.format.format(var=var, self=self)\n\n def __str__(self):\n return self.name\n\n def copy(self):\n return self\n\n @classmethod\n def supports_variable(cls, variable):\n return 
True\n\n\nclass DoNotImpute(BaseImputeMethod):\n name = \"Don't impute\"\n short_name = \"leave\"\n description = \"\"\n\n def __call__(self, data, variable):\n return variable\n\n\nclass DropInstances(BaseImputeMethod):\n name = \"Remove instances with unknown values\"\n short_name = \"drop\"\n description = \"\"\n\n def __call__(self, data, variable):\n index = data.domain.index(variable)\n return numpy.isnan(data[:, index]).reshape(-1)\n\n\nclass Average(BaseImputeMethod):\n name = \"Average/Most frequent\"\n short_name = \"average\"\n description = \"Replace with average/mode of the column\"\n\n def __call__(self, data, variable, value=None):\n variable = data.domain[variable]\n if value is None:\n if variable.is_continuous:\n stats = basic_stats.BasicStats(data, variable)\n value = stats.mean\n elif variable.is_discrete:\n dist = distribution.get_distribution(data, variable)\n value = dist.modus()\n else:\n raise TypeError(\"Variable must be continuous or discrete\")\n\n a = variable.copy(compute_value=ReplaceUnknowns(variable, value))\n a.to_sql = ImputeSql(variable, value)\n return a\n\n\nclass ImputeSql(Reprable):\n def __init__(self, var, default):\n self.var = var\n self.default = default\n\n def __call__(self):\n return 'coalesce(%s, %s)' % (self.var.to_sql(), str(self.default))\n\n\nclass Default(BaseImputeMethod):\n name = \"Value\"\n short_name = \"value\"\n description = \"\"\n columns_only = True\n format = '{var} -> {self.default}'\n\n def __init__(self, default=0):\n self.default = default\n\n def __call__(self, data, variable, *, default=None):\n variable = data.domain[variable]\n default = default if default is not None else self.default\n return variable.copy(compute_value=ReplaceUnknowns(variable, default))\n\n def copy(self):\n return Default(self.default)\n\n\nclass ReplaceUnknownsModel(Reprable):\n \"\"\"\n Replace unknown values with predicted values using a `Orange.base.Model`\n\n Parameters\n ----------\n variable : Orange.data.Variable\n The target variable for the imputation.\n model : Orange.base.Model\n A fitted model predicting `variable`.\n \"\"\"\n def __init__(self, variable, model):\n assert model.domain.class_var == variable\n self.variable = variable\n self.model = model\n\n def __call__(self, data):\n if isinstance(data, Orange.data.Instance):\n column = numpy.array([float(data[self.variable])])\n else:\n column = numpy.array(data.get_column_view(self.variable)[0],\n copy=True)\n\n mask = numpy.isnan(column)\n if not numpy.any(mask):\n return column\n\n if isinstance(data, Orange.data.Instance):\n predicted = self.model(data)\n else:\n predicted = self.model(data[mask])\n column[mask] = predicted\n return column\n\n\nclass Model(BaseImputeMethod):\n _name = \"Model-based imputer\"\n short_name = \"model\"\n description = \"\"\n format = BaseImputeMethod.format + \" ({self.learner.name})\"\n @property\n def name(self):\n return \"{} ({})\".format(self._name, getattr(self.learner, 'name', ''))\n\n def __init__(self, learner):\n self.learner = learner\n\n def __call__(self, data, variable):\n variable = data.domain[variable]\n domain = domain_with_class_var(data.domain, variable)\n\n if self.learner.check_learner_adequacy(domain):\n data = data.transform(domain)\n model = self.learner(data)\n assert model.domain.class_var == variable\n return variable.copy(\n compute_value=ReplaceUnknownsModel(variable, model))\n else:\n raise ValueError(\"`{}` doesn't support domain type\"\n .format(self.learner.name))\n\n def copy(self):\n return 
Model(self.learner)\n\n def supports_variable(self, variable):\n domain = Orange.data.Domain([], class_vars=variable)\n return self.learner.check_learner_adequacy(domain)\n\n\ndef domain_with_class_var(domain, class_var):\n \"\"\"\n Return a domain with class_var as output domain.class_var.\n\n If class_var is in the input domain's attributes it is removed from the\n output's domain.attributes.\n \"\"\"\n if domain.class_var is class_var:\n return domain\n elif class_var in domain.attributes:\n attrs = [var for var in domain.attributes\n if var is not class_var]\n else:\n attrs = domain.attributes\n return Orange.data.Domain(attrs, class_var)\n\n\nclass IsDefined(Transformation):\n def transform(self, c):\n return ~numpy.isnan(c)\n\n\nclass Lookup(Lookup):\n def __init__(self, variable, lookup_table, unknown=None):\n super().__init__(variable, lookup_table)\n self.unknown = unknown\n\n def transform(self, column):\n if self.unknown is None:\n unknown = numpy.nan\n else:\n unknown = self.unknown\n\n mask = numpy.isnan(column)\n column_valid = numpy.where(mask, 0, column)\n values = self.lookup_table[numpy.array(column_valid, dtype=int)]\n return numpy.where(mask, unknown, values)\n\n\nclass AsValue(BaseImputeMethod):\n name = \"As a distinct value\"\n short_name = \"new value\"\n description = \"\"\n\n def __call__(self, data, variable):\n variable = data.domain[variable]\n if variable.is_discrete:\n fmt = \"{var.name}\"\n value = \"N/A\"\n var = Orange.data.DiscreteVariable(\n fmt.format(var=variable),\n values=variable.values + [value],\n base_value=variable.base_value,\n compute_value=Lookup(\n variable,\n numpy.arange(len(variable.values), dtype=int),\n unknown=len(variable.values))\n )\n return var\n\n elif variable.is_continuous:\n fmt = \"{var.name}_def\"\n indicator_var = Orange.data.DiscreteVariable(\n fmt.format(var=variable),\n values=(\"undef\", \"def\"),\n compute_value=IsDefined(variable))\n stats = basic_stats.BasicStats(data, variable)\n return (variable.copy(compute_value=ReplaceUnknowns(variable,\n stats.mean)),\n indicator_var)\n else:\n raise TypeError(type(variable))\n\n\nclass ReplaceUnknownsRandom(Transformation):\n \"\"\"\n A column transformation replacing unknowns with values drawn randomly from\n an empirical distribution.\n\n Parameters\n ----------\n variable : Orange.data.Variable\n The target variable for imputation.\n distribution : Orange.statistics.distribution.Distribution\n The corresponding sampling distribution\n \"\"\"\n def __init__(self, variable, distribution):\n assert distribution.size > 0\n assert distribution.variable == variable\n super().__init__(variable)\n self.distribution = distribution\n\n if variable.is_discrete:\n counts = numpy.array(distribution)\n elif variable.is_continuous:\n counts = numpy.array(distribution)[1, :]\n else:\n raise TypeError(\"Only discrete and continuous \"\n \"variables are supported\")\n csum = numpy.sum(counts)\n if csum > 0:\n self.sample_prob = counts / csum\n else:\n self.sample_prob = numpy.ones_like(counts) / len(counts)\n\n def transform(self, c):\n c = numpy.array(c, copy=True)\n nanindices = numpy.flatnonzero(numpy.isnan(c))\n\n if self.variable.is_discrete:\n sample = numpy.random.choice(\n len(self.variable.values), size=len(nanindices),\n replace=True, p=self.sample_prob)\n else:\n sample = numpy.random.choice(\n numpy.asarray(self.distribution)[0, :], size=len(nanindices),\n replace=True, p=self.sample_prob)\n\n c[nanindices] = sample\n return c\n\n\nclass Random(BaseImputeMethod):\n name = \"Random 
values\"\n short_name = \"random\"\n description = \"Replace with a random value\"\n\n def __call__(self, data, variable):\n variable = data.domain[variable]\n dist = distribution.get_distribution(data, variable)\n # A distribution is invalid if a continuous variable's column does not\n # contain any known values or if a discrete variable's .values == []\n isinvalid = dist.size == 0\n if isinvalid and variable.is_discrete:\n assert len(variable.values) == 0\n raise ValueError(\"'{}' has no values\".format(variable))\n elif isinvalid and variable.is_continuous:\n raise ValueError(\"'{}' has an unknown distribution\"\n .format(variable))\n\n if variable.is_discrete and numpy.sum(dist) == 0:\n dist += 1 / len(dist)\n elif variable.is_continuous and numpy.sum(dist[1, :]) == 0:\n dist[1, :] += 1 / dist.shape[1]\n return variable.copy(\n compute_value=ReplaceUnknownsRandom(variable, dist))\n", "path": "Orange/preprocess/impute.py"}], "after_files": [{"content": "import numpy\nfrom scipy.sparse import issparse\n\nimport Orange.data\nfrom Orange.statistics import distribution, basic_stats\nfrom Orange.util import Reprable\nfrom .transformation import Transformation, Lookup as BaseLookup\n\n__all__ = [\"ReplaceUnknowns\", \"Average\", \"DoNotImpute\", \"DropInstances\",\n \"Model\", \"AsValue\", \"Random\", \"Default\"]\n\n\nclass ReplaceUnknowns(Transformation):\n \"\"\"\n A column transformation which replaces unknown values with a fixed `value`.\n\n Parameters\n ----------\n variable : Orange.data.Variable\n The target variable for imputation.\n value : int or float\n The value with which to replace the unknown values\n \"\"\"\n def __init__(self, variable, value=0):\n super().__init__(variable)\n self.value = value\n\n def transform(self, c):\n if issparse(c):\n c.data = numpy.where(numpy.isnan(c.data), self.value, c.data)\n return c\n else:\n return numpy.where(numpy.isnan(c), self.value, c)\n\n\nclass BaseImputeMethod(Reprable):\n name = \"\"\n short_name = \"\"\n description = \"\"\n format = \"{var.name} -> {self.short_name}\"\n columns_only = False\n\n def __call__(self, data, variable):\n \"\"\" Imputes table along variable column.\n\n Args:\n data (Table): A table to impute.\n variable (Variable): Variable for completing missing values.\n\n Returns:\n A new Variable instance with completed missing values or\n a array mask of rows to drop out.\n \"\"\"\n raise NotImplementedError\n\n def format_variable(self, var):\n return self.format.format(var=var, self=self)\n\n def __str__(self):\n return self.name\n\n def copy(self):\n return self\n\n @classmethod\n def supports_variable(cls, variable):\n return True\n\n\nclass DoNotImpute(BaseImputeMethod):\n name = \"Don't impute\"\n short_name = \"leave\"\n description = \"\"\n\n def __call__(self, data, variable):\n return variable\n\n\nclass DropInstances(BaseImputeMethod):\n name = \"Remove instances with unknown values\"\n short_name = \"drop\"\n description = \"\"\n\n def __call__(self, data, variable):\n index = data.domain.index(variable)\n return numpy.isnan(data[:, index]).reshape(-1)\n\n\nclass Average(BaseImputeMethod):\n name = \"Average/Most frequent\"\n short_name = \"average\"\n description = \"Replace with average/mode of the column\"\n\n def __call__(self, data, variable, value=None):\n variable = data.domain[variable]\n if value is None:\n if variable.is_continuous:\n stats = basic_stats.BasicStats(data, variable)\n value = stats.mean\n elif variable.is_discrete:\n dist = distribution.get_distribution(data, variable)\n value = 
dist.modus()\n else:\n raise TypeError(\"Variable must be continuous or discrete\")\n\n a = variable.copy(compute_value=ReplaceUnknowns(variable, value))\n a.to_sql = ImputeSql(variable, value)\n return a\n\n\nclass ImputeSql(Reprable):\n def __init__(self, var, default):\n self.var = var\n self.default = default\n\n def __call__(self):\n return 'coalesce(%s, %s)' % (self.var.to_sql(), str(self.default))\n\n\nclass Default(BaseImputeMethod):\n name = \"Value\"\n short_name = \"value\"\n description = \"\"\n columns_only = True\n format = '{var} -> {self.default}'\n\n def __init__(self, default=0):\n self.default = default\n\n def __call__(self, data, variable, *, default=None):\n variable = data.domain[variable]\n default = default if default is not None else self.default\n return variable.copy(compute_value=ReplaceUnknowns(variable, default))\n\n def copy(self):\n return Default(self.default)\n\n\nclass ReplaceUnknownsModel(Reprable):\n \"\"\"\n Replace unknown values with predicted values using a `Orange.base.Model`\n\n Parameters\n ----------\n variable : Orange.data.Variable\n The target variable for the imputation.\n model : Orange.base.Model\n A fitted model predicting `variable`.\n \"\"\"\n def __init__(self, variable, model):\n assert model.domain.class_var == variable\n self.variable = variable\n self.model = model\n\n def __call__(self, data):\n if isinstance(data, Orange.data.Instance):\n column = numpy.array([float(data[self.variable])])\n else:\n column = numpy.array(data.get_column_view(self.variable)[0],\n copy=True)\n\n mask = numpy.isnan(column)\n if not numpy.any(mask):\n return column\n\n if isinstance(data, Orange.data.Instance):\n predicted = self.model(data)\n else:\n predicted = self.model(data[mask])\n column[mask] = predicted\n return column\n\n\nclass Model(BaseImputeMethod):\n _name = \"Model-based imputer\"\n short_name = \"model\"\n description = \"\"\n format = BaseImputeMethod.format + \" ({self.learner.name})\"\n @property\n def name(self):\n return \"{} ({})\".format(self._name, getattr(self.learner, 'name', ''))\n\n def __init__(self, learner):\n self.learner = learner\n\n def __call__(self, data, variable):\n variable = data.domain[variable]\n domain = domain_with_class_var(data.domain, variable)\n\n if self.learner.check_learner_adequacy(domain):\n data = data.from_table(domain, data)\n model = self.learner(data)\n assert model.domain.class_var == variable\n return variable.copy(\n compute_value=ReplaceUnknownsModel(variable, model))\n else:\n raise ValueError(\"`{}` doesn't support domain type\"\n .format(self.learner.name))\n\n def copy(self):\n return Model(self.learner)\n\n def supports_variable(self, variable):\n domain = Orange.data.Domain([], class_vars=variable)\n return self.learner.check_learner_adequacy(domain)\n\n\ndef domain_with_class_var(domain, class_var):\n \"\"\"\n Return a domain with class_var as output domain.class_var.\n\n If class_var is in the input domain's attributes it is removed from the\n output's domain.attributes.\n \"\"\"\n if domain.class_var is class_var:\n return domain\n elif class_var in domain.attributes:\n attrs = [var for var in domain.attributes\n if var is not class_var]\n else:\n attrs = domain.attributes\n return Orange.data.Domain(attrs, class_var)\n\n\nclass IsDefined(Transformation):\n def transform(self, c):\n return ~numpy.isnan(c)\n\n\nclass Lookup(BaseLookup):\n def __init__(self, variable, lookup_table, unknown=None):\n super().__init__(variable, lookup_table)\n self.unknown = unknown\n\n def 
transform(self, column):\n if self.unknown is None:\n unknown = numpy.nan\n else:\n unknown = self.unknown\n\n mask = numpy.isnan(column)\n column_valid = numpy.where(mask, 0, column)\n values = self.lookup_table[numpy.array(column_valid, dtype=int)]\n return numpy.where(mask, unknown, values)\n\n\nclass AsValue(BaseImputeMethod):\n name = \"As a distinct value\"\n short_name = \"new value\"\n description = \"\"\n\n def __call__(self, data, variable):\n variable = data.domain[variable]\n if variable.is_discrete:\n fmt = \"{var.name}\"\n value = \"N/A\"\n var = Orange.data.DiscreteVariable(\n fmt.format(var=variable),\n values=variable.values + [value],\n base_value=variable.base_value,\n compute_value=Lookup(\n variable,\n numpy.arange(len(variable.values), dtype=int),\n unknown=len(variable.values))\n )\n return var\n\n elif variable.is_continuous:\n fmt = \"{var.name}_def\"\n indicator_var = Orange.data.DiscreteVariable(\n fmt.format(var=variable),\n values=(\"undef\", \"def\"),\n compute_value=IsDefined(variable))\n stats = basic_stats.BasicStats(data, variable)\n return (variable.copy(compute_value=ReplaceUnknowns(variable,\n stats.mean)),\n indicator_var)\n else:\n raise TypeError(type(variable))\n\n\nclass ReplaceUnknownsRandom(Transformation):\n \"\"\"\n A column transformation replacing unknowns with values drawn randomly from\n an empirical distribution.\n\n Parameters\n ----------\n variable : Orange.data.Variable\n The target variable for imputation.\n distribution : Orange.statistics.distribution.Distribution\n The corresponding sampling distribution\n \"\"\"\n def __init__(self, variable, distribution):\n assert distribution.size > 0\n assert distribution.variable == variable\n super().__init__(variable)\n self.distribution = distribution\n\n if variable.is_discrete:\n counts = numpy.array(distribution)\n elif variable.is_continuous:\n counts = numpy.array(distribution)[1, :]\n else:\n raise TypeError(\"Only discrete and continuous \"\n \"variables are supported\")\n csum = numpy.sum(counts)\n if csum > 0:\n self.sample_prob = counts / csum\n else:\n self.sample_prob = numpy.ones_like(counts) / len(counts)\n\n def transform(self, c):\n c = numpy.array(c, copy=True)\n nanindices = numpy.flatnonzero(numpy.isnan(c))\n\n if self.variable.is_discrete:\n sample = numpy.random.choice(\n len(self.variable.values), size=len(nanindices),\n replace=True, p=self.sample_prob)\n else:\n sample = numpy.random.choice(\n numpy.asarray(self.distribution)[0, :], size=len(nanindices),\n replace=True, p=self.sample_prob)\n\n c[nanindices] = sample\n return c\n\n\nclass Random(BaseImputeMethod):\n name = \"Random values\"\n short_name = \"random\"\n description = \"Replace with a random value\"\n\n def __call__(self, data, variable):\n variable = data.domain[variable]\n dist = distribution.get_distribution(data, variable)\n # A distribution is invalid if a continuous variable's column does not\n # contain any known values or if a discrete variable's .values == []\n isinvalid = dist.size == 0\n if isinvalid and variable.is_discrete:\n assert len(variable.values) == 0\n raise ValueError(\"'{}' has no values\".format(variable))\n elif isinvalid and variable.is_continuous:\n raise ValueError(\"'{}' has an unknown distribution\"\n .format(variable))\n\n if variable.is_discrete and numpy.sum(dist) == 0:\n dist += 1 / len(dist)\n elif variable.is_continuous and numpy.sum(dist[1, :]) == 0:\n dist[1, :] += 1 / dist.shape[1]\n return variable.copy(\n compute_value=ReplaceUnknownsRandom(variable, dist))\n", 
"path": "Orange/preprocess/impute.py"}]} | 3,734 | 280 |
gh_patches_debug_4661 | rasdani/github-patches | git_diff | SeldonIO__MLServer-478 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Activating custom conda env in mlserver build image
Hello,
I’ve encountered an issue when using `mlserver build ...` with `1.1.0dev` where the custom conda environment is not activated. The image builds and all packages are present in it. However, when the image starts it crashes on `mlserver start`, which appears to run the native Python 3.8 rather than the conda-installed Python.
```
--> Sourcing new environment at ./envs/base/environment...
--> Calling conda-unpack...
--> Disabling user-installed packages...
Traceback (most recent call last):
File "/opt/mlserver/envs/base/environment/bin/mlserver", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py", line 76, in main
root()
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1128, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1053, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1659, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1395, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py", line 19, in wrapper
return asyncio.run(f(*args, **kwargs))
File "/usr/local/lib/python3.8/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/usr/local/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete
return future.result()
File "/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py", line 40, in start
settings, models = await load_settings(folder)
File "/usr/local/lib/python3.8/site-packages/mlserver/cli/serve.py", line 37, in load_settings
available_models = await repository.list()
File "/usr/local/lib/python3.8/site-packages/mlserver/repository.py", line 37, in list
model_settings = ModelSettings()
File "pydantic/env_settings.py", line 36, in pydantic.env_settings.BaseSettings.__init__
File "pydantic/main.py", line 406, in pydantic.main.BaseModel.__init__
pydantic.error_wrappers.ValidationError: 1 validation error for ModelSettings
implementation
ensure this value contains valid import path or valid callable: No module named 'mlserver_mlflow' (type=type_error.pyobject; error_message=No module named 'mlserver_mlflow')
```
- [x] Verified workaround: after manually removing the final `CMD` line in the Dockerfile and starting an interactive container, running `./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && mlserver start $MLSERVER_MODELS_DIR` successfully launches the service
conda.yaml file:
```
channels:
- defaults
- conda-forge
- anaconda
dependencies:
- python=3.7.10
- pip
- gcc_linux-aarch64
- gxx_linux-aarch64
- pip:
- mlflow
- mlserver==0.4.0
- mlserver-mlflow==0.4.0
name: conda
```
--- END ISSUE ---
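For anyone reproducing this, here is a small diagnostic sketch (a hypothetical helper, not part of MLServer; the `mlserver_mlflow` import name is taken from the traceback above) that can be run inside the built image to confirm which interpreter and environment the `mlserver` entrypoint actually resolves to:
```python
# Hypothetical diagnostic snippet: run it inside the built image.
import shutil
import sys

print("interpreter:", sys.executable)             # packed-env python vs. /usr/local/bin/python3.8
print("mlserver on PATH:", shutil.which("mlserver"))

try:
    import mlserver_mlflow                        # importable only if the packed env is active
    print("mlserver_mlflow found at:", mlserver_mlflow.__file__)
except ImportError as exc:
    print("packed environment is NOT active:", exc)
```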
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/cli/constants.py`
Content:
```
1 DockerfileName = "Dockerfile"
2 DockerfileTemplate = """
3 FROM continuumio/miniconda3:4.10.3 AS env-builder
4 SHELL ["/bin/bash", "-c"]
5
6 ARG MLSERVER_ENV_NAME="mlserver-custom-env" \\
7 MLSERVER_ENV_TARBALL="./envs/base.tar.gz"
8
9 RUN conda config --add channels conda-forge && \\
10 conda install conda-pack
11
12 # The `[]` character range will ensure that Docker doesn't complain if the
13 # files don't exist:
14 # https://stackoverflow.com/a/65138098/5015573
15 COPY \\
16 ./environment.ym[l] \\
17 ./environment.yam[l] \\
18 ./conda.ym[l] \\
19 ./conda.yam[l] \\
20 .
21 RUN mkdir $(dirname $MLSERVER_ENV_TARBALL); \\
22 for envFile in environment.yml environment.yaml conda.yml conda.yaml; do \\
23 if [[ -f $envFile ]]; then \\
24 conda env create \
25 --name $MLSERVER_ENV_NAME \\
26 --file $envFile; \\
27 conda-pack \
28 -n $MLSERVER_ENV_NAME \\
29 -o $MLSERVER_ENV_TARBALL; \\
30 fi \\
31 done; \\
32 chmod -R 776 $(dirname $MLSERVER_ENV_TARBALL)
33
34 FROM seldonio/mlserver:{version}-slim
35 SHELL ["/bin/bash", "-c"]
36
37 # Copy all potential sources for custom environments
38 COPY \\
39 --chown=1000 \\
40 --from=env-builder \\
41 /envs/base.tar.g[z] \\
42 ./envs/base.tar.gz
43 COPY \\
44 ./settings.jso[n] \\
45 ./model-settings.jso[n] \\
46 ./requirements.tx[t] \\
47 .
48
49 USER root
50 # Install dependencies system-wide, to ensure that they are available for every
51 # user
52 RUN ./hack/build-env.sh . ./envs/base && \
53 chown -R 1000:0 ./envs/base && \\
54 chmod -R 776 ./envs/base
55 USER 1000
56
57 # Copy everything else
58 COPY . .
59
60 # Override MLServer's own `CMD` to activate the embedded environment
61 # (optionally activating the hot-loaded one as well).
62 CMD source ./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && \\
63 mlserver start $MLSERVER_MODELS_DIR
64 """
65
66 DockerignoreName = ".dockerignore"
67 Dockerignore = """
68 # Binaries for programs and plugins
69 *.exe
70 *.exe~
71 *.dll
72 *.so
73 *.dylib
74 *.pyc
75 *.pyo
76 *.pyd
77 bin
78
79 # Mac file system
80 **/.DS_Store
81
82 # Python dev
83 __pycache__
84 .Python
85 env
86 pip-log.txt
87 pip-delete-this-directory.txt
88 .mypy_cache
89 eggs/
90 .eggs/
91 *.egg-info/
92 ./pytest_cache
93 .tox
94 build/
95 dist/
96
97 # Notebook Checkpoints
98 .ipynb_checkpoints
99
100 .coverage
101 .coverage.*
102 .cache
103 nosetests.xml
104 coverage.xml
105 *,cover
106 *.log
107 .git
108 """
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlserver/cli/constants.py b/mlserver/cli/constants.py
--- a/mlserver/cli/constants.py
+++ b/mlserver/cli/constants.py
@@ -24,7 +24,7 @@
conda env create \
--name $MLSERVER_ENV_NAME \\
--file $envFile; \\
- conda-pack \
+ conda-pack --ignore-missing-files \
-n $MLSERVER_ENV_NAME \\
-o $MLSERVER_ENV_TARBALL; \\
fi \\
| {"golden_diff": "diff --git a/mlserver/cli/constants.py b/mlserver/cli/constants.py\n--- a/mlserver/cli/constants.py\n+++ b/mlserver/cli/constants.py\n@@ -24,7 +24,7 @@\n conda env create \\\n --name $MLSERVER_ENV_NAME \\\\\n --file $envFile; \\\\\n- conda-pack \\\n+ conda-pack --ignore-missing-files \\\n -n $MLSERVER_ENV_NAME \\\\\n -o $MLSERVER_ENV_TARBALL; \\\\\n fi \\\\\n", "issue": "Activating custom conda env in mlserver build image\nHello,\r\nI\u2019ve encountered an issue when using `mlserver build ...` with `1.1.0dev` where the custom conda environment is not activated. The image builds and all packages are present in the image. However, when starting the image it crashes on `mlserver start` calling what seems to be native Python 3.8 rather than the conda installed python.\r\n\r\n```\r\n--> Sourcing new environment at ./envs/base/environment...\r\n\r\n--> Calling conda-unpack...\r\n\r\n--> Disabling user-installed packages...\r\n\r\nTraceback (most recent call last):\r\n\r\n File \"/opt/mlserver/envs/base/environment/bin/mlserver\", line 8, in <module>\r\n\r\n sys.exit(main())\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py\", line 76, in main\r\n\r\n root()\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/click/core.py\", line 1128, in __call__\r\n\r\n return self.main(*args, **kwargs)\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/click/core.py\", line 1053, in main\r\n\r\n rv = self.invoke(ctx)\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/click/core.py\", line 1659, in invoke\r\n\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/click/core.py\", line 1395, in invoke\r\n\r\n return ctx.invoke(self.callback, **ctx.params)\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/click/core.py\", line 754, in invoke\r\n\r\n return __callback(*args, **kwargs)\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py\", line 19, in wrapper\r\n\r\n return asyncio.run(f(*args, **kwargs))\r\n\r\n File \"/usr/local/lib/python3.8/asyncio/runners.py\", line 44, in run\r\n\r\n return loop.run_until_complete(main)\r\n\r\n File \"/usr/local/lib/python3.8/asyncio/base_events.py\", line 616, in run_until_complete\r\n\r\n return future.result()\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py\", line 40, in start\r\n\r\n settings, models = await load_settings(folder)\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/mlserver/cli/serve.py\", line 37, in load_settings\r\n\r\n available_models = await repository.list()\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/mlserver/repository.py\", line 37, in list\r\n\r\n model_settings = ModelSettings()\r\n\r\n File \"pydantic/env_settings.py\", line 36, in pydantic.env_settings.BaseSettings.__init__\r\n\r\n File \"pydantic/main.py\", line 406, in pydantic.main.BaseModel.__init__\r\n\r\npydantic.error_wrappers.ValidationError: 1 validation error for ModelSettings\r\n\r\nimplementation\r\n\r\n ensure this value contains valid import path or valid callable: No module named 'mlserver_mlflow' (type=type_error.pyobject; error_message=No module named 'mlserver_mlflow')\r\n\r\n```\r\n\r\n- [x] manually removing final CMD line in Dockerfile and starting interactive container. 
Running `./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && mlserver start $MLSERVER_MODELS_DIR` successfully launches the service\r\n\r\nconda.yaml file:\r\n```\r\nchannels:\r\n- defaults\r\n- conda-forge\r\n- anaconda\r\ndependencies:\r\n- python=3.7.10\r\n- pip\r\n- gcc_linux-aarch64\r\n- gxx_linux-aarch64\r\n- pip:\r\n - mlflow\r\n - mlserver==0.4.0\r\n - mlserver-mlflow==0.4.0\r\nname: conda\r\n```\n", "before_files": [{"content": "DockerfileName = \"Dockerfile\"\nDockerfileTemplate = \"\"\"\nFROM continuumio/miniconda3:4.10.3 AS env-builder\nSHELL [\"/bin/bash\", \"-c\"]\n\nARG MLSERVER_ENV_NAME=\"mlserver-custom-env\" \\\\\n MLSERVER_ENV_TARBALL=\"./envs/base.tar.gz\"\n\nRUN conda config --add channels conda-forge && \\\\\n conda install conda-pack\n\n# The `[]` character range will ensure that Docker doesn't complain if the\n# files don't exist:\n# https://stackoverflow.com/a/65138098/5015573\nCOPY \\\\\n ./environment.ym[l] \\\\\n ./environment.yam[l] \\\\\n ./conda.ym[l] \\\\\n ./conda.yam[l] \\\\\n .\nRUN mkdir $(dirname $MLSERVER_ENV_TARBALL); \\\\\n for envFile in environment.yml environment.yaml conda.yml conda.yaml; do \\\\\n if [[ -f $envFile ]]; then \\\\\n conda env create \\\n --name $MLSERVER_ENV_NAME \\\\\n --file $envFile; \\\\\n conda-pack \\\n -n $MLSERVER_ENV_NAME \\\\\n -o $MLSERVER_ENV_TARBALL; \\\\\n fi \\\\\n done; \\\\\n chmod -R 776 $(dirname $MLSERVER_ENV_TARBALL)\n\nFROM seldonio/mlserver:{version}-slim\nSHELL [\"/bin/bash\", \"-c\"]\n\n# Copy all potential sources for custom environments\nCOPY \\\\\n --chown=1000 \\\\\n --from=env-builder \\\\\n /envs/base.tar.g[z] \\\\\n ./envs/base.tar.gz\nCOPY \\\\\n ./settings.jso[n] \\\\\n ./model-settings.jso[n] \\\\\n ./requirements.tx[t] \\\\\n .\n\nUSER root\n# Install dependencies system-wide, to ensure that they are available for every\n# user\nRUN ./hack/build-env.sh . ./envs/base && \\\n chown -R 1000:0 ./envs/base && \\\\\n chmod -R 776 ./envs/base\nUSER 1000\n\n# Copy everything else\nCOPY . 
.\n\n# Override MLServer's own `CMD` to activate the embedded environment\n# (optionally activating the hot-loaded one as well).\nCMD source ./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && \\\\\n mlserver start $MLSERVER_MODELS_DIR\n\"\"\"\n\nDockerignoreName = \".dockerignore\"\nDockerignore = \"\"\"\n# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n*.pyc\n*.pyo\n*.pyd\nbin\n\n# Mac file system\n**/.DS_Store\n\n# Python dev\n__pycache__\n.Python\nenv\npip-log.txt\npip-delete-this-directory.txt\n.mypy_cache\neggs/\n.eggs/\n*.egg-info/\n./pytest_cache\n.tox\nbuild/\ndist/\n\n# Notebook Checkpoints\n.ipynb_checkpoints\n\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*,cover\n*.log\n.git\n\"\"\"\n", "path": "mlserver/cli/constants.py"}], "after_files": [{"content": "DockerfileName = \"Dockerfile\"\nDockerfileTemplate = \"\"\"\nFROM continuumio/miniconda3:4.10.3 AS env-builder\nSHELL [\"/bin/bash\", \"-c\"]\n\nARG MLSERVER_ENV_NAME=\"mlserver-custom-env\" \\\\\n MLSERVER_ENV_TARBALL=\"./envs/base.tar.gz\"\n\nRUN conda config --add channels conda-forge && \\\\\n conda install conda-pack\n\n# The `[]` character range will ensure that Docker doesn't complain if the\n# files don't exist:\n# https://stackoverflow.com/a/65138098/5015573\nCOPY \\\\\n ./environment.ym[l] \\\\\n ./environment.yam[l] \\\\\n ./conda.ym[l] \\\\\n ./conda.yam[l] \\\\\n .\nRUN mkdir $(dirname $MLSERVER_ENV_TARBALL); \\\\\n for envFile in environment.yml environment.yaml conda.yml conda.yaml; do \\\\\n if [[ -f $envFile ]]; then \\\\\n conda env create \\\n --name $MLSERVER_ENV_NAME \\\\\n --file $envFile; \\\\\n conda-pack --ignore-missing-files \\\n -n $MLSERVER_ENV_NAME \\\\\n -o $MLSERVER_ENV_TARBALL; \\\\\n fi \\\\\n done; \\\\\n chmod -R 776 $(dirname $MLSERVER_ENV_TARBALL)\n\nFROM seldonio/mlserver:{version}-slim\nSHELL [\"/bin/bash\", \"-c\"]\n\n# Copy all potential sources for custom environments\nCOPY \\\\\n --chown=1000 \\\\\n --from=env-builder \\\\\n /envs/base.tar.g[z] \\\\\n ./envs/base.tar.gz\nCOPY \\\\\n ./settings.jso[n] \\\\\n ./model-settings.jso[n] \\\\\n ./requirements.tx[t] \\\\\n .\n\nUSER root\n# Install dependencies system-wide, to ensure that they are available for every\n# user\nRUN ./hack/build-env.sh . ./envs/base && \\\n chown -R 1000:0 ./envs/base && \\\\\n chmod -R 776 ./envs/base\nUSER 1000\n\n# Copy everything else\nCOPY . .\n\n# Override MLServer's own `CMD` to activate the embedded environment\n# (optionally activating the hot-loaded one as well).\nCMD source ./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && \\\\\n mlserver start $MLSERVER_MODELS_DIR\n\"\"\"\n\nDockerignoreName = \".dockerignore\"\nDockerignore = \"\"\"\n# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n*.pyc\n*.pyo\n*.pyd\nbin\n\n# Mac file system\n**/.DS_Store\n\n# Python dev\n__pycache__\n.Python\nenv\npip-log.txt\npip-delete-this-directory.txt\n.mypy_cache\neggs/\n.eggs/\n*.egg-info/\n./pytest_cache\n.tox\nbuild/\ndist/\n\n# Notebook Checkpoints\n.ipynb_checkpoints\n\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*,cover\n*.log\n.git\n\"\"\"\n", "path": "mlserver/cli/constants.py"}]} | 2,000 | 108 |
gh_patches_debug_32002 | rasdani/github-patches | git_diff | Mailu__Mailu-2069 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Setup utility] Cannot generate files when database flavors have been switched
## Before you open your issue
- [x] Check if no issue or pull-request for this already exists.
- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [x] You understand `Mailu` is made by volunteers in their **free time** — be concise, civil and accept that delays can occur.
- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [x] docker-compose
- [ ] kubernetes
- [x] docker swarm
### Versions
All versions
## Description
In the setup utility, when you select a different database flavor and then switch back to SQLite, you cannot generate the files. The reason is that the form still expects the fields of the previously selected database flavor to be filled in.
When you select an alternative database flavor, the fields (e.g. hostname, database name) become mandatory. These fields remain mandatory even after you switch back to SQLite as the database flavor.
As a workaround you can fill in the mandatory fields and then switch back to SQLite again, or you can refresh the page and not switch away from SQLite at all.
The problem is in https://github.com/Mailu/Mailu/blob/master/setup/templates/steps/database.html .
I didn't check yet how to resolve this.
## Replication Steps
1) Go to https://setup.mailu.io/master/ and click next.
2). Enter a value for main domain server and public hostname.
3). Select roundcube as webmail.
4). At the bottom switch to postgresql or mysql as database
5). Switch back to SQLite as database.
6). Click Setup mailu. Note that the button does not work.
## Expected behaviour
The Setup Mailu button works after following the above steps.
--- END ISSUE ---
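The fix presumably belongs in `database.html` (clearing the HTML `required` attributes when the flavor selector returns to SQLite). The equivalent validation rule is sketched below as a minimal, hypothetical Python function (illustrative field names, not Mailu's actual setup code): the external-database fields are only mandatory when a non-SQLite flavor is selected.
```python
# Hypothetical server-side guard; field names are illustrative only.
def build_db_config(form):
    flavor = form.get("db_flavor", "sqlite")
    if flavor == "sqlite":
        # Ignore whatever was typed while another flavor was selected.
        return {"db_flavor": "sqlite"}

    required = ("db_host", "db_name", "db_user", "db_pw")
    missing = [name for name in required if not form.get(name)]
    if missing:
        raise ValueError(f"missing fields for {flavor}: {', '.join(missing)}")
    return {"db_flavor": flavor, **{name: form[name] for name in required}}


# Switching postgresql -> sqlite must not require the postgres-only fields.
print(build_db_config({"db_flavor": "sqlite", "db_host": ""}))
```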
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optional/postgresql/start.py`
Content:
```
1 #!/usr/bin/python3
2
3 import anosql
4 import psycopg2
5 import glob
6 import os
7 import subprocess
8 from socrate import conf
9
10 def setup():
11 conn = psycopg2.connect(user='postgres')
12 queries = anosql.load_queries('postgres', '/conf/queries.sql')
13 # Mailu user
14 queries.create_mailu_user(conn)
15 queries.update_pw(conn, pw=os.environ.get("DB_PW"))
16 # Healthcheck user
17 queries.create_health_user(conn)
18 queries.grant_health(conn)
19 conn.commit()
20 # create db cannot be atomic. But this script is the only active connection, this is kinda safe.
21 if not queries.check_db(conn):
22 conn.set_isolation_level(0)
23 queries.create_db(conn)
24 conn.set_isolation_level(1)
25 conn.close()
26
27 # Check if /data is empty
28 if not os.listdir("/data"):
29 os.system("chown -R postgres:postgres /data")
30 os.system("chmod 0700 /data")
31 base_backups=sorted(glob.glob("/backup/base-*"))
32 if base_backups:
33 # Restore the latest backup
34 subprocess.call(["tar", "--same-owner", "-zpxf", base_backups[-1] + "/base.tar.gz" , "-C", "/data"])
35 if os.listdir("/backup/wal_archive"):
36 with open("/data/recovery.conf", "w") as rec:
37 rec.write("restore_command = 'gunzip < /backup/wal_archive/%f > %p'\n")
38 rec.write("standby_mode = off\n")
39 os.system("chown postgres:postgres /data/recovery.conf")
40 else:
41 # Bootstrap the database
42 os.system("sudo -u postgres initdb -D /data")
43
44 # Create backup directory structure, if it does not yet exist
45 os.system("mkdir -p /backup/wal_archive")
46 os.system("chown -R postgres:postgres /backup")
47
48 # Render config files
49 for pg_file in glob.glob("/conf/*.conf"):
50 conf.jinja(pg_file, os.environ, os.path.join("/data", os.path.basename(pg_file)))
51
52 # (Re)start postgresql locally for DB and user creation
53 os.system("sudo -u postgres pg_ctl start -D /data -o '-h \"''\" '")
54 while os.path.isfile("recovery.conf"):
55 pass
56 os.system("sudo -u postgres pg_ctl -D /data promote")
57 setup()
58 os.system("sudo -u postgres pg_ctl stop -m smart -w -D /data")
59
60 out=open("/proc/1/fd/1", "w")
61 err=open("/proc/1/fd/2", "w")
62 # Run the cron deamon
63 subprocess.Popen(["crond", "-f"], stdout=out, stderr=err)
64 # Run postgresql service
65 os.system("sudo -u postgres postgres -D /data -h \*")
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/optional/postgresql/start.py b/optional/postgresql/start.py
deleted file mode 100755
--- a/optional/postgresql/start.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/python3
-
-import anosql
-import psycopg2
-import glob
-import os
-import subprocess
-from socrate import conf
-
-def setup():
- conn = psycopg2.connect(user='postgres')
- queries = anosql.load_queries('postgres', '/conf/queries.sql')
- # Mailu user
- queries.create_mailu_user(conn)
- queries.update_pw(conn, pw=os.environ.get("DB_PW"))
- # Healthcheck user
- queries.create_health_user(conn)
- queries.grant_health(conn)
- conn.commit()
- # create db cannot be atomic. But this script is the only active connection, this is kinda safe.
- if not queries.check_db(conn):
- conn.set_isolation_level(0)
- queries.create_db(conn)
- conn.set_isolation_level(1)
- conn.close()
-
-# Check if /data is empty
-if not os.listdir("/data"):
- os.system("chown -R postgres:postgres /data")
- os.system("chmod 0700 /data")
- base_backups=sorted(glob.glob("/backup/base-*"))
- if base_backups:
- # Restore the latest backup
- subprocess.call(["tar", "--same-owner", "-zpxf", base_backups[-1] + "/base.tar.gz" , "-C", "/data"])
- if os.listdir("/backup/wal_archive"):
- with open("/data/recovery.conf", "w") as rec:
- rec.write("restore_command = 'gunzip < /backup/wal_archive/%f > %p'\n")
- rec.write("standby_mode = off\n")
- os.system("chown postgres:postgres /data/recovery.conf")
- else:
- # Bootstrap the database
- os.system("sudo -u postgres initdb -D /data")
-
-# Create backup directory structure, if it does not yet exist
-os.system("mkdir -p /backup/wal_archive")
-os.system("chown -R postgres:postgres /backup")
-
-# Render config files
-for pg_file in glob.glob("/conf/*.conf"):
- conf.jinja(pg_file, os.environ, os.path.join("/data", os.path.basename(pg_file)))
-
-# (Re)start postgresql locally for DB and user creation
-os.system("sudo -u postgres pg_ctl start -D /data -o '-h \"''\" '")
-while os.path.isfile("recovery.conf"):
- pass
-os.system("sudo -u postgres pg_ctl -D /data promote")
-setup()
-os.system("sudo -u postgres pg_ctl stop -m smart -w -D /data")
-
-out=open("/proc/1/fd/1", "w")
-err=open("/proc/1/fd/2", "w")
-# Run the cron deamon
-subprocess.Popen(["crond", "-f"], stdout=out, stderr=err)
-# Run postgresql service
-os.system("sudo -u postgres postgres -D /data -h \*")
| {"golden_diff": "diff --git a/optional/postgresql/start.py b/optional/postgresql/start.py\ndeleted file mode 100755\n--- a/optional/postgresql/start.py\n+++ /dev/null\n@@ -1,65 +0,0 @@\n-#!/usr/bin/python3\n-\n-import anosql\n-import psycopg2\n-import glob\n-import os\n-import subprocess\n-from socrate import conf\n-\n-def setup():\n- conn = psycopg2.connect(user='postgres')\n- queries = anosql.load_queries('postgres', '/conf/queries.sql')\n- # Mailu user\n- queries.create_mailu_user(conn)\n- queries.update_pw(conn, pw=os.environ.get(\"DB_PW\"))\n- # Healthcheck user\n- queries.create_health_user(conn)\n- queries.grant_health(conn)\n- conn.commit()\n- # create db cannot be atomic. But this script is the only active connection, this is kinda safe.\n- if not queries.check_db(conn):\n- conn.set_isolation_level(0)\n- queries.create_db(conn)\n- conn.set_isolation_level(1)\n- conn.close()\n-\n-# Check if /data is empty\n-if not os.listdir(\"/data\"):\n- os.system(\"chown -R postgres:postgres /data\")\n- os.system(\"chmod 0700 /data\")\n- base_backups=sorted(glob.glob(\"/backup/base-*\"))\n- if base_backups:\n- # Restore the latest backup\n- subprocess.call([\"tar\", \"--same-owner\", \"-zpxf\", base_backups[-1] + \"/base.tar.gz\" , \"-C\", \"/data\"])\n- if os.listdir(\"/backup/wal_archive\"):\n- with open(\"/data/recovery.conf\", \"w\") as rec:\n- rec.write(\"restore_command = 'gunzip < /backup/wal_archive/%f > %p'\\n\")\n- rec.write(\"standby_mode = off\\n\")\n- os.system(\"chown postgres:postgres /data/recovery.conf\")\n- else:\n- # Bootstrap the database\n- os.system(\"sudo -u postgres initdb -D /data\")\n-\n-# Create backup directory structure, if it does not yet exist\n-os.system(\"mkdir -p /backup/wal_archive\")\n-os.system(\"chown -R postgres:postgres /backup\")\n-\n-# Render config files\n-for pg_file in glob.glob(\"/conf/*.conf\"):\n- conf.jinja(pg_file, os.environ, os.path.join(\"/data\", os.path.basename(pg_file)))\n-\n-# (Re)start postgresql locally for DB and user creation\n-os.system(\"sudo -u postgres pg_ctl start -D /data -o '-h \\\"''\\\" '\")\n-while os.path.isfile(\"recovery.conf\"):\n- pass\n-os.system(\"sudo -u postgres pg_ctl -D /data promote\")\n-setup()\n-os.system(\"sudo -u postgres pg_ctl stop -m smart -w -D /data\")\n-\n-out=open(\"/proc/1/fd/1\", \"w\")\n-err=open(\"/proc/1/fd/2\", \"w\")\n-# Run the cron deamon\n-subprocess.Popen([\"crond\", \"-f\"], stdout=out, stderr=err)\n-# Run postgresql service\n-os.system(\"sudo -u postgres postgres -D /data -h \\*\")\n", "issue": "[Setup utility] Cannot generate files when database flavors have been switched\n## Before you open your issue\r\n- [x] Check if no issue or pull-request for this already exists.\r\n- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- [x] You understand `Mailu` is made by volunteers in their **free time** \u2014 be conscise, civil and accept that delays can occur.\r\n- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\n## Environment & Versions\r\n### Environment\r\n - [x] docker-compose\r\n - [ ] kubernetes\r\n - [x] docker swarm\r\n\r\n### Versions\r\nAll versions\r\n\r\n## Description\r\nIn the setup utility when you select a different database flavor and then switch back to SQLite, then you can not generate the files. 
The reason is that it still expects the fields to be filled of the alternative database flavor you previously selected. \r\nWhen you select an alternative database flavor, the fields (e.g. hostname, database name) are mandatory. These fields are still mandatory when you switch back to SQLlite as database flavor.\r\n\r\nAs a workaround you can fill in mandatory fields and then switch back to SQLite again. Or you could refresh the page and do not switch from SQLite. \r\n\r\nThe problem is in https://github.com/Mailu/Mailu/blob/master/setup/templates/steps/database.html . \r\nI didn't check yet how to resolve this.\r\n\r\n## Replication Steps\r\n1) Go to https://setup.mailu.io/master/ and click next.\r\n2). Enter a value for main domain server and public hostname.\r\n3). Select roundcube as webmail.\r\n4). At the bottom switch to postgresql or mysql as database\r\n5). Switch back to SQLite as database.\r\n6). Click Setup mailu. Note that the button does not work.\r\n\r\n## Expected behaviour\r\nThe Setup Mailu button works after following about steps.\n[Setup utility] Cannot generate files when database flavors have been switched\n## Before you open your issue\r\n- [x] Check if no issue or pull-request for this already exists.\r\n- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- [x] You understand `Mailu` is made by volunteers in their **free time** \u2014 be conscise, civil and accept that delays can occur.\r\n- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\n## Environment & Versions\r\n### Environment\r\n - [x] docker-compose\r\n - [ ] kubernetes\r\n - [x] docker swarm\r\n\r\n### Versions\r\nAll versions\r\n\r\n## Description\r\nIn the setup utility when you select a different database flavor and then switch back to SQLite, then you can not generate the files. The reason is that it still expects the fields to be filled of the alternative database flavor you previously selected. \r\nWhen you select an alternative database flavor, the fields (e.g. hostname, database name) are mandatory. These fields are still mandatory when you switch back to SQLlite as database flavor.\r\n\r\nAs a workaround you can fill in mandatory fields and then switch back to SQLite again. Or you could refresh the page and do not switch from SQLite. \r\n\r\nThe problem is in https://github.com/Mailu/Mailu/blob/master/setup/templates/steps/database.html . \r\nI didn't check yet how to resolve this.\r\n\r\n## Replication Steps\r\n1) Go to https://setup.mailu.io/master/ and click next.\r\n2). Enter a value for main domain server and public hostname.\r\n3). Select roundcube as webmail.\r\n4). At the bottom switch to postgresql or mysql as database\r\n5). Switch back to SQLite as database.\r\n6). Click Setup mailu. 
Note that the button does not work.\r\n\r\n## Expected behaviour\r\nThe Setup Mailu button works after following about steps.\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport anosql\nimport psycopg2\nimport glob\nimport os\nimport subprocess\nfrom socrate import conf\n\ndef setup():\n conn = psycopg2.connect(user='postgres')\n queries = anosql.load_queries('postgres', '/conf/queries.sql')\n # Mailu user\n queries.create_mailu_user(conn)\n queries.update_pw(conn, pw=os.environ.get(\"DB_PW\"))\n # Healthcheck user\n queries.create_health_user(conn)\n queries.grant_health(conn)\n conn.commit()\n # create db cannot be atomic. But this script is the only active connection, this is kinda safe.\n if not queries.check_db(conn):\n conn.set_isolation_level(0)\n queries.create_db(conn)\n conn.set_isolation_level(1)\n conn.close()\n\n# Check if /data is empty\nif not os.listdir(\"/data\"):\n os.system(\"chown -R postgres:postgres /data\")\n os.system(\"chmod 0700 /data\")\n base_backups=sorted(glob.glob(\"/backup/base-*\"))\n if base_backups:\n # Restore the latest backup\n subprocess.call([\"tar\", \"--same-owner\", \"-zpxf\", base_backups[-1] + \"/base.tar.gz\" , \"-C\", \"/data\"])\n if os.listdir(\"/backup/wal_archive\"):\n with open(\"/data/recovery.conf\", \"w\") as rec:\n rec.write(\"restore_command = 'gunzip < /backup/wal_archive/%f > %p'\\n\")\n rec.write(\"standby_mode = off\\n\")\n os.system(\"chown postgres:postgres /data/recovery.conf\")\n else:\n # Bootstrap the database\n os.system(\"sudo -u postgres initdb -D /data\")\n\n# Create backup directory structure, if it does not yet exist\nos.system(\"mkdir -p /backup/wal_archive\")\nos.system(\"chown -R postgres:postgres /backup\")\n\n# Render config files\nfor pg_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(pg_file, os.environ, os.path.join(\"/data\", os.path.basename(pg_file)))\n\n# (Re)start postgresql locally for DB and user creation\nos.system(\"sudo -u postgres pg_ctl start -D /data -o '-h \\\"''\\\" '\")\nwhile os.path.isfile(\"recovery.conf\"):\n pass\nos.system(\"sudo -u postgres pg_ctl -D /data promote\")\nsetup()\nos.system(\"sudo -u postgres pg_ctl stop -m smart -w -D /data\")\n\nout=open(\"/proc/1/fd/1\", \"w\")\nerr=open(\"/proc/1/fd/2\", \"w\")\n# Run the cron deamon\nsubprocess.Popen([\"crond\", \"-f\"], stdout=out, stderr=err)\n# Run postgresql service\nos.system(\"sudo -u postgres postgres -D /data -h \\*\")\n", "path": "optional/postgresql/start.py"}], "after_files": [{"content": null, "path": "optional/postgresql/start.py"}]} | 1,854 | 710 |
gh_patches_debug_11268 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-1022 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django middleware not properly supported if left empty
In Django 1.10 and above, `dd-trace-py` first checks whether the `MIDDLEWARE` Django setting is in use and falls back to `MIDDLEWARE_CLASSES` if it is unset. In my application, I deliberately declare an empty list for the `MIDDLEWARE` setting to tell Django that I am not personally putting any middleware into the application. Unfortunately, `dd-trace-py` simply checks whether the `settings.MIDDLEWARE` attribute is falsy, and since an empty list is falsy, it falls back to `MIDDLEWARE_CLASSES`.
If an empty list/tuple is declared in the Django settings for `MIDDLEWARE`, the tracing middleware should still be properly injected into it.
--- END ISSUE ---
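A minimal sketch of the failure mode, using a stand-in settings object rather than the real `django.conf.settings`: a truthiness check treats a deliberately empty `MIDDLEWARE` list the same as an undeclared one, while an explicit `None` check does not.
```python
# Illustrative stand-in for django.conf.settings; not the actual Django object.
class FakeSettings:
    MIDDLEWARE = []                               # deliberately empty: "no middleware, please"
    MIDDLEWARE_CLASSES = ("legacy.Middleware",)

settings = FakeSettings()
middleware = getattr(settings, "MIDDLEWARE", None)

# Truthiness check (current behaviour): [] is falsy, so the explicit empty
# setting is wrongly treated as "unset" and the legacy setting wins.
picked = "MIDDLEWARE" if middleware else "MIDDLEWARE_CLASSES"
assert picked == "MIDDLEWARE_CLASSES"

# Explicit None check (expected behaviour): [] still counts as "declared".
picked = "MIDDLEWARE" if middleware is not None else "MIDDLEWARE_CLASSES"
assert picked == "MIDDLEWARE"
```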
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/django/middleware.py`
Content:
```
1 # project
2 from .conf import settings
3 from .compat import user_is_authenticated, get_resolver
4 from .utils import get_request_uri
5
6 from ...constants import ANALYTICS_SAMPLE_RATE_KEY
7 from ...contrib import func_name
8 from ...ext import http
9 from ...internal.logger import get_logger
10 from ...propagation.http import HTTPPropagator
11 from ...settings import config
12
13 # 3p
14 from django.core.exceptions import MiddlewareNotUsed
15 from django.conf import settings as django_settings
16 import django
17
18 try:
19 from django.utils.deprecation import MiddlewareMixin
20 MiddlewareClass = MiddlewareMixin
21 except ImportError:
22 MiddlewareClass = object
23
24 log = get_logger(__name__)
25
26 EXCEPTION_MIDDLEWARE = 'ddtrace.contrib.django.TraceExceptionMiddleware'
27 TRACE_MIDDLEWARE = 'ddtrace.contrib.django.TraceMiddleware'
28 MIDDLEWARE = 'MIDDLEWARE'
29 MIDDLEWARE_CLASSES = 'MIDDLEWARE_CLASSES'
30
31 # Default views list available from:
32 # https://github.com/django/django/blob/38e2fdadfd9952e751deed662edf4c496d238f28/django/views/defaults.py
33 # DEV: Django doesn't call `process_view` when falling back to one of these internal error handling views
34 # DEV: We only use these names when `span.resource == 'unknown'` and we have one of these status codes
35 _django_default_views = {
36 400: 'django.views.defaults.bad_request',
37 403: 'django.views.defaults.permission_denied',
38 404: 'django.views.defaults.page_not_found',
39 500: 'django.views.defaults.server_error',
40 }
41
42
43 def _analytics_enabled():
44 return (
45 (config.analytics_enabled and settings.ANALYTICS_ENABLED is not False)
46 or settings.ANALYTICS_ENABLED is True
47 ) and settings.ANALYTICS_SAMPLE_RATE is not None
48
49
50 def get_middleware_insertion_point():
51 """Returns the attribute name and collection object for the Django middleware.
52 If middleware cannot be found, returns None for the middleware collection."""
53 middleware = getattr(django_settings, MIDDLEWARE, None)
54 # Prioritise MIDDLEWARE over ..._CLASSES, but only in 1.10 and later.
55 if middleware and django.VERSION >= (1, 10):
56 return MIDDLEWARE, middleware
57 return MIDDLEWARE_CLASSES, getattr(django_settings, MIDDLEWARE_CLASSES, None)
58
59
60 def insert_trace_middleware():
61 middleware_attribute, middleware = get_middleware_insertion_point()
62 if middleware is not None and TRACE_MIDDLEWARE not in set(middleware):
63 setattr(django_settings, middleware_attribute, type(middleware)((TRACE_MIDDLEWARE,)) + middleware)
64
65
66 def remove_trace_middleware():
67 _, middleware = get_middleware_insertion_point()
68 if middleware and TRACE_MIDDLEWARE in set(middleware):
69 middleware.remove(TRACE_MIDDLEWARE)
70
71
72 def insert_exception_middleware():
73 middleware_attribute, middleware = get_middleware_insertion_point()
74 if middleware is not None and EXCEPTION_MIDDLEWARE not in set(middleware):
75 setattr(django_settings, middleware_attribute, middleware + type(middleware)((EXCEPTION_MIDDLEWARE,)))
76
77
78 def remove_exception_middleware():
79 _, middleware = get_middleware_insertion_point()
80 if middleware and EXCEPTION_MIDDLEWARE in set(middleware):
81 middleware.remove(EXCEPTION_MIDDLEWARE)
82
83
84 class InstrumentationMixin(MiddlewareClass):
85 """
86 Useful mixin base class for tracing middlewares
87 """
88 def __init__(self, get_response=None):
89 # disable the middleware if the tracer is not enabled
90 # or if the auto instrumentation is disabled
91 self.get_response = get_response
92 if not settings.AUTO_INSTRUMENT:
93 raise MiddlewareNotUsed
94
95
96 class TraceExceptionMiddleware(InstrumentationMixin):
97 """
98 Middleware that traces exceptions raised
99 """
100 def process_exception(self, request, exception):
101 try:
102 span = _get_req_span(request)
103 if span:
104 span.set_tag(http.STATUS_CODE, '500')
105 span.set_traceback() # will set the exception info
106 except Exception:
107 log.debug('error processing exception', exc_info=True)
108
109
110 class TraceMiddleware(InstrumentationMixin):
111 """
112 Middleware that traces Django requests
113 """
114 def process_request(self, request):
115 tracer = settings.TRACER
116 if settings.DISTRIBUTED_TRACING:
117 propagator = HTTPPropagator()
118 context = propagator.extract(request.META)
119 # Only need to active the new context if something was propagated
120 if context.trace_id:
121 tracer.context_provider.activate(context)
122 try:
123 span = tracer.trace(
124 'django.request',
125 service=settings.DEFAULT_SERVICE,
126 resource='unknown', # will be filled by process view
127 span_type=http.TYPE,
128 )
129
130 # set analytics sample rate
131 # DEV: django is special case maintains separate configuration from config api
132 if _analytics_enabled():
133 span.set_tag(
134 ANALYTICS_SAMPLE_RATE_KEY,
135 settings.ANALYTICS_SAMPLE_RATE,
136 )
137
138 # Set HTTP Request tags
139 span.set_tag(http.METHOD, request.method)
140 span.set_tag(http.URL, get_request_uri(request))
141 _set_req_span(request, span)
142 except Exception as e:
143 log.debug('error tracing request: %s', e)
144
145 def process_view(self, request, view_func, *args, **kwargs):
146 span = _get_req_span(request)
147 if span:
148 span.resource = func_name(view_func)
149
150 def process_response(self, request, response):
151 try:
152 span = _get_req_span(request)
153 if span:
154 if response.status_code < 500 and span.error:
155 # remove any existing stack trace since it must have been
156 # handled appropriately
157 span._remove_exc_info()
158
159 # If `process_view` was not called, try to determine the correct `span.resource` to set
160 # DEV: `process_view` won't get called if a middle `process_request` returns an HttpResponse
161 # DEV: `process_view` won't get called when internal error handlers are used (e.g. for 404 responses)
162 if span.resource == 'unknown':
163 try:
164 # Attempt to lookup the view function from the url resolver
165 # https://github.com/django/django/blob/38e2fdadfd9952e751deed662edf4c496d238f28/django/core/handlers/base.py#L104-L113 # noqa
166 urlconf = None
167 if hasattr(request, 'urlconf'):
168 urlconf = request.urlconf
169 resolver = get_resolver(urlconf)
170
171 # Try to resolve the Django view for handling this request
172 if getattr(request, 'request_match', None):
173 request_match = request.request_match
174 else:
175 # This may raise a `django.urls.exceptions.Resolver404` exception
176 request_match = resolver.resolve(request.path_info)
177 span.resource = func_name(request_match.func)
178 except Exception:
179 log.debug('error determining request view function', exc_info=True)
180
181 # If the view could not be found, try to set from a static list of
182 # known internal error handler views
183 span.resource = _django_default_views.get(response.status_code, 'unknown')
184
185 span.set_tag(http.STATUS_CODE, response.status_code)
186 span = _set_auth_tags(span, request)
187 span.finish()
188 except Exception as e:
189 log.debug('error tracing request: %s', e)
190 finally:
191 return response
192
193
194 def _get_req_span(request):
195 """ Return the datadog span from the given request. """
196 return getattr(request, '_datadog_request_span', None)
197
198
199 def _set_req_span(request, span):
200 """ Set the datadog span on the given request. """
201 return setattr(request, '_datadog_request_span', span)
202
203
204 def _set_auth_tags(span, request):
205 """ Patch any available auth tags from the request onto the span. """
206 user = getattr(request, 'user', None)
207 if not user:
208 return span
209
210 if hasattr(user, 'is_authenticated'):
211 span.set_tag('django.user.is_authenticated', user_is_authenticated(user))
212
213 uid = getattr(user, 'pk', None)
214 if uid:
215 span.set_tag('django.user.id', uid)
216
217 uname = getattr(user, 'username', None)
218 if uname:
219 span.set_tag('django.user.name', uname)
220
221 return span
222
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ddtrace/contrib/django/middleware.py b/ddtrace/contrib/django/middleware.py
--- a/ddtrace/contrib/django/middleware.py
+++ b/ddtrace/contrib/django/middleware.py
@@ -52,7 +52,7 @@
If middleware cannot be found, returns None for the middleware collection."""
middleware = getattr(django_settings, MIDDLEWARE, None)
# Prioritise MIDDLEWARE over ..._CLASSES, but only in 1.10 and later.
- if middleware and django.VERSION >= (1, 10):
+ if middleware is not None and django.VERSION >= (1, 10):
return MIDDLEWARE, middleware
return MIDDLEWARE_CLASSES, getattr(django_settings, MIDDLEWARE_CLASSES, None)
| {"golden_diff": "diff --git a/ddtrace/contrib/django/middleware.py b/ddtrace/contrib/django/middleware.py\n--- a/ddtrace/contrib/django/middleware.py\n+++ b/ddtrace/contrib/django/middleware.py\n@@ -52,7 +52,7 @@\n If middleware cannot be found, returns None for the middleware collection.\"\"\"\n middleware = getattr(django_settings, MIDDLEWARE, None)\n # Prioritise MIDDLEWARE over ..._CLASSES, but only in 1.10 and later.\n- if middleware and django.VERSION >= (1, 10):\n+ if middleware is not None and django.VERSION >= (1, 10):\n return MIDDLEWARE, middleware\n return MIDDLEWARE_CLASSES, getattr(django_settings, MIDDLEWARE_CLASSES, None)\n", "issue": "Django middleware not properly supported if left empty\nIn Django 1.10 and above, `dd-trace-py` will attempt to check first if the user is using the `MIDDLEWARE` Django setting and will fallback to `MIDDLEWARE_CLASSES` if unset. In my application, I deliberately declare an empty list for the `MIDDLEWARE` setting to tell Django that I am not personally putting any middleware into the application. Unfortunately, `dd-trace-py` simply checks if the `settings.MIDDLEWARE` attribute is falsy and since this is true it then falls back to `MIDDLEWARE_CLASSES`.\r\n\r\nIf an empty list/tuple is declared in Django settings for `MIDDLEWARE`, it should still be properly injected.\n", "before_files": [{"content": "# project\nfrom .conf import settings\nfrom .compat import user_is_authenticated, get_resolver\nfrom .utils import get_request_uri\n\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...contrib import func_name\nfrom ...ext import http\nfrom ...internal.logger import get_logger\nfrom ...propagation.http import HTTPPropagator\nfrom ...settings import config\n\n# 3p\nfrom django.core.exceptions import MiddlewareNotUsed\nfrom django.conf import settings as django_settings\nimport django\n\ntry:\n from django.utils.deprecation import MiddlewareMixin\n MiddlewareClass = MiddlewareMixin\nexcept ImportError:\n MiddlewareClass = object\n\nlog = get_logger(__name__)\n\nEXCEPTION_MIDDLEWARE = 'ddtrace.contrib.django.TraceExceptionMiddleware'\nTRACE_MIDDLEWARE = 'ddtrace.contrib.django.TraceMiddleware'\nMIDDLEWARE = 'MIDDLEWARE'\nMIDDLEWARE_CLASSES = 'MIDDLEWARE_CLASSES'\n\n# Default views list available from:\n# https://github.com/django/django/blob/38e2fdadfd9952e751deed662edf4c496d238f28/django/views/defaults.py\n# DEV: Django doesn't call `process_view` when falling back to one of these internal error handling views\n# DEV: We only use these names when `span.resource == 'unknown'` and we have one of these status codes\n_django_default_views = {\n 400: 'django.views.defaults.bad_request',\n 403: 'django.views.defaults.permission_denied',\n 404: 'django.views.defaults.page_not_found',\n 500: 'django.views.defaults.server_error',\n}\n\n\ndef _analytics_enabled():\n return (\n (config.analytics_enabled and settings.ANALYTICS_ENABLED is not False)\n or settings.ANALYTICS_ENABLED is True\n ) and settings.ANALYTICS_SAMPLE_RATE is not None\n\n\ndef get_middleware_insertion_point():\n \"\"\"Returns the attribute name and collection object for the Django middleware.\n If middleware cannot be found, returns None for the middleware collection.\"\"\"\n middleware = getattr(django_settings, MIDDLEWARE, None)\n # Prioritise MIDDLEWARE over ..._CLASSES, but only in 1.10 and later.\n if middleware and django.VERSION >= (1, 10):\n return MIDDLEWARE, middleware\n return MIDDLEWARE_CLASSES, getattr(django_settings, MIDDLEWARE_CLASSES, None)\n\n\ndef 
insert_trace_middleware():\n middleware_attribute, middleware = get_middleware_insertion_point()\n if middleware is not None and TRACE_MIDDLEWARE not in set(middleware):\n setattr(django_settings, middleware_attribute, type(middleware)((TRACE_MIDDLEWARE,)) + middleware)\n\n\ndef remove_trace_middleware():\n _, middleware = get_middleware_insertion_point()\n if middleware and TRACE_MIDDLEWARE in set(middleware):\n middleware.remove(TRACE_MIDDLEWARE)\n\n\ndef insert_exception_middleware():\n middleware_attribute, middleware = get_middleware_insertion_point()\n if middleware is not None and EXCEPTION_MIDDLEWARE not in set(middleware):\n setattr(django_settings, middleware_attribute, middleware + type(middleware)((EXCEPTION_MIDDLEWARE,)))\n\n\ndef remove_exception_middleware():\n _, middleware = get_middleware_insertion_point()\n if middleware and EXCEPTION_MIDDLEWARE in set(middleware):\n middleware.remove(EXCEPTION_MIDDLEWARE)\n\n\nclass InstrumentationMixin(MiddlewareClass):\n \"\"\"\n Useful mixin base class for tracing middlewares\n \"\"\"\n def __init__(self, get_response=None):\n # disable the middleware if the tracer is not enabled\n # or if the auto instrumentation is disabled\n self.get_response = get_response\n if not settings.AUTO_INSTRUMENT:\n raise MiddlewareNotUsed\n\n\nclass TraceExceptionMiddleware(InstrumentationMixin):\n \"\"\"\n Middleware that traces exceptions raised\n \"\"\"\n def process_exception(self, request, exception):\n try:\n span = _get_req_span(request)\n if span:\n span.set_tag(http.STATUS_CODE, '500')\n span.set_traceback() # will set the exception info\n except Exception:\n log.debug('error processing exception', exc_info=True)\n\n\nclass TraceMiddleware(InstrumentationMixin):\n \"\"\"\n Middleware that traces Django requests\n \"\"\"\n def process_request(self, request):\n tracer = settings.TRACER\n if settings.DISTRIBUTED_TRACING:\n propagator = HTTPPropagator()\n context = propagator.extract(request.META)\n # Only need to active the new context if something was propagated\n if context.trace_id:\n tracer.context_provider.activate(context)\n try:\n span = tracer.trace(\n 'django.request',\n service=settings.DEFAULT_SERVICE,\n resource='unknown', # will be filled by process view\n span_type=http.TYPE,\n )\n\n # set analytics sample rate\n # DEV: django is special case maintains separate configuration from config api\n if _analytics_enabled():\n span.set_tag(\n ANALYTICS_SAMPLE_RATE_KEY,\n settings.ANALYTICS_SAMPLE_RATE,\n )\n\n # Set HTTP Request tags\n span.set_tag(http.METHOD, request.method)\n span.set_tag(http.URL, get_request_uri(request))\n _set_req_span(request, span)\n except Exception as e:\n log.debug('error tracing request: %s', e)\n\n def process_view(self, request, view_func, *args, **kwargs):\n span = _get_req_span(request)\n if span:\n span.resource = func_name(view_func)\n\n def process_response(self, request, response):\n try:\n span = _get_req_span(request)\n if span:\n if response.status_code < 500 and span.error:\n # remove any existing stack trace since it must have been\n # handled appropriately\n span._remove_exc_info()\n\n # If `process_view` was not called, try to determine the correct `span.resource` to set\n # DEV: `process_view` won't get called if a middle `process_request` returns an HttpResponse\n # DEV: `process_view` won't get called when internal error handlers are used (e.g. 
for 404 responses)\n if span.resource == 'unknown':\n try:\n # Attempt to lookup the view function from the url resolver\n # https://github.com/django/django/blob/38e2fdadfd9952e751deed662edf4c496d238f28/django/core/handlers/base.py#L104-L113 # noqa\n urlconf = None\n if hasattr(request, 'urlconf'):\n urlconf = request.urlconf\n resolver = get_resolver(urlconf)\n\n # Try to resolve the Django view for handling this request\n if getattr(request, 'request_match', None):\n request_match = request.request_match\n else:\n # This may raise a `django.urls.exceptions.Resolver404` exception\n request_match = resolver.resolve(request.path_info)\n span.resource = func_name(request_match.func)\n except Exception:\n log.debug('error determining request view function', exc_info=True)\n\n # If the view could not be found, try to set from a static list of\n # known internal error handler views\n span.resource = _django_default_views.get(response.status_code, 'unknown')\n\n span.set_tag(http.STATUS_CODE, response.status_code)\n span = _set_auth_tags(span, request)\n span.finish()\n except Exception as e:\n log.debug('error tracing request: %s', e)\n finally:\n return response\n\n\ndef _get_req_span(request):\n \"\"\" Return the datadog span from the given request. \"\"\"\n return getattr(request, '_datadog_request_span', None)\n\n\ndef _set_req_span(request, span):\n \"\"\" Set the datadog span on the given request. \"\"\"\n return setattr(request, '_datadog_request_span', span)\n\n\ndef _set_auth_tags(span, request):\n \"\"\" Patch any available auth tags from the request onto the span. \"\"\"\n user = getattr(request, 'user', None)\n if not user:\n return span\n\n if hasattr(user, 'is_authenticated'):\n span.set_tag('django.user.is_authenticated', user_is_authenticated(user))\n\n uid = getattr(user, 'pk', None)\n if uid:\n span.set_tag('django.user.id', uid)\n\n uname = getattr(user, 'username', None)\n if uname:\n span.set_tag('django.user.name', uname)\n\n return span\n", "path": "ddtrace/contrib/django/middleware.py"}], "after_files": [{"content": "# project\nfrom .conf import settings\nfrom .compat import user_is_authenticated, get_resolver\nfrom .utils import get_request_uri\n\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...contrib import func_name\nfrom ...ext import http\nfrom ...internal.logger import get_logger\nfrom ...propagation.http import HTTPPropagator\nfrom ...settings import config\n\n# 3p\nfrom django.core.exceptions import MiddlewareNotUsed\nfrom django.conf import settings as django_settings\nimport django\n\ntry:\n from django.utils.deprecation import MiddlewareMixin\n MiddlewareClass = MiddlewareMixin\nexcept ImportError:\n MiddlewareClass = object\n\nlog = get_logger(__name__)\n\nEXCEPTION_MIDDLEWARE = 'ddtrace.contrib.django.TraceExceptionMiddleware'\nTRACE_MIDDLEWARE = 'ddtrace.contrib.django.TraceMiddleware'\nMIDDLEWARE = 'MIDDLEWARE'\nMIDDLEWARE_CLASSES = 'MIDDLEWARE_CLASSES'\n\n# Default views list available from:\n# https://github.com/django/django/blob/38e2fdadfd9952e751deed662edf4c496d238f28/django/views/defaults.py\n# DEV: Django doesn't call `process_view` when falling back to one of these internal error handling views\n# DEV: We only use these names when `span.resource == 'unknown'` and we have one of these status codes\n_django_default_views = {\n 400: 'django.views.defaults.bad_request',\n 403: 'django.views.defaults.permission_denied',\n 404: 'django.views.defaults.page_not_found',\n 500: 'django.views.defaults.server_error',\n}\n\n\ndef 
_analytics_enabled():\n return (\n (config.analytics_enabled and settings.ANALYTICS_ENABLED is not False)\n or settings.ANALYTICS_ENABLED is True\n ) and settings.ANALYTICS_SAMPLE_RATE is not None\n\n\ndef get_middleware_insertion_point():\n \"\"\"Returns the attribute name and collection object for the Django middleware.\n If middleware cannot be found, returns None for the middleware collection.\"\"\"\n middleware = getattr(django_settings, MIDDLEWARE, None)\n # Prioritise MIDDLEWARE over ..._CLASSES, but only in 1.10 and later.\n if middleware is not None and django.VERSION >= (1, 10):\n return MIDDLEWARE, middleware\n return MIDDLEWARE_CLASSES, getattr(django_settings, MIDDLEWARE_CLASSES, None)\n\n\ndef insert_trace_middleware():\n middleware_attribute, middleware = get_middleware_insertion_point()\n if middleware is not None and TRACE_MIDDLEWARE not in set(middleware):\n setattr(django_settings, middleware_attribute, type(middleware)((TRACE_MIDDLEWARE,)) + middleware)\n\n\ndef remove_trace_middleware():\n _, middleware = get_middleware_insertion_point()\n if middleware and TRACE_MIDDLEWARE in set(middleware):\n middleware.remove(TRACE_MIDDLEWARE)\n\n\ndef insert_exception_middleware():\n middleware_attribute, middleware = get_middleware_insertion_point()\n if middleware is not None and EXCEPTION_MIDDLEWARE not in set(middleware):\n setattr(django_settings, middleware_attribute, middleware + type(middleware)((EXCEPTION_MIDDLEWARE,)))\n\n\ndef remove_exception_middleware():\n _, middleware = get_middleware_insertion_point()\n if middleware and EXCEPTION_MIDDLEWARE in set(middleware):\n middleware.remove(EXCEPTION_MIDDLEWARE)\n\n\nclass InstrumentationMixin(MiddlewareClass):\n \"\"\"\n Useful mixin base class for tracing middlewares\n \"\"\"\n def __init__(self, get_response=None):\n # disable the middleware if the tracer is not enabled\n # or if the auto instrumentation is disabled\n self.get_response = get_response\n if not settings.AUTO_INSTRUMENT:\n raise MiddlewareNotUsed\n\n\nclass TraceExceptionMiddleware(InstrumentationMixin):\n \"\"\"\n Middleware that traces exceptions raised\n \"\"\"\n def process_exception(self, request, exception):\n try:\n span = _get_req_span(request)\n if span:\n span.set_tag(http.STATUS_CODE, '500')\n span.set_traceback() # will set the exception info\n except Exception:\n log.debug('error processing exception', exc_info=True)\n\n\nclass TraceMiddleware(InstrumentationMixin):\n \"\"\"\n Middleware that traces Django requests\n \"\"\"\n def process_request(self, request):\n tracer = settings.TRACER\n if settings.DISTRIBUTED_TRACING:\n propagator = HTTPPropagator()\n context = propagator.extract(request.META)\n # Only need to active the new context if something was propagated\n if context.trace_id:\n tracer.context_provider.activate(context)\n try:\n span = tracer.trace(\n 'django.request',\n service=settings.DEFAULT_SERVICE,\n resource='unknown', # will be filled by process view\n span_type=http.TYPE,\n )\n\n # set analytics sample rate\n # DEV: django is special case maintains separate configuration from config api\n if _analytics_enabled():\n span.set_tag(\n ANALYTICS_SAMPLE_RATE_KEY,\n settings.ANALYTICS_SAMPLE_RATE,\n )\n\n # Set HTTP Request tags\n span.set_tag(http.METHOD, request.method)\n span.set_tag(http.URL, get_request_uri(request))\n _set_req_span(request, span)\n except Exception as e:\n log.debug('error tracing request: %s', e)\n\n def process_view(self, request, view_func, *args, **kwargs):\n span = _get_req_span(request)\n if span:\n 
span.resource = func_name(view_func)\n\n def process_response(self, request, response):\n try:\n span = _get_req_span(request)\n if span:\n if response.status_code < 500 and span.error:\n # remove any existing stack trace since it must have been\n # handled appropriately\n span._remove_exc_info()\n\n # If `process_view` was not called, try to determine the correct `span.resource` to set\n # DEV: `process_view` won't get called if a middle `process_request` returns an HttpResponse\n # DEV: `process_view` won't get called when internal error handlers are used (e.g. for 404 responses)\n if span.resource == 'unknown':\n try:\n # Attempt to lookup the view function from the url resolver\n # https://github.com/django/django/blob/38e2fdadfd9952e751deed662edf4c496d238f28/django/core/handlers/base.py#L104-L113 # noqa\n urlconf = None\n if hasattr(request, 'urlconf'):\n urlconf = request.urlconf\n resolver = get_resolver(urlconf)\n\n # Try to resolve the Django view for handling this request\n if getattr(request, 'request_match', None):\n request_match = request.request_match\n else:\n # This may raise a `django.urls.exceptions.Resolver404` exception\n request_match = resolver.resolve(request.path_info)\n span.resource = func_name(request_match.func)\n except Exception:\n log.debug('error determining request view function', exc_info=True)\n\n # If the view could not be found, try to set from a static list of\n # known internal error handler views\n span.resource = _django_default_views.get(response.status_code, 'unknown')\n\n span.set_tag(http.STATUS_CODE, response.status_code)\n span = _set_auth_tags(span, request)\n span.finish()\n except Exception as e:\n log.debug('error tracing request: %s', e)\n finally:\n return response\n\n\ndef _get_req_span(request):\n \"\"\" Return the datadog span from the given request. \"\"\"\n return getattr(request, '_datadog_request_span', None)\n\n\ndef _set_req_span(request, span):\n \"\"\" Set the datadog span on the given request. \"\"\"\n return setattr(request, '_datadog_request_span', span)\n\n\ndef _set_auth_tags(span, request):\n \"\"\" Patch any available auth tags from the request onto the span. \"\"\"\n user = getattr(request, 'user', None)\n if not user:\n return span\n\n if hasattr(user, 'is_authenticated'):\n span.set_tag('django.user.is_authenticated', user_is_authenticated(user))\n\n uid = getattr(user, 'pk', None)\n if uid:\n span.set_tag('django.user.id', uid)\n\n uname = getattr(user, 'username', None)\n if uname:\n span.set_tag('django.user.name', uname)\n\n return span\n", "path": "ddtrace/contrib/django/middleware.py"}]} | 2,818 | 173 |
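The golden diff in the record above boils down to a Python truthiness pitfall: an explicitly empty `MIDDLEWARE = []` setting is falsy, so the pre-patch `if middleware` test falls through to the legacy `MIDDLEWARE_CLASSES` lookup even though the user deliberately declared an empty middleware stack. The sketch below reproduces that difference in isolation; `FakeSettings`, `insertion_point_buggy`, and `insertion_point_fixed` are hypothetical stand-ins for illustration only, and the real helper's `django.VERSION >= (1, 10)` guard is omitted for brevity.

```python
class FakeSettings:
    """Hypothetical stand-in for django.conf.settings; only the middleware attributes matter here."""

    def __init__(self, middleware=None, middleware_classes=None):
        if middleware is not None:
            self.MIDDLEWARE = middleware
        if middleware_classes is not None:
            self.MIDDLEWARE_CLASSES = middleware_classes


def insertion_point_buggy(settings):
    # Mirrors the pre-patch check: an empty list is falsy, so a deliberate
    # MIDDLEWARE = [] is ignored and the legacy attribute wins.
    middleware = getattr(settings, "MIDDLEWARE", None)
    if middleware:
        return "MIDDLEWARE", middleware
    return "MIDDLEWARE_CLASSES", getattr(settings, "MIDDLEWARE_CLASSES", None)


def insertion_point_fixed(settings):
    # Mirrors the patched check: only a genuinely missing attribute falls back.
    middleware = getattr(settings, "MIDDLEWARE", None)
    if middleware is not None:
        return "MIDDLEWARE", middleware
    return "MIDDLEWARE_CLASSES", getattr(settings, "MIDDLEWARE_CLASSES", None)


settings = FakeSettings(middleware=[], middleware_classes=["legacy.TracingMiddleware"])
print(insertion_point_buggy(settings))  # ('MIDDLEWARE_CLASSES', ['legacy.TracingMiddleware'])
print(insertion_point_fixed(settings))  # ('MIDDLEWARE', [])
```

Testing `is not None` preserves the caller's deliberate choice of an empty middleware list, which a bare truthiness check cannot distinguish from the attribute being absent altogether.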
gh_patches_debug_41778 | rasdani/github-patches | git_diff | google__flax-2064 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document default stride for pooling functions
### Discussed in https://github.com/google/flax/discussions/2023
<div type='discussions-op-text'>
<sup>Originally posted by **dogeplusplus** April 3, 2022</sup>
A bit of a nitpick but I was wondering why the default behavior of pooling functions is to have stride 1 instead of the `window_shape`? I feel that for most use cases the stride would be the dimension of the kernel size as in other frameworks.</div>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flax/linen/pooling.py`
Content:
```
1 # Copyright 2022 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Pooling modules."""
16
17 from jax import lax
18 import jax.numpy as jnp
19
20 import numpy as np
21
22
23 def pool(inputs, init, reduce_fn, window_shape, strides, padding):
24 """Helper function to define pooling functions.
25
26 Pooling functions are implemented using the ReduceWindow XLA op.
27 NOTE: Be aware that pooling is not generally differentiable.
28 That means providing a reduce_fn that is differentiable does not imply
29 that pool is differentiable.
30
31 Args:
32 inputs: input data with dimensions (batch, window dims..., features).
33 init: the initial value for the reduction
34 reduce_fn: a reduce function of the form `(T, T) -> T`.
35 window_shape: a shape tuple defining the window to reduce over.
36 strides: a sequence of `n` integers, representing the inter-window
37 strides.
38 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
39 of `n` `(low, high)` integer pairs that give the padding to apply before
40 and after each spatial dimension.
41 Returns:
42 The output of the reduction for each window slice.
43 """
44 strides = strides or (1,) * len(window_shape)
45 assert len(window_shape) == len(strides), (
46 f"len({window_shape}) must equal len({strides})")
47 strides = (1,) + strides + (1,)
48 dims = (1,) + window_shape + (1,)
49
50 is_single_input = False
51 if inputs.ndim == len(dims) - 1:
52 # add singleton batch dimension because lax.reduce_window always
53 # needs a batch dimension.
54 inputs = inputs[None]
55 is_single_input = True
56
57 assert inputs.ndim == len(dims), f"len({inputs.shape}) != len({dims})"
58 if not isinstance(padding, str):
59 padding = tuple(map(tuple, padding))
60 assert len(padding) == len(window_shape), (
61 f"padding {padding} must specify pads for same number of dims as "
62 f"window_shape {window_shape}")
63 assert all([len(x) == 2 for x in padding]), (
64 f"each entry in padding {padding} must be length 2")
65 padding = ((0, 0),) + padding + ((0, 0),)
66 y = lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)
67 if is_single_input:
68 y = jnp.squeeze(y, axis=0)
69 return y
70
71
72 def avg_pool(inputs, window_shape, strides=None, padding="VALID"):
73 """Pools the input by taking the average over a window.
74
75 Args:
76 inputs: input data with dimensions (batch, window dims..., features).
77 window_shape: a shape tuple defining the window to reduce over.
78 strides: a sequence of `n` integers, representing the inter-window
79 strides (default: `(1, ..., 1)`).
80 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
81 of `n` `(low, high)` integer pairs that give the padding to apply before
82 and after each spatial dimension (default: `'VALID'`).
83 Returns:
84 The average for each window slice.
85 """
86 y = pool(inputs, 0., lax.add, window_shape, strides, padding)
87 y = y / np.prod(window_shape)
88 return y
89
90
91 def max_pool(inputs, window_shape, strides=None, padding="VALID"):
92 """Pools the input by taking the maximum of a window slice.
93
94 Args:
95 inputs: input data with dimensions (batch, window dims..., features).
96 window_shape: a shape tuple defining the window to reduce over.
97 strides: a sequence of `n` integers, representing the inter-window
98 strides (default: `(1, ..., 1)`).
99 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
100 of `n` `(low, high)` integer pairs that give the padding to apply before
101 and after each spatial dimension (default: `'VALID'`).
102 Returns:
103 The maximum for each window slice.
104 """
105 y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)
106 return y
107
108
109 def min_pool(inputs, window_shape, strides=None, padding="VALID"):
110 """Pools the input by taking the minimum of a window slice.
111
112 Args:
113 inputs: Input data with dimensions (batch, window dims..., features).
114 window_shape: A shape tuple defining the window to reduce over.
115 strides: A sequence of `n` integers, representing the inter-window strides
116 (default: `(1, ..., 1)`).
117 padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence of
118 `n` `(low, high)` integer pairs that give the padding to apply before and
119 after each spatial dimension (default: `'VALID'`).
120
121 Returns:
122 The minimum for each window slice.
123 """
124 return pool(inputs, jnp.inf, lax.min, window_shape, strides, padding)
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flax/linen/pooling.py b/flax/linen/pooling.py
--- a/flax/linen/pooling.py
+++ b/flax/linen/pooling.py
@@ -25,8 +25,8 @@
Pooling functions are implemented using the ReduceWindow XLA op.
NOTE: Be aware that pooling is not generally differentiable.
- That means providing a reduce_fn that is differentiable does not imply
- that pool is differentiable.
+ That means providing a reduce_fn that is differentiable does not imply that
+ pool is differentiable.
Args:
inputs: input data with dimensions (batch, window dims..., features).
@@ -34,7 +34,7 @@
reduce_fn: a reduce function of the form `(T, T) -> T`.
window_shape: a shape tuple defining the window to reduce over.
strides: a sequence of `n` integers, representing the inter-window
- strides.
+ strides (default: `(1, ..., 1)`).
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
of `n` `(low, high)` integer pairs that give the padding to apply before
and after each spatial dimension.
@@ -76,7 +76,7 @@
inputs: input data with dimensions (batch, window dims..., features).
window_shape: a shape tuple defining the window to reduce over.
strides: a sequence of `n` integers, representing the inter-window
- strides (default: `(1, ..., 1)`).
+ strides (default: `(1, ..., 1)`).
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
of `n` `(low, high)` integer pairs that give the padding to apply before
and after each spatial dimension (default: `'VALID'`).
@@ -95,7 +95,7 @@
inputs: input data with dimensions (batch, window dims..., features).
window_shape: a shape tuple defining the window to reduce over.
strides: a sequence of `n` integers, representing the inter-window
- strides (default: `(1, ..., 1)`).
+ strides (default: `(1, ..., 1)`).
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
of `n` `(low, high)` integer pairs that give the padding to apply before
and after each spatial dimension (default: `'VALID'`).
@@ -113,7 +113,7 @@
inputs: Input data with dimensions (batch, window dims..., features).
window_shape: A shape tuple defining the window to reduce over.
strides: A sequence of `n` integers, representing the inter-window strides
- (default: `(1, ..., 1)`).
+ (default: `(1, ..., 1)`).
padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence of
`n` `(low, high)` integer pairs that give the padding to apply before and
after each spatial dimension (default: `'VALID'`).
| {"golden_diff": "diff --git a/flax/linen/pooling.py b/flax/linen/pooling.py\n--- a/flax/linen/pooling.py\n+++ b/flax/linen/pooling.py\n@@ -25,8 +25,8 @@\n \n Pooling functions are implemented using the ReduceWindow XLA op.\n NOTE: Be aware that pooling is not generally differentiable.\n- That means providing a reduce_fn that is differentiable does not imply\n- that pool is differentiable.\n+ That means providing a reduce_fn that is differentiable does not imply that\n+ pool is differentiable.\n \n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n@@ -34,7 +34,7 @@\n reduce_fn: a reduce function of the form `(T, T) -> T`.\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n- strides.\n+ strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension.\n@@ -76,7 +76,7 @@\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n- strides (default: `(1, ..., 1)`).\n+ strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n@@ -95,7 +95,7 @@\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n- strides (default: `(1, ..., 1)`).\n+ strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n@@ -113,7 +113,7 @@\n inputs: Input data with dimensions (batch, window dims..., features).\n window_shape: A shape tuple defining the window to reduce over.\n strides: A sequence of `n` integers, representing the inter-window strides\n- (default: `(1, ..., 1)`).\n+ (default: `(1, ..., 1)`).\n padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence of\n `n` `(low, high)` integer pairs that give the padding to apply before and\n after each spatial dimension (default: `'VALID'`).\n", "issue": "Document default stride for pooling functions\n### Discussed in https://github.com/google/flax/discussions/2023\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **dogeplusplus** April 3, 2022</sup>\r\nA bit of a nitpick but I was wondering why the default behavior of pooling functions is to have stride 1 instead of the `window_shape`? 
I feel that for most use cases the stride would be the dimension of the kernel size as in other frameworks.</div>\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Pooling modules.\"\"\"\n\nfrom jax import lax\nimport jax.numpy as jnp\n\nimport numpy as np\n\n\ndef pool(inputs, init, reduce_fn, window_shape, strides, padding):\n \"\"\"Helper function to define pooling functions.\n\n Pooling functions are implemented using the ReduceWindow XLA op.\n NOTE: Be aware that pooling is not generally differentiable.\n That means providing a reduce_fn that is differentiable does not imply\n that pool is differentiable.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n init: the initial value for the reduction\n reduce_fn: a reduce function of the form `(T, T) -> T`.\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides.\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension.\n Returns:\n The output of the reduction for each window slice.\n \"\"\"\n strides = strides or (1,) * len(window_shape)\n assert len(window_shape) == len(strides), (\n f\"len({window_shape}) must equal len({strides})\")\n strides = (1,) + strides + (1,)\n dims = (1,) + window_shape + (1,)\n\n is_single_input = False\n if inputs.ndim == len(dims) - 1:\n # add singleton batch dimension because lax.reduce_window always\n # needs a batch dimension.\n inputs = inputs[None]\n is_single_input = True\n\n assert inputs.ndim == len(dims), f\"len({inputs.shape}) != len({dims})\"\n if not isinstance(padding, str):\n padding = tuple(map(tuple, padding))\n assert len(padding) == len(window_shape), (\n f\"padding {padding} must specify pads for same number of dims as \"\n f\"window_shape {window_shape}\")\n assert all([len(x) == 2 for x in padding]), (\n f\"each entry in padding {padding} must be length 2\")\n padding = ((0, 0),) + padding + ((0, 0),)\n y = lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)\n if is_single_input:\n y = jnp.squeeze(y, axis=0)\n return y\n\n\ndef avg_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the average over a window.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The average for each window slice.\n \"\"\"\n y = pool(inputs, 0., lax.add, window_shape, strides, padding)\n y = y / 
np.prod(window_shape)\n return y\n\n\ndef max_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the maximum of a window slice.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The maximum for each window slice.\n \"\"\"\n y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)\n return y\n\n\ndef min_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the minimum of a window slice.\n\n Args:\n inputs: Input data with dimensions (batch, window dims..., features).\n window_shape: A shape tuple defining the window to reduce over.\n strides: A sequence of `n` integers, representing the inter-window strides\n (default: `(1, ..., 1)`).\n padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence of\n `n` `(low, high)` integer pairs that give the padding to apply before and\n after each spatial dimension (default: `'VALID'`).\n\n Returns:\n The minimum for each window slice.\n \"\"\"\n return pool(inputs, jnp.inf, lax.min, window_shape, strides, padding)\n", "path": "flax/linen/pooling.py"}], "after_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Pooling modules.\"\"\"\n\nfrom jax import lax\nimport jax.numpy as jnp\n\nimport numpy as np\n\n\ndef pool(inputs, init, reduce_fn, window_shape, strides, padding):\n \"\"\"Helper function to define pooling functions.\n\n Pooling functions are implemented using the ReduceWindow XLA op.\n NOTE: Be aware that pooling is not generally differentiable.\n That means providing a reduce_fn that is differentiable does not imply that\n pool is differentiable.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n init: the initial value for the reduction\n reduce_fn: a reduce function of the form `(T, T) -> T`.\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension.\n Returns:\n The output of the reduction for each window slice.\n \"\"\"\n strides = strides or (1,) * len(window_shape)\n assert len(window_shape) == len(strides), (\n f\"len({window_shape}) must equal len({strides})\")\n strides = (1,) + strides + (1,)\n dims = (1,) + window_shape + (1,)\n\n is_single_input = False\n if inputs.ndim == len(dims) - 1:\n # add singleton batch dimension 
because lax.reduce_window always\n # needs a batch dimension.\n inputs = inputs[None]\n is_single_input = True\n\n assert inputs.ndim == len(dims), f\"len({inputs.shape}) != len({dims})\"\n if not isinstance(padding, str):\n padding = tuple(map(tuple, padding))\n assert len(padding) == len(window_shape), (\n f\"padding {padding} must specify pads for same number of dims as \"\n f\"window_shape {window_shape}\")\n assert all([len(x) == 2 for x in padding]), (\n f\"each entry in padding {padding} must be length 2\")\n padding = ((0, 0),) + padding + ((0, 0),)\n y = lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)\n if is_single_input:\n y = jnp.squeeze(y, axis=0)\n return y\n\n\ndef avg_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the average over a window.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The average for each window slice.\n \"\"\"\n y = pool(inputs, 0., lax.add, window_shape, strides, padding)\n y = y / np.prod(window_shape)\n return y\n\n\ndef max_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the maximum of a window slice.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The maximum for each window slice.\n \"\"\"\n y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)\n return y\n\n\ndef min_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the minimum of a window slice.\n\n Args:\n inputs: Input data with dimensions (batch, window dims..., features).\n window_shape: A shape tuple defining the window to reduce over.\n strides: A sequence of `n` integers, representing the inter-window strides\n (default: `(1, ..., 1)`).\n padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence of\n `n` `(low, high)` integer pairs that give the padding to apply before and\n after each spatial dimension (default: `'VALID'`).\n\n Returns:\n The minimum for each window slice.\n \"\"\"\n return pool(inputs, jnp.inf, lax.min, window_shape, strides, padding)\n", "path": "flax/linen/pooling.py"}]} | 1,871 | 698 |
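The flax record above only changes docstrings, but the behaviour it documents is easy to miss: when `strides` is left as `None`, the pooling helpers fall back to a stride of 1 in every window dimension rather than to `window_shape`. The usage sketch below assumes `jax` and `flax` are installed and relies on the `'VALID'` padding default; the shapes in the comments follow from the usual reduce-window size formula.

```python
import jax.numpy as jnp
import flax.linen as nn

x = jnp.ones((1, 8, 8, 3))                       # (batch, height, width, features)

y_default = nn.avg_pool(x, window_shape=(2, 2))  # strides default to (1, 1)
y_strided = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2))

print(y_default.shape)  # (1, 7, 7, 3): overlapping windows, stride 1
print(y_strided.shape)  # (1, 4, 4, 3): kernel-sized stride, as in other frameworks
```

Anyone expecting the Keras or PyTorch convention, where the stride defaults to the kernel size, has to pass `strides=window_shape` explicitly.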
gh_patches_debug_23929 | rasdani/github-patches | git_diff | sunpy__sunpy-5089 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remote data manager does not recover well from its database being deleted while a session is running
<!-- This comments are hidden when you submit the issue so you do not need to remove them!
Please be sure to check out our contributing guidelines: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst
Please be sure to check out our code of conduct:
https://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst -->
<!-- Please have a search on our GitHub repository to see if a similar issue has already been posted.
If a similar issue is closed, have a quick look to see if you are satisfied by the resolution.
If not please go ahead and open an issue! -->
### Description
You can permanently corrupt the data manager state. So nothing works.
### Expected behavior
It recovers from my abuse and fixes itself.
### Steps to Reproduce
<!-- Ideally a code example could be provided so we can run it ourselves. -->
<!-- If you are pasting code, use tripe backticks (```) around your code snippet. -->
1. Delete all your data_manager stuff so you have a clean slate.
2. Run the following code inside an interactive prompt:
```python
from sunpy.data import manager
@manager.require('test_file',
['http://data.sunpy.org/sample-data/predicted-sunspot-radio-flux.txt'],
'4c85b04a5528aa97eb84a087450eda0421c71833820576330bba148564089b11')
def test_function():
pass
test_function()
```
3. Observe how everything works.
4. do a rm ~/sunpy/data_manager/*
5. **Without** starting a new python session run: `test_function()`, observe the following error:
```
---------------------------------------------------------------------------
OperationalError Traceback (most recent call last)
<ipython-input-2-7e96d146b416> in <module>
----> 1 test_function()
~/.virtualenvs/sunpy-release/lib/python3.8/site-packages/sunpy/data/data_manager/manager.py in wrapper(*args, **kwargs)
64 file_path = self._cache.download(urls, redownload=True)
65 else:
---> 66 details = self._cache.get_by_hash(sha_hash)
67 if not details:
68 # In case we are matching by hash and file does not exist
~/.virtualenvs/sunpy-release/lib/python3.8/site-packages/sunpy/data/data_manager/cache.py in get_by_hash(self, sha_hash)
127 SHA-1 hash of the file.
128 """
--> 129 details = self._storage.find_by_key('file_hash', sha_hash)
130 return details
131
~/.virtualenvs/sunpy-release/lib/python3.8/site-packages/sunpy/data/data_manager/storage.py in find_by_key(self, key, value)
161 with self.connection() as conn:
162 cursor = conn.cursor()
--> 163 cursor.execute(f'''SELECT * FROM {self._table_name}
164 WHERE {key}="{value}"''')
165 row = cursor.fetchone()
OperationalError: no such table: cache_storage
```
6. Restart your Python session and re-run:
```python
from sunpy.data import manager
@manager.require('test_file',
['http://data.sunpy.org/sample-data/predicted-sunspot-radio-flux.txt'],
'4c85b04a5528aa97eb84a087450eda0421c71833820576330bba148564089b11')
def test_function():
pass
test_function()
```
7. Observe how it's still broken.
Note, that if you delete the contents of the `data_manager` dir and then *restart* your Python session before calling the data manager again it recovers fine.
### System Details
<!-- We at least need to know the SunPy version you are using. -->
<!-- We provide a short function in SunPy that will provide some of the below information. -->
<!-- It is sunpy.util.system_info(), this is optional but strongly recommended. -->
- SunPy Version: 1.1.1
- Python Version: 3.8
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/data/data_manager/storage.py`
Content:
```
1 """
2 Storage module contains the abstract implementation of storage
3 for `sunpy.data.data_manager.Cache` and a concrete implementation
4 using sqlite.
5 """
6 import sqlite3
7 from abc import ABCMeta, abstractmethod
8 from pathlib import Path
9 from contextlib import contextmanager
10
11 __all__ = [
12 'StorageProviderBase',
13 'SqliteStorage',
14 'InMemStorage',
15 ]
16
17
18 class StorageProviderBase(metaclass=ABCMeta):
19 """
20 Base class for remote data manager storage providers.
21 """
22 @abstractmethod
23 def find_by_key(self, key, value):
24 """
25 Returns the file details if value coresponding to the key
26 found in storage. Returns `None` if hash not found.
27
28 Parameters
29 ----------
30 key: `str`
31 The key/column name of the field.
32 value: `str`
33 The value associated with the key of the entry.
34
35 Returns
36 -------
37 `dict` or `None`
38 `dict` contains the details of the file. `None` if hash not found.
39
40 Raises
41 ------
42 ``KeyError``
43 KeyError is raised if key does not exist.
44 """
45
46 @abstractmethod
47 def delete_by_key(self, key, value):
48 """
49 Deletes the matching entry from the store.
50
51 Parameters
52 ----------
53 key: `str`
54 The key/column name of the field.
55 value: `str`
56 The value associated with the key of the entry.
57
58 Raises
59 ------
60 ``KeyError``
61 KeyError is raised if key does not exist.
62 """
63
64 @abstractmethod
65 def store(self, details):
66 """
67 Stores the details in the storage.
68
69 Parameters
70 ----------
71 details: `dict`
72 Details to be stored.
73 """
74
75
76 class InMemStorage(StorageProviderBase):
77 """
78 This provides a storage stored in memory.
79 """
80
81 def __init__(self):
82 self._store = []
83
84 def store(self, details):
85 self._store += [details]
86
87 def delete_by_key(self, key, value):
88 for i in self._store:
89 if i[key] == value:
90 self._store.remove(i)
91
92 def find_by_key(self, key, value):
93 for i in self._store:
94 if i[key] == value:
95 return i
96 return None
97
98
99 class SqliteStorage(StorageProviderBase):
100 """
101 This provides a sqlite backend for storage.
102
103 Parameters
104 ----------
105 path: `str`
106 Path to the database file.
107 """
108 COLUMN_NAMES = [
109 'file_hash',
110 'file_path',
111 'url',
112 'time',
113 ]
114
115 def __init__(self, path):
116 self._db_path = Path(path)
117 self._table_name = 'cache_storage'
118
119 self._db_path.parent.mkdir(parents=True, exist_ok=True)
120 if not self._db_path.exists():
121 # setup database
122 self._setup()
123
124 def _setup(self):
125 schema = ' text, '.join(self.COLUMN_NAMES) + ' text'
126 with self.connection(commit=True) as conn:
127 # Do this in a try...except to prevent race conditions in the tests
128 try:
129 conn.execute(f'''CREATE TABLE {self._table_name}
130 ({schema})''')
131 except sqlite3.OperationalError as exc:
132 if "cache_storage already exists" in str(exc):
133 return
134 raise exc
135
136 @contextmanager
137 def connection(self, commit=False):
138 """
139 A context manager which provides an easy way to handle db connections.
140
141 Parameters
142 ----------
143 commit: `bool`
144 Whether to commit after succesful execution of db command.
145 """
146 conn = sqlite3.connect(str(self._db_path))
147 try:
148 yield conn
149 if commit:
150 conn.commit()
151 finally:
152 conn.close()
153
154 def find_by_key(self, key, value):
155 if key not in self.COLUMN_NAMES:
156 raise KeyError
157 with self.connection() as conn:
158 cursor = conn.cursor()
159 cursor.execute(f'''SELECT * FROM {self._table_name}
160 WHERE {key}="{value}"''')
161 row = cursor.fetchone()
162 if row:
163 return dict(zip(self.COLUMN_NAMES, row))
164 return None
165
166 def delete_by_key(self, key, value):
167 if key not in self.COLUMN_NAMES:
168 raise KeyError
169 with self.connection(commit=True) as conn:
170 cursor = conn.cursor()
171 cursor.execute(f'''DELETE FROM {self._table_name}
172 WHERE {key}="{value}"''')
173
174 def store(self, details):
175 values = [details[k] for k in self.COLUMN_NAMES]
176 placeholder = '?,' * len(values)
177 placeholder = placeholder[:-1]
178 with self.connection(commit=True) as conn:
179 conn.execute(f'''INSERT INTO {self._table_name}
180 VALUES ({placeholder})''', list(values))
181
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sunpy/data/data_manager/storage.py b/sunpy/data/data_manager/storage.py
--- a/sunpy/data/data_manager/storage.py
+++ b/sunpy/data/data_manager/storage.py
@@ -122,16 +122,12 @@
self._setup()
def _setup(self):
- schema = ' text, '.join(self.COLUMN_NAMES) + ' text'
with self.connection(commit=True) as conn:
- # Do this in a try...except to prevent race conditions in the tests
- try:
- conn.execute(f'''CREATE TABLE {self._table_name}
- ({schema})''')
- except sqlite3.OperationalError as exc:
- if "cache_storage already exists" in str(exc):
- return
- raise exc
+ self._create_table(conn)
+
+ def _create_table(self, conn):
+ schema = ' text, '.join(self.COLUMN_NAMES) + ' text'
+ conn.execute(f'''CREATE TABLE IF NOT EXISTS {self._table_name} ({schema})''')
@contextmanager
def connection(self, commit=False):
@@ -144,6 +140,7 @@
Whether to commit after succesful execution of db command.
"""
conn = sqlite3.connect(str(self._db_path))
+ self._create_table(conn)
try:
yield conn
if commit:
| {"golden_diff": "diff --git a/sunpy/data/data_manager/storage.py b/sunpy/data/data_manager/storage.py\n--- a/sunpy/data/data_manager/storage.py\n+++ b/sunpy/data/data_manager/storage.py\n@@ -122,16 +122,12 @@\n self._setup()\n \n def _setup(self):\n- schema = ' text, '.join(self.COLUMN_NAMES) + ' text'\n with self.connection(commit=True) as conn:\n- # Do this in a try...except to prevent race conditions in the tests\n- try:\n- conn.execute(f'''CREATE TABLE {self._table_name}\n- ({schema})''')\n- except sqlite3.OperationalError as exc:\n- if \"cache_storage already exists\" in str(exc):\n- return\n- raise exc\n+ self._create_table(conn)\n+\n+ def _create_table(self, conn):\n+ schema = ' text, '.join(self.COLUMN_NAMES) + ' text'\n+ conn.execute(f'''CREATE TABLE IF NOT EXISTS {self._table_name} ({schema})''')\n \n @contextmanager\n def connection(self, commit=False):\n@@ -144,6 +140,7 @@\n Whether to commit after succesful execution of db command.\n \"\"\"\n conn = sqlite3.connect(str(self._db_path))\n+ self._create_table(conn)\n try:\n yield conn\n if commit:\n", "issue": "Remote data manager does not recover well from its database being deleted while a session is running\n<!-- This comments are hidden when you submit the issue so you do not need to remove them!\r\nPlease be sure to check out our contributing guidelines: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst\r\nPlease be sure to check out our code of conduct:\r\nhttps://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst -->\r\n\r\n<!-- Please have a search on our GitHub repository to see if a similar issue has already been posted.\r\nIf a similar issue is closed, have a quick look to see if you are satisfied by the resolution.\r\nIf not please go ahead and open an issue! -->\r\n\r\n### Description\r\nYou can permanently corrupt the data manager state. So nothing works.\r\n\r\n### Expected behavior\r\nIt recovers from my abuse and fixes itself.\r\n\r\n\r\n### Steps to Reproduce\r\n<!-- Ideally a code example could be provided so we can run it ourselves. -->\r\n<!-- If you are pasting code, use tripe backticks (```) around your code snippet. -->\r\n\r\n1. Delete all your data_manager stuff so you have a clean slate.\r\n2. Run the following code inside an interactive prompt:\r\n```python\r\nfrom sunpy.data import manager\r\[email protected]('test_file',\r\n ['http://data.sunpy.org/sample-data/predicted-sunspot-radio-flux.txt'],\r\n '4c85b04a5528aa97eb84a087450eda0421c71833820576330bba148564089b11')\r\ndef test_function():\r\n pass\r\n\r\ntest_function()\r\n```\r\n3. Observe how everything works.\r\n4. do a rm ~/sunpy/data_manager/*\r\n5. 
**Without** starting a new python session run: `test_function()`, observe the following error:\r\n```\r\n---------------------------------------------------------------------------\r\nOperationalError Traceback (most recent call last)\r\n<ipython-input-2-7e96d146b416> in <module>\r\n----> 1 test_function()\r\n\r\n~/.virtualenvs/sunpy-release/lib/python3.8/site-packages/sunpy/data/data_manager/manager.py in wrapper(*args, **kwargs)\r\n 64 file_path = self._cache.download(urls, redownload=True)\r\n 65 else:\r\n---> 66 details = self._cache.get_by_hash(sha_hash)\r\n 67 if not details:\r\n 68 # In case we are matching by hash and file does not exist\r\n\r\n~/.virtualenvs/sunpy-release/lib/python3.8/site-packages/sunpy/data/data_manager/cache.py in get_by_hash(self, sha_hash)\r\n 127 SHA-1 hash of the file.\r\n 128 \"\"\"\r\n--> 129 details = self._storage.find_by_key('file_hash', sha_hash)\r\n 130 return details\r\n 131 \r\n\r\n~/.virtualenvs/sunpy-release/lib/python3.8/site-packages/sunpy/data/data_manager/storage.py in find_by_key(self, key, value)\r\n 161 with self.connection() as conn:\r\n 162 cursor = conn.cursor()\r\n--> 163 cursor.execute(f'''SELECT * FROM {self._table_name}\r\n 164 WHERE {key}=\"{value}\"''')\r\n 165 row = cursor.fetchone()\r\n\r\nOperationalError: no such table: cache_storage\r\n\r\n```\r\n6. Restart your Python session and re-run:\r\n```python\r\nfrom sunpy.data import manager\r\[email protected]('test_file',\r\n ['http://data.sunpy.org/sample-data/predicted-sunspot-radio-flux.txt'],\r\n '4c85b04a5528aa97eb84a087450eda0421c71833820576330bba148564089b11')\r\ndef test_function():\r\n pass\r\n\r\ntest_function()\r\n```\r\n7. Observe how it's still broken.\r\n\r\nNote, that if you delete the contents of the `data_manager` dir and then *restart* your Python session before calling the data manager again it recovers fine.\r\n\r\n### System Details\r\n<!-- We at least need to know the SunPy version you are using. -->\r\n<!-- We provide a short function in SunPy that will provide some of the below information. -->\r\n<!-- It is sunpy.util.system_info(), this is optional but strongly recommended. -->\r\n\r\n - SunPy Version: 1.1.1\r\n - Python Version: 3.8\r\n\n", "before_files": [{"content": "\"\"\"\nStorage module contains the abstract implementation of storage\nfor `sunpy.data.data_manager.Cache` and a concrete implementation\nusing sqlite.\n\"\"\"\nimport sqlite3\nfrom abc import ABCMeta, abstractmethod\nfrom pathlib import Path\nfrom contextlib import contextmanager\n\n__all__ = [\n 'StorageProviderBase',\n 'SqliteStorage',\n 'InMemStorage',\n]\n\n\nclass StorageProviderBase(metaclass=ABCMeta):\n \"\"\"\n Base class for remote data manager storage providers.\n \"\"\"\n @abstractmethod\n def find_by_key(self, key, value):\n \"\"\"\n Returns the file details if value coresponding to the key\n found in storage. Returns `None` if hash not found.\n\n Parameters\n ----------\n key: `str`\n The key/column name of the field.\n value: `str`\n The value associated with the key of the entry.\n\n Returns\n -------\n `dict` or `None`\n `dict` contains the details of the file. 
`None` if hash not found.\n\n Raises\n ------\n ``KeyError``\n KeyError is raised if key does not exist.\n \"\"\"\n\n @abstractmethod\n def delete_by_key(self, key, value):\n \"\"\"\n Deletes the matching entry from the store.\n\n Parameters\n ----------\n key: `str`\n The key/column name of the field.\n value: `str`\n The value associated with the key of the entry.\n\n Raises\n ------\n ``KeyError``\n KeyError is raised if key does not exist.\n \"\"\"\n\n @abstractmethod\n def store(self, details):\n \"\"\"\n Stores the details in the storage.\n\n Parameters\n ----------\n details: `dict`\n Details to be stored.\n \"\"\"\n\n\nclass InMemStorage(StorageProviderBase):\n \"\"\"\n This provides a storage stored in memory.\n \"\"\"\n\n def __init__(self):\n self._store = []\n\n def store(self, details):\n self._store += [details]\n\n def delete_by_key(self, key, value):\n for i in self._store:\n if i[key] == value:\n self._store.remove(i)\n\n def find_by_key(self, key, value):\n for i in self._store:\n if i[key] == value:\n return i\n return None\n\n\nclass SqliteStorage(StorageProviderBase):\n \"\"\"\n This provides a sqlite backend for storage.\n\n Parameters\n ----------\n path: `str`\n Path to the database file.\n \"\"\"\n COLUMN_NAMES = [\n 'file_hash',\n 'file_path',\n 'url',\n 'time',\n ]\n\n def __init__(self, path):\n self._db_path = Path(path)\n self._table_name = 'cache_storage'\n\n self._db_path.parent.mkdir(parents=True, exist_ok=True)\n if not self._db_path.exists():\n # setup database\n self._setup()\n\n def _setup(self):\n schema = ' text, '.join(self.COLUMN_NAMES) + ' text'\n with self.connection(commit=True) as conn:\n # Do this in a try...except to prevent race conditions in the tests\n try:\n conn.execute(f'''CREATE TABLE {self._table_name}\n ({schema})''')\n except sqlite3.OperationalError as exc:\n if \"cache_storage already exists\" in str(exc):\n return\n raise exc\n\n @contextmanager\n def connection(self, commit=False):\n \"\"\"\n A context manager which provides an easy way to handle db connections.\n\n Parameters\n ----------\n commit: `bool`\n Whether to commit after succesful execution of db command.\n \"\"\"\n conn = sqlite3.connect(str(self._db_path))\n try:\n yield conn\n if commit:\n conn.commit()\n finally:\n conn.close()\n\n def find_by_key(self, key, value):\n if key not in self.COLUMN_NAMES:\n raise KeyError\n with self.connection() as conn:\n cursor = conn.cursor()\n cursor.execute(f'''SELECT * FROM {self._table_name}\n WHERE {key}=\"{value}\"''')\n row = cursor.fetchone()\n if row:\n return dict(zip(self.COLUMN_NAMES, row))\n return None\n\n def delete_by_key(self, key, value):\n if key not in self.COLUMN_NAMES:\n raise KeyError\n with self.connection(commit=True) as conn:\n cursor = conn.cursor()\n cursor.execute(f'''DELETE FROM {self._table_name}\n WHERE {key}=\"{value}\"''')\n\n def store(self, details):\n values = [details[k] for k in self.COLUMN_NAMES]\n placeholder = '?,' * len(values)\n placeholder = placeholder[:-1]\n with self.connection(commit=True) as conn:\n conn.execute(f'''INSERT INTO {self._table_name}\n VALUES ({placeholder})''', list(values))\n", "path": "sunpy/data/data_manager/storage.py"}], "after_files": [{"content": "\"\"\"\nStorage module contains the abstract implementation of storage\nfor `sunpy.data.data_manager.Cache` and a concrete implementation\nusing sqlite.\n\"\"\"\nimport sqlite3\nfrom abc import ABCMeta, abstractmethod\nfrom pathlib import Path\nfrom contextlib import contextmanager\n\n__all__ = [\n 
'StorageProviderBase',\n 'SqliteStorage',\n 'InMemStorage',\n]\n\n\nclass StorageProviderBase(metaclass=ABCMeta):\n \"\"\"\n Base class for remote data manager storage providers.\n \"\"\"\n @abstractmethod\n def find_by_key(self, key, value):\n \"\"\"\n Returns the file details if value coresponding to the key\n found in storage. Returns `None` if hash not found.\n\n Parameters\n ----------\n key: `str`\n The key/column name of the field.\n value: `str`\n The value associated with the key of the entry.\n\n Returns\n -------\n `dict` or `None`\n `dict` contains the details of the file. `None` if hash not found.\n\n Raises\n ------\n ``KeyError``\n KeyError is raised if key does not exist.\n \"\"\"\n\n @abstractmethod\n def delete_by_key(self, key, value):\n \"\"\"\n Deletes the matching entry from the store.\n\n Parameters\n ----------\n key: `str`\n The key/column name of the field.\n value: `str`\n The value associated with the key of the entry.\n\n Raises\n ------\n ``KeyError``\n KeyError is raised if key does not exist.\n \"\"\"\n\n @abstractmethod\n def store(self, details):\n \"\"\"\n Stores the details in the storage.\n\n Parameters\n ----------\n details: `dict`\n Details to be stored.\n \"\"\"\n\n\nclass InMemStorage(StorageProviderBase):\n \"\"\"\n This provides a storage stored in memory.\n \"\"\"\n\n def __init__(self):\n self._store = []\n\n def store(self, details):\n self._store += [details]\n\n def delete_by_key(self, key, value):\n for i in self._store:\n if i[key] == value:\n self._store.remove(i)\n\n def find_by_key(self, key, value):\n for i in self._store:\n if i[key] == value:\n return i\n return None\n\n\nclass SqliteStorage(StorageProviderBase):\n \"\"\"\n This provides a sqlite backend for storage.\n\n Parameters\n ----------\n path: `str`\n Path to the database file.\n \"\"\"\n COLUMN_NAMES = [\n 'file_hash',\n 'file_path',\n 'url',\n 'time',\n ]\n\n def __init__(self, path):\n self._db_path = Path(path)\n self._table_name = 'cache_storage'\n\n self._db_path.parent.mkdir(parents=True, exist_ok=True)\n if not self._db_path.exists():\n # setup database\n self._setup()\n\n def _setup(self):\n with self.connection(commit=True) as conn:\n self._create_table(conn)\n\n def _create_table(self, conn):\n schema = ' text, '.join(self.COLUMN_NAMES) + ' text'\n conn.execute(f'''CREATE TABLE IF NOT EXISTS {self._table_name} ({schema})''')\n\n @contextmanager\n def connection(self, commit=False):\n \"\"\"\n A context manager which provides an easy way to handle db connections.\n\n Parameters\n ----------\n commit: `bool`\n Whether to commit after succesful execution of db command.\n \"\"\"\n conn = sqlite3.connect(str(self._db_path))\n self._create_table(conn)\n try:\n yield conn\n if commit:\n conn.commit()\n finally:\n conn.close()\n\n def find_by_key(self, key, value):\n if key not in self.COLUMN_NAMES:\n raise KeyError\n with self.connection() as conn:\n cursor = conn.cursor()\n cursor.execute(f'''SELECT * FROM {self._table_name}\n WHERE {key}=\"{value}\"''')\n row = cursor.fetchone()\n if row:\n return dict(zip(self.COLUMN_NAMES, row))\n return None\n\n def delete_by_key(self, key, value):\n if key not in self.COLUMN_NAMES:\n raise KeyError\n with self.connection(commit=True) as conn:\n cursor = conn.cursor()\n cursor.execute(f'''DELETE FROM {self._table_name}\n WHERE {key}=\"{value}\"''')\n\n def store(self, details):\n values = [details[k] for k in self.COLUMN_NAMES]\n placeholder = '?,' * len(values)\n placeholder = placeholder[:-1]\n with self.connection(commit=True) as 
conn:\n conn.execute(f'''INSERT INTO {self._table_name}\n VALUES ({placeholder})''', list(values))\n", "path": "sunpy/data/data_manager/storage.py"}]} | 2,751 | 307 |
gh_patches_debug_30539 | rasdani/github-patches | git_diff | chainer__chainer-1266 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Concat reverse indexing not working correctly
The following works correctly:
```
a_data = np.zeros((10, 5))
b_data = np.zeros((10, 3))
a = Variable(a_data)
b = Variable(b_data)
F.concat((a, b), axis=1)
```
However, if I change the last line to:
```
F.concat((a, b), axis=-1)
```
it gives the error:
```
Invalid operation is performed in: Concat (Forward)
Expect: in_types[0].shape[1] == in_types[1].shape[1]
Actual: 5 != 3
```
--- END ISSUE ---
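The failure is easiest to see outside the framework: the per-dimension shape check in `check_type_forward` skips only the dimension where `d == self.axis`, and a negative `axis` never equals any non-negative `d`, so the equality check is also applied to the concatenation axis itself. A minimal NumPy-only sketch of the intended normalization (the `normalize_axis` helper is illustrative, not part of Chainer's API):

```python
import numpy as np

def normalize_axis(axis, ndim):
    # Map a possibly negative axis (e.g. -1) onto the range [0, ndim).
    if not -ndim <= axis < ndim:
        raise IndexError(f"axis {axis} is out of range for a {ndim}-dimensional input")
    return axis % ndim

a = np.zeros((10, 5))
b = np.zeros((10, 3))
axis = normalize_axis(-1, a.ndim)  # -1 -> 1
# Shape check that skips the (normalized) concatenation axis.
assert all(a.shape[d] == b.shape[d] for d in range(a.ndim) if d != axis)
print(np.concatenate((a, b), axis=-1).shape)  # (10, 8)
```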
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/array/concat.py`
Content:
```
1 import numpy
2
3 from chainer import cuda
4 from chainer import function
5 from chainer.utils import type_check
6
7
8 class Concat(function.Function):
9
10 """Concatenate multiple tensors towards specified axis."""
11
12 # concat along the channel dimension by default
13 def __init__(self, axis=1):
14 self.axis = axis
15
16 def check_type_forward(self, in_types):
17 type_check.expect(in_types.size() > 0)
18 type_check.expect(in_types[0].ndim >
19 type_check.Variable(self.axis, 'axis'))
20
21 ndim = in_types[0].ndim.eval()
22 for i in range(1, in_types.size().eval()):
23 type_check.expect(
24 in_types[0].dtype == in_types[i].dtype,
25 in_types[0].ndim == in_types[i].ndim,
26 )
27 for d in range(0, ndim):
28 if d == self.axis:
29 continue
30 type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])
31
32 def forward(self, xs):
33 xp = cuda.get_array_module(*xs)
34 return xp.concatenate(xs, axis=self.axis),
35
36 def backward(self, xs, gy):
37 if not xs[:-1]:
38 return gy
39
40 xp = cuda.get_array_module(*xs)
41 sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()
42 return xp.split(gy[0], sizes, axis=self.axis)
43
44
45 def concat(xs, axis=1):
46 """Concatenates given variables along an axis.
47
48 Args:
49 xs (tuple of Variables): Variables to be concatenated.
50 axis (int): Axis that the input arrays are concatenated along.
51
52 Returns:
53 ~chainer.Variable: Output variable.
54
55 """
56 return Concat(axis=axis)(*xs)
57
```
Path: `cupy/manipulation/split.py`
Content:
```
1 import numpy
2 import six
3
4
5 def array_split(ary, indices_or_sections, axis=0):
6 """Splits an array into multiple sub arrays along a given axis.
7
8 This function is almost equivalent to :func:`cupy.split`. The only
9 difference is that this function allows an integer sections that does not
10 evenly divide the axis.
11
12 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`
13
14 """
15 if ary.ndim <= axis:
16 raise IndexError('Axis exceeds ndim')
17 size = ary.shape[axis]
18
19 if numpy.isscalar(indices_or_sections):
20 each_size = (size - 1) // indices_or_sections + 1
21 indices = [i * each_size
22 for i in six.moves.range(1, indices_or_sections)]
23 else:
24 indices = indices_or_sections
25
26 if len(indices) == 0:
27 return [ary]
28
29 skip = (slice(None),) * axis
30 ret = []
31 i = 0
32 for index in indices:
33 ret.append(ary[skip + (slice(i, index),)])
34 i = index
35 ret.append(ary[skip + (slice(i, size),)])
36
37 return ret
38
39
40 def dsplit(ary, indices_or_sections):
41 """Splits an array into multiple sub arrays along the third axis.
42
43 This is equivalent to ``split`` with ``axis=2``.
44
45 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`
46
47 """
48 if ary.ndim <= 2:
49 raise ValueError('Cannot dsplit an array with less than 3 dimensions')
50 return split(ary, indices_or_sections, 2)
51
52
53 def hsplit(ary, indices_or_sections):
54 """Splits an array into multiple sub arrays horizontally.
55
56 This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
57 dimension, and otherwise that with ``axis=1``.
58
59 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`
60
61 """
62 if ary.ndim == 0:
63 raise ValueError('Cannot hsplit a zero-dimensional array')
64 if ary.ndim == 1:
65 return split(ary, indices_or_sections, 0)
66 else:
67 return split(ary, indices_or_sections, 1)
68
69
70 def split(ary, indices_or_sections, axis=0):
71 """Splits an array into multiple sub arrays along a given axis.
72
73 Args:
74 ary (cupy.ndarray): Array to split.
75 indices_or_sections (int or sequence of ints): A value indicating how
76 to divide the axis. If it is an integer, then is treated as the
77 number of sections, and the axis is evenly divided. Otherwise,
78 the integers indicate indices to split at. Note that the sequence
79 on the device memory is not allowed.
80 axis (int): Axis along which the array is split.
81
82 Returns:
83 A list of sub arrays. Each array is a view of the corresponding input
84 array.
85
86 .. seealso:: :func:`numpy.split`
87
88 """
89 if ary.ndim <= axis:
90 raise IndexError('Axis exceeds ndim')
91 size = ary.shape[axis]
92
93 if numpy.isscalar(indices_or_sections):
94 if size % indices_or_sections != 0:
95 raise ValueError(
96 'indices_or_sections must divide the size along the axes.\n'
97 'If you want to split the array into non-equally-sized '
98 'arrays, use array_split instead.')
99 return array_split(ary, indices_or_sections, axis)
100
101
102 def vsplit(ary, indices_or_sections):
103 """Splits an array into multiple sub arrays along the first axis.
104
105 This is equivalent to ``split`` with ``axis=0``.
106
107 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`
108
109 """
110 if ary.ndim <= 1:
111 raise ValueError('Cannot vsplit an array with less than 2 dimensions')
112 return split(ary, indices_or_sections, 0)
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/array/concat.py b/chainer/functions/array/concat.py
--- a/chainer/functions/array/concat.py
+++ b/chainer/functions/array/concat.py
@@ -18,14 +18,19 @@
type_check.expect(in_types[0].ndim >
type_check.Variable(self.axis, 'axis'))
+ type_check.expect(
+ -in_types[0].ndim <= self.axis,
+ self.axis < in_types[0].ndim
+ )
ndim = in_types[0].ndim.eval()
+ axis = self.axis % ndim
for i in range(1, in_types.size().eval()):
type_check.expect(
in_types[0].dtype == in_types[i].dtype,
in_types[0].ndim == in_types[i].ndim,
)
for d in range(0, ndim):
- if d == self.axis:
+ if d == axis:
continue
type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])
diff --git a/cupy/manipulation/split.py b/cupy/manipulation/split.py
--- a/cupy/manipulation/split.py
+++ b/cupy/manipulation/split.py
@@ -12,8 +12,10 @@
.. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`
"""
- if ary.ndim <= axis:
+ ndim = ary.ndim
+ if -ndim > axis or ndim <= axis:
raise IndexError('Axis exceeds ndim')
+ axis %= ndim
size = ary.shape[axis]
if numpy.isscalar(indices_or_sections):
@@ -33,7 +35,6 @@
ret.append(ary[skip + (slice(i, index),)])
i = index
ret.append(ary[skip + (slice(i, size),)])
-
return ret
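
As a quick check on the split side of the patch (the backward pass of `Concat`), plain NumPy already accepts a negative axis; the `cupy.array_split` change is assumed to bring CuPy in line with `numpy.array_split` here:

```python
import numpy as np

# The gradient of concat((10, 5), (10, 3), axis=-1) arrives as one (10, 8) array
# and must be split back at the cumulative widths of the inputs.
gy = np.arange(80, dtype=np.float64).reshape(10, 8)
sizes = np.cumsum([5])              # boundary after the first input's 5 columns
ga, gb = np.split(gy, sizes, axis=-1)
print(ga.shape, gb.shape)           # (10, 5) (10, 3)
```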
| {"golden_diff": "diff --git a/chainer/functions/array/concat.py b/chainer/functions/array/concat.py\n--- a/chainer/functions/array/concat.py\n+++ b/chainer/functions/array/concat.py\n@@ -18,14 +18,19 @@\n type_check.expect(in_types[0].ndim >\n type_check.Variable(self.axis, 'axis'))\n \n+ type_check.expect(\n+ -in_types[0].ndim <= self.axis,\n+ self.axis < in_types[0].ndim\n+ )\n ndim = in_types[0].ndim.eval()\n+ axis = self.axis % ndim\n for i in range(1, in_types.size().eval()):\n type_check.expect(\n in_types[0].dtype == in_types[i].dtype,\n in_types[0].ndim == in_types[i].ndim,\n )\n for d in range(0, ndim):\n- if d == self.axis:\n+ if d == axis:\n continue\n type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])\n \ndiff --git a/cupy/manipulation/split.py b/cupy/manipulation/split.py\n--- a/cupy/manipulation/split.py\n+++ b/cupy/manipulation/split.py\n@@ -12,8 +12,10 @@\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`\n \n \"\"\"\n- if ary.ndim <= axis:\n+ ndim = ary.ndim\n+ if -ndim > axis or ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n+ axis %= ndim\n size = ary.shape[axis]\n \n if numpy.isscalar(indices_or_sections):\n@@ -33,7 +35,6 @@\n ret.append(ary[skip + (slice(i, index),)])\n i = index\n ret.append(ary[skip + (slice(i, size),)])\n-\n return ret\n", "issue": "Concat reverse indexing not working correctly\nThe following works correctly:\n\n```\na_data = np.zeros((10, 5))\nb_data = np.zeros((10, 3))\na = Variable(a_data)\nb = Variable(b_data)\nF.concat((a, b), axis=1)\n```\n\nHowever, if I change the last line into:\n\n```\nF.concat((a, b), axis=-1)\n```\n\nit gives the error:\n\n```\nInvalid operation is performed in: Concat (Forward)\n\nExpect: in_types[0].shape[1] == in_types[1].shape[1]\nActual: 5 != 3\n```\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Concat(function.Function):\n\n \"\"\"Concatenate multiple tensors towards specified axis.\"\"\"\n\n # concat along the channel dimension by default\n def __init__(self, axis=1):\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() > 0)\n type_check.expect(in_types[0].ndim >\n type_check.Variable(self.axis, 'axis'))\n\n ndim = in_types[0].ndim.eval()\n for i in range(1, in_types.size().eval()):\n type_check.expect(\n in_types[0].dtype == in_types[i].dtype,\n in_types[0].ndim == in_types[i].ndim,\n )\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])\n\n def forward(self, xs):\n xp = cuda.get_array_module(*xs)\n return xp.concatenate(xs, axis=self.axis),\n\n def backward(self, xs, gy):\n if not xs[:-1]:\n return gy\n\n xp = cuda.get_array_module(*xs)\n sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()\n return xp.split(gy[0], sizes, axis=self.axis)\n\n\ndef concat(xs, axis=1):\n \"\"\"Concatenates given variables along an axis.\n\n Args:\n xs (tuple of Variables): Variables to be concatenated.\n axis (int): Axis that the input arrays are concatenated along.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Concat(axis=axis)(*xs)\n", "path": "chainer/functions/array/concat.py"}, {"content": "import numpy\nimport six\n\n\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n This function is almost equivalent to :func:`cupy.split`. 
The only\n difference is that this function allows an integer sections that does not\n evenly divide the axis.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n each_size = (size - 1) // indices_or_sections + 1\n indices = [i * each_size\n for i in six.moves.range(1, indices_or_sections)]\n else:\n indices = indices_or_sections\n\n if len(indices) == 0:\n return [ary]\n\n skip = (slice(None),) * axis\n ret = []\n i = 0\n for index in indices:\n ret.append(ary[skip + (slice(i, index),)])\n i = index\n ret.append(ary[skip + (slice(i, size),)])\n\n return ret\n\n\ndef dsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the third axis.\n\n This is equivalent to ``split`` with ``axis=2``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 2:\n raise ValueError('Cannot dsplit an array with less than 3 dimensions')\n return split(ary, indices_or_sections, 2)\n\n\ndef hsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays horizontally.\n\n This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one\n dimension, and otherwise that with ``axis=1``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`\n\n \"\"\"\n if ary.ndim == 0:\n raise ValueError('Cannot hsplit a zero-dimensional array')\n if ary.ndim == 1:\n return split(ary, indices_or_sections, 0)\n else:\n return split(ary, indices_or_sections, 1)\n\n\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n Args:\n ary (cupy.ndarray): Array to split.\n indices_or_sections (int or sequence of ints): A value indicating how\n to divide the axis. If it is an integer, then is treated as the\n number of sections, and the axis is evenly divided. Otherwise,\n the integers indicate indices to split at. Note that the sequence\n on the device memory is not allowed.\n axis (int): Axis along which the array is split.\n\n Returns:\n A list of sub arrays. Each array is a view of the corresponding input\n array.\n\n .. seealso:: :func:`numpy.split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n if size % indices_or_sections != 0:\n raise ValueError(\n 'indices_or_sections must divide the size along the axes.\\n'\n 'If you want to split the array into non-equally-sized '\n 'arrays, use array_split instead.')\n return array_split(ary, indices_or_sections, axis)\n\n\ndef vsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the first axis.\n\n This is equivalent to ``split`` with ``axis=0``.\n\n .. 
seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 1:\n raise ValueError('Cannot vsplit an array with less than 2 dimensions')\n return split(ary, indices_or_sections, 0)\n", "path": "cupy/manipulation/split.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Concat(function.Function):\n\n \"\"\"Concatenate multiple tensors towards specified axis.\"\"\"\n\n # concat along the channel dimension by default\n def __init__(self, axis=1):\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() > 0)\n type_check.expect(in_types[0].ndim >\n type_check.Variable(self.axis, 'axis'))\n\n type_check.expect(\n -in_types[0].ndim <= self.axis,\n self.axis < in_types[0].ndim\n )\n ndim = in_types[0].ndim.eval()\n axis = self.axis % ndim\n for i in range(1, in_types.size().eval()):\n type_check.expect(\n in_types[0].dtype == in_types[i].dtype,\n in_types[0].ndim == in_types[i].ndim,\n )\n for d in range(0, ndim):\n if d == axis:\n continue\n type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])\n\n def forward(self, xs):\n xp = cuda.get_array_module(*xs)\n return xp.concatenate(xs, axis=self.axis),\n\n def backward(self, xs, gy):\n if not xs[:-1]:\n return gy\n\n xp = cuda.get_array_module(*xs)\n sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()\n return xp.split(gy[0], sizes, axis=self.axis)\n\n\ndef concat(xs, axis=1):\n \"\"\"Concatenates given variables along an axis.\n\n Args:\n xs (tuple of Variables): Variables to be concatenated.\n axis (int): Axis that the input arrays are concatenated along.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Concat(axis=axis)(*xs)\n", "path": "chainer/functions/array/concat.py"}, {"content": "import numpy\nimport six\n\n\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n This function is almost equivalent to :func:`cupy.split`. The only\n difference is that this function allows an integer sections that does not\n evenly divide the axis.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`\n\n \"\"\"\n ndim = ary.ndim\n if -ndim > axis or ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n axis %= ndim\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n each_size = (size - 1) // indices_or_sections + 1\n indices = [i * each_size\n for i in six.moves.range(1, indices_or_sections)]\n else:\n indices = indices_or_sections\n\n if len(indices) == 0:\n return [ary]\n\n skip = (slice(None),) * axis\n ret = []\n i = 0\n for index in indices:\n ret.append(ary[skip + (slice(i, index),)])\n i = index\n ret.append(ary[skip + (slice(i, size),)])\n return ret\n\n\ndef dsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the third axis.\n\n This is equivalent to ``split`` with ``axis=2``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 2:\n raise ValueError('Cannot dsplit an array with less than 3 dimensions')\n return split(ary, indices_or_sections, 2)\n\n\ndef hsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays horizontally.\n\n This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one\n dimension, and otherwise that with ``axis=1``.\n\n .. 
seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`\n\n \"\"\"\n if ary.ndim == 0:\n raise ValueError('Cannot hsplit a zero-dimensional array')\n if ary.ndim == 1:\n return split(ary, indices_or_sections, 0)\n else:\n return split(ary, indices_or_sections, 1)\n\n\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n Args:\n ary (cupy.ndarray): Array to split.\n indices_or_sections (int or sequence of ints): A value indicating how\n to divide the axis. If it is an integer, then is treated as the\n number of sections, and the axis is evenly divided. Otherwise,\n the integers indicate indices to split at. Note that the sequence\n on the device memory is not allowed.\n axis (int): Axis along which the array is split.\n\n Returns:\n A list of sub arrays. Each array is a view of the corresponding input\n array.\n\n .. seealso:: :func:`numpy.split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n if size % indices_or_sections != 0:\n raise ValueError(\n 'indices_or_sections must divide the size along the axes.\\n'\n 'If you want to split the array into non-equally-sized '\n 'arrays, use array_split instead.')\n return array_split(ary, indices_or_sections, axis)\n\n\ndef vsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the first axis.\n\n This is equivalent to ``split`` with ``axis=0``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 1:\n raise ValueError('Cannot vsplit an array with less than 2 dimensions')\n return split(ary, indices_or_sections, 0)\n", "path": "cupy/manipulation/split.py"}]} | 2,042 | 436 |
gh_patches_debug_389 | rasdani/github-patches | git_diff | mlflow__mlflow-4368 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make mlflow compatible with protobuf 3.6.1
## What changes are proposed in this pull request?
Make mlflow compatible with protobuf 3.6.1:
For protobuf==3.6.1, add `EnumTypeWrapper.__getattr__` to access enum values.
## How is this patch tested?
N/A
## Release Notes
### Is this a user-facing change?
- [x] No. You can skip the rest of this section.
- [ ] Yes. Give a description of this change to be included in the release notes for MLflow users.
(Details in 1-2 sentences. You can just refer to another PR with a description if this PR is part of a larger change.)
### What component(s), interfaces, languages, and integrations does this PR affect?
Components
- [ ] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [ ] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs
- [ ] `area/server-infra`: MLflow server, JavaScript dev server
- [x] `area/tracking`: Tracking Service, tracking client APIs, autologging
Interface
- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
Language
- [ ] `language/r`: R APIs and clients
- [ ] `language/java`: Java APIs and clients
- [ ] `language/new`: Proposals for new client languages
Integrations
- [ ] `integrations/azure`: Azure and Azure ML integrations
- [ ] `integrations/sagemaker`: SageMaker integrations
- [ ] `integrations/databricks`: Databricks integrations
<!--
Insert an empty named anchor here to allow jumping to this section with a fragment URL
(e.g. https://github.com/mlflow/mlflow/pull/123#user-content-release-note-category).
Note that GitHub prefixes anchor names in markdown with "user-content-".
-->
<a name="release-note-category"></a>
### How should the PR be classified in the release notes? Choose one:
- [ ] `rn/breaking-change` - The PR will be mentioned in the "Breaking Changes" section
- [x] `rn/none` - No description will be included. The PR will be mentioned only by the PR number in the "Small Bugfixes and Documentation Updates" section
- [ ] `rn/feature` - A new user-facing feature worth mentioning in the release notes
- [ ] `rn/bug-fix` - A user-facing bug fix worth mentioning in the release notes
- [ ] `rn/documentation` - A user-facing documentation change worth mentioning in the release notes
--- END ISSUE ---
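The description above is terse; the patch below resolves the question by raising the dependency floor rather than adding a shim. For context, a hedged sketch of the runtime-guard alternative (not MLflow code; it only assumes `google.protobuf.__version__` and the already-declared `packaging` dependency):

```python
# Illustrative only: fail fast when the installed protobuf is older than the
# minimum the rest of the code base is tested against.
from packaging.version import Version
import google.protobuf

_MIN_PROTOBUF = Version("3.7.0")
if Version(google.protobuf.__version__) < _MIN_PROTOBUF:
    raise ImportError(
        f"protobuf>={_MIN_PROTOBUF} is required, found {google.protobuf.__version__}"
    )
```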
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import logging
3
4 from importlib.machinery import SourceFileLoader
5 from setuptools import setup, find_packages
6
7 _MLFLOW_SKINNY_ENV_VAR = "MLFLOW_SKINNY"
8
9 version = (
10 SourceFileLoader("mlflow.version", os.path.join("mlflow", "version.py")).load_module().VERSION
11 )
12
13
14 # Get a list of all files in the JS directory to include in our module
15 def package_files(directory):
16 paths = []
17 for (path, _, filenames) in os.walk(directory):
18 for filename in filenames:
19 paths.append(os.path.join("..", path, filename))
20 return paths
21
22
23 # Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build
24 # to include in the wheel, e.g. "../mlflow/server/js/build/index.html"
25 js_files = package_files("mlflow/server/js/build")
26 models_container_server_files = package_files("mlflow/models/container")
27 alembic_files = [
28 "../mlflow/store/db_migrations/alembic.ini",
29 "../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini",
30 ]
31 extra_files = ["ml-package-versions.yml", "pyspark/ml/log_model_allowlist.txt"]
32
33 """
34 Minimal requirements for the skinny MLflow client which provides a limited
35 subset of functionality such as: RESTful client functionality for Tracking and
36 Model Registry, as well as support for Project execution against local backends
37 and Databricks.
38 """
39 SKINNY_REQUIREMENTS = [
40 "click>=7.0",
41 "cloudpickle",
42 "databricks-cli>=0.8.7",
43 "entrypoints",
44 "gitpython>=2.1.0",
45 "pyyaml",
46 "protobuf>=3.6.0",
47 "pytz",
48 "requests>=2.17.3",
49 "packaging",
50 ]
51
52 """
53 These are the core requirements for the complete MLflow platform, which augments
54 the skinny client functionality with support for running the MLflow Tracking
55 Server & UI. It also adds project backends such as Docker and Kubernetes among
56 other capabilities.
57 """
58 CORE_REQUIREMENTS = SKINNY_REQUIREMENTS + [
59 "alembic<=1.4.1",
60 # Required
61 "docker>=4.0.0",
62 "Flask",
63 "gunicorn; platform_system != 'Windows'",
64 "numpy",
65 "pandas",
66 "prometheus-flask-exporter",
67 "querystring_parser",
68 # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433
69 "sqlparse>=0.3.1",
70 # Required to run the MLflow server against SQL-backed storage
71 "sqlalchemy",
72 "waitress; platform_system == 'Windows'",
73 ]
74
75 _is_mlflow_skinny = bool(os.environ.get(_MLFLOW_SKINNY_ENV_VAR))
76 logging.debug("{} env var is set: {}".format(_MLFLOW_SKINNY_ENV_VAR, _is_mlflow_skinny))
77
78 setup(
79 name="mlflow" if not _is_mlflow_skinny else "mlflow-skinny",
80 version=version,
81 packages=find_packages(exclude=["tests", "tests.*"]),
82 package_data={"mlflow": js_files + models_container_server_files + alembic_files + extra_files}
83 if not _is_mlflow_skinny
84 # include alembic files to enable usage of the skinny client with SQL databases
85 # if users install sqlalchemy, alembic, and sqlparse independently
86 else {"mlflow": alembic_files + extra_files},
87 install_requires=CORE_REQUIREMENTS if not _is_mlflow_skinny else SKINNY_REQUIREMENTS,
88 extras_require={
89 "extras": [
90 "scikit-learn",
91 # Required to log artifacts and models to HDFS artifact locations
92 "pyarrow",
93 # Required to log artifacts and models to AWS S3 artifact locations
94 "boto3",
95 "mleap",
96 # Required to log artifacts and models to GCS artifact locations
97 "google-cloud-storage",
98 "azureml-core>=1.2.0",
99 # Required to log artifacts to SFTP artifact locations
100 "pysftp",
101 # Required by the mlflow.projects module, when running projects against
102 # a remote Kubernetes cluster
103 "kubernetes",
104 ],
105 "sqlserver": ["mlflow-dbstore"],
106 "aliyun-oss": ["aliyunstoreplugin"],
107 },
108 entry_points="""
109 [console_scripts]
110 mlflow=mlflow.cli:cli
111 """,
112 zip_safe=False,
113 author="Databricks",
114 description="MLflow: A Platform for ML Development and Productionization",
115 long_description=open("README.rst").read()
116 if not _is_mlflow_skinny
117 else open("README_SKINNY.rst").read() + open("README.rst").read(),
118 long_description_content_type="text/x-rst",
119 license="Apache License 2.0",
120 classifiers=["Intended Audience :: Developers", "Programming Language :: Python :: 3.6"],
121 keywords="ml ai databricks",
122 url="https://mlflow.org/",
123 python_requires=">=3.6",
124 project_urls={
125 "Bug Tracker": "https://github.com/mlflow/mlflow/issues",
126 "Documentation": "https://mlflow.org/docs/latest/index.html",
127 "Source Code": "https://github.com/mlflow/mlflow",
128 },
129 )
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -43,7 +43,7 @@
"entrypoints",
"gitpython>=2.1.0",
"pyyaml",
- "protobuf>=3.6.0",
+ "protobuf>=3.7.0",
"pytz",
"requests>=2.17.3",
"packaging",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -43,7 +43,7 @@\n \"entrypoints\",\n \"gitpython>=2.1.0\",\n \"pyyaml\",\n- \"protobuf>=3.6.0\",\n+ \"protobuf>=3.7.0\",\n \"pytz\",\n \"requests>=2.17.3\",\n \"packaging\",\n", "issue": "Make mlflow compatible with protobuf 3.6.1\n## What changes are proposed in this pull request?\r\n\r\nMake mlflow compatible with protobuf 3.6.1:\r\nfor protobuf ==3.6.1\r\nAdd EnumTypeWrapper.__getattr__ to access values\r\n\r\n## How is this patch tested?\r\n\r\nN/A\r\n\r\n## Release Notes\r\n\r\n### Is this a user-facing change?\r\n\r\n- [x] No. You can skip the rest of this section.\r\n- [ ] Yes. Give a description of this change to be included in the release notes for MLflow users.\r\n\r\n(Details in 1-2 sentences. You can just refer to another PR with a description if this PR is part of a larger change.)\r\n\r\n### What component(s), interfaces, languages, and integrations does this PR affect?\r\nComponents \r\n- [ ] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs\r\n- [ ] `area/server-infra`: MLflow server, JavaScript dev server\r\n- [x] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\nInterface \r\n- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\nLanguage \r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\nIntegrations\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\r\n\r\n<!--\r\nInsert an empty named anchor here to allow jumping to this section with a fragment URL\r\n(e.g. https://github.com/mlflow/mlflow/pull/123#user-content-release-note-category).\r\nNote that GitHub prefixes anchor names in markdown with \"user-content-\".\r\n-->\r\n<a name=\"release-note-category\"></a>\r\n### How should the PR be classified in the release notes? Choose one:\r\n\r\n- [ ] `rn/breaking-change` - The PR will be mentioned in the \"Breaking Changes\" section\r\n- [x] `rn/none` - No description will be included. 
The PR will be mentioned only by the PR number in the \"Small Bugfixes and Documentation Updates\" section\r\n- [ ] `rn/feature` - A new user-facing feature worth mentioning in the release notes\r\n- [ ] `rn/bug-fix` - A user-facing bug fix worth mentioning in the release notes\r\n- [ ] `rn/documentation` - A user-facing documentation change worth mentioning in the release notes\r\n\n", "before_files": [{"content": "import os\nimport logging\n\nfrom importlib.machinery import SourceFileLoader\nfrom setuptools import setup, find_packages\n\n_MLFLOW_SKINNY_ENV_VAR = \"MLFLOW_SKINNY\"\n\nversion = (\n SourceFileLoader(\"mlflow.version\", os.path.join(\"mlflow\", \"version.py\")).load_module().VERSION\n)\n\n\n# Get a list of all files in the JS directory to include in our module\ndef package_files(directory):\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join(\"..\", path, filename))\n return paths\n\n\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\n# to include in the wheel, e.g. \"../mlflow/server/js/build/index.html\"\njs_files = package_files(\"mlflow/server/js/build\")\nmodels_container_server_files = package_files(\"mlflow/models/container\")\nalembic_files = [\n \"../mlflow/store/db_migrations/alembic.ini\",\n \"../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini\",\n]\nextra_files = [\"ml-package-versions.yml\", \"pyspark/ml/log_model_allowlist.txt\"]\n\n\"\"\"\nMinimal requirements for the skinny MLflow client which provides a limited\nsubset of functionality such as: RESTful client functionality for Tracking and\nModel Registry, as well as support for Project execution against local backends\nand Databricks.\n\"\"\"\nSKINNY_REQUIREMENTS = [\n \"click>=7.0\",\n \"cloudpickle\",\n \"databricks-cli>=0.8.7\",\n \"entrypoints\",\n \"gitpython>=2.1.0\",\n \"pyyaml\",\n \"protobuf>=3.6.0\",\n \"pytz\",\n \"requests>=2.17.3\",\n \"packaging\",\n]\n\n\"\"\"\nThese are the core requirements for the complete MLflow platform, which augments\nthe skinny client functionality with support for running the MLflow Tracking\nServer & UI. 
It also adds project backends such as Docker and Kubernetes among\nother capabilities.\n\"\"\"\nCORE_REQUIREMENTS = SKINNY_REQUIREMENTS + [\n \"alembic<=1.4.1\",\n # Required\n \"docker>=4.0.0\",\n \"Flask\",\n \"gunicorn; platform_system != 'Windows'\",\n \"numpy\",\n \"pandas\",\n \"prometheus-flask-exporter\",\n \"querystring_parser\",\n # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433\n \"sqlparse>=0.3.1\",\n # Required to run the MLflow server against SQL-backed storage\n \"sqlalchemy\",\n \"waitress; platform_system == 'Windows'\",\n]\n\n_is_mlflow_skinny = bool(os.environ.get(_MLFLOW_SKINNY_ENV_VAR))\nlogging.debug(\"{} env var is set: {}\".format(_MLFLOW_SKINNY_ENV_VAR, _is_mlflow_skinny))\n\nsetup(\n name=\"mlflow\" if not _is_mlflow_skinny else \"mlflow-skinny\",\n version=version,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"mlflow\": js_files + models_container_server_files + alembic_files + extra_files}\n if not _is_mlflow_skinny\n # include alembic files to enable usage of the skinny client with SQL databases\n # if users install sqlalchemy, alembic, and sqlparse independently\n else {\"mlflow\": alembic_files + extra_files},\n install_requires=CORE_REQUIREMENTS if not _is_mlflow_skinny else SKINNY_REQUIREMENTS,\n extras_require={\n \"extras\": [\n \"scikit-learn\",\n # Required to log artifacts and models to HDFS artifact locations\n \"pyarrow\",\n # Required to log artifacts and models to AWS S3 artifact locations\n \"boto3\",\n \"mleap\",\n # Required to log artifacts and models to GCS artifact locations\n \"google-cloud-storage\",\n \"azureml-core>=1.2.0\",\n # Required to log artifacts to SFTP artifact locations\n \"pysftp\",\n # Required by the mlflow.projects module, when running projects against\n # a remote Kubernetes cluster\n \"kubernetes\",\n ],\n \"sqlserver\": [\"mlflow-dbstore\"],\n \"aliyun-oss\": [\"aliyunstoreplugin\"],\n },\n entry_points=\"\"\"\n [console_scripts]\n mlflow=mlflow.cli:cli\n \"\"\",\n zip_safe=False,\n author=\"Databricks\",\n description=\"MLflow: A Platform for ML Development and Productionization\",\n long_description=open(\"README.rst\").read()\n if not _is_mlflow_skinny\n else open(\"README_SKINNY.rst\").read() + open(\"README.rst\").read(),\n long_description_content_type=\"text/x-rst\",\n license=\"Apache License 2.0\",\n classifiers=[\"Intended Audience :: Developers\", \"Programming Language :: Python :: 3.6\"],\n keywords=\"ml ai databricks\",\n url=\"https://mlflow.org/\",\n python_requires=\">=3.6\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/mlflow/mlflow/issues\",\n \"Documentation\": \"https://mlflow.org/docs/latest/index.html\",\n \"Source Code\": \"https://github.com/mlflow/mlflow\",\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport logging\n\nfrom importlib.machinery import SourceFileLoader\nfrom setuptools import setup, find_packages\n\n_MLFLOW_SKINNY_ENV_VAR = \"MLFLOW_SKINNY\"\n\nversion = (\n SourceFileLoader(\"mlflow.version\", os.path.join(\"mlflow\", \"version.py\")).load_module().VERSION\n)\n\n\n# Get a list of all files in the JS directory to include in our module\ndef package_files(directory):\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join(\"..\", path, filename))\n return paths\n\n\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\n# to include in the wheel, e.g. 
\"../mlflow/server/js/build/index.html\"\njs_files = package_files(\"mlflow/server/js/build\")\nmodels_container_server_files = package_files(\"mlflow/models/container\")\nalembic_files = [\n \"../mlflow/store/db_migrations/alembic.ini\",\n \"../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini\",\n]\nextra_files = [\"ml-package-versions.yml\", \"pyspark/ml/log_model_allowlist.txt\"]\n\n\"\"\"\nMinimal requirements for the skinny MLflow client which provides a limited\nsubset of functionality such as: RESTful client functionality for Tracking and\nModel Registry, as well as support for Project execution against local backends\nand Databricks.\n\"\"\"\nSKINNY_REQUIREMENTS = [\n \"click>=7.0\",\n \"cloudpickle\",\n \"databricks-cli>=0.8.7\",\n \"entrypoints\",\n \"gitpython>=2.1.0\",\n \"pyyaml\",\n \"protobuf>=3.7.0\",\n \"pytz\",\n \"requests>=2.17.3\",\n \"packaging\",\n]\n\n\"\"\"\nThese are the core requirements for the complete MLflow platform, which augments\nthe skinny client functionality with support for running the MLflow Tracking\nServer & UI. It also adds project backends such as Docker and Kubernetes among\nother capabilities.\n\"\"\"\nCORE_REQUIREMENTS = SKINNY_REQUIREMENTS + [\n \"alembic<=1.4.1\",\n # Required\n \"docker>=4.0.0\",\n \"Flask\",\n \"gunicorn; platform_system != 'Windows'\",\n \"numpy\",\n \"pandas\",\n \"prometheus-flask-exporter\",\n \"querystring_parser\",\n # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433\n \"sqlparse>=0.3.1\",\n # Required to run the MLflow server against SQL-backed storage\n \"sqlalchemy\",\n \"waitress; platform_system == 'Windows'\",\n]\n\n_is_mlflow_skinny = bool(os.environ.get(_MLFLOW_SKINNY_ENV_VAR))\nlogging.debug(\"{} env var is set: {}\".format(_MLFLOW_SKINNY_ENV_VAR, _is_mlflow_skinny))\n\nsetup(\n name=\"mlflow\" if not _is_mlflow_skinny else \"mlflow-skinny\",\n version=version,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"mlflow\": js_files + models_container_server_files + alembic_files + extra_files}\n if not _is_mlflow_skinny\n # include alembic files to enable usage of the skinny client with SQL databases\n # if users install sqlalchemy, alembic, and sqlparse independently\n else {\"mlflow\": alembic_files + extra_files},\n install_requires=CORE_REQUIREMENTS if not _is_mlflow_skinny else SKINNY_REQUIREMENTS,\n extras_require={\n \"extras\": [\n \"scikit-learn\",\n # Required to log artifacts and models to HDFS artifact locations\n \"pyarrow\",\n # Required to log artifacts and models to AWS S3 artifact locations\n \"boto3\",\n \"mleap\",\n # Required to log artifacts and models to GCS artifact locations\n \"google-cloud-storage\",\n \"azureml-core>=1.2.0\",\n # Required to log artifacts to SFTP artifact locations\n \"pysftp\",\n # Required by the mlflow.projects module, when running projects against\n # a remote Kubernetes cluster\n \"kubernetes\",\n ],\n \"sqlserver\": [\"mlflow-dbstore\"],\n \"aliyun-oss\": [\"aliyunstoreplugin\"],\n },\n entry_points=\"\"\"\n [console_scripts]\n mlflow=mlflow.cli:cli\n \"\"\",\n zip_safe=False,\n author=\"Databricks\",\n description=\"MLflow: A Platform for ML Development and Productionization\",\n long_description=open(\"README.rst\").read()\n if not _is_mlflow_skinny\n else open(\"README_SKINNY.rst\").read() + open(\"README.rst\").read(),\n long_description_content_type=\"text/x-rst\",\n license=\"Apache License 2.0\",\n classifiers=[\"Intended Audience :: Developers\", \"Programming Language :: Python :: 3.6\"],\n 
keywords=\"ml ai databricks\",\n url=\"https://mlflow.org/\",\n python_requires=\">=3.6\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/mlflow/mlflow/issues\",\n \"Documentation\": \"https://mlflow.org/docs/latest/index.html\",\n \"Source Code\": \"https://github.com/mlflow/mlflow\",\n },\n)\n", "path": "setup.py"}]} | 2,417 | 96 |
gh_patches_debug_5463 | rasdani/github-patches | git_diff | ipython__ipython-14172 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to run `ipapp.py`
There is a problem with the `ipython/terminal/ipapp.py` file.
On the first line is this:
```python
#!/usr/bin/env python
```
this is a mistake, because further down in the code there is a relative import that makes the file not executable with `./ipapp.py`.
So please remove `#!/usr/bin/env python`.
Steps to reproduce:
```
# download repo
cd /tmp/
git clone https://github.com/ipython/ipython.git
# run file
cd ipython/IPython/terminal/
./ipapp.py
```
Example of error:
```
Traceback (most recent call last):
File "/tmp/ipython/IPython/terminal/./ipapp.py", line 36, in <module>
from .interactiveshell import TerminalInteractiveShell
ImportError: attempted relative import with no known parent package
```
--- END ISSUE ---
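The error is the generic CPython behaviour for a relative import in a directly executed file, not anything IPython-specific. A self-contained reproduction (the `demo`/`helpers`/`greet` names are made up for illustration):

```python
import pathlib
import subprocess
import sys
import tempfile

# Build a throwaway package whose entry module uses a relative import.
with tempfile.TemporaryDirectory() as tmp:
    pkg = pathlib.Path(tmp, "demo")
    pkg.mkdir()
    (pkg / "__init__.py").write_text("")
    (pkg / "helpers.py").write_text("def greet():\n    print('hello')\n")
    (pkg / "app.py").write_text("from .helpers import greet\ngreet()\n")

    # Direct execution: the module has no parent package, so the relative import fails.
    direct = subprocess.run([sys.executable, str(pkg / "app.py")],
                            capture_output=True, text=True)
    print("direct run succeeded:", direct.returncode == 0)      # False

    # Running it as a module from the package root lets the relative import resolve.
    as_module = subprocess.run([sys.executable, "-m", "demo.app"],
                               cwd=tmp, capture_output=True, text=True)
    print("module run succeeded:", as_module.returncode == 0)   # True
```

This is why the supported entry points are the `ipython` console script or `python -m IPython`, rather than executing `ipapp.py` directly.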
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/terminal/ipapp.py`
Content:
```
1 #!/usr/bin/env python
2 # encoding: utf-8
3 """
4 The :class:`~traitlets.config.application.Application` object for the command
5 line :command:`ipython` program.
6 """
7
8 # Copyright (c) IPython Development Team.
9 # Distributed under the terms of the Modified BSD License.
10
11
12 import logging
13 import os
14 import sys
15 import warnings
16
17 from traitlets.config.loader import Config
18 from traitlets.config.application import boolean_flag, catch_config_error
19 from IPython.core import release
20 from IPython.core import usage
21 from IPython.core.completer import IPCompleter
22 from IPython.core.crashhandler import CrashHandler
23 from IPython.core.formatters import PlainTextFormatter
24 from IPython.core.history import HistoryManager
25 from IPython.core.application import (
26 ProfileDir, BaseIPythonApplication, base_flags, base_aliases
27 )
28 from IPython.core.magic import MagicsManager
29 from IPython.core.magics import (
30 ScriptMagics, LoggingMagics
31 )
32 from IPython.core.shellapp import (
33 InteractiveShellApp, shell_flags, shell_aliases
34 )
35 from IPython.extensions.storemagic import StoreMagics
36 from .interactiveshell import TerminalInteractiveShell
37 from IPython.paths import get_ipython_dir
38 from traitlets import (
39 Bool, List, default, observe, Type
40 )
41
42 #-----------------------------------------------------------------------------
43 # Globals, utilities and helpers
44 #-----------------------------------------------------------------------------
45
46 _examples = """
47 ipython --matplotlib # enable matplotlib integration
48 ipython --matplotlib=qt # enable matplotlib integration with qt4 backend
49
50 ipython --log-level=DEBUG # set logging to DEBUG
51 ipython --profile=foo # start with profile foo
52
53 ipython profile create foo # create profile foo w/ default config files
54 ipython help profile # show the help for the profile subcmd
55
56 ipython locate # print the path to the IPython directory
57 ipython locate profile foo # print the path to the directory for profile `foo`
58 """
59
60 #-----------------------------------------------------------------------------
61 # Crash handler for this application
62 #-----------------------------------------------------------------------------
63
64 class IPAppCrashHandler(CrashHandler):
65 """sys.excepthook for IPython itself, leaves a detailed report on disk."""
66
67 def __init__(self, app):
68 contact_name = release.author
69 contact_email = release.author_email
70 bug_tracker = 'https://github.com/ipython/ipython/issues'
71 super(IPAppCrashHandler,self).__init__(
72 app, contact_name, contact_email, bug_tracker
73 )
74
75 def make_report(self,traceback):
76 """Return a string containing a crash report."""
77
78 sec_sep = self.section_sep
79 # Start with parent report
80 report = [super(IPAppCrashHandler, self).make_report(traceback)]
81 # Add interactive-specific info we may have
82 rpt_add = report.append
83 try:
84 rpt_add(sec_sep+"History of session input:")
85 for line in self.app.shell.user_ns['_ih']:
86 rpt_add(line)
87 rpt_add('\n*** Last line of input (may not be in above history):\n')
88 rpt_add(self.app.shell._last_input_line+'\n')
89 except:
90 pass
91
92 return ''.join(report)
93
94 #-----------------------------------------------------------------------------
95 # Aliases and Flags
96 #-----------------------------------------------------------------------------
97 flags = dict(base_flags)
98 flags.update(shell_flags)
99 frontend_flags = {}
100 addflag = lambda *args: frontend_flags.update(boolean_flag(*args))
101 addflag('autoedit-syntax', 'TerminalInteractiveShell.autoedit_syntax',
102 'Turn on auto editing of files with syntax errors.',
103 'Turn off auto editing of files with syntax errors.'
104 )
105 addflag('simple-prompt', 'TerminalInteractiveShell.simple_prompt',
106 "Force simple minimal prompt using `raw_input`",
107 "Use a rich interactive prompt with prompt_toolkit",
108 )
109
110 addflag('banner', 'TerminalIPythonApp.display_banner',
111 "Display a banner upon starting IPython.",
112 "Don't display a banner upon starting IPython."
113 )
114 addflag('confirm-exit', 'TerminalInteractiveShell.confirm_exit',
115 """Set to confirm when you try to exit IPython with an EOF (Control-D
116 in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
117 you can force a direct exit without any confirmation.""",
118 "Don't prompt the user when exiting."
119 )
120 addflag('term-title', 'TerminalInteractiveShell.term_title',
121 "Enable auto setting the terminal title.",
122 "Disable auto setting the terminal title."
123 )
124 classic_config = Config()
125 classic_config.InteractiveShell.cache_size = 0
126 classic_config.PlainTextFormatter.pprint = False
127 classic_config.TerminalInteractiveShell.prompts_class='IPython.terminal.prompts.ClassicPrompts'
128 classic_config.InteractiveShell.separate_in = ''
129 classic_config.InteractiveShell.separate_out = ''
130 classic_config.InteractiveShell.separate_out2 = ''
131 classic_config.InteractiveShell.colors = 'NoColor'
132 classic_config.InteractiveShell.xmode = 'Plain'
133
134 frontend_flags['classic']=(
135 classic_config,
136 "Gives IPython a similar feel to the classic Python prompt."
137 )
138 # # log doesn't make so much sense this way anymore
139 # paa('--log','-l',
140 # action='store_true', dest='InteractiveShell.logstart',
141 # help="Start logging to the default log file (./ipython_log.py).")
142 #
143 # # quick is harder to implement
144 frontend_flags['quick']=(
145 {'TerminalIPythonApp' : {'quick' : True}},
146 "Enable quick startup with no config files."
147 )
148
149 frontend_flags['i'] = (
150 {'TerminalIPythonApp' : {'force_interact' : True}},
151 """If running code from the command line, become interactive afterwards.
152 It is often useful to follow this with `--` to treat remaining flags as
153 script arguments.
154 """
155 )
156 flags.update(frontend_flags)
157
158 aliases = dict(base_aliases)
159 aliases.update(shell_aliases) # type: ignore[arg-type]
160
161 #-----------------------------------------------------------------------------
162 # Main classes and functions
163 #-----------------------------------------------------------------------------
164
165
166 class LocateIPythonApp(BaseIPythonApplication):
167 description = """print the path to the IPython dir"""
168 subcommands = dict(
169 profile=('IPython.core.profileapp.ProfileLocate',
170 "print the path to an IPython profile directory",
171 ),
172 )
173 def start(self):
174 if self.subapp is not None:
175 return self.subapp.start()
176 else:
177 print(self.ipython_dir)
178
179
180 class TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp):
181 name = u'ipython'
182 description = usage.cl_usage
183 crash_handler_class = IPAppCrashHandler # typing: ignore[assignment]
184 examples = _examples
185
186 flags = flags
187 aliases = aliases
188 classes = List()
189
190 interactive_shell_class = Type(
191 klass=object, # use default_value otherwise which only allow subclasses.
192 default_value=TerminalInteractiveShell,
193 help="Class to use to instantiate the TerminalInteractiveShell object. Useful for custom Frontends"
194 ).tag(config=True)
195
196 @default('classes')
197 def _classes_default(self):
198 """This has to be in a method, for TerminalIPythonApp to be available."""
199 return [
200 InteractiveShellApp, # ShellApp comes before TerminalApp, because
201 self.__class__, # it will also affect subclasses (e.g. QtConsole)
202 TerminalInteractiveShell,
203 HistoryManager,
204 MagicsManager,
205 ProfileDir,
206 PlainTextFormatter,
207 IPCompleter,
208 ScriptMagics,
209 LoggingMagics,
210 StoreMagics,
211 ]
212
213 subcommands = dict(
214 profile = ("IPython.core.profileapp.ProfileApp",
215 "Create and manage IPython profiles."
216 ),
217 kernel = ("ipykernel.kernelapp.IPKernelApp",
218 "Start a kernel without an attached frontend."
219 ),
220 locate=('IPython.terminal.ipapp.LocateIPythonApp',
221 LocateIPythonApp.description
222 ),
223 history=('IPython.core.historyapp.HistoryApp',
224 "Manage the IPython history database."
225 ),
226 )
227
228
229 # *do* autocreate requested profile, but don't create the config file.
230 auto_create=Bool(True)
231 # configurables
232 quick = Bool(False,
233 help="""Start IPython quickly by skipping the loading of config files."""
234 ).tag(config=True)
235 @observe('quick')
236 def _quick_changed(self, change):
237 if change['new']:
238 self.load_config_file = lambda *a, **kw: None
239
240 display_banner = Bool(True,
241 help="Whether to display a banner upon starting IPython."
242 ).tag(config=True)
243
244 # if there is code of files to run from the cmd line, don't interact
245 # unless the --i flag (App.force_interact) is true.
246 force_interact = Bool(False,
247 help="""If a command or file is given via the command-line,
248 e.g. 'ipython foo.py', start an interactive shell after executing the
249 file or command."""
250 ).tag(config=True)
251 @observe('force_interact')
252 def _force_interact_changed(self, change):
253 if change['new']:
254 self.interact = True
255
256 @observe('file_to_run', 'code_to_run', 'module_to_run')
257 def _file_to_run_changed(self, change):
258 new = change['new']
259 if new:
260 self.something_to_run = True
261 if new and not self.force_interact:
262 self.interact = False
263
264 # internal, not-configurable
265 something_to_run=Bool(False)
266
267 @catch_config_error
268 def initialize(self, argv=None):
269 """Do actions after construct, but before starting the app."""
270 super(TerminalIPythonApp, self).initialize(argv)
271 if self.subapp is not None:
272 # don't bother initializing further, starting subapp
273 return
274 # print self.extra_args
275 if self.extra_args and not self.something_to_run:
276 self.file_to_run = self.extra_args[0]
277 self.init_path()
278 # create the shell
279 self.init_shell()
280 # and draw the banner
281 self.init_banner()
282 # Now a variety of things that happen after the banner is printed.
283 self.init_gui_pylab()
284 self.init_extensions()
285 self.init_code()
286
287 def init_shell(self):
288 """initialize the InteractiveShell instance"""
289 # Create an InteractiveShell instance.
290 # shell.display_banner should always be False for the terminal
291 # based app, because we call shell.show_banner() by hand below
292 # so the banner shows *before* all extension loading stuff.
293 self.shell = self.interactive_shell_class.instance(parent=self,
294 profile_dir=self.profile_dir,
295 ipython_dir=self.ipython_dir, user_ns=self.user_ns)
296 self.shell.configurables.append(self)
297
298 def init_banner(self):
299 """optionally display the banner"""
300 if self.display_banner and self.interact:
301 self.shell.show_banner()
302 # Make sure there is a space below the banner.
303 if self.log_level <= logging.INFO: print()
304
305 def _pylab_changed(self, name, old, new):
306 """Replace --pylab='inline' with --pylab='auto'"""
307 if new == 'inline':
308 warnings.warn("'inline' not available as pylab backend, "
309 "using 'auto' instead.")
310 self.pylab = 'auto'
311
312 def start(self):
313 if self.subapp is not None:
314 return self.subapp.start()
315 # perform any prexec steps:
316 if self.interact:
317 self.log.debug("Starting IPython's mainloop...")
318 self.shell.mainloop()
319 else:
320 self.log.debug("IPython not interactive...")
321 self.shell.restore_term_title()
322 if not self.shell.last_execution_succeeded:
323 sys.exit(1)
324
325 def load_default_config(ipython_dir=None):
326 """Load the default config file from the default ipython_dir.
327
328 This is useful for embedded shells.
329 """
330 if ipython_dir is None:
331 ipython_dir = get_ipython_dir()
332
333 profile_dir = os.path.join(ipython_dir, 'profile_default')
334 app = TerminalIPythonApp()
335 app.config_file_paths.append(profile_dir)
336 app.load_config_file()
337 return app.config
338
339 launch_new_instance = TerminalIPythonApp.launch_instance
340
341
342 if __name__ == '__main__':
343 launch_new_instance()
344
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/terminal/ipapp.py b/IPython/terminal/ipapp.py
--- a/IPython/terminal/ipapp.py
+++ b/IPython/terminal/ipapp.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# encoding: utf-8
"""
The :class:`~traitlets.config.application.Application` object for the command
@@ -337,7 +336,3 @@
return app.config
launch_new_instance = TerminalIPythonApp.launch_instance
-
-
-if __name__ == '__main__':
- launch_new_instance()
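
Since the patch drops the shebang and the `__main__` block, `ipapp.py` is no longer meant to be executed directly. A minimal sketch of how the same entry point stays reachable after the change (the import path and `launch_new_instance` come from the module itself; the wrapper script is an assumption for illustration):
```python
# run_ipython.py -- hypothetical wrapper; equivalent to running `ipython`
# from the command line, or `python -m IPython`.
from IPython.terminal.ipapp import launch_new_instance

if __name__ == "__main__":
    launch_new_instance()
```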
| {"golden_diff": "diff --git a/IPython/terminal/ipapp.py b/IPython/terminal/ipapp.py\n--- a/IPython/terminal/ipapp.py\n+++ b/IPython/terminal/ipapp.py\n@@ -1,4 +1,3 @@\n-#!/usr/bin/env python\n # encoding: utf-8\n \"\"\"\n The :class:`~traitlets.config.application.Application` object for the command\n@@ -337,7 +336,3 @@\n return app.config\n \n launch_new_instance = TerminalIPythonApp.launch_instance\n-\n-\n-if __name__ == '__main__':\n- launch_new_instance()\n", "issue": "Unable to run `ipapp.py`\nThere is a problem with the `ipython/terminal/ipapp.py` file.\r\n\r\nOn the first line is this:\r\n```python\r\n#!/usr/bin/env python\r\n```\r\nthis is an mistake, because further down in the code there is a relative import that makes the file not executable with `./ipapp.py`.\r\nSo please remove `#!/usr/bin/env python`.\r\n\r\nSteps to reproduce:\r\n\r\n```\r\n# download repo\r\ncd /tmp/\r\ngit clone https://github.com/ipython/ipython.git\r\n\r\n# run file\r\ncd ipython/IPython/terminal/\r\n./ipapp.py\r\n```\r\n\r\nExample of error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/tmp/ipython/IPython/terminal/./ipapp.py\", line 36, in <module>\r\n from .interactiveshell import TerminalInteractiveShell\r\nImportError: attempted relative import with no known parent package\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nThe :class:`~traitlets.config.application.Application` object for the command\nline :command:`ipython` program.\n\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n\nimport logging\nimport os\nimport sys\nimport warnings\n\nfrom traitlets.config.loader import Config\nfrom traitlets.config.application import boolean_flag, catch_config_error\nfrom IPython.core import release\nfrom IPython.core import usage\nfrom IPython.core.completer import IPCompleter\nfrom IPython.core.crashhandler import CrashHandler\nfrom IPython.core.formatters import PlainTextFormatter\nfrom IPython.core.history import HistoryManager\nfrom IPython.core.application import (\n ProfileDir, BaseIPythonApplication, base_flags, base_aliases\n)\nfrom IPython.core.magic import MagicsManager\nfrom IPython.core.magics import (\n ScriptMagics, LoggingMagics\n)\nfrom IPython.core.shellapp import (\n InteractiveShellApp, shell_flags, shell_aliases\n)\nfrom IPython.extensions.storemagic import StoreMagics\nfrom .interactiveshell import TerminalInteractiveShell\nfrom IPython.paths import get_ipython_dir\nfrom traitlets import (\n Bool, List, default, observe, Type\n)\n\n#-----------------------------------------------------------------------------\n# Globals, utilities and helpers\n#-----------------------------------------------------------------------------\n\n_examples = \"\"\"\nipython --matplotlib # enable matplotlib integration\nipython --matplotlib=qt # enable matplotlib integration with qt4 backend\n\nipython --log-level=DEBUG # set logging to DEBUG\nipython --profile=foo # start with profile foo\n\nipython profile create foo # create profile foo w/ default config files\nipython help profile # show the help for the profile subcmd\n\nipython locate # print the path to the IPython directory\nipython locate profile foo # print the path to the directory for profile `foo`\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Crash handler for this application\n#-----------------------------------------------------------------------------\n\nclass 
IPAppCrashHandler(CrashHandler):\n \"\"\"sys.excepthook for IPython itself, leaves a detailed report on disk.\"\"\"\n\n def __init__(self, app):\n contact_name = release.author\n contact_email = release.author_email\n bug_tracker = 'https://github.com/ipython/ipython/issues'\n super(IPAppCrashHandler,self).__init__(\n app, contact_name, contact_email, bug_tracker\n )\n\n def make_report(self,traceback):\n \"\"\"Return a string containing a crash report.\"\"\"\n\n sec_sep = self.section_sep\n # Start with parent report\n report = [super(IPAppCrashHandler, self).make_report(traceback)]\n # Add interactive-specific info we may have\n rpt_add = report.append\n try:\n rpt_add(sec_sep+\"History of session input:\")\n for line in self.app.shell.user_ns['_ih']:\n rpt_add(line)\n rpt_add('\\n*** Last line of input (may not be in above history):\\n')\n rpt_add(self.app.shell._last_input_line+'\\n')\n except:\n pass\n\n return ''.join(report)\n\n#-----------------------------------------------------------------------------\n# Aliases and Flags\n#-----------------------------------------------------------------------------\nflags = dict(base_flags)\nflags.update(shell_flags)\nfrontend_flags = {}\naddflag = lambda *args: frontend_flags.update(boolean_flag(*args))\naddflag('autoedit-syntax', 'TerminalInteractiveShell.autoedit_syntax',\n 'Turn on auto editing of files with syntax errors.',\n 'Turn off auto editing of files with syntax errors.'\n)\naddflag('simple-prompt', 'TerminalInteractiveShell.simple_prompt',\n \"Force simple minimal prompt using `raw_input`\",\n \"Use a rich interactive prompt with prompt_toolkit\",\n)\n\naddflag('banner', 'TerminalIPythonApp.display_banner',\n \"Display a banner upon starting IPython.\",\n \"Don't display a banner upon starting IPython.\"\n)\naddflag('confirm-exit', 'TerminalInteractiveShell.confirm_exit',\n \"\"\"Set to confirm when you try to exit IPython with an EOF (Control-D\n in Unix, Control-Z/Enter in Windows). 
By typing 'exit' or 'quit',\n you can force a direct exit without any confirmation.\"\"\",\n \"Don't prompt the user when exiting.\"\n)\naddflag('term-title', 'TerminalInteractiveShell.term_title',\n \"Enable auto setting the terminal title.\",\n \"Disable auto setting the terminal title.\"\n)\nclassic_config = Config()\nclassic_config.InteractiveShell.cache_size = 0\nclassic_config.PlainTextFormatter.pprint = False\nclassic_config.TerminalInteractiveShell.prompts_class='IPython.terminal.prompts.ClassicPrompts'\nclassic_config.InteractiveShell.separate_in = ''\nclassic_config.InteractiveShell.separate_out = ''\nclassic_config.InteractiveShell.separate_out2 = ''\nclassic_config.InteractiveShell.colors = 'NoColor'\nclassic_config.InteractiveShell.xmode = 'Plain'\n\nfrontend_flags['classic']=(\n classic_config,\n \"Gives IPython a similar feel to the classic Python prompt.\"\n)\n# # log doesn't make so much sense this way anymore\n# paa('--log','-l',\n# action='store_true', dest='InteractiveShell.logstart',\n# help=\"Start logging to the default log file (./ipython_log.py).\")\n#\n# # quick is harder to implement\nfrontend_flags['quick']=(\n {'TerminalIPythonApp' : {'quick' : True}},\n \"Enable quick startup with no config files.\"\n)\n\nfrontend_flags['i'] = (\n {'TerminalIPythonApp' : {'force_interact' : True}},\n \"\"\"If running code from the command line, become interactive afterwards.\n It is often useful to follow this with `--` to treat remaining flags as\n script arguments.\n \"\"\"\n)\nflags.update(frontend_flags)\n\naliases = dict(base_aliases)\naliases.update(shell_aliases) # type: ignore[arg-type]\n\n#-----------------------------------------------------------------------------\n# Main classes and functions\n#-----------------------------------------------------------------------------\n\n\nclass LocateIPythonApp(BaseIPythonApplication):\n description = \"\"\"print the path to the IPython dir\"\"\"\n subcommands = dict(\n profile=('IPython.core.profileapp.ProfileLocate',\n \"print the path to an IPython profile directory\",\n ),\n )\n def start(self):\n if self.subapp is not None:\n return self.subapp.start()\n else:\n print(self.ipython_dir)\n\n\nclass TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp):\n name = u'ipython'\n description = usage.cl_usage\n crash_handler_class = IPAppCrashHandler # typing: ignore[assignment]\n examples = _examples\n\n flags = flags\n aliases = aliases\n classes = List()\n\n interactive_shell_class = Type(\n klass=object, # use default_value otherwise which only allow subclasses.\n default_value=TerminalInteractiveShell,\n help=\"Class to use to instantiate the TerminalInteractiveShell object. Useful for custom Frontends\"\n ).tag(config=True)\n\n @default('classes')\n def _classes_default(self):\n \"\"\"This has to be in a method, for TerminalIPythonApp to be available.\"\"\"\n return [\n InteractiveShellApp, # ShellApp comes before TerminalApp, because\n self.__class__, # it will also affect subclasses (e.g. 
QtConsole)\n TerminalInteractiveShell,\n HistoryManager,\n MagicsManager,\n ProfileDir,\n PlainTextFormatter,\n IPCompleter,\n ScriptMagics,\n LoggingMagics,\n StoreMagics,\n ]\n\n subcommands = dict(\n profile = (\"IPython.core.profileapp.ProfileApp\",\n \"Create and manage IPython profiles.\"\n ),\n kernel = (\"ipykernel.kernelapp.IPKernelApp\",\n \"Start a kernel without an attached frontend.\"\n ),\n locate=('IPython.terminal.ipapp.LocateIPythonApp',\n LocateIPythonApp.description\n ),\n history=('IPython.core.historyapp.HistoryApp',\n \"Manage the IPython history database.\"\n ),\n )\n\n\n # *do* autocreate requested profile, but don't create the config file.\n auto_create=Bool(True)\n # configurables\n quick = Bool(False,\n help=\"\"\"Start IPython quickly by skipping the loading of config files.\"\"\"\n ).tag(config=True)\n @observe('quick')\n def _quick_changed(self, change):\n if change['new']:\n self.load_config_file = lambda *a, **kw: None\n\n display_banner = Bool(True,\n help=\"Whether to display a banner upon starting IPython.\"\n ).tag(config=True)\n\n # if there is code of files to run from the cmd line, don't interact\n # unless the --i flag (App.force_interact) is true.\n force_interact = Bool(False,\n help=\"\"\"If a command or file is given via the command-line,\n e.g. 'ipython foo.py', start an interactive shell after executing the\n file or command.\"\"\"\n ).tag(config=True)\n @observe('force_interact')\n def _force_interact_changed(self, change):\n if change['new']:\n self.interact = True\n\n @observe('file_to_run', 'code_to_run', 'module_to_run')\n def _file_to_run_changed(self, change):\n new = change['new']\n if new:\n self.something_to_run = True\n if new and not self.force_interact:\n self.interact = False\n\n # internal, not-configurable\n something_to_run=Bool(False)\n\n @catch_config_error\n def initialize(self, argv=None):\n \"\"\"Do actions after construct, but before starting the app.\"\"\"\n super(TerminalIPythonApp, self).initialize(argv)\n if self.subapp is not None:\n # don't bother initializing further, starting subapp\n return\n # print self.extra_args\n if self.extra_args and not self.something_to_run:\n self.file_to_run = self.extra_args[0]\n self.init_path()\n # create the shell\n self.init_shell()\n # and draw the banner\n self.init_banner()\n # Now a variety of things that happen after the banner is printed.\n self.init_gui_pylab()\n self.init_extensions()\n self.init_code()\n\n def init_shell(self):\n \"\"\"initialize the InteractiveShell instance\"\"\"\n # Create an InteractiveShell instance.\n # shell.display_banner should always be False for the terminal\n # based app, because we call shell.show_banner() by hand below\n # so the banner shows *before* all extension loading stuff.\n self.shell = self.interactive_shell_class.instance(parent=self,\n profile_dir=self.profile_dir,\n ipython_dir=self.ipython_dir, user_ns=self.user_ns)\n self.shell.configurables.append(self)\n\n def init_banner(self):\n \"\"\"optionally display the banner\"\"\"\n if self.display_banner and self.interact:\n self.shell.show_banner()\n # Make sure there is a space below the banner.\n if self.log_level <= logging.INFO: print()\n\n def _pylab_changed(self, name, old, new):\n \"\"\"Replace --pylab='inline' with --pylab='auto'\"\"\"\n if new == 'inline':\n warnings.warn(\"'inline' not available as pylab backend, \"\n \"using 'auto' instead.\")\n self.pylab = 'auto'\n\n def start(self):\n if self.subapp is not None:\n return self.subapp.start()\n # perform any prexec 
steps:\n if self.interact:\n self.log.debug(\"Starting IPython's mainloop...\")\n self.shell.mainloop()\n else:\n self.log.debug(\"IPython not interactive...\")\n self.shell.restore_term_title()\n if not self.shell.last_execution_succeeded:\n sys.exit(1)\n\ndef load_default_config(ipython_dir=None):\n \"\"\"Load the default config file from the default ipython_dir.\n\n This is useful for embedded shells.\n \"\"\"\n if ipython_dir is None:\n ipython_dir = get_ipython_dir()\n\n profile_dir = os.path.join(ipython_dir, 'profile_default')\n app = TerminalIPythonApp()\n app.config_file_paths.append(profile_dir)\n app.load_config_file()\n return app.config\n\nlaunch_new_instance = TerminalIPythonApp.launch_instance\n\n\nif __name__ == '__main__':\n launch_new_instance()\n", "path": "IPython/terminal/ipapp.py"}], "after_files": [{"content": "# encoding: utf-8\n\"\"\"\nThe :class:`~traitlets.config.application.Application` object for the command\nline :command:`ipython` program.\n\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n\nimport logging\nimport os\nimport sys\nimport warnings\n\nfrom traitlets.config.loader import Config\nfrom traitlets.config.application import boolean_flag, catch_config_error\nfrom IPython.core import release\nfrom IPython.core import usage\nfrom IPython.core.completer import IPCompleter\nfrom IPython.core.crashhandler import CrashHandler\nfrom IPython.core.formatters import PlainTextFormatter\nfrom IPython.core.history import HistoryManager\nfrom IPython.core.application import (\n ProfileDir, BaseIPythonApplication, base_flags, base_aliases\n)\nfrom IPython.core.magic import MagicsManager\nfrom IPython.core.magics import (\n ScriptMagics, LoggingMagics\n)\nfrom IPython.core.shellapp import (\n InteractiveShellApp, shell_flags, shell_aliases\n)\nfrom IPython.extensions.storemagic import StoreMagics\nfrom .interactiveshell import TerminalInteractiveShell\nfrom IPython.paths import get_ipython_dir\nfrom traitlets import (\n Bool, List, default, observe, Type\n)\n\n#-----------------------------------------------------------------------------\n# Globals, utilities and helpers\n#-----------------------------------------------------------------------------\n\n_examples = \"\"\"\nipython --matplotlib # enable matplotlib integration\nipython --matplotlib=qt # enable matplotlib integration with qt4 backend\n\nipython --log-level=DEBUG # set logging to DEBUG\nipython --profile=foo # start with profile foo\n\nipython profile create foo # create profile foo w/ default config files\nipython help profile # show the help for the profile subcmd\n\nipython locate # print the path to the IPython directory\nipython locate profile foo # print the path to the directory for profile `foo`\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Crash handler for this application\n#-----------------------------------------------------------------------------\n\nclass IPAppCrashHandler(CrashHandler):\n \"\"\"sys.excepthook for IPython itself, leaves a detailed report on disk.\"\"\"\n\n def __init__(self, app):\n contact_name = release.author\n contact_email = release.author_email\n bug_tracker = 'https://github.com/ipython/ipython/issues'\n super(IPAppCrashHandler,self).__init__(\n app, contact_name, contact_email, bug_tracker\n )\n\n def make_report(self,traceback):\n \"\"\"Return a string containing a crash report.\"\"\"\n\n sec_sep = self.section_sep\n # Start with parent report\n report = 
[super(IPAppCrashHandler, self).make_report(traceback)]\n # Add interactive-specific info we may have\n rpt_add = report.append\n try:\n rpt_add(sec_sep+\"History of session input:\")\n for line in self.app.shell.user_ns['_ih']:\n rpt_add(line)\n rpt_add('\\n*** Last line of input (may not be in above history):\\n')\n rpt_add(self.app.shell._last_input_line+'\\n')\n except:\n pass\n\n return ''.join(report)\n\n#-----------------------------------------------------------------------------\n# Aliases and Flags\n#-----------------------------------------------------------------------------\nflags = dict(base_flags)\nflags.update(shell_flags)\nfrontend_flags = {}\naddflag = lambda *args: frontend_flags.update(boolean_flag(*args))\naddflag('autoedit-syntax', 'TerminalInteractiveShell.autoedit_syntax',\n 'Turn on auto editing of files with syntax errors.',\n 'Turn off auto editing of files with syntax errors.'\n)\naddflag('simple-prompt', 'TerminalInteractiveShell.simple_prompt',\n \"Force simple minimal prompt using `raw_input`\",\n \"Use a rich interactive prompt with prompt_toolkit\",\n)\n\naddflag('banner', 'TerminalIPythonApp.display_banner',\n \"Display a banner upon starting IPython.\",\n \"Don't display a banner upon starting IPython.\"\n)\naddflag('confirm-exit', 'TerminalInteractiveShell.confirm_exit',\n \"\"\"Set to confirm when you try to exit IPython with an EOF (Control-D\n in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',\n you can force a direct exit without any confirmation.\"\"\",\n \"Don't prompt the user when exiting.\"\n)\naddflag('term-title', 'TerminalInteractiveShell.term_title',\n \"Enable auto setting the terminal title.\",\n \"Disable auto setting the terminal title.\"\n)\nclassic_config = Config()\nclassic_config.InteractiveShell.cache_size = 0\nclassic_config.PlainTextFormatter.pprint = False\nclassic_config.TerminalInteractiveShell.prompts_class='IPython.terminal.prompts.ClassicPrompts'\nclassic_config.InteractiveShell.separate_in = ''\nclassic_config.InteractiveShell.separate_out = ''\nclassic_config.InteractiveShell.separate_out2 = ''\nclassic_config.InteractiveShell.colors = 'NoColor'\nclassic_config.InteractiveShell.xmode = 'Plain'\n\nfrontend_flags['classic']=(\n classic_config,\n \"Gives IPython a similar feel to the classic Python prompt.\"\n)\n# # log doesn't make so much sense this way anymore\n# paa('--log','-l',\n# action='store_true', dest='InteractiveShell.logstart',\n# help=\"Start logging to the default log file (./ipython_log.py).\")\n#\n# # quick is harder to implement\nfrontend_flags['quick']=(\n {'TerminalIPythonApp' : {'quick' : True}},\n \"Enable quick startup with no config files.\"\n)\n\nfrontend_flags['i'] = (\n {'TerminalIPythonApp' : {'force_interact' : True}},\n \"\"\"If running code from the command line, become interactive afterwards.\n It is often useful to follow this with `--` to treat remaining flags as\n script arguments.\n \"\"\"\n)\nflags.update(frontend_flags)\n\naliases = dict(base_aliases)\naliases.update(shell_aliases) # type: ignore[arg-type]\n\n#-----------------------------------------------------------------------------\n# Main classes and functions\n#-----------------------------------------------------------------------------\n\n\nclass LocateIPythonApp(BaseIPythonApplication):\n description = \"\"\"print the path to the IPython dir\"\"\"\n subcommands = dict(\n profile=('IPython.core.profileapp.ProfileLocate',\n \"print the path to an IPython profile directory\",\n ),\n )\n def start(self):\n if 
self.subapp is not None:\n return self.subapp.start()\n else:\n print(self.ipython_dir)\n\n\nclass TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp):\n name = u'ipython'\n description = usage.cl_usage\n crash_handler_class = IPAppCrashHandler # typing: ignore[assignment]\n examples = _examples\n\n flags = flags\n aliases = aliases\n classes = List()\n\n interactive_shell_class = Type(\n klass=object, # use default_value otherwise which only allow subclasses.\n default_value=TerminalInteractiveShell,\n help=\"Class to use to instantiate the TerminalInteractiveShell object. Useful for custom Frontends\"\n ).tag(config=True)\n\n @default('classes')\n def _classes_default(self):\n \"\"\"This has to be in a method, for TerminalIPythonApp to be available.\"\"\"\n return [\n InteractiveShellApp, # ShellApp comes before TerminalApp, because\n self.__class__, # it will also affect subclasses (e.g. QtConsole)\n TerminalInteractiveShell,\n HistoryManager,\n MagicsManager,\n ProfileDir,\n PlainTextFormatter,\n IPCompleter,\n ScriptMagics,\n LoggingMagics,\n StoreMagics,\n ]\n\n subcommands = dict(\n profile = (\"IPython.core.profileapp.ProfileApp\",\n \"Create and manage IPython profiles.\"\n ),\n kernel = (\"ipykernel.kernelapp.IPKernelApp\",\n \"Start a kernel without an attached frontend.\"\n ),\n locate=('IPython.terminal.ipapp.LocateIPythonApp',\n LocateIPythonApp.description\n ),\n history=('IPython.core.historyapp.HistoryApp',\n \"Manage the IPython history database.\"\n ),\n )\n\n\n # *do* autocreate requested profile, but don't create the config file.\n auto_create=Bool(True)\n # configurables\n quick = Bool(False,\n help=\"\"\"Start IPython quickly by skipping the loading of config files.\"\"\"\n ).tag(config=True)\n @observe('quick')\n def _quick_changed(self, change):\n if change['new']:\n self.load_config_file = lambda *a, **kw: None\n\n display_banner = Bool(True,\n help=\"Whether to display a banner upon starting IPython.\"\n ).tag(config=True)\n\n # if there is code of files to run from the cmd line, don't interact\n # unless the --i flag (App.force_interact) is true.\n force_interact = Bool(False,\n help=\"\"\"If a command or file is given via the command-line,\n e.g. 
'ipython foo.py', start an interactive shell after executing the\n file or command.\"\"\"\n ).tag(config=True)\n @observe('force_interact')\n def _force_interact_changed(self, change):\n if change['new']:\n self.interact = True\n\n @observe('file_to_run', 'code_to_run', 'module_to_run')\n def _file_to_run_changed(self, change):\n new = change['new']\n if new:\n self.something_to_run = True\n if new and not self.force_interact:\n self.interact = False\n\n # internal, not-configurable\n something_to_run=Bool(False)\n\n @catch_config_error\n def initialize(self, argv=None):\n \"\"\"Do actions after construct, but before starting the app.\"\"\"\n super(TerminalIPythonApp, self).initialize(argv)\n if self.subapp is not None:\n # don't bother initializing further, starting subapp\n return\n # print self.extra_args\n if self.extra_args and not self.something_to_run:\n self.file_to_run = self.extra_args[0]\n self.init_path()\n # create the shell\n self.init_shell()\n # and draw the banner\n self.init_banner()\n # Now a variety of things that happen after the banner is printed.\n self.init_gui_pylab()\n self.init_extensions()\n self.init_code()\n\n def init_shell(self):\n \"\"\"initialize the InteractiveShell instance\"\"\"\n # Create an InteractiveShell instance.\n # shell.display_banner should always be False for the terminal\n # based app, because we call shell.show_banner() by hand below\n # so the banner shows *before* all extension loading stuff.\n self.shell = self.interactive_shell_class.instance(parent=self,\n profile_dir=self.profile_dir,\n ipython_dir=self.ipython_dir, user_ns=self.user_ns)\n self.shell.configurables.append(self)\n\n def init_banner(self):\n \"\"\"optionally display the banner\"\"\"\n if self.display_banner and self.interact:\n self.shell.show_banner()\n # Make sure there is a space below the banner.\n if self.log_level <= logging.INFO: print()\n\n def _pylab_changed(self, name, old, new):\n \"\"\"Replace --pylab='inline' with --pylab='auto'\"\"\"\n if new == 'inline':\n warnings.warn(\"'inline' not available as pylab backend, \"\n \"using 'auto' instead.\")\n self.pylab = 'auto'\n\n def start(self):\n if self.subapp is not None:\n return self.subapp.start()\n # perform any prexec steps:\n if self.interact:\n self.log.debug(\"Starting IPython's mainloop...\")\n self.shell.mainloop()\n else:\n self.log.debug(\"IPython not interactive...\")\n self.shell.restore_term_title()\n if not self.shell.last_execution_succeeded:\n sys.exit(1)\n\ndef load_default_config(ipython_dir=None):\n \"\"\"Load the default config file from the default ipython_dir.\n\n This is useful for embedded shells.\n \"\"\"\n if ipython_dir is None:\n ipython_dir = get_ipython_dir()\n\n profile_dir = os.path.join(ipython_dir, 'profile_default')\n app = TerminalIPythonApp()\n app.config_file_paths.append(profile_dir)\n app.load_config_file()\n return app.config\n\nlaunch_new_instance = TerminalIPythonApp.launch_instance\n", "path": "IPython/terminal/ipapp.py"}]} | 4,023 | 127 |
gh_patches_debug_11688 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-272 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Core plugins zips to published artifacts.
The plugins built with the core repo should be bundled as zips and hosted at artifacts.opensearch.org.
This task involves updating OpenSearch's build.sh script to build these artifacts and include them in the component's artifact list.

Right now we also have brittle logic that assumes any component with "plugins" in its artifacts list is a plugin repository. This should be updated to identify the min bundle component in another way, perhaps with a separate artifact folder such as "min-bundle".
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bundle-workflow/python/build_workflow/builder.py`
Content:
```
1 # Copyright OpenSearch Contributors.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import os
5
6 '''
7 This class is responsible for executing the build for a component and passing the results to a build recorder.
8 It will notify the build recorder of build information such as repository and git ref, and any artifacts generated by the build.
9 Artifacts found in "<build root>/artifacts/<maven|plugins|libs|bundle>" will be recognized and recorded.
10 '''
11 class Builder:
12 def __init__(self, component_name, git_repo, script_finder, build_recorder):
13 '''
14 Construct a new Builder instance.
15 :param component_name: The name of the component to build.
16 :param git_repo: A GitRepository instance containing the checked-out code.
17 :param script_finder: The ScriptFinder to use for finding build.sh scripts.
18 :param build_recorder: The build recorder that will capture build information and artifacts.
19 '''
20
21 self.component_name = component_name
22 self.git_repo = git_repo
23 self.script_finder = script_finder
24 self.build_recorder = build_recorder
25 self.output_path = 'artifacts'
26
27 def build(self, version, arch, snapshot):
28 build_script = self.script_finder.find_build_script(self.component_name, self.git_repo.dir)
29 build_command = f'{build_script} -v {version} -a {arch} -s {str(snapshot).lower()} -o {self.output_path}'
30 self.git_repo.execute(build_command)
31 self.build_recorder.record_component(self.component_name, self.git_repo)
32
33 def export_artifacts(self):
34 artifacts_dir = os.path.realpath(os.path.join(self.git_repo.dir, self.output_path))
35 for artifact_type in ["maven", "bundle", "plugins", "libs"]:
36 for dir, dirs, files in os.walk(os.path.join(artifacts_dir, artifact_type)):
37 for file_name in files:
38 absolute_path = os.path.join(dir, file_name)
39 relative_path = os.path.relpath(absolute_path, artifacts_dir)
40 self.build_recorder.record_artifact(self.component_name, artifact_type, relative_path, absolute_path)
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bundle-workflow/python/build_workflow/builder.py b/bundle-workflow/python/build_workflow/builder.py
--- a/bundle-workflow/python/build_workflow/builder.py
+++ b/bundle-workflow/python/build_workflow/builder.py
@@ -32,7 +32,7 @@
def export_artifacts(self):
artifacts_dir = os.path.realpath(os.path.join(self.git_repo.dir, self.output_path))
- for artifact_type in ["maven", "bundle", "plugins", "libs"]:
+ for artifact_type in ["maven", "bundle", "plugins", "libs", "core-plugins"]:
for dir, dirs, files in os.walk(os.path.join(artifacts_dir, artifact_type)):
for file_name in files:
absolute_path = os.path.join(dir, file_name)
| {"golden_diff": "diff --git a/bundle-workflow/python/build_workflow/builder.py b/bundle-workflow/python/build_workflow/builder.py\n--- a/bundle-workflow/python/build_workflow/builder.py\n+++ b/bundle-workflow/python/build_workflow/builder.py\n@@ -32,7 +32,7 @@\n \n def export_artifacts(self):\n artifacts_dir = os.path.realpath(os.path.join(self.git_repo.dir, self.output_path))\n- for artifact_type in [\"maven\", \"bundle\", \"plugins\", \"libs\"]:\n+ for artifact_type in [\"maven\", \"bundle\", \"plugins\", \"libs\", \"core-plugins\"]:\n for dir, dirs, files in os.walk(os.path.join(artifacts_dir, artifact_type)):\n for file_name in files:\n absolute_path = os.path.join(dir, file_name)\n", "issue": "Add Core plugins zips to published artifacts.\nThe plugins built with the core repo should be bundled as zips and hosted at artifacts.opensearch.org.\r\n\r\nThis task involves updating Opensearch's build.sh script to build and include these artifacts with the component's artifact list.\r\n\r\nRight now we also have brittle logic that assumes any component with \"plugins\" in their artifacts list is a plugin repository. This should be updated to identify the min bundle component in another way. Perhaps with a separate artifact folder for \"min-bundle\"\n", "before_files": [{"content": "# Copyright OpenSearch Contributors.\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\n\n'''\nThis class is responsible for executing the build for a component and passing the results to a build recorder.\nIt will notify the build recorder of build information such as repository and git ref, and any artifacts generated by the build.\nArtifacts found in \"<build root>/artifacts/<maven|plugins|libs|bundle>\" will be recognized and recorded.\n'''\nclass Builder:\n def __init__(self, component_name, git_repo, script_finder, build_recorder):\n '''\n Construct a new Builder instance.\n :param component_name: The name of the component to build.\n :param git_repo: A GitRepository instance containing the checked-out code.\n :param script_finder: The ScriptFinder to use for finding build.sh scripts.\n :param build_recorder: The build recorder that will capture build information and artifacts.\n '''\n\n self.component_name = component_name\n self.git_repo = git_repo\n self.script_finder = script_finder\n self.build_recorder = build_recorder\n self.output_path = 'artifacts'\n\n def build(self, version, arch, snapshot):\n build_script = self.script_finder.find_build_script(self.component_name, self.git_repo.dir)\n build_command = f'{build_script} -v {version} -a {arch} -s {str(snapshot).lower()} -o {self.output_path}'\n self.git_repo.execute(build_command)\n self.build_recorder.record_component(self.component_name, self.git_repo)\n\n def export_artifacts(self):\n artifacts_dir = os.path.realpath(os.path.join(self.git_repo.dir, self.output_path))\n for artifact_type in [\"maven\", \"bundle\", \"plugins\", \"libs\"]:\n for dir, dirs, files in os.walk(os.path.join(artifacts_dir, artifact_type)):\n for file_name in files:\n absolute_path = os.path.join(dir, file_name)\n relative_path = os.path.relpath(absolute_path, artifacts_dir)\n self.build_recorder.record_artifact(self.component_name, artifact_type, relative_path, absolute_path)\n", "path": "bundle-workflow/python/build_workflow/builder.py"}], "after_files": [{"content": "# Copyright OpenSearch Contributors.\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\n\n'''\nThis class is responsible for executing the build for a component and passing the results to a build 
recorder.\nIt will notify the build recorder of build information such as repository and git ref, and any artifacts generated by the build.\nArtifacts found in \"<build root>/artifacts/<maven|plugins|libs|bundle>\" will be recognized and recorded.\n'''\nclass Builder:\n def __init__(self, component_name, git_repo, script_finder, build_recorder):\n '''\n Construct a new Builder instance.\n :param component_name: The name of the component to build.\n :param git_repo: A GitRepository instance containing the checked-out code.\n :param script_finder: The ScriptFinder to use for finding build.sh scripts.\n :param build_recorder: The build recorder that will capture build information and artifacts.\n '''\n\n self.component_name = component_name\n self.git_repo = git_repo\n self.script_finder = script_finder\n self.build_recorder = build_recorder\n self.output_path = 'artifacts'\n\n def build(self, version, arch, snapshot):\n build_script = self.script_finder.find_build_script(self.component_name, self.git_repo.dir)\n build_command = f'{build_script} -v {version} -a {arch} -s {str(snapshot).lower()} -o {self.output_path}'\n self.git_repo.execute(build_command)\n self.build_recorder.record_component(self.component_name, self.git_repo)\n\n def export_artifacts(self):\n artifacts_dir = os.path.realpath(os.path.join(self.git_repo.dir, self.output_path))\n for artifact_type in [\"maven\", \"bundle\", \"plugins\", \"libs\", \"core-plugins\"]:\n for dir, dirs, files in os.walk(os.path.join(artifacts_dir, artifact_type)):\n for file_name in files:\n absolute_path = os.path.join(dir, file_name)\n relative_path = os.path.relpath(absolute_path, artifacts_dir)\n self.build_recorder.record_artifact(self.component_name, artifact_type, relative_path, absolute_path)\n", "path": "bundle-workflow/python/build_workflow/builder.py"}]} | 889 | 173 |
gh_patches_debug_390 | rasdani/github-patches | git_diff | google__turbinia-616 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add retries to tox
Tox fails when checking links in our docs if a link is temporarily down or unresponsive. Adding retries to the Sphinx config should take care of that.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12 #
13 # import os
14 # import sys
15 # sys.path.insert(0, os.path.abspath('.'))
16
17 from __future__ import unicode_literals
18 import re
19
20 from recommonmark.parser import CommonMarkParser
21 from recommonmark.transform import AutoStructify
22 from docutils import nodes, transforms
23
24 # -- Project information -----------------------------------------------------
25
26 project = 'Turbinia'
27 copyright = '2020, Google Inc'
28 author = 'Turbinia maintainers'
29
30 # -- General configuration ---------------------------------------------------
31
32 # Add any Sphinx extension module names here, as strings. They can be
33 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
34 # ones.
35 extensions = [
36 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',
37 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',
38 'recommonmark'
39 ]
40
41 # Add any paths that contain templates here, relative to this directory.
42 templates_path = ['_templates']
43
44 # List of patterns, relative to source directory, that match files and
45 # directories to ignore when looking for source files.
46 # This pattern also affects html_static_path and html_extra_path.
47 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']
48
49 # -- Options for HTML output -------------------------------------------------
50
51 # The theme to use for HTML and HTML Help pages. See the documentation for
52 # a list of builtin themes.
53 #
54 html_theme = 'sphinx_rtd_theme'
55
56 # The master toctree document.
57 master_doc = 'index'
58
59 # The name of the Pygments (syntax highlighting) style to use.
60 pygments_style = 'sphinx'
61
62 # Add any paths that contain custom static files (such as style sheets) here,
63 # relative to this directory. They are copied after the builtin static files,
64 # so a file named "default.css" will overwrite the builtin "default.css".
65 html_static_path = ['_static']
66
67 # The default sidebars (for documents that don't match any pattern) are
68 # defined by theme itself. Builtin themes are using these templates by
69 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
70 # 'searchbox.html']``.
71 #
72 html_sidebars = {
73 '**': [
74 'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',
75 'searchbox.html'
76 ]
77 }
78
79
80 # Output file base name for HTML help builder.
81 htmlhelp_basename = 'turbiniadoc'
82
83 html_logo = "images/turbinia-logo.jpg"
84
85
86 class ProcessLink(transforms.Transform):
87 """Transform definition to parse .md references to internal pages."""
88
89 default_priority = 1000
90
91 def find_replace(self, node):
92 """Parses URIs containing .md and replaces them with their HTML page."""
93 if isinstance(node, nodes.reference) and 'refuri' in node:
94 r = node['refuri']
95 if r.endswith('.md'):
96 r = r[:-3] + '.html'
97 node['refuri'] = r
98
99 return node
100
101 def traverse(self, node):
102 """Traverse the document tree rooted at node.
103 node : docutil node
104 current root node to traverse
105 """
106 self.find_replace(node)
107
108 for c in node.children:
109 self.traverse(c)
110
111 # pylint: disable=arguments-differ,attribute-defined-outside-init
112 # this was taken from GRR's config file for documentation
113 def apply(self):
114 self.current_level = 0
115 self.traverse(self.document)
116
117
118 def setup(app):
119 """Add custom parsers to Sphinx generation."""
120 app.add_config_value(
121 'recommonmark_config', {
122 'enable_auto_doc_ref': False,
123 }, True)
124 app.add_transform(AutoStructify)
125 app.add_transform(ProcessLink)
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -76,6 +76,8 @@
]
}
+# Adding retries to linkchecks before declaring a link broken
+linkcheck_retries = 3
# Output file base name for HTML help builder.
htmlhelp_basename = 'turbiniadoc'
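
For context, `linkcheck_retries` sits alongside the other options Sphinx's linkcheck builder reads from `conf.py`; only the retry setting comes from the patch, and the other two lines are assumptions shown for illustration:
```python
# conf.py -- consumed by `sphinx-build -b linkcheck`, which is what tox invokes
linkcheck_retries = 3                            # retry transient failures before reporting a broken link
linkcheck_timeout = 30                           # per-request timeout in seconds (illustrative value)
linkcheck_ignore = [r"https://example\.com/.*"]  # regexes for hosts to skip entirely (illustrative)
```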
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -76,6 +76,8 @@\n ]\n }\n \n+# Adding retries to linkchecks before declaring a link broken\n+linkcheck_retries = 3\n \n # Output file base name for HTML help builder.\n htmlhelp_basename = 'turbiniadoc'\n", "issue": "Add retries to tox\nTox fails when trying to check links within our docs if the link is temporarily down/unresponsive. Adding retries to sphinx config should take care of that.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom __future__ import unicode_literals\nimport re\n\nfrom recommonmark.parser import CommonMarkParser\nfrom recommonmark.transform import AutoStructify\nfrom docutils import nodes, transforms\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Turbinia'\ncopyright = '2020, Google Inc'\nauthor = 'Turbinia maintainers'\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',\n 'recommonmark'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. 
Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n '**': [\n 'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',\n 'searchbox.html'\n ]\n}\n\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'turbiniadoc'\n\nhtml_logo = \"images/turbinia-logo.jpg\"\n\n\nclass ProcessLink(transforms.Transform):\n \"\"\"Transform definition to parse .md references to internal pages.\"\"\"\n\n default_priority = 1000\n\n def find_replace(self, node):\n \"\"\"Parses URIs containing .md and replaces them with their HTML page.\"\"\"\n if isinstance(node, nodes.reference) and 'refuri' in node:\n r = node['refuri']\n if r.endswith('.md'):\n r = r[:-3] + '.html'\n node['refuri'] = r\n\n return node\n\n def traverse(self, node):\n \"\"\"Traverse the document tree rooted at node.\n node : docutil node\n current root node to traverse\n \"\"\"\n self.find_replace(node)\n\n for c in node.children:\n self.traverse(c)\n\n # pylint: disable=arguments-differ,attribute-defined-outside-init\n # this was taken from GRR's config file for documentation\n def apply(self):\n self.current_level = 0\n self.traverse(self.document)\n\n\ndef setup(app):\n \"\"\"Add custom parsers to Sphinx generation.\"\"\"\n app.add_config_value(\n 'recommonmark_config', {\n 'enable_auto_doc_ref': False,\n }, True)\n app.add_transform(AutoStructify)\n app.add_transform(ProcessLink)\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom __future__ import unicode_literals\nimport re\n\nfrom recommonmark.parser import CommonMarkParser\nfrom recommonmark.transform import AutoStructify\nfrom docutils import nodes, transforms\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Turbinia'\ncopyright = '2020, Google Inc'\nauthor = 'Turbinia maintainers'\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',\n 'recommonmark'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n '**': [\n 'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',\n 'searchbox.html'\n ]\n}\n\n# Adding retries to linkchecks before declaring a link broken\nlinkcheck_retries = 3\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'turbiniadoc'\n\nhtml_logo = \"images/turbinia-logo.jpg\"\n\n\nclass ProcessLink(transforms.Transform):\n \"\"\"Transform definition to parse .md references to internal pages.\"\"\"\n\n default_priority = 1000\n\n def find_replace(self, node):\n \"\"\"Parses URIs containing .md and replaces them with their HTML page.\"\"\"\n if isinstance(node, nodes.reference) and 'refuri' in node:\n r = node['refuri']\n if r.endswith('.md'):\n r = r[:-3] + '.html'\n node['refuri'] = r\n\n return node\n\n def traverse(self, node):\n \"\"\"Traverse the document tree rooted at node.\n node : docutil node\n current root node to traverse\n \"\"\"\n self.find_replace(node)\n\n for c in node.children:\n self.traverse(c)\n\n # pylint: disable=arguments-differ,attribute-defined-outside-init\n # this was taken from GRR's config file for documentation\n def apply(self):\n self.current_level = 0\n self.traverse(self.document)\n\n\ndef setup(app):\n \"\"\"Add custom parsers to Sphinx generation.\"\"\"\n app.add_config_value(\n 'recommonmark_config', {\n 'enable_auto_doc_ref': False,\n }, True)\n app.add_transform(AutoStructify)\n app.add_transform(ProcessLink)\n", "path": "docs/conf.py"}]} | 1,511 | 82 |
gh_patches_debug_34066 | rasdani/github-patches | git_diff | freedomofpress__securedrop-1309 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NetworkManager hook notifications broken on Tails 2.x
The invocation of `notify-send` in `securedrop_init.py` does not show a notification in Tails 2.x like it did in Tails 1.x. This is due to dbus-related changes in Debian Jessie, and is a known issue as a quick [search](https://labs.riseup.net/code/projects/tails/search?utf8=%E2%9C%93&changesets=1&q=notify-send) of the Tails issue tracker demonstrates.
Furthermore, it looks like Tails has a special wrapper script, `tails-notify-user`, specifically meant for the use case of displaying notifications to the user from background scripts running as different users, so we should just use that instead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tails_files/securedrop_init.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4 import sys
5 import subprocess
6
7
8 if __name__ == '__main__':
9 # check for root
10 if os.geteuid() != 0:
11 sys.exit('You need to run this as root')
12
13 # paths
14 path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'
15 path_torrc_backup = '/etc/tor/torrc.bak'
16 path_torrc = '/etc/tor/torrc'
17
18 # load torrc_additions
19 if os.path.isfile(path_torrc_additions):
20 torrc_additions = open(path_torrc_additions).read()
21 else:
22 sys.exit('Error opening {0} for reading'.format(path_torrc_additions))
23
24 # load torrc
25 if os.path.isfile(path_torrc_backup):
26 torrc = open(path_torrc_backup).read()
27 else:
28 if os.path.isfile(path_torrc):
29 torrc = open(path_torrc).read()
30 else:
31 sys.exit('Error opening {0} for reading'.format(path_torrc))
32
33 # save a backup
34 open(path_torrc_backup, 'w').write(torrc)
35
36 # append the additions
37 open(path_torrc, 'w').write(torrc + torrc_additions)
38
39 # reload tor
40 subprocess.call(['/usr/sbin/service', 'tor', 'reload'])
41
42 # success
43 subprocess.call(['/usr/bin/sudo', '-u', 'amnesia', '/usr/bin/notify-send', '-i', '/home/amnesia/Persistent/.securedrop/securedrop_icon.png',
44 'Updated torrc!', 'You can now connect to your SecureDrop\ndocument interface.'])
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tails_files/securedrop_init.py b/tails_files/securedrop_init.py
--- a/tails_files/securedrop_init.py
+++ b/tails_files/securedrop_init.py
@@ -1,44 +1,47 @@
-#!/usr/bin/env python
+#!/usr/bin/python
import os
import sys
import subprocess
-if __name__ == '__main__':
- # check for root
- if os.geteuid() != 0:
- sys.exit('You need to run this as root')
+# check for root
+if os.geteuid() != 0:
+ sys.exit('You need to run this as root')
- # paths
- path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'
- path_torrc_backup = '/etc/tor/torrc.bak'
- path_torrc = '/etc/tor/torrc'
+# paths
+path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'
+path_torrc_backup = '/etc/tor/torrc.bak'
+path_torrc = '/etc/tor/torrc'
- # load torrc_additions
- if os.path.isfile(path_torrc_additions):
- torrc_additions = open(path_torrc_additions).read()
- else:
- sys.exit('Error opening {0} for reading'.format(path_torrc_additions))
+# load torrc_additions
+if os.path.isfile(path_torrc_additions):
+ torrc_additions = open(path_torrc_additions).read()
+else:
+ sys.exit('Error opening {0} for reading'.format(path_torrc_additions))
- # load torrc
- if os.path.isfile(path_torrc_backup):
- torrc = open(path_torrc_backup).read()
+# load torrc
+if os.path.isfile(path_torrc_backup):
+ torrc = open(path_torrc_backup).read()
+else:
+ if os.path.isfile(path_torrc):
+ torrc = open(path_torrc).read()
else:
- if os.path.isfile(path_torrc):
- torrc = open(path_torrc).read()
- else:
- sys.exit('Error opening {0} for reading'.format(path_torrc))
+ sys.exit('Error opening {0} for reading'.format(path_torrc))
- # save a backup
- open(path_torrc_backup, 'w').write(torrc)
+ # save a backup
+ open(path_torrc_backup, 'w').write(torrc)
- # append the additions
- open(path_torrc, 'w').write(torrc + torrc_additions)
+# append the additions
+open(path_torrc, 'w').write(torrc + torrc_additions)
- # reload tor
- subprocess.call(['/usr/sbin/service', 'tor', 'reload'])
+# reload tor
+try:
+ subprocess.check_call(['systemctl', 'reload', '[email protected]'])
+except subprocess.CalledProcessError:
+ sys.exit('Error reloading Tor')
- # success
- subprocess.call(['/usr/bin/sudo', '-u', 'amnesia', '/usr/bin/notify-send', '-i', '/home/amnesia/Persistent/.securedrop/securedrop_icon.png',
- 'Updated torrc!', 'You can now connect to your SecureDrop\ndocument interface.'])
+# notify the user
+subprocess.call(['tails-notify-user',
+ 'SecureDrop successfully auto-configured!',
+ 'You can now access the Document Interface.\nIf you are an admin, you can now SSH to the servers.'])
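
Besides switching to `tails-notify-user`, the patch also hardens the Tor reload: `subprocess.check_call` raises on a non-zero exit, so a failed reload aborts the script instead of being ignored. A minimal sketch of that pattern on its own (the unit name `[email protected]` is taken from the diff):
```python
import subprocess
import sys

try:
    # check_call raises CalledProcessError if systemctl exits non-zero
    subprocess.check_call(['systemctl', 'reload', '[email protected]'])
except subprocess.CalledProcessError:
    sys.exit('Error reloading Tor')
```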
| {"golden_diff": "diff --git a/tails_files/securedrop_init.py b/tails_files/securedrop_init.py\n--- a/tails_files/securedrop_init.py\n+++ b/tails_files/securedrop_init.py\n@@ -1,44 +1,47 @@\n-#!/usr/bin/env python\n+#!/usr/bin/python\n \n import os\n import sys\n import subprocess\n \n \n-if __name__ == '__main__':\n- # check for root\n- if os.geteuid() != 0:\n- sys.exit('You need to run this as root')\n+# check for root\n+if os.geteuid() != 0:\n+ sys.exit('You need to run this as root')\n \n- # paths\n- path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'\n- path_torrc_backup = '/etc/tor/torrc.bak'\n- path_torrc = '/etc/tor/torrc'\n+# paths\n+path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'\n+path_torrc_backup = '/etc/tor/torrc.bak'\n+path_torrc = '/etc/tor/torrc'\n \n- # load torrc_additions\n- if os.path.isfile(path_torrc_additions):\n- torrc_additions = open(path_torrc_additions).read()\n- else:\n- sys.exit('Error opening {0} for reading'.format(path_torrc_additions))\n+# load torrc_additions\n+if os.path.isfile(path_torrc_additions):\n+ torrc_additions = open(path_torrc_additions).read()\n+else:\n+ sys.exit('Error opening {0} for reading'.format(path_torrc_additions))\n \n- # load torrc\n- if os.path.isfile(path_torrc_backup):\n- torrc = open(path_torrc_backup).read()\n+# load torrc\n+if os.path.isfile(path_torrc_backup):\n+ torrc = open(path_torrc_backup).read()\n+else:\n+ if os.path.isfile(path_torrc):\n+ torrc = open(path_torrc).read()\n else:\n- if os.path.isfile(path_torrc):\n- torrc = open(path_torrc).read()\n- else:\n- sys.exit('Error opening {0} for reading'.format(path_torrc))\n+ sys.exit('Error opening {0} for reading'.format(path_torrc))\n \n- # save a backup\n- open(path_torrc_backup, 'w').write(torrc)\n+ # save a backup\n+ open(path_torrc_backup, 'w').write(torrc)\n \n- # append the additions\n- open(path_torrc, 'w').write(torrc + torrc_additions)\n+# append the additions\n+open(path_torrc, 'w').write(torrc + torrc_additions)\n \n- # reload tor\n- subprocess.call(['/usr/sbin/service', 'tor', 'reload'])\n+# reload tor\n+try:\n+ subprocess.check_call(['systemctl', 'reload', '[email protected]'])\n+except subprocess.CalledProcessError:\n+ sys.exit('Error reloading Tor')\n \n- # success\n- subprocess.call(['/usr/bin/sudo', '-u', 'amnesia', '/usr/bin/notify-send', '-i', '/home/amnesia/Persistent/.securedrop/securedrop_icon.png',\n- 'Updated torrc!', 'You can now connect to your SecureDrop\\ndocument interface.'])\n+# notify the user\n+subprocess.call(['tails-notify-user',\n+ 'SecureDrop successfully auto-configured!',\n+ 'You can now access the Document Interface.\\nIf you are an admin, you can now SSH to the servers.'])\n", "issue": "NetworkManager hook notifications broken on Tails 2.x\nThe invocation of `notify-send` in `securedrop_init.py` does not show a notification in Tails 2.x like it did in Tails 1.x. 
This is due to dbus-related changes in Debian Jessie, and is a known issue as a quick [search](https://labs.riseup.net/code/projects/tails/search?utf8=%E2%9C%93&changesets=1&q=notify-send) of the Tails issue tracker demonstrates.\n\nFurthermore, it looks like Tails has a special wrapper script, `tails-notify-user`, specifically meant for the use case of displaying notifications to the user from background scripts running as different users, so we should just use that instead.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport subprocess\n\n\nif __name__ == '__main__':\n # check for root\n if os.geteuid() != 0:\n sys.exit('You need to run this as root')\n\n # paths\n path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'\n path_torrc_backup = '/etc/tor/torrc.bak'\n path_torrc = '/etc/tor/torrc'\n\n # load torrc_additions\n if os.path.isfile(path_torrc_additions):\n torrc_additions = open(path_torrc_additions).read()\n else:\n sys.exit('Error opening {0} for reading'.format(path_torrc_additions))\n\n # load torrc\n if os.path.isfile(path_torrc_backup):\n torrc = open(path_torrc_backup).read()\n else:\n if os.path.isfile(path_torrc):\n torrc = open(path_torrc).read()\n else:\n sys.exit('Error opening {0} for reading'.format(path_torrc))\n\n # save a backup\n open(path_torrc_backup, 'w').write(torrc)\n\n # append the additions\n open(path_torrc, 'w').write(torrc + torrc_additions)\n\n # reload tor\n subprocess.call(['/usr/sbin/service', 'tor', 'reload'])\n\n # success\n subprocess.call(['/usr/bin/sudo', '-u', 'amnesia', '/usr/bin/notify-send', '-i', '/home/amnesia/Persistent/.securedrop/securedrop_icon.png',\n 'Updated torrc!', 'You can now connect to your SecureDrop\\ndocument interface.'])\n", "path": "tails_files/securedrop_init.py"}], "after_files": [{"content": "#!/usr/bin/python\n\nimport os\nimport sys\nimport subprocess\n\n\n# check for root\nif os.geteuid() != 0:\n sys.exit('You need to run this as root')\n\n# paths\npath_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'\npath_torrc_backup = '/etc/tor/torrc.bak'\npath_torrc = '/etc/tor/torrc'\n\n# load torrc_additions\nif os.path.isfile(path_torrc_additions):\n torrc_additions = open(path_torrc_additions).read()\nelse:\n sys.exit('Error opening {0} for reading'.format(path_torrc_additions))\n\n# load torrc\nif os.path.isfile(path_torrc_backup):\n torrc = open(path_torrc_backup).read()\nelse:\n if os.path.isfile(path_torrc):\n torrc = open(path_torrc).read()\n else:\n sys.exit('Error opening {0} for reading'.format(path_torrc))\n\n # save a backup\n open(path_torrc_backup, 'w').write(torrc)\n\n# append the additions\nopen(path_torrc, 'w').write(torrc + torrc_additions)\n\n# reload tor\ntry:\n subprocess.check_call(['systemctl', 'reload', '[email protected]'])\nexcept subprocess.CalledProcessError:\n sys.exit('Error reloading Tor')\n\n# notify the user\nsubprocess.call(['tails-notify-user',\n 'SecureDrop successfully auto-configured!',\n 'You can now access the Document Interface.\\nIf you are an admin, you can now SSH to the servers.'])\n", "path": "tails_files/securedrop_init.py"}]} | 889 | 842 |
gh_patches_debug_38860 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-1745 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] some PDFs raise a NotImplementedError
### Description
Since 1.9.2 (I skipped 1.9 and 1.9.1), account statements from the German DKB bank (including ones that previously worked fine) get stuck in processing.
There are some log messages about barcode separation, so maybe something is broken there, as there is no barcode visible in the PDF.
Unfortunately, "reprinting" the PDF with a tool that usually fixes files which cannot be processed in paperless does not help here either.
Unfortunately I cannot share samples as it's bank data.
I have barcode separation enabled:
PAPERLESS_CONSUMER_ENABLE_BARCODES=true
PAPERLESS_CONSUMER_BARCODE_STRING=PATCHT
### Steps to reproduce
1. get the document consumed
2. process gets stuck in frontend

3. shows up as queued for quite some time

4. ends up as failed task
### Webserver logs
```bash
Kontoauszug_1023735317_Nr_2022_003_per_2022_08_01.pdf
HISTORY
Id:
79423d05e71d4a10b2457bf23f91cd60
Name:
Kontoauszug_1023735317_Nr_2022_003_per_2022_08_01.pdf
Func:
documents.tasks.consume_file
Hook:
-
Args:
/tmp/paperless/paperless-upload-0n2e8jty
Kwargs:
{'override_filename': 'Kontoauszug_1023735317_Nr_2022_003_per_2022_08_01.pdf', 'override_title': None, 'override_correspondent_id': None, 'override_document_type_id': None, 'override_tag_ids': None, 'task_id': 'dc8d5b5b-5eec-4395-b29a-22f4ee8df01a', 'override_created': None}
Result:
Not sure how to handle PDF image of this type : Traceback (most recent call last):
File "/usr/src/paperless/src/src/django-q/django_q/cluster.py", line 454, in worker
res = f(*task["args"], **task["kwargs"])
File "/usr/src/paperless/src/documents/tasks.py", line 99, in consume_file
pdf_filepath, separators = barcodes.scan_file_for_separating_barcodes(path)
File "/usr/src/paperless/src/documents/barcodes.py", line 126, in scan_file_for_separating_barcodes
pillow_img = pdfimage.as_pil_image()
File "/usr/local/lib/python3.9/site-packages/pikepdf/models/image.py", line 742, in as_pil_image
im = self._extract_transcoded()
File "/usr/local/lib/python3.9/site-packages/pikepdf/models/image.py", line 606, in _extract_transcoded
if self.mode in {'DeviceN', 'Separation'}:
File "/usr/local/lib/python3.9/site-packages/pikepdf/models/image.py", line 301, in mode
raise NotImplementedError(
NotImplementedError: Not sure how to handle PDF image of this type
Group:
-
Started:
Oct. 4, 2022, 5:52 p.m.
Stopped:
Oct. 4, 2022, 5:56 p.m.
```
### Paperless-ngx version
1.9.2
### Host OS
Synology with Docker
### Installation method
Docker - ghcr.io image
webserver:
image: ghcr.io/paperless-ngx/paperless-ngx:latest
### Browser
Chrome
### Configuration changes
Barcode page separation was enabled; the problem is resolved after commenting it out in the compose file:
#PAPERLESS_CONSUMER_ENABLE_BARCODES=true
#PAPERLESS_CONSUMER_BARCODE_STRING=PATCHT
### Other
_No response_
--- END ISSUE ---
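Editor's note: a minimal sketch of the failure mode described in the traceback above, assuming pikepdf is installed; `statement.pdf` is a placeholder filename. `PdfImage.as_pil_image()` is the call that raises `NotImplementedError` for colour spaces pikepdf cannot transcode (the traceback shows the `DeviceN`/`Separation` branch), so a probe like this lists the offending page images without running the whole consumer:

```python
from pikepdf import Pdf, PdfImage

# Placeholder path -- use any PDF that reproduces the error.
with Pdf.open("statement.pdf") as pdf:
    for page_num, page in enumerate(pdf.pages):
        for image_key in page.images:
            pdf_image = PdfImage(page.images[image_key])
            try:
                pdf_image.as_pil_image()
            except NotImplementedError as err:
                print(f"page {page_num}, image {image_key}: {err}")
```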
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/documents/barcodes.py`
Content:
```
1 import logging
2 import os
3 import shutil
4 import tempfile
5 from functools import lru_cache
6 from typing import List
7 from typing import Optional
8 from typing import Tuple
9
10 import magic
11 from django.conf import settings
12 from pikepdf import Page
13 from pikepdf import Pdf
14 from pikepdf import PdfImage
15 from PIL import Image
16 from PIL import ImageSequence
17 from pyzbar import pyzbar
18
19 logger = logging.getLogger("paperless.barcodes")
20
21
22 @lru_cache(maxsize=8)
23 def supported_file_type(mime_type) -> bool:
24 """
25 Determines if the file is valid for barcode
26 processing, based on MIME type and settings
27
28 :return: True if the file is supported, False otherwise
29 """
30 supported_mime = ["application/pdf"]
31 if settings.CONSUMER_BARCODE_TIFF_SUPPORT:
32 supported_mime += ["image/tiff"]
33
34 return mime_type in supported_mime
35
36
37 def barcode_reader(image: Image) -> List[str]:
38 """
39 Read any barcodes contained in image
40 Returns a list containing all found barcodes
41 """
42 barcodes = []
43 # Decode the barcode image
44 detected_barcodes = pyzbar.decode(image)
45
46 if detected_barcodes:
47 # Traverse through all the detected barcodes in image
48 for barcode in detected_barcodes:
49 if barcode.data:
50 decoded_barcode = barcode.data.decode("utf-8")
51 barcodes.append(decoded_barcode)
52 logger.debug(
53 f"Barcode of type {str(barcode.type)} found: {decoded_barcode}",
54 )
55 return barcodes
56
57
58 def get_file_mime_type(path: str) -> str:
59 """
60 Determines the file type, based on MIME type.
61
62 Returns the MIME type.
63 """
64 mime_type = magic.from_file(path, mime=True)
65 logger.debug(f"Detected mime type: {mime_type}")
66 return mime_type
67
68
69 def convert_from_tiff_to_pdf(filepath: str) -> str:
70 """
71 converts a given TIFF image file to pdf into a temporary directory.
72
73 Returns the new pdf file.
74 """
75 file_name = os.path.splitext(os.path.basename(filepath))[0]
76 mime_type = get_file_mime_type(filepath)
77 tempdir = tempfile.mkdtemp(prefix="paperless-", dir=settings.SCRATCH_DIR)
78 # use old file name with pdf extension
79 if mime_type == "image/tiff":
80 newpath = os.path.join(tempdir, file_name + ".pdf")
81 else:
82 logger.warning(
83 f"Cannot convert mime type {str(mime_type)} from {str(filepath)} to pdf.",
84 )
85 return None
86 with Image.open(filepath) as image:
87 images = []
88 for i, page in enumerate(ImageSequence.Iterator(image)):
89 page = page.convert("RGB")
90 images.append(page)
91 try:
92 if len(images) == 1:
93 images[0].save(newpath)
94 else:
95 images[0].save(newpath, save_all=True, append_images=images[1:])
96 except OSError as e:
97 logger.warning(
98 f"Could not save the file as pdf. Error: {str(e)}",
99 )
100 return None
101 return newpath
102
103
104 def scan_file_for_separating_barcodes(filepath: str) -> Tuple[Optional[str], List[int]]:
105 """
106 Scan the provided pdf file for page separating barcodes
107 Returns a PDF filepath and a list of pagenumbers,
108 which separate the file into new files
109 """
110
111 separator_page_numbers = []
112 pdf_filepath = None
113
114 mime_type = get_file_mime_type(filepath)
115
116 if supported_file_type(mime_type):
117 pdf_filepath = filepath
118 if mime_type == "image/tiff":
119 pdf_filepath = convert_from_tiff_to_pdf(filepath)
120
121 pdf = Pdf.open(pdf_filepath)
122
123 for page_num, page in enumerate(pdf.pages):
124 for image_key in page.images:
125 pdfimage = PdfImage(page.images[image_key])
126 pillow_img = pdfimage.as_pil_image()
127
128 detected_barcodes = barcode_reader(pillow_img)
129
130 if settings.CONSUMER_BARCODE_STRING in detected_barcodes:
131 separator_page_numbers.append(page_num)
132 else:
133 logger.warning(
134 f"Unsupported file format for barcode reader: {str(mime_type)}",
135 )
136 return pdf_filepath, separator_page_numbers
137
138
139 def separate_pages(filepath: str, pages_to_split_on: List[int]) -> List[str]:
140 """
141 Separate the provided pdf file on the pages_to_split_on.
142 The pages which are defined by page_numbers will be removed.
143 Returns a list of (temporary) filepaths to consume.
144 These will need to be deleted later.
145 """
146
147 document_paths = []
148
149 if not pages_to_split_on:
150 logger.warning("No pages to split on!")
151 return document_paths
152
153 os.makedirs(settings.SCRATCH_DIR, exist_ok=True)
154 tempdir = tempfile.mkdtemp(prefix="paperless-", dir=settings.SCRATCH_DIR)
155 fname = os.path.splitext(os.path.basename(filepath))[0]
156 pdf = Pdf.open(filepath)
157
158 # A list of documents, ie a list of lists of pages
159 documents: List[List[Page]] = []
160 # A single document, ie a list of pages
161 document: List[Page] = []
162
163 for idx, page in enumerate(pdf.pages):
164 # Keep building the new PDF as long as it is not a
165 # separator index
166 if idx not in pages_to_split_on:
167 document.append(page)
168 # Make sure to append the very last document to the documents
169 if idx == (len(pdf.pages) - 1):
170 documents.append(document)
171 document = []
172 else:
173 # This is a split index, save the current PDF pages, and restart
174 # a new destination page listing
175 logger.debug(f"Starting new document at idx {idx}")
176 documents.append(document)
177 document = []
178
179 documents = [x for x in documents if len(x)]
180
181 logger.debug(f"Split into {len(documents)} new documents")
182
183 # Write the new documents out
184 for doc_idx, document in enumerate(documents):
185 dst = Pdf.new()
186 dst.pages.extend(document)
187
188 output_filename = f"{fname}_document_{doc_idx}.pdf"
189
190 logger.debug(f"pdf no:{doc_idx} has {len(dst.pages)} pages")
191 savepath = os.path.join(tempdir, output_filename)
192 with open(savepath, "wb") as out:
193 dst.save(out)
194 document_paths.append(savepath)
195
196 return document_paths
197
198
199 def save_to_dir(
200 filepath: str,
201 newname: str = None,
202 target_dir: str = settings.CONSUMPTION_DIR,
203 ):
204 """
205 Copies filepath to target_dir.
206 Optionally rename the file.
207 """
208 if os.path.isfile(filepath) and os.path.isdir(target_dir):
209 dst = shutil.copy(filepath, target_dir)
210 logging.debug(f"saved {str(filepath)} to {str(dst)}")
211 if newname:
212 dst_new = os.path.join(target_dir, newname)
213 logger.debug(f"moving {str(dst)} to {str(dst_new)}")
214 os.rename(dst, dst_new)
215 else:
216 logger.warning(f"{str(filepath)} or {str(target_dir)} don't exist.")
217
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/documents/barcodes.py b/src/documents/barcodes.py
--- a/src/documents/barcodes.py
+++ b/src/documents/barcodes.py
@@ -9,6 +9,7 @@
import magic
from django.conf import settings
+from pdf2image import convert_from_path
from pikepdf import Page
from pikepdf import Pdf
from pikepdf import PdfImage
@@ -19,6 +20,10 @@
logger = logging.getLogger("paperless.barcodes")
+class BarcodeImageFormatError(Exception):
+ pass
+
+
@lru_cache(maxsize=8)
def supported_file_type(mime_type) -> bool:
"""
@@ -108,6 +113,33 @@
which separate the file into new files
"""
+ def _pikepdf_barcode_scan(pdf_filepath: str):
+ with Pdf.open(pdf_filepath) as pdf:
+ for page_num, page in enumerate(pdf.pages):
+ for image_key in page.images:
+ pdfimage = PdfImage(page.images[image_key])
+
+ if "/CCITTFaxDecode" in pdfimage.filters:
+ raise BarcodeImageFormatError()
+
+ # Not all images can be transcoded to a PIL image, which
+ # is what pyzbar expects to receive
+ pillow_img = pdfimage.as_pil_image()
+
+ detected_barcodes = barcode_reader(pillow_img)
+
+ if settings.CONSUMER_BARCODE_STRING in detected_barcodes:
+ separator_page_numbers.append(page_num)
+
+ def _pdf2image_barcode_scan(pdf_filepath: str):
+ # use a temporary directory in case the file os too big to handle in memory
+ with tempfile.TemporaryDirectory() as path:
+ pages_from_path = convert_from_path(pdf_filepath, output_folder=path)
+ for current_page_number, page in enumerate(pages_from_path):
+ current_barcodes = barcode_reader(page)
+ if settings.CONSUMER_BARCODE_STRING in current_barcodes:
+ separator_page_numbers.append(current_page_number)
+
separator_page_numbers = []
pdf_filepath = None
@@ -118,17 +150,17 @@
if mime_type == "image/tiff":
pdf_filepath = convert_from_tiff_to_pdf(filepath)
- pdf = Pdf.open(pdf_filepath)
-
- for page_num, page in enumerate(pdf.pages):
- for image_key in page.images:
- pdfimage = PdfImage(page.images[image_key])
- pillow_img = pdfimage.as_pil_image()
+ try:
+ _pikepdf_barcode_scan(pdf_filepath)
+ except Exception as e:
- detected_barcodes = barcode_reader(pillow_img)
+ logger.warning(
+ f"Exception using pikepdf for barcodes, falling back to pdf2image: {e}",
+ )
+ # Reset this incase pikepdf got part way through
+ separator_page_numbers = []
+ _pdf2image_barcode_scan(pdf_filepath)
- if settings.CONSUMER_BARCODE_STRING in detected_barcodes:
- separator_page_numbers.append(page_num)
else:
logger.warning(
f"Unsupported file format for barcode reader: {str(mime_type)}",
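Editor's note on the design of the patch above: rather than teaching the pikepdf path about every colour space, it keeps pikepdf as the fast path and falls back to rasterising the whole file with pdf2image (which drives poppler) whenever pikepdf raises, resetting any partially collected page numbers first. A hedged sketch of the fallback primitive it relies on (`sample.pdf` is a placeholder):

```python
import tempfile

from pdf2image import convert_from_path

# Rasterise every page to a PIL image; pyzbar can scan these directly,
# so no PDF image transcoding is involved at all.
with tempfile.TemporaryDirectory() as workdir:
    pages = convert_from_path("sample.pdf", output_folder=workdir)
    print(f"{len(pages)} pages rendered")
```

The trade-off is speed: rasterising is slower than reading the embedded images, which is presumably why it is only used on the exception path.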
| {"golden_diff": "diff --git a/src/documents/barcodes.py b/src/documents/barcodes.py\n--- a/src/documents/barcodes.py\n+++ b/src/documents/barcodes.py\n@@ -9,6 +9,7 @@\n \n import magic\n from django.conf import settings\n+from pdf2image import convert_from_path\n from pikepdf import Page\n from pikepdf import Pdf\n from pikepdf import PdfImage\n@@ -19,6 +20,10 @@\n logger = logging.getLogger(\"paperless.barcodes\")\n \n \n+class BarcodeImageFormatError(Exception):\n+ pass\n+\n+\n @lru_cache(maxsize=8)\n def supported_file_type(mime_type) -> bool:\n \"\"\"\n@@ -108,6 +113,33 @@\n which separate the file into new files\n \"\"\"\n \n+ def _pikepdf_barcode_scan(pdf_filepath: str):\n+ with Pdf.open(pdf_filepath) as pdf:\n+ for page_num, page in enumerate(pdf.pages):\n+ for image_key in page.images:\n+ pdfimage = PdfImage(page.images[image_key])\n+\n+ if \"/CCITTFaxDecode\" in pdfimage.filters:\n+ raise BarcodeImageFormatError()\n+\n+ # Not all images can be transcoded to a PIL image, which\n+ # is what pyzbar expects to receive\n+ pillow_img = pdfimage.as_pil_image()\n+\n+ detected_barcodes = barcode_reader(pillow_img)\n+\n+ if settings.CONSUMER_BARCODE_STRING in detected_barcodes:\n+ separator_page_numbers.append(page_num)\n+\n+ def _pdf2image_barcode_scan(pdf_filepath: str):\n+ # use a temporary directory in case the file os too big to handle in memory\n+ with tempfile.TemporaryDirectory() as path:\n+ pages_from_path = convert_from_path(pdf_filepath, output_folder=path)\n+ for current_page_number, page in enumerate(pages_from_path):\n+ current_barcodes = barcode_reader(page)\n+ if settings.CONSUMER_BARCODE_STRING in current_barcodes:\n+ separator_page_numbers.append(current_page_number)\n+\n separator_page_numbers = []\n pdf_filepath = None\n \n@@ -118,17 +150,17 @@\n if mime_type == \"image/tiff\":\n pdf_filepath = convert_from_tiff_to_pdf(filepath)\n \n- pdf = Pdf.open(pdf_filepath)\n-\n- for page_num, page in enumerate(pdf.pages):\n- for image_key in page.images:\n- pdfimage = PdfImage(page.images[image_key])\n- pillow_img = pdfimage.as_pil_image()\n+ try:\n+ _pikepdf_barcode_scan(pdf_filepath)\n+ except Exception as e:\n \n- detected_barcodes = barcode_reader(pillow_img)\n+ logger.warning(\n+ f\"Exception using pikepdf for barcodes, falling back to pdf2image: {e}\",\n+ )\n+ # Reset this incase pikepdf got part way through\n+ separator_page_numbers = []\n+ _pdf2image_barcode_scan(pdf_filepath)\n \n- if settings.CONSUMER_BARCODE_STRING in detected_barcodes:\n- separator_page_numbers.append(page_num)\n else:\n logger.warning(\n f\"Unsupported file format for barcode reader: {str(mime_type)}\",\n", "issue": "[BUG] some PDFs raise a NotImplementedError\n### Description\r\n\r\nSince 1.9.2 (I skipped 1.9 and 1.9.1) account statements of german dkb bank (also ones previously working fine) get stuck in processing.\r\nThere are some statements about barcode separation, maybe something broken there, as there is no barcode visible in the PDF.\r\nUnfortunately also \"reprinting\" the PDF with a tool usually fixing issues for files which cannot be processed in papaerless is not working here.\r\nUnfortunately I cannot share samples as its bank data....\r\n\r\nHave barcode separation enabled:\r\nPAPERLESS_CONSUMER_ENABLE_BARCODES=true\r\nPAPERLESS_CONSUMER_BARCODE_STRING=PATCHT\r\n\r\n### Steps to reproduce\r\n\r\n1. get the document consumed\r\n2. process gets stuck in frontend\r\n\r\n3. shows up as queued for quite some time\r\n\r\n4. 
ends up as failed task\r\n\r\n### Webserver logs\r\n\r\n```bash\r\nKontoauszug_1023735317_Nr_2022_003_per_2022_08_01.pdf\r\nHISTORY\r\n\r\nId:\r\n79423d05e71d4a10b2457bf23f91cd60\r\nName:\r\nKontoauszug_1023735317_Nr_2022_003_per_2022_08_01.pdf\r\nFunc:\r\ndocuments.tasks.consume_file\r\nHook:\r\n-\r\nArgs:\r\n/tmp/paperless/paperless-upload-0n2e8jty\r\nKwargs:\r\n{'override_filename': 'Kontoauszug_1023735317_Nr_2022_003_per_2022_08_01.pdf', 'override_title': None, 'override_correspondent_id': None, 'override_document_type_id': None, 'override_tag_ids': None, 'task_id': 'dc8d5b5b-5eec-4395-b29a-22f4ee8df01a', 'override_created': None}\r\nResult:\r\nNot sure how to handle PDF image of this type : Traceback (most recent call last):\r\nFile \"/usr/src/paperless/src/src/django-q/django_q/cluster.py\", line 454, in worker\r\nres = f(*task[\"args\"], **task[\"kwargs\"])\r\nFile \"/usr/src/paperless/src/documents/tasks.py\", line 99, in consume_file\r\npdf_filepath, separators = barcodes.scan_file_for_separating_barcodes(path)\r\nFile \"/usr/src/paperless/src/documents/barcodes.py\", line 126, in scan_file_for_separating_barcodes\r\npillow_img = pdfimage.as_pil_image()\r\nFile \"/usr/local/lib/python3.9/site-packages/pikepdf/models/image.py\", line 742, in as_pil_image\r\nim = self._extract_transcoded()\r\nFile \"/usr/local/lib/python3.9/site-packages/pikepdf/models/image.py\", line 606, in _extract_transcoded\r\nif self.mode in {'DeviceN', 'Separation'}:\r\nFile \"/usr/local/lib/python3.9/site-packages/pikepdf/models/image.py\", line 301, in mode\r\nraise NotImplementedError(\r\nNotImplementedError: Not sure how to handle PDF image of this type\r\nGroup:\r\n-\r\nStarted:\r\nOct. 4, 2022, 5:52 p.m.\r\nStopped:\r\nOct. 4, 2022, 5:56 p.m.\r\n```\r\n\r\n\r\n### Paperless-ngx version\r\n\r\n1.9.2\r\n\r\n### Host OS\r\n\r\nSynology with Docker\r\n\r\n### Installation method\r\n\r\nDocker - ghcr.io image\r\nwebserver:\r\n image: ghcr.io/paperless-ngx/paperless-ngx:latest\r\n\r\n### Browser\r\n\r\nChrome\r\n\r\n### Configuration changes\r\n\r\nbarcodes for page separation was enabled, problem is resolved after commenting it out in compose file:\r\n#PAPERLESS_CONSUMER_ENABLE_BARCODES=true\r\n#PAPERLESS_CONSUMER_BARCODE_STRING=PATCHT\r\n\r\n### Other\r\n\r\n_No response_\n", "before_files": [{"content": "import logging\nimport os\nimport shutil\nimport tempfile\nfrom functools import lru_cache\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\n\nimport magic\nfrom django.conf import settings\nfrom pikepdf import Page\nfrom pikepdf import Pdf\nfrom pikepdf import PdfImage\nfrom PIL import Image\nfrom PIL import ImageSequence\nfrom pyzbar import pyzbar\n\nlogger = logging.getLogger(\"paperless.barcodes\")\n\n\n@lru_cache(maxsize=8)\ndef supported_file_type(mime_type) -> bool:\n \"\"\"\n Determines if the file is valid for barcode\n processing, based on MIME type and settings\n\n :return: True if the file is supported, False otherwise\n \"\"\"\n supported_mime = [\"application/pdf\"]\n if settings.CONSUMER_BARCODE_TIFF_SUPPORT:\n supported_mime += [\"image/tiff\"]\n\n return mime_type in supported_mime\n\n\ndef barcode_reader(image: Image) -> List[str]:\n \"\"\"\n Read any barcodes contained in image\n Returns a list containing all found barcodes\n \"\"\"\n barcodes = []\n # Decode the barcode image\n detected_barcodes = pyzbar.decode(image)\n\n if detected_barcodes:\n # Traverse through all the detected barcodes in image\n for barcode in detected_barcodes:\n if barcode.data:\n 
decoded_barcode = barcode.data.decode(\"utf-8\")\n barcodes.append(decoded_barcode)\n logger.debug(\n f\"Barcode of type {str(barcode.type)} found: {decoded_barcode}\",\n )\n return barcodes\n\n\ndef get_file_mime_type(path: str) -> str:\n \"\"\"\n Determines the file type, based on MIME type.\n\n Returns the MIME type.\n \"\"\"\n mime_type = magic.from_file(path, mime=True)\n logger.debug(f\"Detected mime type: {mime_type}\")\n return mime_type\n\n\ndef convert_from_tiff_to_pdf(filepath: str) -> str:\n \"\"\"\n converts a given TIFF image file to pdf into a temporary directory.\n\n Returns the new pdf file.\n \"\"\"\n file_name = os.path.splitext(os.path.basename(filepath))[0]\n mime_type = get_file_mime_type(filepath)\n tempdir = tempfile.mkdtemp(prefix=\"paperless-\", dir=settings.SCRATCH_DIR)\n # use old file name with pdf extension\n if mime_type == \"image/tiff\":\n newpath = os.path.join(tempdir, file_name + \".pdf\")\n else:\n logger.warning(\n f\"Cannot convert mime type {str(mime_type)} from {str(filepath)} to pdf.\",\n )\n return None\n with Image.open(filepath) as image:\n images = []\n for i, page in enumerate(ImageSequence.Iterator(image)):\n page = page.convert(\"RGB\")\n images.append(page)\n try:\n if len(images) == 1:\n images[0].save(newpath)\n else:\n images[0].save(newpath, save_all=True, append_images=images[1:])\n except OSError as e:\n logger.warning(\n f\"Could not save the file as pdf. Error: {str(e)}\",\n )\n return None\n return newpath\n\n\ndef scan_file_for_separating_barcodes(filepath: str) -> Tuple[Optional[str], List[int]]:\n \"\"\"\n Scan the provided pdf file for page separating barcodes\n Returns a PDF filepath and a list of pagenumbers,\n which separate the file into new files\n \"\"\"\n\n separator_page_numbers = []\n pdf_filepath = None\n\n mime_type = get_file_mime_type(filepath)\n\n if supported_file_type(mime_type):\n pdf_filepath = filepath\n if mime_type == \"image/tiff\":\n pdf_filepath = convert_from_tiff_to_pdf(filepath)\n\n pdf = Pdf.open(pdf_filepath)\n\n for page_num, page in enumerate(pdf.pages):\n for image_key in page.images:\n pdfimage = PdfImage(page.images[image_key])\n pillow_img = pdfimage.as_pil_image()\n\n detected_barcodes = barcode_reader(pillow_img)\n\n if settings.CONSUMER_BARCODE_STRING in detected_barcodes:\n separator_page_numbers.append(page_num)\n else:\n logger.warning(\n f\"Unsupported file format for barcode reader: {str(mime_type)}\",\n )\n return pdf_filepath, separator_page_numbers\n\n\ndef separate_pages(filepath: str, pages_to_split_on: List[int]) -> List[str]:\n \"\"\"\n Separate the provided pdf file on the pages_to_split_on.\n The pages which are defined by page_numbers will be removed.\n Returns a list of (temporary) filepaths to consume.\n These will need to be deleted later.\n \"\"\"\n\n document_paths = []\n\n if not pages_to_split_on:\n logger.warning(\"No pages to split on!\")\n return document_paths\n\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n tempdir = tempfile.mkdtemp(prefix=\"paperless-\", dir=settings.SCRATCH_DIR)\n fname = os.path.splitext(os.path.basename(filepath))[0]\n pdf = Pdf.open(filepath)\n\n # A list of documents, ie a list of lists of pages\n documents: List[List[Page]] = []\n # A single document, ie a list of pages\n document: List[Page] = []\n\n for idx, page in enumerate(pdf.pages):\n # Keep building the new PDF as long as it is not a\n # separator index\n if idx not in pages_to_split_on:\n document.append(page)\n # Make sure to append the very last document to the documents\n 
if idx == (len(pdf.pages) - 1):\n documents.append(document)\n document = []\n else:\n # This is a split index, save the current PDF pages, and restart\n # a new destination page listing\n logger.debug(f\"Starting new document at idx {idx}\")\n documents.append(document)\n document = []\n\n documents = [x for x in documents if len(x)]\n\n logger.debug(f\"Split into {len(documents)} new documents\")\n\n # Write the new documents out\n for doc_idx, document in enumerate(documents):\n dst = Pdf.new()\n dst.pages.extend(document)\n\n output_filename = f\"{fname}_document_{doc_idx}.pdf\"\n\n logger.debug(f\"pdf no:{doc_idx} has {len(dst.pages)} pages\")\n savepath = os.path.join(tempdir, output_filename)\n with open(savepath, \"wb\") as out:\n dst.save(out)\n document_paths.append(savepath)\n\n return document_paths\n\n\ndef save_to_dir(\n filepath: str,\n newname: str = None,\n target_dir: str = settings.CONSUMPTION_DIR,\n):\n \"\"\"\n Copies filepath to target_dir.\n Optionally rename the file.\n \"\"\"\n if os.path.isfile(filepath) and os.path.isdir(target_dir):\n dst = shutil.copy(filepath, target_dir)\n logging.debug(f\"saved {str(filepath)} to {str(dst)}\")\n if newname:\n dst_new = os.path.join(target_dir, newname)\n logger.debug(f\"moving {str(dst)} to {str(dst_new)}\")\n os.rename(dst, dst_new)\n else:\n logger.warning(f\"{str(filepath)} or {str(target_dir)} don't exist.\")\n", "path": "src/documents/barcodes.py"}], "after_files": [{"content": "import logging\nimport os\nimport shutil\nimport tempfile\nfrom functools import lru_cache\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\n\nimport magic\nfrom django.conf import settings\nfrom pdf2image import convert_from_path\nfrom pikepdf import Page\nfrom pikepdf import Pdf\nfrom pikepdf import PdfImage\nfrom PIL import Image\nfrom PIL import ImageSequence\nfrom pyzbar import pyzbar\n\nlogger = logging.getLogger(\"paperless.barcodes\")\n\n\nclass BarcodeImageFormatError(Exception):\n pass\n\n\n@lru_cache(maxsize=8)\ndef supported_file_type(mime_type) -> bool:\n \"\"\"\n Determines if the file is valid for barcode\n processing, based on MIME type and settings\n\n :return: True if the file is supported, False otherwise\n \"\"\"\n supported_mime = [\"application/pdf\"]\n if settings.CONSUMER_BARCODE_TIFF_SUPPORT:\n supported_mime += [\"image/tiff\"]\n\n return mime_type in supported_mime\n\n\ndef barcode_reader(image: Image) -> List[str]:\n \"\"\"\n Read any barcodes contained in image\n Returns a list containing all found barcodes\n \"\"\"\n barcodes = []\n # Decode the barcode image\n detected_barcodes = pyzbar.decode(image)\n\n if detected_barcodes:\n # Traverse through all the detected barcodes in image\n for barcode in detected_barcodes:\n if barcode.data:\n decoded_barcode = barcode.data.decode(\"utf-8\")\n barcodes.append(decoded_barcode)\n logger.debug(\n f\"Barcode of type {str(barcode.type)} found: {decoded_barcode}\",\n )\n return barcodes\n\n\ndef get_file_mime_type(path: str) -> str:\n \"\"\"\n Determines the file type, based on MIME type.\n\n Returns the MIME type.\n \"\"\"\n mime_type = magic.from_file(path, mime=True)\n logger.debug(f\"Detected mime type: {mime_type}\")\n return mime_type\n\n\ndef convert_from_tiff_to_pdf(filepath: str) -> str:\n \"\"\"\n converts a given TIFF image file to pdf into a temporary directory.\n\n Returns the new pdf file.\n \"\"\"\n file_name = os.path.splitext(os.path.basename(filepath))[0]\n mime_type = get_file_mime_type(filepath)\n tempdir = 
tempfile.mkdtemp(prefix=\"paperless-\", dir=settings.SCRATCH_DIR)\n # use old file name with pdf extension\n if mime_type == \"image/tiff\":\n newpath = os.path.join(tempdir, file_name + \".pdf\")\n else:\n logger.warning(\n f\"Cannot convert mime type {str(mime_type)} from {str(filepath)} to pdf.\",\n )\n return None\n with Image.open(filepath) as image:\n images = []\n for i, page in enumerate(ImageSequence.Iterator(image)):\n page = page.convert(\"RGB\")\n images.append(page)\n try:\n if len(images) == 1:\n images[0].save(newpath)\n else:\n images[0].save(newpath, save_all=True, append_images=images[1:])\n except OSError as e:\n logger.warning(\n f\"Could not save the file as pdf. Error: {str(e)}\",\n )\n return None\n return newpath\n\n\ndef scan_file_for_separating_barcodes(filepath: str) -> Tuple[Optional[str], List[int]]:\n \"\"\"\n Scan the provided pdf file for page separating barcodes\n Returns a PDF filepath and a list of pagenumbers,\n which separate the file into new files\n \"\"\"\n\n def _pikepdf_barcode_scan(pdf_filepath: str):\n with Pdf.open(pdf_filepath) as pdf:\n for page_num, page in enumerate(pdf.pages):\n for image_key in page.images:\n pdfimage = PdfImage(page.images[image_key])\n\n if \"/CCITTFaxDecode\" in pdfimage.filters:\n raise BarcodeImageFormatError()\n\n # Not all images can be transcoded to a PIL image, which\n # is what pyzbar expects to receive\n pillow_img = pdfimage.as_pil_image()\n\n detected_barcodes = barcode_reader(pillow_img)\n\n if settings.CONSUMER_BARCODE_STRING in detected_barcodes:\n separator_page_numbers.append(page_num)\n\n def _pdf2image_barcode_scan(pdf_filepath: str):\n # use a temporary directory in case the file os too big to handle in memory\n with tempfile.TemporaryDirectory() as path:\n pages_from_path = convert_from_path(pdf_filepath, output_folder=path)\n for current_page_number, page in enumerate(pages_from_path):\n current_barcodes = barcode_reader(page)\n if settings.CONSUMER_BARCODE_STRING in current_barcodes:\n separator_page_numbers.append(current_page_number)\n\n separator_page_numbers = []\n pdf_filepath = None\n\n mime_type = get_file_mime_type(filepath)\n\n if supported_file_type(mime_type):\n pdf_filepath = filepath\n if mime_type == \"image/tiff\":\n pdf_filepath = convert_from_tiff_to_pdf(filepath)\n\n try:\n _pikepdf_barcode_scan(pdf_filepath)\n except Exception as e:\n\n logger.warning(\n f\"Exception using pikepdf for barcodes, falling back to pdf2image: {e}\",\n )\n # Reset this incase pikepdf got part way through\n separator_page_numbers = []\n _pdf2image_barcode_scan(pdf_filepath)\n\n else:\n logger.warning(\n f\"Unsupported file format for barcode reader: {str(mime_type)}\",\n )\n return pdf_filepath, separator_page_numbers\n\n\ndef separate_pages(filepath: str, pages_to_split_on: List[int]) -> List[str]:\n \"\"\"\n Separate the provided pdf file on the pages_to_split_on.\n The pages which are defined by page_numbers will be removed.\n Returns a list of (temporary) filepaths to consume.\n These will need to be deleted later.\n \"\"\"\n\n document_paths = []\n\n if not pages_to_split_on:\n logger.warning(\"No pages to split on!\")\n return document_paths\n\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n tempdir = tempfile.mkdtemp(prefix=\"paperless-\", dir=settings.SCRATCH_DIR)\n fname = os.path.splitext(os.path.basename(filepath))[0]\n pdf = Pdf.open(filepath)\n\n # A list of documents, ie a list of lists of pages\n documents: List[List[Page]] = []\n # A single document, ie a list of pages\n document: 
List[Page] = []\n\n for idx, page in enumerate(pdf.pages):\n # Keep building the new PDF as long as it is not a\n # separator index\n if idx not in pages_to_split_on:\n document.append(page)\n # Make sure to append the very last document to the documents\n if idx == (len(pdf.pages) - 1):\n documents.append(document)\n document = []\n else:\n # This is a split index, save the current PDF pages, and restart\n # a new destination page listing\n logger.debug(f\"Starting new document at idx {idx}\")\n documents.append(document)\n document = []\n\n documents = [x for x in documents if len(x)]\n\n logger.debug(f\"Split into {len(documents)} new documents\")\n\n # Write the new documents out\n for doc_idx, document in enumerate(documents):\n dst = Pdf.new()\n dst.pages.extend(document)\n\n output_filename = f\"{fname}_document_{doc_idx}.pdf\"\n\n logger.debug(f\"pdf no:{doc_idx} has {len(dst.pages)} pages\")\n savepath = os.path.join(tempdir, output_filename)\n with open(savepath, \"wb\") as out:\n dst.save(out)\n document_paths.append(savepath)\n\n return document_paths\n\n\ndef save_to_dir(\n filepath: str,\n newname: str = None,\n target_dir: str = settings.CONSUMPTION_DIR,\n):\n \"\"\"\n Copies filepath to target_dir.\n Optionally rename the file.\n \"\"\"\n if os.path.isfile(filepath) and os.path.isdir(target_dir):\n dst = shutil.copy(filepath, target_dir)\n logging.debug(f\"saved {str(filepath)} to {str(dst)}\")\n if newname:\n dst_new = os.path.join(target_dir, newname)\n logger.debug(f\"moving {str(dst)} to {str(dst_new)}\")\n os.rename(dst, dst_new)\n else:\n logger.warning(f\"{str(filepath)} or {str(target_dir)} don't exist.\")\n", "path": "src/documents/barcodes.py"}]} | 3,382 | 704 |
gh_patches_debug_56077 | rasdani/github-patches | git_diff | pypa__pip-8124 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'pip cache info' fails when no-cache-dir set
pip version: pip 20.1b1
Python version: CPython 3.8.1
OS: Win 10 64
Testing the 20.1 beta, running 'pip cache info' crashes. I'm guessing it's due to pip.ini turning off caching.
pip.ini:
```
[global]
no-cache-dir = false
```
Command execution:
```
> pip cache info
ERROR: Exception:
Traceback (most recent call last):
File "c:\program files\python38\lib\site-packages\pip\_internal\cli\base_command.py", line 188, in _main
status = self.run(options, args)
File "c:\program files\python38\lib\site-packages\pip\_internal\commands\cache.py", line 62, in run
handlers[action](options, args[1:])
File "c:\program files\python38\lib\site-packages\pip\_internal\commands\cache.py", line 74, in get_cache_info
num_packages = len(self._find_wheels(options, '*'))
File "c:\program files\python38\lib\site-packages\pip\_internal\commands\cache.py", line 145, in _find_wheels
wheel_dir = self._wheels_cache_dir(options)
File "c:\program files\python38\lib\site-packages\pip\_internal\commands\cache.py", line 141, in _wheels_cache_dir
return os.path.join(options.cache_dir, 'wheels')
File "c:\program files\python38\lib\ntpath.py", line 78, in join
path = os.fspath(path)
TypeError: expected str, bytes or os.PathLike object, not bool
```
--- END ISSUE ---
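Editor's note: the root cause visible in the traceback is that with caching disabled, `options.cache_dir` is the boolean `False` rather than a path string, and `os.path.join` rejects non-path arguments. A two-line reproduction of just that final step, independent of pip:

```python
import os

os.path.join(False, "wheels")
# TypeError: expected str, bytes or os.PathLike object, not bool
```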
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/commands/cache.py`
Content:
```
1 from __future__ import absolute_import
2
3 import logging
4 import os
5 import textwrap
6
7 import pip._internal.utils.filesystem as filesystem
8 from pip._internal.cli.base_command import Command
9 from pip._internal.cli.status_codes import ERROR, SUCCESS
10 from pip._internal.exceptions import CommandError, PipError
11 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
12
13 if MYPY_CHECK_RUNNING:
14 from optparse import Values
15 from typing import Any, List
16
17
18 logger = logging.getLogger(__name__)
19
20
21 class CacheCommand(Command):
22 """
23 Inspect and manage pip's wheel cache.
24
25 Subcommands:
26
27 info: Show information about the cache.
28 list: List filenames of packages stored in the cache.
29 remove: Remove one or more package from the cache.
30 purge: Remove all items from the cache.
31
32 <pattern> can be a glob expression or a package name.
33 """
34
35 usage = """
36 %prog info
37 %prog list [<pattern>]
38 %prog remove <pattern>
39 %prog purge
40 """
41
42 def run(self, options, args):
43 # type: (Values, List[Any]) -> int
44 handlers = {
45 "info": self.get_cache_info,
46 "list": self.list_cache_items,
47 "remove": self.remove_cache_items,
48 "purge": self.purge_cache,
49 }
50
51 # Determine action
52 if not args or args[0] not in handlers:
53 logger.error("Need an action ({}) to perform.".format(
54 ", ".join(sorted(handlers)))
55 )
56 return ERROR
57
58 action = args[0]
59
60 # Error handling happens here, not in the action-handlers.
61 try:
62 handlers[action](options, args[1:])
63 except PipError as e:
64 logger.error(e.args[0])
65 return ERROR
66
67 return SUCCESS
68
69 def get_cache_info(self, options, args):
70 # type: (Values, List[Any]) -> None
71 if args:
72 raise CommandError('Too many arguments')
73
74 num_packages = len(self._find_wheels(options, '*'))
75
76 cache_location = self._wheels_cache_dir(options)
77 cache_size = filesystem.format_directory_size(cache_location)
78
79 message = textwrap.dedent("""
80 Location: {location}
81 Size: {size}
82 Number of wheels: {package_count}
83 """).format(
84 location=cache_location,
85 package_count=num_packages,
86 size=cache_size,
87 ).strip()
88
89 logger.info(message)
90
91 def list_cache_items(self, options, args):
92 # type: (Values, List[Any]) -> None
93 if len(args) > 1:
94 raise CommandError('Too many arguments')
95
96 if args:
97 pattern = args[0]
98 else:
99 pattern = '*'
100
101 files = self._find_wheels(options, pattern)
102
103 if not files:
104 logger.info('Nothing cached.')
105 return
106
107 results = []
108 for filename in files:
109 wheel = os.path.basename(filename)
110 size = filesystem.format_file_size(filename)
111 results.append(' - {} ({})'.format(wheel, size))
112 logger.info('Cache contents:\n')
113 logger.info('\n'.join(sorted(results)))
114
115 def remove_cache_items(self, options, args):
116 # type: (Values, List[Any]) -> None
117 if len(args) > 1:
118 raise CommandError('Too many arguments')
119
120 if not args:
121 raise CommandError('Please provide a pattern')
122
123 files = self._find_wheels(options, args[0])
124 if not files:
125 raise CommandError('No matching packages')
126
127 for filename in files:
128 os.unlink(filename)
129 logger.debug('Removed %s', filename)
130 logger.info('Files removed: %s', len(files))
131
132 def purge_cache(self, options, args):
133 # type: (Values, List[Any]) -> None
134 if args:
135 raise CommandError('Too many arguments')
136
137 return self.remove_cache_items(options, ['*'])
138
139 def _wheels_cache_dir(self, options):
140 # type: (Values) -> str
141 return os.path.join(options.cache_dir, 'wheels')
142
143 def _find_wheels(self, options, pattern):
144 # type: (Values, str) -> List[str]
145 wheel_dir = self._wheels_cache_dir(options)
146
147 # The wheel filename format, as specified in PEP 427, is:
148 # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl
149 #
150 # Additionally, non-alphanumeric values in the distribution are
151 # normalized to underscores (_), meaning hyphens can never occur
152 # before `-{version}`.
153 #
154 # Given that information:
155 # - If the pattern we're given contains a hyphen (-), the user is
156 # providing at least the version. Thus, we can just append `*.whl`
157 # to match the rest of it.
158 # - If the pattern we're given doesn't contain a hyphen (-), the
159 # user is only providing the name. Thus, we append `-*.whl` to
160 # match the hyphen before the version, followed by anything else.
161 #
162 # PEP 427: https://www.python.org/dev/peps/pep-0427/
163 pattern = pattern + ("*.whl" if "-" in pattern else "-*.whl")
164
165 return filesystem.find_files(wheel_dir, pattern)
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/commands/cache.py b/src/pip/_internal/commands/cache.py
--- a/src/pip/_internal/commands/cache.py
+++ b/src/pip/_internal/commands/cache.py
@@ -48,6 +48,11 @@
"purge": self.purge_cache,
}
+ if not options.cache_dir:
+ logger.error("pip cache commands can not "
+ "function since cache is disabled.")
+ return ERROR
+
# Determine action
if not args or args[0] not in handlers:
logger.error("Need an action ({}) to perform.".format(
| {"golden_diff": "diff --git a/src/pip/_internal/commands/cache.py b/src/pip/_internal/commands/cache.py\n--- a/src/pip/_internal/commands/cache.py\n+++ b/src/pip/_internal/commands/cache.py\n@@ -48,6 +48,11 @@\n \"purge\": self.purge_cache,\n }\n \n+ if not options.cache_dir:\n+ logger.error(\"pip cache commands can not \"\n+ \"function since cache is disabled.\")\n+ return ERROR\n+\n # Determine action\n if not args or args[0] not in handlers:\n logger.error(\"Need an action ({}) to perform.\".format(\n", "issue": "'pip cache info' fails when no-cache-dir set\npip version: pip 20.1b1\r\nPython version: CPython 3.8.1\r\nOS: Win 10 64\r\n\r\nTesting 20.1 beta, execute 'pip cache info' and crashes. I'm guessing it's due to pip.ini turning off caching.\r\n\r\npip.ini:\r\n```\r\n[global]\r\nno-cache-dir = false\r\n```\r\n\r\nCommand execution:\r\n```\r\n> pip cache info\r\nERROR: Exception:\r\nTraceback (most recent call last):\r\n File \"c:\\program files\\python38\\lib\\site-packages\\pip\\_internal\\cli\\base_command.py\", line 188, in _main\r\n status = self.run(options, args)\r\n File \"c:\\program files\\python38\\lib\\site-packages\\pip\\_internal\\commands\\cache.py\", line 62, in run\r\n handlers[action](options, args[1:])\r\n File \"c:\\program files\\python38\\lib\\site-packages\\pip\\_internal\\commands\\cache.py\", line 74, in get_cache_info\r\n num_packages = len(self._find_wheels(options, '*'))\r\n File \"c:\\program files\\python38\\lib\\site-packages\\pip\\_internal\\commands\\cache.py\", line 145, in _find_wheels\r\n wheel_dir = self._wheels_cache_dir(options)\r\n File \"c:\\program files\\python38\\lib\\site-packages\\pip\\_internal\\commands\\cache.py\", line 141, in _wheels_cache_dir\r\n return os.path.join(options.cache_dir, 'wheels')\r\n File \"c:\\program files\\python38\\lib\\ntpath.py\", line 78, in join\r\n path = os.fspath(path)\r\nTypeError: expected str, bytes or os.PathLike object, not bool\r\n```\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport os\nimport textwrap\n\nimport pip._internal.utils.filesystem as filesystem\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import ERROR, SUCCESS\nfrom pip._internal.exceptions import CommandError, PipError\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from optparse import Values\n from typing import Any, List\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CacheCommand(Command):\n \"\"\"\n Inspect and manage pip's wheel cache.\n\n Subcommands:\n\n info: Show information about the cache.\n list: List filenames of packages stored in the cache.\n remove: Remove one or more package from the cache.\n purge: Remove all items from the cache.\n\n <pattern> can be a glob expression or a package name.\n \"\"\"\n\n usage = \"\"\"\n %prog info\n %prog list [<pattern>]\n %prog remove <pattern>\n %prog purge\n \"\"\"\n\n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n handlers = {\n \"info\": self.get_cache_info,\n \"list\": self.list_cache_items,\n \"remove\": self.remove_cache_items,\n \"purge\": self.purge_cache,\n }\n\n # Determine action\n if not args or args[0] not in handlers:\n logger.error(\"Need an action ({}) to perform.\".format(\n \", \".join(sorted(handlers)))\n )\n return ERROR\n\n action = args[0]\n\n # Error handling happens here, not in the action-handlers.\n try:\n handlers[action](options, args[1:])\n except PipError as e:\n logger.error(e.args[0])\n 
return ERROR\n\n return SUCCESS\n\n def get_cache_info(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n num_packages = len(self._find_wheels(options, '*'))\n\n cache_location = self._wheels_cache_dir(options)\n cache_size = filesystem.format_directory_size(cache_location)\n\n message = textwrap.dedent(\"\"\"\n Location: {location}\n Size: {size}\n Number of wheels: {package_count}\n \"\"\").format(\n location=cache_location,\n package_count=num_packages,\n size=cache_size,\n ).strip()\n\n logger.info(message)\n\n def list_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if args:\n pattern = args[0]\n else:\n pattern = '*'\n\n files = self._find_wheels(options, pattern)\n\n if not files:\n logger.info('Nothing cached.')\n return\n\n results = []\n for filename in files:\n wheel = os.path.basename(filename)\n size = filesystem.format_file_size(filename)\n results.append(' - {} ({})'.format(wheel, size))\n logger.info('Cache contents:\\n')\n logger.info('\\n'.join(sorted(results)))\n\n def remove_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if not args:\n raise CommandError('Please provide a pattern')\n\n files = self._find_wheels(options, args[0])\n if not files:\n raise CommandError('No matching packages')\n\n for filename in files:\n os.unlink(filename)\n logger.debug('Removed %s', filename)\n logger.info('Files removed: %s', len(files))\n\n def purge_cache(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n return self.remove_cache_items(options, ['*'])\n\n def _wheels_cache_dir(self, options):\n # type: (Values) -> str\n return os.path.join(options.cache_dir, 'wheels')\n\n def _find_wheels(self, options, pattern):\n # type: (Values, str) -> List[str]\n wheel_dir = self._wheels_cache_dir(options)\n\n # The wheel filename format, as specified in PEP 427, is:\n # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl\n #\n # Additionally, non-alphanumeric values in the distribution are\n # normalized to underscores (_), meaning hyphens can never occur\n # before `-{version}`.\n #\n # Given that information:\n # - If the pattern we're given contains a hyphen (-), the user is\n # providing at least the version. Thus, we can just append `*.whl`\n # to match the rest of it.\n # - If the pattern we're given doesn't contain a hyphen (-), the\n # user is only providing the name. 
Thus, we append `-*.whl` to\n # match the hyphen before the version, followed by anything else.\n #\n # PEP 427: https://www.python.org/dev/peps/pep-0427/\n pattern = pattern + (\"*.whl\" if \"-\" in pattern else \"-*.whl\")\n\n return filesystem.find_files(wheel_dir, pattern)\n", "path": "src/pip/_internal/commands/cache.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport os\nimport textwrap\n\nimport pip._internal.utils.filesystem as filesystem\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import ERROR, SUCCESS\nfrom pip._internal.exceptions import CommandError, PipError\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from optparse import Values\n from typing import Any, List\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CacheCommand(Command):\n \"\"\"\n Inspect and manage pip's wheel cache.\n\n Subcommands:\n\n info: Show information about the cache.\n list: List filenames of packages stored in the cache.\n remove: Remove one or more package from the cache.\n purge: Remove all items from the cache.\n\n <pattern> can be a glob expression or a package name.\n \"\"\"\n\n usage = \"\"\"\n %prog info\n %prog list [<pattern>]\n %prog remove <pattern>\n %prog purge\n \"\"\"\n\n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n handlers = {\n \"info\": self.get_cache_info,\n \"list\": self.list_cache_items,\n \"remove\": self.remove_cache_items,\n \"purge\": self.purge_cache,\n }\n\n if not options.cache_dir:\n logger.error(\"pip cache commands can not \"\n \"function since cache is disabled.\")\n return ERROR\n\n # Determine action\n if not args or args[0] not in handlers:\n logger.error(\"Need an action ({}) to perform.\".format(\n \", \".join(sorted(handlers)))\n )\n return ERROR\n\n action = args[0]\n\n # Error handling happens here, not in the action-handlers.\n try:\n handlers[action](options, args[1:])\n except PipError as e:\n logger.error(e.args[0])\n return ERROR\n\n return SUCCESS\n\n def get_cache_info(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n num_packages = len(self._find_wheels(options, '*'))\n\n cache_location = self._wheels_cache_dir(options)\n cache_size = filesystem.format_directory_size(cache_location)\n\n message = textwrap.dedent(\"\"\"\n Location: {location}\n Size: {size}\n Number of wheels: {package_count}\n \"\"\").format(\n location=cache_location,\n package_count=num_packages,\n size=cache_size,\n ).strip()\n\n logger.info(message)\n\n def list_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if args:\n pattern = args[0]\n else:\n pattern = '*'\n\n files = self._find_wheels(options, pattern)\n\n if not files:\n logger.info('Nothing cached.')\n return\n\n results = []\n for filename in files:\n wheel = os.path.basename(filename)\n size = filesystem.format_file_size(filename)\n results.append(' - {} ({})'.format(wheel, size))\n logger.info('Cache contents:\\n')\n logger.info('\\n'.join(sorted(results)))\n\n def remove_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if not args:\n raise CommandError('Please provide a pattern')\n\n files = self._find_wheels(options, args[0])\n if not files:\n raise CommandError('No matching packages')\n\n for filename in files:\n 
os.unlink(filename)\n logger.debug('Removed %s', filename)\n logger.info('Files removed: %s', len(files))\n\n def purge_cache(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n return self.remove_cache_items(options, ['*'])\n\n def _wheels_cache_dir(self, options):\n # type: (Values) -> str\n return os.path.join(options.cache_dir, 'wheels')\n\n def _find_wheels(self, options, pattern):\n # type: (Values, str) -> List[str]\n wheel_dir = self._wheels_cache_dir(options)\n\n # The wheel filename format, as specified in PEP 427, is:\n # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl\n #\n # Additionally, non-alphanumeric values in the distribution are\n # normalized to underscores (_), meaning hyphens can never occur\n # before `-{version}`.\n #\n # Given that information:\n # - If the pattern we're given contains a hyphen (-), the user is\n # providing at least the version. Thus, we can just append `*.whl`\n # to match the rest of it.\n # - If the pattern we're given doesn't contain a hyphen (-), the\n # user is only providing the name. Thus, we append `-*.whl` to\n # match the hyphen before the version, followed by anything else.\n #\n # PEP 427: https://www.python.org/dev/peps/pep-0427/\n pattern = pattern + (\"*.whl\" if \"-\" in pattern else \"-*.whl\")\n\n return filesystem.find_files(wheel_dir, pattern)\n", "path": "src/pip/_internal/commands/cache.py"}]} | 2,257 | 140 |
gh_patches_debug_39865 | rasdani/github-patches | git_diff | streamlink__streamlink-2586 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New WWE Network Plugin stopped working
## Plugin Issue
- [x] This is a plugin issue and I have read the contribution guidelines.
### Description
Presumably due to updates to the website (I know they have been working on fixing some issues with the new site), the WWE Network plugin is no longer able to find streams.
### Reproduction steps / Explicit stream URLs to test
1. Install latest streamlink nightly
2. Try using any WWE Network link (e.g. https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316)
### Log output
```
streamlink https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316 best --loglevel debug
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.6.6
[cli][debug] Streamlink: 1.1.1+74.g0f011ae
[cli][debug] Requests(2.22.0), Socks(1.7.0), Websocket(0.56.0)
[cli][info] Found matching plugin wwenetwork for URL https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316
[cli][debug] Plugin specific arguments:
[cli][debug] --wwenetwork-email= (email)
[cli][debug] --wwenetwork-password= (password)
[plugin.wwenetwork][debug] Attempting login as
[plugin.wwenetwork][debug] API request: POST https://dce-frontoffice.imggaming.com/api/v2/login
[plugin.wwenetwork][debug] Searching for content ID
[plugin.wwenetwork][debug] Loading page config
error: No playable streams found on this URL: https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316
```
### Additional comments, screenshots, etc.
There had been audio sync problems with the network (although I believe those affected all sources and were not a streamlink issue), and their fix for that problem might have caused the plugin to break.
--- END ISSUE ---
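Editor's note: in the plugin source shown below, "Loading page config" is logged just before the `window.__data` blob is scraped from the page, so the log above is consistent with that regex no longer matching after the site update. A hypothetical probe (regex and URL are taken from this report; the site may also require cookies or JavaScript, so a miss here is only suggestive):

```python
import re

import requests

site_config_re = re.compile(r'''">window.__data = (\{.*?\})</script>''')
res = requests.get(
    "https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316",
    headers={"User-Agent": "Mozilla/5.0"},
)
print("page config found:", bool(site_config_re.search(res.text)))
```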
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/wwenetwork.py`
Content:
```
1 from __future__ import print_function
2
3 import json
4 import logging
5 import re
6
7 from streamlink import PluginError
8 from streamlink.plugin import Plugin, PluginArguments, PluginArgument
9 from streamlink.plugin.api import useragents
10 from streamlink.stream import HLSStream
11 from streamlink.utils import memoize
12 from streamlink.compat import urlparse, parse_qsl
13 from streamlink.utils.times import seconds_to_hhmmss
14
15 log = logging.getLogger(__name__)
16
17
18 class WWENetwork(Plugin):
19 url_re = re.compile(r"https?://watch.wwe.com/(channel)?")
20 site_config_re = re.compile(r'''">window.__data = (\{.*?\})</script>''')
21 stream_url = "https://dce-frontoffice.imggaming.com/api/v2/stream/{id}"
22 live_url = "https://dce-frontoffice.imggaming.com/api/v2/event/live"
23 login_url = "https://dce-frontoffice.imggaming.com/api/v2/login"
24 API_KEY = "cca51ea0-7837-40df-a055-75eb6347b2e7"
25
26 customer_id = 16
27 arguments = PluginArguments(
28 PluginArgument(
29 "email",
30 required=True,
31 metavar="EMAIL",
32 requires=["password"],
33 help="""
34 The email associated with your WWE Network account,
35 required to access any WWE Network stream.
36 """
37 ),
38 PluginArgument(
39 "password",
40 sensitive=True,
41 metavar="PASSWORD",
42 help="""
43 A WWE Network account password to use with --wwenetwork-email.
44 """
45 )
46 )
47
48 def __init__(self, url):
49 super(WWENetwork, self).__init__(url)
50 self.session.http.headers.update({"User-Agent": useragents.CHROME})
51 self.auth_token = None
52
53 @classmethod
54 def can_handle_url(cls, url):
55 return cls.url_re.match(url) is not None
56
57 def get_title(self):
58 if self.page_config:
59 for page in self.page_config["cache"]["page"].values():
60 return page['item']['title']
61
62 def request(self, method, url, **kwargs):
63 headers = kwargs.pop("headers", {})
64 headers.update({"x-api-key": self.API_KEY,
65 "Origin": "https://watch.wwe.com",
66 "Referer": "https://watch.wwe.com/signin",
67 "Accept": "application/json",
68 "Realm": "dce.wwe"})
69 if self.auth_token:
70 headers["Authorization"] = "Bearer {0}".format(self.auth_token)
71
72 kwargs["raise_for_status"] = False
73 log.debug("API request: {0} {1}".format(method, url))
74 res = self.session.http.request(method, url, headers=headers, **kwargs)
75 data = self.session.http.json(res)
76
77 if "status" in data and data["status"] != 200:
78 log.debug("API request failed: {0}:{1} ({2})".format(data["status"], data.get("code"), "; ".join(data.get("messages", []))))
79 return data
80
81 def login(self, email, password):
82 self.logger.debug("Attempting login as {0}", email)
83 # sets some required cookies to login
84 data = self.request('POST', self.login_url,
85 data=json.dumps({"id": email, "secret": password}),
86 headers={"Content-Type": "application/json"})
87 if "authorisationToken" in data:
88 self.auth_token = data["authorisationToken"]
89
90 return self.auth_token
91
92 @property
93 @memoize
94 def page_config(self):
95 log.debug("Loading page config")
96 res = self.session.http.get(self.url)
97 m = self.site_config_re.search(res.text)
98 return m and json.loads(m.group(1))
99
100 def _get_media_info(self, content_id):
101 """
102 Get the info about the content, based on the ID
103 :param content_id: contentId for the video
104 :return:
105 """
106 info = self.request('GET', self.stream_url.format(id=content_id))
107 return self.request('GET', info.get("playerUrlCallback"))
108
109 def _get_video_id(self):
110 # check the page to find the contentId
111 log.debug("Searching for content ID")
112 if self.page_config:
113 for page in self.page_config["cache"]["page"].values():
114 try:
115 if page['item']['type'] == "channel":
116 return self._get_live_id()
117 else:
118 return "vod/{id}".format(id=page['item']['customFields']['DiceVideoId'])
119 except KeyError:
120 log.error("Could not find video ID")
121 return
122
123 def _get_live_id(self):
124 log.debug("Loading live event")
125 res = self.request('GET', self.live_url)
126 for event in res.get('events', []):
127 return "event/{sportId}/{propertyId}/{tournamentId}/{id}".format(**event)
128
129 def _get_streams(self):
130 if not self.login(self.get_option("email"), self.get_option("password")):
131 raise PluginError("Login failed")
132
133 try:
134 start_point = int(float(dict(parse_qsl(urlparse(self.url).query)).get("startPoint", 0.0)))
135 if start_point > 0:
136 log.info("Stream will start at {0}".format(seconds_to_hhmmss(start_point)))
137 except ValueError:
138 start_point = 0
139
140 content_id = self._get_video_id()
141
142 if content_id:
143 self.logger.debug("Found content ID: {0}", content_id)
144 info = self._get_media_info(content_id)
145 if info.get("hlsUrl"):
146 for s in HLSStream.parse_variant_playlist(self.session, info["hlsUrl"], start_offset=start_point).items():
147 yield s
148 else:
149 log.error("Could not find the HLS URL")
150
151
152 __plugin__ = WWENetwork
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/wwenetwork.py b/src/streamlink/plugins/wwenetwork.py
--- a/src/streamlink/plugins/wwenetwork.py
+++ b/src/streamlink/plugins/wwenetwork.py
@@ -21,6 +21,7 @@
stream_url = "https://dce-frontoffice.imggaming.com/api/v2/stream/{id}"
live_url = "https://dce-frontoffice.imggaming.com/api/v2/event/live"
login_url = "https://dce-frontoffice.imggaming.com/api/v2/login"
+ page_config_url = "https://cdn.watch.wwe.com/api/page"
API_KEY = "cca51ea0-7837-40df-a055-75eb6347b2e7"
customer_id = 16
@@ -55,9 +56,7 @@
return cls.url_re.match(url) is not None
def get_title(self):
- if self.page_config:
- for page in self.page_config["cache"]["page"].values():
- return page['item']['title']
+ return self.item_config['title']
def request(self, method, url, **kwargs):
headers = kwargs.pop("headers", {})
@@ -91,11 +90,22 @@
@property
@memoize
- def page_config(self):
+ def item_config(self):
log.debug("Loading page config")
- res = self.session.http.get(self.url)
- m = self.site_config_re.search(res.text)
- return m and json.loads(m.group(1))
+ p = urlparse(self.url)
+ res = self.session.http.get(self.page_config_url,
+ params=dict(device="web_browser",
+ ff="idp,ldp",
+ item_detail_expand="all",
+ lang="en-US",
+ list_page_size="1",
+ max_list_prefetch="1",
+ path=p.path,
+ segments="es",
+ sub="Registered",
+ text_entry_format="html"))
+ data = self.session.http.json(res)
+ return data["item"]
def _get_media_info(self, content_id):
"""
@@ -109,16 +119,14 @@
def _get_video_id(self):
# check the page to find the contentId
log.debug("Searching for content ID")
- if self.page_config:
- for page in self.page_config["cache"]["page"].values():
- try:
- if page['item']['type'] == "channel":
- return self._get_live_id()
- else:
- return "vod/{id}".format(id=page['item']['customFields']['DiceVideoId'])
- except KeyError:
- log.error("Could not find video ID")
- return
+ try:
+ if self.item_config['type'] == "channel":
+ return self._get_live_id()
+ else:
+ return "vod/{id}".format(id=self.item_config['customFields']['DiceVideoId'])
+ except KeyError:
+ log.error("Could not find video ID")
+ return
def _get_live_id(self):
log.debug("Loading live event")
| {"golden_diff": "diff --git a/src/streamlink/plugins/wwenetwork.py b/src/streamlink/plugins/wwenetwork.py\n--- a/src/streamlink/plugins/wwenetwork.py\n+++ b/src/streamlink/plugins/wwenetwork.py\n@@ -21,6 +21,7 @@\n stream_url = \"https://dce-frontoffice.imggaming.com/api/v2/stream/{id}\"\n live_url = \"https://dce-frontoffice.imggaming.com/api/v2/event/live\"\n login_url = \"https://dce-frontoffice.imggaming.com/api/v2/login\"\n+ page_config_url = \"https://cdn.watch.wwe.com/api/page\"\n API_KEY = \"cca51ea0-7837-40df-a055-75eb6347b2e7\"\n \n customer_id = 16\n@@ -55,9 +56,7 @@\n return cls.url_re.match(url) is not None\n \n def get_title(self):\n- if self.page_config:\n- for page in self.page_config[\"cache\"][\"page\"].values():\n- return page['item']['title']\n+ return self.item_config['title']\n \n def request(self, method, url, **kwargs):\n headers = kwargs.pop(\"headers\", {})\n@@ -91,11 +90,22 @@\n \n @property\n @memoize\n- def page_config(self):\n+ def item_config(self):\n log.debug(\"Loading page config\")\n- res = self.session.http.get(self.url)\n- m = self.site_config_re.search(res.text)\n- return m and json.loads(m.group(1))\n+ p = urlparse(self.url)\n+ res = self.session.http.get(self.page_config_url,\n+ params=dict(device=\"web_browser\",\n+ ff=\"idp,ldp\",\n+ item_detail_expand=\"all\",\n+ lang=\"en-US\",\n+ list_page_size=\"1\",\n+ max_list_prefetch=\"1\",\n+ path=p.path,\n+ segments=\"es\",\n+ sub=\"Registered\",\n+ text_entry_format=\"html\"))\n+ data = self.session.http.json(res)\n+ return data[\"item\"]\n \n def _get_media_info(self, content_id):\n \"\"\"\n@@ -109,16 +119,14 @@\n def _get_video_id(self):\n # check the page to find the contentId\n log.debug(\"Searching for content ID\")\n- if self.page_config:\n- for page in self.page_config[\"cache\"][\"page\"].values():\n- try:\n- if page['item']['type'] == \"channel\":\n- return self._get_live_id()\n- else:\n- return \"vod/{id}\".format(id=page['item']['customFields']['DiceVideoId'])\n- except KeyError:\n- log.error(\"Could not find video ID\")\n- return\n+ try:\n+ if self.item_config['type'] == \"channel\":\n+ return self._get_live_id()\n+ else:\n+ return \"vod/{id}\".format(id=self.item_config['customFields']['DiceVideoId'])\n+ except KeyError:\n+ log.error(\"Could not find video ID\")\n+ return\n \n def _get_live_id(self):\n log.debug(\"Loading live event\")\n", "issue": "New WWE Network Plugin stopped working\n## Plugin Issue\r\n- [x ] This is a plugin issue and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\n\r\nPresumably due to updates to the website (I know they have been working on fixing some issues with the new site), the WWE Network plugin is no longer able to find streams.\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->\r\n\r\n1. Install latest streamlink nightly\r\n2. Try using any WWE Network link (i.e. 
https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316)\r\n\r\n\r\n### Log output\r\n\r\nstreamlink https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316 best --loglevel debug\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.6.6\r\n[cli][debug] Streamlink: 1.1.1+74.g0f011ae\r\n[cli][debug] Requests(2.22.0), Socks(1.7.0), Websocket(0.56.0)\r\n[cli][info] Found matching plugin wwenetwork for URL https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316\r\n[cli][debug] Plugin specific arguments:\r\n[cli][debug] --wwenetwork-email= (email)\r\n[cli][debug] --wwenetwork-password= (password)\r\n[plugin.wwenetwork][debug] Attempting login as \r\n[plugin.wwenetwork][debug] API request: POST https://dce-frontoffice.imggaming.com/api/v2/login\r\n[plugin.wwenetwork][debug] Searching for content ID\r\n[plugin.wwenetwork][debug] Loading page config\r\nerror: No playable streams found on this URL: https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316\r\n\r\n### Additional comments, screenshots, etc.\r\n\r\nThere had been audio sync problems with the network, although I believe this was across all sources and not a streamlink issue, and their solution for this issue might have caused the plugin to break.\n", "before_files": [{"content": "from __future__ import print_function\n\nimport json\nimport logging\nimport re\n\nfrom streamlink import PluginError\nfrom streamlink.plugin import Plugin, PluginArguments, PluginArgument\nfrom streamlink.plugin.api import useragents\nfrom streamlink.stream import HLSStream\nfrom streamlink.utils import memoize\nfrom streamlink.compat import urlparse, parse_qsl\nfrom streamlink.utils.times import seconds_to_hhmmss\n\nlog = logging.getLogger(__name__)\n\n\nclass WWENetwork(Plugin):\n url_re = re.compile(r\"https?://watch.wwe.com/(channel)?\")\n site_config_re = re.compile(r'''\">window.__data = (\\{.*?\\})</script>''')\n stream_url = \"https://dce-frontoffice.imggaming.com/api/v2/stream/{id}\"\n live_url = \"https://dce-frontoffice.imggaming.com/api/v2/event/live\"\n login_url = \"https://dce-frontoffice.imggaming.com/api/v2/login\"\n API_KEY = \"cca51ea0-7837-40df-a055-75eb6347b2e7\"\n\n customer_id = 16\n arguments = PluginArguments(\n PluginArgument(\n \"email\",\n required=True,\n metavar=\"EMAIL\",\n requires=[\"password\"],\n help=\"\"\"\n The email associated with your WWE Network account,\n required to access any WWE Network stream.\n \"\"\"\n ),\n PluginArgument(\n \"password\",\n sensitive=True,\n metavar=\"PASSWORD\",\n help=\"\"\"\n A WWE Network account password to use with --wwenetwork-email.\n \"\"\"\n )\n )\n\n def __init__(self, url):\n super(WWENetwork, self).__init__(url)\n self.session.http.headers.update({\"User-Agent\": useragents.CHROME})\n self.auth_token = None\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def get_title(self):\n if self.page_config:\n for page in self.page_config[\"cache\"][\"page\"].values():\n return page['item']['title']\n\n def request(self, method, url, **kwargs):\n headers = kwargs.pop(\"headers\", {})\n headers.update({\"x-api-key\": self.API_KEY,\n \"Origin\": \"https://watch.wwe.com\",\n \"Referer\": \"https://watch.wwe.com/signin\",\n \"Accept\": \"application/json\",\n \"Realm\": \"dce.wwe\"})\n if self.auth_token:\n headers[\"Authorization\"] = \"Bearer {0}\".format(self.auth_token)\n\n kwargs[\"raise_for_status\"] = False\n log.debug(\"API request: {0} {1}\".format(method, url))\n res = self.session.http.request(method, url, 
headers=headers, **kwargs)\n data = self.session.http.json(res)\n\n if \"status\" in data and data[\"status\"] != 200:\n log.debug(\"API request failed: {0}:{1} ({2})\".format(data[\"status\"], data.get(\"code\"), \"; \".join(data.get(\"messages\", []))))\n return data\n\n def login(self, email, password):\n self.logger.debug(\"Attempting login as {0}\", email)\n # sets some required cookies to login\n data = self.request('POST', self.login_url,\n data=json.dumps({\"id\": email, \"secret\": password}),\n headers={\"Content-Type\": \"application/json\"})\n if \"authorisationToken\" in data:\n self.auth_token = data[\"authorisationToken\"]\n\n return self.auth_token\n\n @property\n @memoize\n def page_config(self):\n log.debug(\"Loading page config\")\n res = self.session.http.get(self.url)\n m = self.site_config_re.search(res.text)\n return m and json.loads(m.group(1))\n\n def _get_media_info(self, content_id):\n \"\"\"\n Get the info about the content, based on the ID\n :param content_id: contentId for the video\n :return:\n \"\"\"\n info = self.request('GET', self.stream_url.format(id=content_id))\n return self.request('GET', info.get(\"playerUrlCallback\"))\n\n def _get_video_id(self):\n # check the page to find the contentId\n log.debug(\"Searching for content ID\")\n if self.page_config:\n for page in self.page_config[\"cache\"][\"page\"].values():\n try:\n if page['item']['type'] == \"channel\":\n return self._get_live_id()\n else:\n return \"vod/{id}\".format(id=page['item']['customFields']['DiceVideoId'])\n except KeyError:\n log.error(\"Could not find video ID\")\n return\n\n def _get_live_id(self):\n log.debug(\"Loading live event\")\n res = self.request('GET', self.live_url)\n for event in res.get('events', []):\n return \"event/{sportId}/{propertyId}/{tournamentId}/{id}\".format(**event)\n\n def _get_streams(self):\n if not self.login(self.get_option(\"email\"), self.get_option(\"password\")):\n raise PluginError(\"Login failed\")\n\n try:\n start_point = int(float(dict(parse_qsl(urlparse(self.url).query)).get(\"startPoint\", 0.0)))\n if start_point > 0:\n log.info(\"Stream will start at {0}\".format(seconds_to_hhmmss(start_point)))\n except ValueError:\n start_point = 0\n\n content_id = self._get_video_id()\n\n if content_id:\n self.logger.debug(\"Found content ID: {0}\", content_id)\n info = self._get_media_info(content_id)\n if info.get(\"hlsUrl\"):\n for s in HLSStream.parse_variant_playlist(self.session, info[\"hlsUrl\"], start_offset=start_point).items():\n yield s\n else:\n log.error(\"Could not find the HLS URL\")\n\n\n__plugin__ = WWENetwork\n", "path": "src/streamlink/plugins/wwenetwork.py"}], "after_files": [{"content": "from __future__ import print_function\n\nimport json\nimport logging\nimport re\n\nfrom streamlink import PluginError\nfrom streamlink.plugin import Plugin, PluginArguments, PluginArgument\nfrom streamlink.plugin.api import useragents\nfrom streamlink.stream import HLSStream\nfrom streamlink.utils import memoize\nfrom streamlink.compat import urlparse, parse_qsl\nfrom streamlink.utils.times import seconds_to_hhmmss\n\nlog = logging.getLogger(__name__)\n\n\nclass WWENetwork(Plugin):\n url_re = re.compile(r\"https?://watch.wwe.com/(channel)?\")\n site_config_re = re.compile(r'''\">window.__data = (\\{.*?\\})</script>''')\n stream_url = \"https://dce-frontoffice.imggaming.com/api/v2/stream/{id}\"\n live_url = \"https://dce-frontoffice.imggaming.com/api/v2/event/live\"\n login_url = \"https://dce-frontoffice.imggaming.com/api/v2/login\"\n page_config_url 
= \"https://cdn.watch.wwe.com/api/page\"\n API_KEY = \"cca51ea0-7837-40df-a055-75eb6347b2e7\"\n\n customer_id = 16\n arguments = PluginArguments(\n PluginArgument(\n \"email\",\n required=True,\n metavar=\"EMAIL\",\n requires=[\"password\"],\n help=\"\"\"\n The email associated with your WWE Network account,\n required to access any WWE Network stream.\n \"\"\"\n ),\n PluginArgument(\n \"password\",\n sensitive=True,\n metavar=\"PASSWORD\",\n help=\"\"\"\n A WWE Network account password to use with --wwenetwork-email.\n \"\"\"\n )\n )\n\n def __init__(self, url):\n super(WWENetwork, self).__init__(url)\n self.session.http.headers.update({\"User-Agent\": useragents.CHROME})\n self.auth_token = None\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def get_title(self):\n return self.item_config['title']\n\n def request(self, method, url, **kwargs):\n headers = kwargs.pop(\"headers\", {})\n headers.update({\"x-api-key\": self.API_KEY,\n \"Origin\": \"https://watch.wwe.com\",\n \"Referer\": \"https://watch.wwe.com/signin\",\n \"Accept\": \"application/json\",\n \"Realm\": \"dce.wwe\"})\n if self.auth_token:\n headers[\"Authorization\"] = \"Bearer {0}\".format(self.auth_token)\n\n kwargs[\"raise_for_status\"] = False\n log.debug(\"API request: {0} {1}\".format(method, url))\n res = self.session.http.request(method, url, headers=headers, **kwargs)\n data = self.session.http.json(res)\n\n if \"status\" in data and data[\"status\"] != 200:\n log.debug(\"API request failed: {0}:{1} ({2})\".format(data[\"status\"], data.get(\"code\"), \"; \".join(data.get(\"messages\", []))))\n return data\n\n def login(self, email, password):\n self.logger.debug(\"Attempting login as {0}\", email)\n # sets some required cookies to login\n data = self.request('POST', self.login_url,\n data=json.dumps({\"id\": email, \"secret\": password}),\n headers={\"Content-Type\": \"application/json\"})\n if \"authorisationToken\" in data:\n self.auth_token = data[\"authorisationToken\"]\n\n return self.auth_token\n\n @property\n @memoize\n def item_config(self):\n log.debug(\"Loading page config\")\n p = urlparse(self.url)\n res = self.session.http.get(self.page_config_url,\n params=dict(device=\"web_browser\",\n ff=\"idp,ldp\",\n item_detail_expand=\"all\",\n lang=\"en-US\",\n list_page_size=\"1\",\n max_list_prefetch=\"1\",\n path=p.path,\n segments=\"es\",\n sub=\"Registered\",\n text_entry_format=\"html\"))\n data = self.session.http.json(res)\n return data[\"item\"]\n\n def _get_media_info(self, content_id):\n \"\"\"\n Get the info about the content, based on the ID\n :param content_id: contentId for the video\n :return:\n \"\"\"\n info = self.request('GET', self.stream_url.format(id=content_id))\n return self.request('GET', info.get(\"playerUrlCallback\"))\n\n def _get_video_id(self):\n # check the page to find the contentId\n log.debug(\"Searching for content ID\")\n try:\n if self.item_config['type'] == \"channel\":\n return self._get_live_id()\n else:\n return \"vod/{id}\".format(id=self.item_config['customFields']['DiceVideoId'])\n except KeyError:\n log.error(\"Could not find video ID\")\n return\n\n def _get_live_id(self):\n log.debug(\"Loading live event\")\n res = self.request('GET', self.live_url)\n for event in res.get('events', []):\n return \"event/{sportId}/{propertyId}/{tournamentId}/{id}\".format(**event)\n\n def _get_streams(self):\n if not self.login(self.get_option(\"email\"), self.get_option(\"password\")):\n raise PluginError(\"Login failed\")\n\n try:\n 
start_point = int(float(dict(parse_qsl(urlparse(self.url).query)).get(\"startPoint\", 0.0)))\n if start_point > 0:\n log.info(\"Stream will start at {0}\".format(seconds_to_hhmmss(start_point)))\n except ValueError:\n start_point = 0\n\n content_id = self._get_video_id()\n\n if content_id:\n self.logger.debug(\"Found content ID: {0}\", content_id)\n info = self._get_media_info(content_id)\n if info.get(\"hlsUrl\"):\n for s in HLSStream.parse_variant_playlist(self.session, info[\"hlsUrl\"], start_offset=start_point).items():\n yield s\n else:\n log.error(\"Could not find the HLS URL\")\n\n\n__plugin__ = WWENetwork\n", "path": "src/streamlink/plugins/wwenetwork.py"}]} | 2,419 | 713 |
gh_patches_debug_15710 | rasdani/github-patches | git_diff | biolab__orange3-3454 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OWTSNE crashes when reopening workflow with selected data
##### Orange version
master
##### Expected behavior
I can open a workflow properly.
##### Actual behavior
```
Traceback (most recent call last):
File "/home/pavlin/dev/orange3/Orange/canvas/scheme/widgetsscheme.py", line 1083, in process_signals_for_widget
widget.handleNewSignals()
File "/home/pavlin/dev/orange3/Orange/widgets/visualize/utils/widget.py", line 453, in handleNewSignals
self.commit()
File "/home/pavlin/dev/orange3/Orange/widgets/gui.py", line 2042, in unconditional_commit
do_commit()
File "/home/pavlin/dev/orange3/Orange/widgets/gui.py", line 2050, in do_commit
commit()
File "/home/pavlin/dev/orange3/Orange/widgets/unsupervised/owtsne.py", line 253, in commit
super().commit()
File "/home/pavlin/dev/orange3/Orange/widgets/visualize/utils/widget.py", line 511, in commit
self.send_data()
File "/home/pavlin/dev/orange3/Orange/widgets/visualize/utils/widget.py", line 516, in send_data
group_sel = np.zeros(len(data), dtype=int)
TypeError: object of type 'NoneType' has no len()
```
##### Steps to reproduce the behavior
1. Connect File to t-SNE and select some data. Save the workflow
2. Reopen the workflow → t-SNE crashes
##### Additional info (worksheets, data, screenshots, ...)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Orange/widgets/unsupervised/owtsne.py`
Content:
```
1 import numpy as np
2
3 from AnyQt.QtCore import Qt, QTimer
4 from AnyQt.QtWidgets import QFormLayout
5
6 from Orange.data import Table, Domain
7 from Orange.preprocess.preprocess import Preprocess, ApplyDomain
8 from Orange.projection import PCA, TSNE
9 from Orange.widgets import gui
10 from Orange.widgets.settings import Setting, SettingProvider
11 from Orange.widgets.utils.widgetpreview import WidgetPreview
12 from Orange.widgets.visualize.owscatterplotgraph import OWScatterPlotBase
13 from Orange.widgets.visualize.utils.widget import OWDataProjectionWidget
14 from Orange.widgets.widget import Msg, Output
15
16
17 def compute_tsne(data, perplexity, iter, init):
18 negative_gradient_method = 'fft' if len(data.X) > 10000 else 'bh'
19 neighbor_method = 'approx' if len(data.X) > 10000 else 'exact'
20 tsne = TSNE(
21 perplexity=perplexity, n_iter=iter, initialization=init, theta=.8,
22 early_exaggeration_iter=0, negative_gradient_method=negative_gradient_method,
23 neighbors=neighbor_method, random_state=0
24 )
25 return tsne(data)
26
27
28 class OWtSNEGraph(OWScatterPlotBase):
29 def update_coordinates(self):
30 super().update_coordinates()
31 if self.scatterplot_item is not None:
32 self.view_box.setAspectLocked(True, 1)
33
34
35 class OWtSNE(OWDataProjectionWidget):
36 name = "t-SNE"
37 description = "Two-dimensional data projection with t-SNE."
38 icon = "icons/TSNE.svg"
39 priority = 920
40 keywords = ["tsne"]
41
42 settings_version = 3
43 max_iter = Setting(300)
44 perplexity = Setting(30)
45 pca_components = Setting(20)
46
47 GRAPH_CLASS = OWtSNEGraph
48 graph = SettingProvider(OWtSNEGraph)
49 embedding_variables_names = ("t-SNE-x", "t-SNE-y")
50
51 #: Runtime state
52 Running, Finished, Waiting = 1, 2, 3
53
54 class Outputs(OWDataProjectionWidget.Outputs):
55 preprocessor = Output("Preprocessor", Preprocess)
56
57 class Error(OWDataProjectionWidget.Error):
58 not_enough_rows = Msg("Input data needs at least 2 rows")
59 constant_data = Msg("Input data is constant")
60 no_attributes = Msg("Data has no attributes")
61 out_of_memory = Msg("Out of memory")
62 optimization_error = Msg("Error during optimization\n{}")
63 no_valid_data = Msg("No projection due to no valid data")
64
65 def __init__(self):
66 super().__init__()
67 self.pca_data = None
68 self.projection = None
69 self.__update_loop = None
70 # timer for scheduling updates
71 self.__timer = QTimer(self, singleShot=True, interval=1,
72 timeout=self.__next_step)
73 self.__state = OWtSNE.Waiting
74 self.__in_next_step = False
75 self.__draw_similar_pairs = False
76
77 def _add_controls(self):
78 self._add_controls_start_box()
79 super()._add_controls()
80 # Because sc data frequently has many genes,
81 # showing all attributes in combo boxes can cause problems
82 # QUICKFIX: Remove a separator and attributes from order
83 # (leaving just the class and metas)
84 self.models = self.gui.points_models
85 for model in self.models:
86 model.order = model.order[:-2]
87
88 def _add_controls_start_box(self):
89 box = gui.vBox(self.controlArea, True)
90 form = QFormLayout(
91 labelAlignment=Qt.AlignLeft,
92 formAlignment=Qt.AlignLeft,
93 fieldGrowthPolicy=QFormLayout.AllNonFixedFieldsGrow,
94 verticalSpacing=10
95 )
96
97 form.addRow(
98 "Max iterations:",
99 gui.spin(box, self, "max_iter", 1, 2000, step=50))
100
101 form.addRow(
102 "Perplexity:",
103 gui.spin(box, self, "perplexity", 1, 100, step=1))
104
105 box.layout().addLayout(form)
106
107 gui.separator(box, 10)
108 self.runbutton = gui.button(box, self, "Run", callback=self._toggle_run)
109
110 gui.separator(box, 10)
111 gui.hSlider(box, self, "pca_components", label="PCA components:",
112 minValue=2, maxValue=50, step=1)
113
114 def check_data(self):
115 def error(err):
116 err()
117 self.data = None
118
119 super().check_data()
120 if self.data is not None:
121 if len(self.data) < 2:
122 error(self.Error.not_enough_rows)
123 elif not self.data.domain.attributes:
124 error(self.Error.no_attributes)
125 elif not self.data.is_sparse() and \
126 np.allclose(self.data.X - self.data.X[0], 0):
127 error(self.Error.constant_data)
128 elif not self.data.is_sparse() and \
129 np.all(~np.isfinite(self.data.X)):
130 error(self.Error.no_valid_data)
131
132 def get_embedding(self):
133 if self.data is None:
134 self.valid_data = None
135 return None
136 elif self.projection is None:
137 embedding = np.random.normal(size=(len(self.data), 2))
138 else:
139 embedding = self.projection.embedding.X
140 self.valid_data = np.ones(len(embedding), dtype=bool)
141 return embedding
142
143 def _toggle_run(self):
144 if self.__state == OWtSNE.Running:
145 self.stop()
146 self.commit()
147 else:
148 self.start()
149
150 def start(self):
151 if not self.data or self.__state == OWtSNE.Running:
152 self.graph.update_coordinates()
153 elif self.__state in (OWtSNE.Finished, OWtSNE.Waiting):
154 self.__start()
155
156 def stop(self):
157 if self.__state == OWtSNE.Running:
158 self.__set_update_loop(None)
159
160 def pca_preprocessing(self):
161 if self.pca_data is not None and \
162 self.pca_data.X.shape[1] == self.pca_components:
163 return
164 pca = PCA(n_components=self.pca_components, random_state=0)
165 model = pca(self.data)
166 self.pca_data = model(self.data)
167
168 def __start(self):
169 self.pca_preprocessing()
170 initial = 'random' if self.projection is None \
171 else self.projection.embedding.X
172 step_size = 50
173
174 def update_loop(data, max_iter, step, embedding):
175 # NOTE: this code MUST NOT call into QApplication.processEvents
176 done = False
177 iterations_done = 0
178
179 while not done:
180 step_iter = min(max_iter - iterations_done, step)
181 projection = compute_tsne(
182 data, self.perplexity, step_iter, embedding)
183 embedding = projection.embedding.X
184 iterations_done += step_iter
185 if iterations_done >= max_iter:
186 done = True
187
188 yield projection, iterations_done / max_iter
189
190 self.__set_update_loop(update_loop(
191 self.pca_data, self.max_iter, step_size, initial))
192 self.progressBarInit(processEvents=None)
193
194 def __set_update_loop(self, loop):
195 if self.__update_loop is not None:
196 self.__update_loop.close()
197 self.__update_loop = None
198 self.progressBarFinished(processEvents=None)
199
200 self.__update_loop = loop
201
202 if loop is not None:
203 self.setBlocking(True)
204 self.progressBarInit(processEvents=None)
205 self.setStatusMessage("Running")
206 self.runbutton.setText("Stop")
207 self.__state = OWtSNE.Running
208 self.__timer.start()
209 else:
210 self.setBlocking(False)
211 self.setStatusMessage("")
212 self.runbutton.setText("Start")
213 self.__state = OWtSNE.Finished
214 self.__timer.stop()
215
216 def __next_step(self):
217 if self.__update_loop is None:
218 return
219
220 assert not self.__in_next_step
221 self.__in_next_step = True
222
223 loop = self.__update_loop
224 self.Error.out_of_memory.clear()
225 self.Error.optimization_error.clear()
226 try:
227 projection, progress = next(self.__update_loop)
228 assert self.__update_loop is loop
229 except StopIteration:
230 self.__set_update_loop(None)
231 self.unconditional_commit()
232 except MemoryError:
233 self.Error.out_of_memory()
234 self.__set_update_loop(None)
235 except Exception as exc:
236 self.Error.optimization_error(str(exc))
237 self.__set_update_loop(None)
238 else:
239 self.progressBarSet(100.0 * progress, processEvents=None)
240 self.projection = projection
241 self.graph.update_coordinates()
242 self.graph.update_density()
243 # schedule next update
244 self.__timer.start()
245
246 self.__in_next_step = False
247
248 def setup_plot(self):
249 super().setup_plot()
250 self.start()
251
252 def commit(self):
253 super().commit()
254 self.send_preprocessor()
255
256 def _get_projection_data(self):
257 if self.data is None or self.projection is None:
258 return None
259 data = self.data.transform(
260 Domain(self.data.domain.attributes,
261 self.data.domain.class_vars,
262 self.data.domain.metas + self.projection.domain.attributes))
263 data.metas[:, -2:] = self.get_embedding()
264 return data
265
266 def send_preprocessor(self):
267 prep = None
268 if self.data is not None and self.projection is not None:
269 prep = ApplyDomain(self.projection.domain, self.projection.name)
270 self.Outputs.preprocessor.send(prep)
271
272 def clear(self):
273 super().clear()
274 self.__set_update_loop(None)
275 self.__state = OWtSNE.Waiting
276 self.pca_data = None
277 self.projection = None
278
279 @classmethod
280 def migrate_settings(cls, settings, version):
281 if version < 3:
282 if "selection_indices" in settings:
283 settings["selection"] = settings["selection_indices"]
284
285 @classmethod
286 def migrate_context(cls, context, version):
287 if version < 3:
288 values = context.values
289 values["attr_color"] = values["graph"]["attr_color"]
290 values["attr_size"] = values["graph"]["attr_size"]
291 values["attr_shape"] = values["graph"]["attr_shape"]
292 values["attr_label"] = values["graph"]["attr_label"]
293
294
295 if __name__ == "__main__":
296 data = Table("iris")
297 WidgetPreview(OWtSNE).run(
298 set_data=data,
299 set_subset_data=data[np.random.choice(len(data), 10)])
300
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Orange/widgets/unsupervised/owtsne.py b/Orange/widgets/unsupervised/owtsne.py
--- a/Orange/widgets/unsupervised/owtsne.py
+++ b/Orange/widgets/unsupervised/owtsne.py
@@ -254,12 +254,16 @@
self.send_preprocessor()
def _get_projection_data(self):
- if self.data is None or self.projection is None:
+ if self.data is None:
return None
+ if self.projection is None:
+ variables = self._get_projection_variables()
+ else:
+ variables = self.projection.domain.attributes
data = self.data.transform(
Domain(self.data.domain.attributes,
self.data.domain.class_vars,
- self.data.domain.metas + self.projection.domain.attributes))
+ self.data.domain.metas + variables))
data.metas[:, -2:] = self.get_embedding()
return data
| {"golden_diff": "diff --git a/Orange/widgets/unsupervised/owtsne.py b/Orange/widgets/unsupervised/owtsne.py\n--- a/Orange/widgets/unsupervised/owtsne.py\n+++ b/Orange/widgets/unsupervised/owtsne.py\n@@ -254,12 +254,16 @@\n self.send_preprocessor()\n \n def _get_projection_data(self):\n- if self.data is None or self.projection is None:\n+ if self.data is None:\n return None\n+ if self.projection is None:\n+ variables = self._get_projection_variables()\n+ else:\n+ variables = self.projection.domain.attributes\n data = self.data.transform(\n Domain(self.data.domain.attributes,\n self.data.domain.class_vars,\n- self.data.domain.metas + self.projection.domain.attributes))\n+ self.data.domain.metas + variables))\n data.metas[:, -2:] = self.get_embedding()\n return data\n", "issue": "OWTSNE crashes when reopening workflow with selected data\n##### Orange version\r\n<!-- From menu _Help\u2192About\u2192Version_ or code `Orange.version.full_version` -->\r\nmaster\r\n\r\n##### Expected behavior\r\nI can open a workflow properly.\r\n\r\n\r\n##### Actual behavior\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/pavlin/dev/orange3/Orange/canvas/scheme/widgetsscheme.py\", line 1083, in process_signals_for_widget\r\n widget.handleNewSignals()\r\n File \"/home/pavlin/dev/orange3/Orange/widgets/visualize/utils/widget.py\", line 453, in handleNewSignals\r\n self.commit()\r\n File \"/home/pavlin/dev/orange3/Orange/widgets/gui.py\", line 2042, in unconditional_commit\r\n do_commit()\r\n File \"/home/pavlin/dev/orange3/Orange/widgets/gui.py\", line 2050, in do_commit\r\n commit()\r\n File \"/home/pavlin/dev/orange3/Orange/widgets/unsupervised/owtsne.py\", line 253, in commit\r\n super().commit()\r\n File \"/home/pavlin/dev/orange3/Orange/widgets/visualize/utils/widget.py\", line 511, in commit\r\n self.send_data()\r\n File \"/home/pavlin/dev/orange3/Orange/widgets/visualize/utils/widget.py\", line 516, in send_data\r\n group_sel = np.zeros(len(data), dtype=int)\r\nTypeError: object of type 'NoneType' has no len()\r\n\r\n```\r\n\r\n\r\n##### Steps to reproduce the behavior\r\n1. Connect File to t-SNE and select some data. Save the workflow\r\n2. 
Reopen the workflow \u2192 t-sNE crash\r\n\r\n\r\n##### Additional info (worksheets, data, screenshots, ...)\r\n\r\n\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom AnyQt.QtCore import Qt, QTimer\nfrom AnyQt.QtWidgets import QFormLayout\n\nfrom Orange.data import Table, Domain\nfrom Orange.preprocess.preprocess import Preprocess, ApplyDomain\nfrom Orange.projection import PCA, TSNE\nfrom Orange.widgets import gui\nfrom Orange.widgets.settings import Setting, SettingProvider\nfrom Orange.widgets.utils.widgetpreview import WidgetPreview\nfrom Orange.widgets.visualize.owscatterplotgraph import OWScatterPlotBase\nfrom Orange.widgets.visualize.utils.widget import OWDataProjectionWidget\nfrom Orange.widgets.widget import Msg, Output\n\n\ndef compute_tsne(data, perplexity, iter, init):\n negative_gradient_method = 'fft' if len(data.X) > 10000 else 'bh'\n neighbor_method = 'approx' if len(data.X) > 10000 else 'exact'\n tsne = TSNE(\n perplexity=perplexity, n_iter=iter, initialization=init, theta=.8,\n early_exaggeration_iter=0, negative_gradient_method=negative_gradient_method,\n neighbors=neighbor_method, random_state=0\n )\n return tsne(data)\n\n\nclass OWtSNEGraph(OWScatterPlotBase):\n def update_coordinates(self):\n super().update_coordinates()\n if self.scatterplot_item is not None:\n self.view_box.setAspectLocked(True, 1)\n\n\nclass OWtSNE(OWDataProjectionWidget):\n name = \"t-SNE\"\n description = \"Two-dimensional data projection with t-SNE.\"\n icon = \"icons/TSNE.svg\"\n priority = 920\n keywords = [\"tsne\"]\n\n settings_version = 3\n max_iter = Setting(300)\n perplexity = Setting(30)\n pca_components = Setting(20)\n\n GRAPH_CLASS = OWtSNEGraph\n graph = SettingProvider(OWtSNEGraph)\n embedding_variables_names = (\"t-SNE-x\", \"t-SNE-y\")\n\n #: Runtime state\n Running, Finished, Waiting = 1, 2, 3\n\n class Outputs(OWDataProjectionWidget.Outputs):\n preprocessor = Output(\"Preprocessor\", Preprocess)\n\n class Error(OWDataProjectionWidget.Error):\n not_enough_rows = Msg(\"Input data needs at least 2 rows\")\n constant_data = Msg(\"Input data is constant\")\n no_attributes = Msg(\"Data has no attributes\")\n out_of_memory = Msg(\"Out of memory\")\n optimization_error = Msg(\"Error during optimization\\n{}\")\n no_valid_data = Msg(\"No projection due to no valid data\")\n\n def __init__(self):\n super().__init__()\n self.pca_data = None\n self.projection = None\n self.__update_loop = None\n # timer for scheduling updates\n self.__timer = QTimer(self, singleShot=True, interval=1,\n timeout=self.__next_step)\n self.__state = OWtSNE.Waiting\n self.__in_next_step = False\n self.__draw_similar_pairs = False\n\n def _add_controls(self):\n self._add_controls_start_box()\n super()._add_controls()\n # Because sc data frequently has many genes,\n # showing all attributes in combo boxes can cause problems\n # QUICKFIX: Remove a separator and attributes from order\n # (leaving just the class and metas)\n self.models = self.gui.points_models\n for model in self.models:\n model.order = model.order[:-2]\n\n def _add_controls_start_box(self):\n box = gui.vBox(self.controlArea, True)\n form = QFormLayout(\n labelAlignment=Qt.AlignLeft,\n formAlignment=Qt.AlignLeft,\n fieldGrowthPolicy=QFormLayout.AllNonFixedFieldsGrow,\n verticalSpacing=10\n )\n\n form.addRow(\n \"Max iterations:\",\n gui.spin(box, self, \"max_iter\", 1, 2000, step=50))\n\n form.addRow(\n \"Perplexity:\",\n gui.spin(box, self, \"perplexity\", 1, 100, step=1))\n\n box.layout().addLayout(form)\n\n gui.separator(box, 10)\n 
self.runbutton = gui.button(box, self, \"Run\", callback=self._toggle_run)\n\n gui.separator(box, 10)\n gui.hSlider(box, self, \"pca_components\", label=\"PCA components:\",\n minValue=2, maxValue=50, step=1)\n\n def check_data(self):\n def error(err):\n err()\n self.data = None\n\n super().check_data()\n if self.data is not None:\n if len(self.data) < 2:\n error(self.Error.not_enough_rows)\n elif not self.data.domain.attributes:\n error(self.Error.no_attributes)\n elif not self.data.is_sparse() and \\\n np.allclose(self.data.X - self.data.X[0], 0):\n error(self.Error.constant_data)\n elif not self.data.is_sparse() and \\\n np.all(~np.isfinite(self.data.X)):\n error(self.Error.no_valid_data)\n\n def get_embedding(self):\n if self.data is None:\n self.valid_data = None\n return None\n elif self.projection is None:\n embedding = np.random.normal(size=(len(self.data), 2))\n else:\n embedding = self.projection.embedding.X\n self.valid_data = np.ones(len(embedding), dtype=bool)\n return embedding\n\n def _toggle_run(self):\n if self.__state == OWtSNE.Running:\n self.stop()\n self.commit()\n else:\n self.start()\n\n def start(self):\n if not self.data or self.__state == OWtSNE.Running:\n self.graph.update_coordinates()\n elif self.__state in (OWtSNE.Finished, OWtSNE.Waiting):\n self.__start()\n\n def stop(self):\n if self.__state == OWtSNE.Running:\n self.__set_update_loop(None)\n\n def pca_preprocessing(self):\n if self.pca_data is not None and \\\n self.pca_data.X.shape[1] == self.pca_components:\n return\n pca = PCA(n_components=self.pca_components, random_state=0)\n model = pca(self.data)\n self.pca_data = model(self.data)\n\n def __start(self):\n self.pca_preprocessing()\n initial = 'random' if self.projection is None \\\n else self.projection.embedding.X\n step_size = 50\n\n def update_loop(data, max_iter, step, embedding):\n # NOTE: this code MUST NOT call into QApplication.processEvents\n done = False\n iterations_done = 0\n\n while not done:\n step_iter = min(max_iter - iterations_done, step)\n projection = compute_tsne(\n data, self.perplexity, step_iter, embedding)\n embedding = projection.embedding.X\n iterations_done += step_iter\n if iterations_done >= max_iter:\n done = True\n\n yield projection, iterations_done / max_iter\n\n self.__set_update_loop(update_loop(\n self.pca_data, self.max_iter, step_size, initial))\n self.progressBarInit(processEvents=None)\n\n def __set_update_loop(self, loop):\n if self.__update_loop is not None:\n self.__update_loop.close()\n self.__update_loop = None\n self.progressBarFinished(processEvents=None)\n\n self.__update_loop = loop\n\n if loop is not None:\n self.setBlocking(True)\n self.progressBarInit(processEvents=None)\n self.setStatusMessage(\"Running\")\n self.runbutton.setText(\"Stop\")\n self.__state = OWtSNE.Running\n self.__timer.start()\n else:\n self.setBlocking(False)\n self.setStatusMessage(\"\")\n self.runbutton.setText(\"Start\")\n self.__state = OWtSNE.Finished\n self.__timer.stop()\n\n def __next_step(self):\n if self.__update_loop is None:\n return\n\n assert not self.__in_next_step\n self.__in_next_step = True\n\n loop = self.__update_loop\n self.Error.out_of_memory.clear()\n self.Error.optimization_error.clear()\n try:\n projection, progress = next(self.__update_loop)\n assert self.__update_loop is loop\n except StopIteration:\n self.__set_update_loop(None)\n self.unconditional_commit()\n except MemoryError:\n self.Error.out_of_memory()\n self.__set_update_loop(None)\n except Exception as exc:\n 
self.Error.optimization_error(str(exc))\n self.__set_update_loop(None)\n else:\n self.progressBarSet(100.0 * progress, processEvents=None)\n self.projection = projection\n self.graph.update_coordinates()\n self.graph.update_density()\n # schedule next update\n self.__timer.start()\n\n self.__in_next_step = False\n\n def setup_plot(self):\n super().setup_plot()\n self.start()\n\n def commit(self):\n super().commit()\n self.send_preprocessor()\n\n def _get_projection_data(self):\n if self.data is None or self.projection is None:\n return None\n data = self.data.transform(\n Domain(self.data.domain.attributes,\n self.data.domain.class_vars,\n self.data.domain.metas + self.projection.domain.attributes))\n data.metas[:, -2:] = self.get_embedding()\n return data\n\n def send_preprocessor(self):\n prep = None\n if self.data is not None and self.projection is not None:\n prep = ApplyDomain(self.projection.domain, self.projection.name)\n self.Outputs.preprocessor.send(prep)\n\n def clear(self):\n super().clear()\n self.__set_update_loop(None)\n self.__state = OWtSNE.Waiting\n self.pca_data = None\n self.projection = None\n\n @classmethod\n def migrate_settings(cls, settings, version):\n if version < 3:\n if \"selection_indices\" in settings:\n settings[\"selection\"] = settings[\"selection_indices\"]\n\n @classmethod\n def migrate_context(cls, context, version):\n if version < 3:\n values = context.values\n values[\"attr_color\"] = values[\"graph\"][\"attr_color\"]\n values[\"attr_size\"] = values[\"graph\"][\"attr_size\"]\n values[\"attr_shape\"] = values[\"graph\"][\"attr_shape\"]\n values[\"attr_label\"] = values[\"graph\"][\"attr_label\"]\n\n\nif __name__ == \"__main__\":\n data = Table(\"iris\")\n WidgetPreview(OWtSNE).run(\n set_data=data,\n set_subset_data=data[np.random.choice(len(data), 10)])\n", "path": "Orange/widgets/unsupervised/owtsne.py"}], "after_files": [{"content": "import numpy as np\n\nfrom AnyQt.QtCore import Qt, QTimer\nfrom AnyQt.QtWidgets import QFormLayout\n\nfrom Orange.data import Table, Domain\nfrom Orange.preprocess.preprocess import Preprocess, ApplyDomain\nfrom Orange.projection import PCA, TSNE\nfrom Orange.widgets import gui\nfrom Orange.widgets.settings import Setting, SettingProvider\nfrom Orange.widgets.utils.widgetpreview import WidgetPreview\nfrom Orange.widgets.visualize.owscatterplotgraph import OWScatterPlotBase\nfrom Orange.widgets.visualize.utils.widget import OWDataProjectionWidget\nfrom Orange.widgets.widget import Msg, Output\n\n\ndef compute_tsne(data, perplexity, iter, init):\n negative_gradient_method = 'fft' if len(data.X) > 10000 else 'bh'\n neighbor_method = 'approx' if len(data.X) > 10000 else 'exact'\n tsne = TSNE(\n perplexity=perplexity, n_iter=iter, initialization=init, theta=.8,\n early_exaggeration_iter=0, negative_gradient_method=negative_gradient_method,\n neighbors=neighbor_method, random_state=0\n )\n return tsne(data)\n\n\nclass OWtSNEGraph(OWScatterPlotBase):\n def update_coordinates(self):\n super().update_coordinates()\n if self.scatterplot_item is not None:\n self.view_box.setAspectLocked(True, 1)\n\n\nclass OWtSNE(OWDataProjectionWidget):\n name = \"t-SNE\"\n description = \"Two-dimensional data projection with t-SNE.\"\n icon = \"icons/TSNE.svg\"\n priority = 920\n keywords = [\"tsne\"]\n\n settings_version = 3\n max_iter = Setting(300)\n perplexity = Setting(30)\n pca_components = Setting(20)\n\n GRAPH_CLASS = OWtSNEGraph\n graph = SettingProvider(OWtSNEGraph)\n embedding_variables_names = (\"t-SNE-x\", \"t-SNE-y\")\n\n #: 
Runtime state\n Running, Finished, Waiting = 1, 2, 3\n\n class Outputs(OWDataProjectionWidget.Outputs):\n preprocessor = Output(\"Preprocessor\", Preprocess)\n\n class Error(OWDataProjectionWidget.Error):\n not_enough_rows = Msg(\"Input data needs at least 2 rows\")\n constant_data = Msg(\"Input data is constant\")\n no_attributes = Msg(\"Data has no attributes\")\n out_of_memory = Msg(\"Out of memory\")\n optimization_error = Msg(\"Error during optimization\\n{}\")\n no_valid_data = Msg(\"No projection due to no valid data\")\n\n def __init__(self):\n super().__init__()\n self.pca_data = None\n self.projection = None\n self.__update_loop = None\n # timer for scheduling updates\n self.__timer = QTimer(self, singleShot=True, interval=1,\n timeout=self.__next_step)\n self.__state = OWtSNE.Waiting\n self.__in_next_step = False\n self.__draw_similar_pairs = False\n\n def _add_controls(self):\n self._add_controls_start_box()\n super()._add_controls()\n # Because sc data frequently has many genes,\n # showing all attributes in combo boxes can cause problems\n # QUICKFIX: Remove a separator and attributes from order\n # (leaving just the class and metas)\n self.models = self.gui.points_models\n for model in self.models:\n model.order = model.order[:-2]\n\n def _add_controls_start_box(self):\n box = gui.vBox(self.controlArea, True)\n form = QFormLayout(\n labelAlignment=Qt.AlignLeft,\n formAlignment=Qt.AlignLeft,\n fieldGrowthPolicy=QFormLayout.AllNonFixedFieldsGrow,\n verticalSpacing=10\n )\n\n form.addRow(\n \"Max iterations:\",\n gui.spin(box, self, \"max_iter\", 1, 2000, step=50))\n\n form.addRow(\n \"Perplexity:\",\n gui.spin(box, self, \"perplexity\", 1, 100, step=1))\n\n box.layout().addLayout(form)\n\n gui.separator(box, 10)\n self.runbutton = gui.button(box, self, \"Run\", callback=self._toggle_run)\n\n gui.separator(box, 10)\n gui.hSlider(box, self, \"pca_components\", label=\"PCA components:\",\n minValue=2, maxValue=50, step=1)\n\n def check_data(self):\n def error(err):\n err()\n self.data = None\n\n super().check_data()\n if self.data is not None:\n if len(self.data) < 2:\n error(self.Error.not_enough_rows)\n elif not self.data.domain.attributes:\n error(self.Error.no_attributes)\n elif not self.data.is_sparse() and \\\n np.allclose(self.data.X - self.data.X[0], 0):\n error(self.Error.constant_data)\n elif not self.data.is_sparse() and \\\n np.all(~np.isfinite(self.data.X)):\n error(self.Error.no_valid_data)\n\n def get_embedding(self):\n if self.data is None:\n self.valid_data = None\n return None\n elif self.projection is None:\n embedding = np.random.normal(size=(len(self.data), 2))\n else:\n embedding = self.projection.embedding.X\n self.valid_data = np.ones(len(embedding), dtype=bool)\n return embedding\n\n def _toggle_run(self):\n if self.__state == OWtSNE.Running:\n self.stop()\n self.commit()\n else:\n self.start()\n\n def start(self):\n if not self.data or self.__state == OWtSNE.Running:\n self.graph.update_coordinates()\n elif self.__state in (OWtSNE.Finished, OWtSNE.Waiting):\n self.__start()\n\n def stop(self):\n if self.__state == OWtSNE.Running:\n self.__set_update_loop(None)\n\n def pca_preprocessing(self):\n if self.pca_data is not None and \\\n self.pca_data.X.shape[1] == self.pca_components:\n return\n pca = PCA(n_components=self.pca_components, random_state=0)\n model = pca(self.data)\n self.pca_data = model(self.data)\n\n def __start(self):\n self.pca_preprocessing()\n initial = 'random' if self.projection is None \\\n else self.projection.embedding.X\n step_size 
= 50\n\n def update_loop(data, max_iter, step, embedding):\n # NOTE: this code MUST NOT call into QApplication.processEvents\n done = False\n iterations_done = 0\n\n while not done:\n step_iter = min(max_iter - iterations_done, step)\n projection = compute_tsne(\n data, self.perplexity, step_iter, embedding)\n embedding = projection.embedding.X\n iterations_done += step_iter\n if iterations_done >= max_iter:\n done = True\n\n yield projection, iterations_done / max_iter\n\n self.__set_update_loop(update_loop(\n self.pca_data, self.max_iter, step_size, initial))\n self.progressBarInit(processEvents=None)\n\n def __set_update_loop(self, loop):\n if self.__update_loop is not None:\n self.__update_loop.close()\n self.__update_loop = None\n self.progressBarFinished(processEvents=None)\n\n self.__update_loop = loop\n\n if loop is not None:\n self.setBlocking(True)\n self.progressBarInit(processEvents=None)\n self.setStatusMessage(\"Running\")\n self.runbutton.setText(\"Stop\")\n self.__state = OWtSNE.Running\n self.__timer.start()\n else:\n self.setBlocking(False)\n self.setStatusMessage(\"\")\n self.runbutton.setText(\"Start\")\n self.__state = OWtSNE.Finished\n self.__timer.stop()\n\n def __next_step(self):\n if self.__update_loop is None:\n return\n\n assert not self.__in_next_step\n self.__in_next_step = True\n\n loop = self.__update_loop\n self.Error.out_of_memory.clear()\n self.Error.optimization_error.clear()\n try:\n projection, progress = next(self.__update_loop)\n assert self.__update_loop is loop\n except StopIteration:\n self.__set_update_loop(None)\n self.unconditional_commit()\n except MemoryError:\n self.Error.out_of_memory()\n self.__set_update_loop(None)\n except Exception as exc:\n self.Error.optimization_error(str(exc))\n self.__set_update_loop(None)\n else:\n self.progressBarSet(100.0 * progress, processEvents=None)\n self.projection = projection\n self.graph.update_coordinates()\n self.graph.update_density()\n # schedule next update\n self.__timer.start()\n\n self.__in_next_step = False\n\n def setup_plot(self):\n super().setup_plot()\n self.start()\n\n def commit(self):\n super().commit()\n self.send_preprocessor()\n\n def _get_projection_data(self):\n if self.data is None:\n return None\n if self.projection is None:\n variables = self._get_projection_variables()\n else:\n variables = self.projection.domain.attributes\n data = self.data.transform(\n Domain(self.data.domain.attributes,\n self.data.domain.class_vars,\n self.data.domain.metas + variables))\n data.metas[:, -2:] = self.get_embedding()\n return data\n\n def send_preprocessor(self):\n prep = None\n if self.data is not None and self.projection is not None:\n prep = ApplyDomain(self.projection.domain, self.projection.name)\n self.Outputs.preprocessor.send(prep)\n\n def clear(self):\n super().clear()\n self.__set_update_loop(None)\n self.__state = OWtSNE.Waiting\n self.pca_data = None\n self.projection = None\n\n @classmethod\n def migrate_settings(cls, settings, version):\n if version < 3:\n if \"selection_indices\" in settings:\n settings[\"selection\"] = settings[\"selection_indices\"]\n\n @classmethod\n def migrate_context(cls, context, version):\n if version < 3:\n values = context.values\n values[\"attr_color\"] = values[\"graph\"][\"attr_color\"]\n values[\"attr_size\"] = values[\"graph\"][\"attr_size\"]\n values[\"attr_shape\"] = values[\"graph\"][\"attr_shape\"]\n values[\"attr_label\"] = values[\"graph\"][\"attr_label\"]\n\n\nif __name__ == \"__main__\":\n data = Table(\"iris\")\n 
WidgetPreview(OWtSNE).run(\n set_data=data,\n set_subset_data=data[np.random.choice(len(data), 10)])\n", "path": "Orange/widgets/unsupervised/owtsne.py"}]} | 3,754 | 212 |
gh_patches_debug_16097 | rasdani/github-patches | git_diff | quantumlib__Cirq-3016 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Changing qubits in PointOptimizer causes ValueError
The cirq.PointOptimizer class assumes that an optimizer won't change qubits (or at least won't change qubits in a way that pushes the new gate backwards). If two gates are modified in the same moment to use the same qubits, a ValueError is raised.
A check similar to https://github.com/quantumlib/Cirq/blob/master/cirq/circuits/optimization_pass.py#L128
needs to be added at or around Line 148 to detect this condition and adjust the 'i' index or frontier accordingly.
A minimal example that causes the issue is below.
```python
import cirq


class EverythingIs42(cirq.PointOptimizer):
    def optimization_at(self, circuit, index, op):
        if len(op.qubits) == 1:
            # Rewrite every single-qubit gate so it acts on LineQubit(42) instead.
            new_op = op.gate(cirq.LineQubit(42))
            return cirq.PointOptimizationSummary(clear_span=1,
                                                 clear_qubits=op.qubits,
                                                 new_operations=new_op)


c = cirq.Circuit(cirq.X(cirq.LineQubit(0)), cirq.X(cirq.LineQubit(1)))
EverythingIs42().optimize_circuit(c)  # raises ValueError
```
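
For illustration, the check being asked for might look roughly like the sketch below. This is a hypothetical helper only, not a tested or official fix: the function name, its signature, and the idea of calling it from `optimize_circuit` before the clear/insert step around line 148 are assumptions. It simply mirrors the existing `frontier[q] > i` test from line 128, but applied to the qubits of the replacement operations, and it assumes `frontier` is the defaultdict that `optimize_circuit` already maintains.

```python
from typing import Dict

import cirq


def collides_with_prior_insertions(frontier: Dict[cirq.Qid, int],
                                   index: int,
                                   opt: cirq.PointOptimizationSummary) -> bool:
    """Return True when the replacement operations in `opt` would land on a
    qubit that an earlier optimization in this pass has already pushed past
    `index` (same shape as the frontier guard used for the original op's
    qubits, but evaluated on the new operations' qubits)."""
    new_qubits = {q for new_op in opt.new_operations for q in new_op.qubits}
    return any(frontier[q] > index for q in new_qubits)
```

If this returns True, `optimize_circuit` could skip or defer the operation, or adjust `i`/the frontier, instead of attempting the insertion that currently ends in the ValueError; which of those is the right resolution is left open here.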
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/circuits/optimization_pass.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Defines the OptimizationPass type."""
16 from typing import (Dict, Callable, Iterable, Optional, Sequence, TYPE_CHECKING,
17 Tuple, cast)
18
19 import abc
20 from collections import defaultdict
21
22 from cirq import ops
23 from cirq.circuits.circuit import Circuit
24
25 if TYPE_CHECKING:
26 import cirq
27 from cirq.ops import Qid
28
29
30 class PointOptimizationSummary:
31 """A description of a local optimization to perform."""
32
33 def __init__(self,
34 clear_span: int,
35 clear_qubits: Iterable['cirq.Qid'],
36 new_operations: 'cirq.OP_TREE',
37 preserve_moments: bool = False) -> None:
38 """
39 Args:
40 clear_span: Defines the range of moments to affect. Specifically,
41 refers to the indices in range(start, start+clear_span) where
42 start is an index known from surrounding context.
43 clear_qubits: Defines the set of qubits that should be cleared
44 with each affected moment.
45 new_operations: The operations to replace the cleared out
46 operations with.
47 preserve_moments: If set, `cirq.Moment` instances within
48 `new_operations` will be preserved exactly. Normally the
49 operations would be repacked to fit better into the
50 target space, which may move them between moments.
51 Please be advised that a PointOptimizer consuming this
52 summary will flatten operations no matter what,
53 see https://github.com/quantumlib/Cirq/issues/2406.
54 """
55 self.new_operations = tuple(
56 ops.flatten_op_tree(new_operations,
57 preserve_moments=preserve_moments))
58 self.clear_span = clear_span
59 self.clear_qubits = tuple(clear_qubits)
60
61 def __eq__(self, other):
62 if not isinstance(other, type(self)):
63 return NotImplemented
64 return (self.clear_span == other.clear_span and
65 self.clear_qubits == other.clear_qubits and
66 self.new_operations == other.new_operations)
67
68 def __ne__(self, other):
69 return not self == other
70
71 def __hash__(self) -> int:
72 return hash((PointOptimizationSummary,
73 self.clear_span,
74 self.clear_qubits,
75 self.new_operations))
76
77 def __repr__(self) -> str:
78 return (f'cirq.PointOptimizationSummary({self.clear_span!r}, '
79 f'{self.clear_qubits!r}, {self.new_operations!r})')
80
81
82 class PointOptimizer:
83 """Makes circuit improvements focused on a specific location."""
84
85 def __init__(self,
86 post_clean_up: Callable[[Sequence['cirq.Operation']], ops.
87 OP_TREE] = lambda op_list: op_list
88 ) -> None:
89 """
90 Args:
91 post_clean_up: This function is called on each set of optimized
92 operations before they are put into the circuit to replace the
93 old operations.
94 """
95 self.post_clean_up = post_clean_up
96
97 def __call__(self, circuit: Circuit):
98 return self.optimize_circuit(circuit)
99
100 @abc.abstractmethod
101 def optimization_at(self, circuit: Circuit, index: int, op: 'cirq.Operation'
102 ) -> Optional[PointOptimizationSummary]:
103 """Describes how to change operations near the given location.
104
105 For example, this method could realize that the given operation is an
106 X gate and that in the very next moment there is a Z gate. It would
107 indicate that they should be combined into a Y gate by returning
108 PointOptimizationSummary(clear_span=2,
109 clear_qubits=op.qubits,
110 new_operations=cirq.Y(op.qubits[0]))
111
112 Args:
113 circuit: The circuit to improve.
114 index: The index of the moment with the operation to focus on.
115 op: The operation to focus improvements upon.
116
117 Returns:
118 A description of the optimization to perform, or else None if no
119 change should be made.
120 """
121
122 def optimize_circuit(self, circuit: Circuit):
123 frontier: Dict['Qid', int] = defaultdict(lambda: 0)
124 i = 0
125 while i < len(circuit): # Note: circuit may mutate as we go.
126 for op in circuit[i].operations:
127 # Don't touch stuff inserted by previous optimizations.
128 if any(frontier[q] > i for q in op.qubits):
129 continue
130
131 # Skip if an optimization removed the circuit underneath us.
132 if i >= len(circuit):
133 continue
134 # Skip if an optimization removed the op we're considering.
135 if op not in circuit[i].operations:
136 continue
137 opt = self.optimization_at(circuit, i, op)
138 # Skip if the optimization did nothing.
139 if opt is None:
140 continue
141
142 # Clear target area, and insert new operations.
143 circuit.clear_operations_touching(
144 opt.clear_qubits,
145 [e for e in range(i, i + opt.clear_span)])
146 new_operations = self.post_clean_up(
147 cast(Tuple[ops.Operation], opt.new_operations))
148 circuit.insert_at_frontier(new_operations, i, frontier)
149
150 i += 1
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq/circuits/optimization_pass.py b/cirq/circuits/optimization_pass.py
--- a/cirq/circuits/optimization_pass.py
+++ b/cirq/circuits/optimization_pass.py
@@ -145,6 +145,18 @@
[e for e in range(i, i + opt.clear_span)])
new_operations = self.post_clean_up(
cast(Tuple[ops.Operation], opt.new_operations))
- circuit.insert_at_frontier(new_operations, i, frontier)
+ flat_new_operations = tuple(ops.flatten_to_ops(new_operations))
+
+ new_qubits = set()
+ for flat_op in flat_new_operations:
+ for q in flat_op.qubits:
+ new_qubits.add(q)
+
+ if not new_qubits.issubset(set(opt.clear_qubits)):
+ raise ValueError(
+ 'New operations in PointOptimizer should not act on new'
+ ' qubits.')
+
+ circuit.insert_at_frontier(flat_new_operations, i, frontier)
i += 1
| {"golden_diff": "diff --git a/cirq/circuits/optimization_pass.py b/cirq/circuits/optimization_pass.py\n--- a/cirq/circuits/optimization_pass.py\n+++ b/cirq/circuits/optimization_pass.py\n@@ -145,6 +145,18 @@\n [e for e in range(i, i + opt.clear_span)])\n new_operations = self.post_clean_up(\n cast(Tuple[ops.Operation], opt.new_operations))\n- circuit.insert_at_frontier(new_operations, i, frontier)\n \n+ flat_new_operations = tuple(ops.flatten_to_ops(new_operations))\n+\n+ new_qubits = set()\n+ for flat_op in flat_new_operations:\n+ for q in flat_op.qubits:\n+ new_qubits.add(q)\n+\n+ if not new_qubits.issubset(set(opt.clear_qubits)):\n+ raise ValueError(\n+ 'New operations in PointOptimizer should not act on new'\n+ ' qubits.')\n+\n+ circuit.insert_at_frontier(flat_new_operations, i, frontier)\n i += 1\n", "issue": "Changing qubits in PointOptimizer causes ValueError\nThe cirq.PointOptimizer class assumes that an optimizer won't change qubits (or at least won't change qubits in a way that pushes the new gate backwards). If two gates are modified in the same moment to use the same qubits, a ValueError is raised.\r\n\r\nA check similar to https://github.com/quantumlib/Cirq/blob/master/cirq/circuits/optimization_pass.py#L128\r\nneeds to be added at or around Line 148 to detect this condition and adjust the 'i' index or frontier accordingly.\r\n\r\nA minimal example that causes the issue is below.\r\n```\r\nclass EverythingIs42(cirq.PointOptimizer):\r\n def optimization_at(self, circuit, index, op):\r\n if len(op.qubits) == 1:\r\n gate = op.gate\r\n new_op = op.gate(cirq.LineQubit(42))\r\n return cirq.PointOptimizationSummary(clear_span=1,\r\n clear_qubits=op.qubits,\r\n new_operations=new_op)\r\n\r\nc=cirq.Circuit(cirq.X(cirq.LineQubit(0)), cirq.X(cirq.LineQubit(1)))\r\nEverythingIs42().optimize_circuit(c)\r\n```\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Defines the OptimizationPass type.\"\"\"\nfrom typing import (Dict, Callable, Iterable, Optional, Sequence, TYPE_CHECKING,\n Tuple, cast)\n\nimport abc\nfrom collections import defaultdict\n\nfrom cirq import ops\nfrom cirq.circuits.circuit import Circuit\n\nif TYPE_CHECKING:\n import cirq\n from cirq.ops import Qid\n\n\nclass PointOptimizationSummary:\n \"\"\"A description of a local optimization to perform.\"\"\"\n\n def __init__(self,\n clear_span: int,\n clear_qubits: Iterable['cirq.Qid'],\n new_operations: 'cirq.OP_TREE',\n preserve_moments: bool = False) -> None:\n \"\"\"\n Args:\n clear_span: Defines the range of moments to affect. Specifically,\n refers to the indices in range(start, start+clear_span) where\n start is an index known from surrounding context.\n clear_qubits: Defines the set of qubits that should be cleared\n with each affected moment.\n new_operations: The operations to replace the cleared out\n operations with.\n preserve_moments: If set, `cirq.Moment` instances within\n `new_operations` will be preserved exactly. 
Normally the\n operations would be repacked to fit better into the\n target space, which may move them between moments.\n Please be advised that a PointOptimizer consuming this\n summary will flatten operations no matter what,\n see https://github.com/quantumlib/Cirq/issues/2406.\n \"\"\"\n self.new_operations = tuple(\n ops.flatten_op_tree(new_operations,\n preserve_moments=preserve_moments))\n self.clear_span = clear_span\n self.clear_qubits = tuple(clear_qubits)\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return NotImplemented\n return (self.clear_span == other.clear_span and\n self.clear_qubits == other.clear_qubits and\n self.new_operations == other.new_operations)\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self) -> int:\n return hash((PointOptimizationSummary,\n self.clear_span,\n self.clear_qubits,\n self.new_operations))\n\n def __repr__(self) -> str:\n return (f'cirq.PointOptimizationSummary({self.clear_span!r}, '\n f'{self.clear_qubits!r}, {self.new_operations!r})')\n\n\nclass PointOptimizer:\n \"\"\"Makes circuit improvements focused on a specific location.\"\"\"\n\n def __init__(self,\n post_clean_up: Callable[[Sequence['cirq.Operation']], ops.\n OP_TREE] = lambda op_list: op_list\n ) -> None:\n \"\"\"\n Args:\n post_clean_up: This function is called on each set of optimized\n operations before they are put into the circuit to replace the\n old operations.\n \"\"\"\n self.post_clean_up = post_clean_up\n\n def __call__(self, circuit: Circuit):\n return self.optimize_circuit(circuit)\n\n @abc.abstractmethod\n def optimization_at(self, circuit: Circuit, index: int, op: 'cirq.Operation'\n ) -> Optional[PointOptimizationSummary]:\n \"\"\"Describes how to change operations near the given location.\n\n For example, this method could realize that the given operation is an\n X gate and that in the very next moment there is a Z gate. 
It would\n indicate that they should be combined into a Y gate by returning\n PointOptimizationSummary(clear_span=2,\n clear_qubits=op.qubits,\n new_operations=cirq.Y(op.qubits[0]))\n\n Args:\n circuit: The circuit to improve.\n index: The index of the moment with the operation to focus on.\n op: The operation to focus improvements upon.\n\n Returns:\n A description of the optimization to perform, or else None if no\n change should be made.\n \"\"\"\n\n def optimize_circuit(self, circuit: Circuit):\n frontier: Dict['Qid', int] = defaultdict(lambda: 0)\n i = 0\n while i < len(circuit): # Note: circuit may mutate as we go.\n for op in circuit[i].operations:\n # Don't touch stuff inserted by previous optimizations.\n if any(frontier[q] > i for q in op.qubits):\n continue\n\n # Skip if an optimization removed the circuit underneath us.\n if i >= len(circuit):\n continue\n # Skip if an optimization removed the op we're considering.\n if op not in circuit[i].operations:\n continue\n opt = self.optimization_at(circuit, i, op)\n # Skip if the optimization did nothing.\n if opt is None:\n continue\n\n # Clear target area, and insert new operations.\n circuit.clear_operations_touching(\n opt.clear_qubits,\n [e for e in range(i, i + opt.clear_span)])\n new_operations = self.post_clean_up(\n cast(Tuple[ops.Operation], opt.new_operations))\n circuit.insert_at_frontier(new_operations, i, frontier)\n\n i += 1\n", "path": "cirq/circuits/optimization_pass.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Defines the OptimizationPass type.\"\"\"\nfrom typing import (Dict, Callable, Iterable, Optional, Sequence, TYPE_CHECKING,\n Tuple, cast)\n\nimport abc\nfrom collections import defaultdict\n\nfrom cirq import ops\nfrom cirq.circuits.circuit import Circuit\n\nif TYPE_CHECKING:\n import cirq\n from cirq.ops import Qid\n\n\nclass PointOptimizationSummary:\n \"\"\"A description of a local optimization to perform.\"\"\"\n\n def __init__(self,\n clear_span: int,\n clear_qubits: Iterable['cirq.Qid'],\n new_operations: 'cirq.OP_TREE',\n preserve_moments: bool = False) -> None:\n \"\"\"\n Args:\n clear_span: Defines the range of moments to affect. Specifically,\n refers to the indices in range(start, start+clear_span) where\n start is an index known from surrounding context.\n clear_qubits: Defines the set of qubits that should be cleared\n with each affected moment.\n new_operations: The operations to replace the cleared out\n operations with.\n preserve_moments: If set, `cirq.Moment` instances within\n `new_operations` will be preserved exactly. 
Normally the\n operations would be repacked to fit better into the\n target space, which may move them between moments.\n Please be advised that a PointOptimizer consuming this\n summary will flatten operations no matter what,\n see https://github.com/quantumlib/Cirq/issues/2406.\n \"\"\"\n self.new_operations = tuple(\n ops.flatten_op_tree(new_operations,\n preserve_moments=preserve_moments))\n self.clear_span = clear_span\n self.clear_qubits = tuple(clear_qubits)\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return NotImplemented\n return (self.clear_span == other.clear_span and\n self.clear_qubits == other.clear_qubits and\n self.new_operations == other.new_operations)\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self) -> int:\n return hash((PointOptimizationSummary,\n self.clear_span,\n self.clear_qubits,\n self.new_operations))\n\n def __repr__(self) -> str:\n return (f'cirq.PointOptimizationSummary({self.clear_span!r}, '\n f'{self.clear_qubits!r}, {self.new_operations!r})')\n\n\nclass PointOptimizer:\n \"\"\"Makes circuit improvements focused on a specific location.\"\"\"\n\n def __init__(self,\n post_clean_up: Callable[[Sequence['cirq.Operation']], ops.\n OP_TREE] = lambda op_list: op_list\n ) -> None:\n \"\"\"\n Args:\n post_clean_up: This function is called on each set of optimized\n operations before they are put into the circuit to replace the\n old operations.\n \"\"\"\n self.post_clean_up = post_clean_up\n\n def __call__(self, circuit: Circuit):\n return self.optimize_circuit(circuit)\n\n @abc.abstractmethod\n def optimization_at(self, circuit: Circuit, index: int, op: 'cirq.Operation'\n ) -> Optional[PointOptimizationSummary]:\n \"\"\"Describes how to change operations near the given location.\n\n For example, this method could realize that the given operation is an\n X gate and that in the very next moment there is a Z gate. 
It would\n indicate that they should be combined into a Y gate by returning\n PointOptimizationSummary(clear_span=2,\n clear_qubits=op.qubits,\n new_operations=cirq.Y(op.qubits[0]))\n\n Args:\n circuit: The circuit to improve.\n index: The index of the moment with the operation to focus on.\n op: The operation to focus improvements upon.\n\n Returns:\n A description of the optimization to perform, or else None if no\n change should be made.\n \"\"\"\n\n def optimize_circuit(self, circuit: Circuit):\n frontier: Dict['Qid', int] = defaultdict(lambda: 0)\n i = 0\n while i < len(circuit): # Note: circuit may mutate as we go.\n for op in circuit[i].operations:\n # Don't touch stuff inserted by previous optimizations.\n if any(frontier[q] > i for q in op.qubits):\n continue\n\n # Skip if an optimization removed the circuit underneath us.\n if i >= len(circuit):\n continue\n # Skip if an optimization removed the op we're considering.\n if op not in circuit[i].operations:\n continue\n opt = self.optimization_at(circuit, i, op)\n # Skip if the optimization did nothing.\n if opt is None:\n continue\n\n # Clear target area, and insert new operations.\n circuit.clear_operations_touching(\n opt.clear_qubits,\n [e for e in range(i, i + opt.clear_span)])\n new_operations = self.post_clean_up(\n cast(Tuple[ops.Operation], opt.new_operations))\n\n flat_new_operations = tuple(ops.flatten_to_ops(new_operations))\n\n new_qubits = set()\n for flat_op in flat_new_operations:\n for q in flat_op.qubits:\n new_qubits.add(q)\n\n if not new_qubits.issubset(set(opt.clear_qubits)):\n raise ValueError(\n 'New operations in PointOptimizer should not act on new'\n ' qubits.')\n\n circuit.insert_at_frontier(flat_new_operations, i, frontier)\n i += 1\n", "path": "cirq/circuits/optimization_pass.py"}]} | 2,107 | 236 |
gh_patches_debug_4121 | rasdani/github-patches | git_diff | aio-libs__aiohttp-5012 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Connections are not correctly kept after HEAD requests
When the client makes a HEAD request, it parses the response as if it were a regular message.
But the HTTP spec has a nice trick: HEAD requests `SHOULD` respond with the exact same headers as the corresponding GET request. This includes `Content-Length` or `Transfer-Encoding: chunked` headers.
This means that when the client receives a HEAD response with a `Content-Length: 5` header, it should not expect any body, and `payload.is_eof()` should be True immediately, not after receiving 5 more bytes.
Currently `ResponseHandler.should_close` is True for HEAD requests with a `Content-Length: …` because `payload.is_eof()` is False when it should be True, hence the underlying connection is closed and not re-used when it could be.
This is only inefficient on the client side, not at all a bug, and does not produce any wrong results.
(also, I was mistaken a few minutes ago: returning a Content-Length header with an empty body is perfectly valid, and the `allow_head=True` parameter of the `add_get` method works perfectly well, no problem at all on the server side)
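A minimal sketch of the client-side change (it mirrors the patch shown further below: when the caller already asked to skip the payload, which is what HEAD requests do, tell the response parser not to expect a body):

```python
# Sketch of ResponseHandler.set_response_params in aiohttp/client_proto.py:
self._parser = HttpResponseParser(
    self, self._loop, timer=timer,
    payload_exception=ClientPayloadError,
    response_with_body=not skip_payload,  # HEAD: headers only, so is_eof() is True right away
    read_until_eof=read_until_eof,
    auto_decompress=auto_decompress)
```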
reproducer:
==========
https://github.com/aio-libs/aiohttp/issues/1769#issuecomment-614775045
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aiohttp/client_proto.py`
Content:
```
1 import asyncio
2 from contextlib import suppress
3 from typing import Any, Optional, Tuple
4
5 from .base_protocol import BaseProtocol
6 from .client_exceptions import (
7 ClientOSError,
8 ClientPayloadError,
9 ServerDisconnectedError,
10 ServerTimeoutError,
11 )
12 from .helpers import BaseTimerContext, set_exception, set_result
13 from .http import HttpResponseParser, RawResponseMessage
14 from .streams import EMPTY_PAYLOAD, DataQueue, StreamReader
15
16
17 class ResponseHandler(BaseProtocol,
18 DataQueue[Tuple[RawResponseMessage, StreamReader]]):
19 """Helper class to adapt between Protocol and StreamReader."""
20
21 def __init__(self,
22 loop: asyncio.AbstractEventLoop) -> None:
23 BaseProtocol.__init__(self, loop=loop)
24 DataQueue.__init__(self, loop)
25
26 self._should_close = False
27
28 self._payload = None
29 self._skip_payload = False
30 self._payload_parser = None
31
32 self._timer = None
33
34 self._tail = b''
35 self._upgraded = False
36 self._parser = None # type: Optional[HttpResponseParser]
37
38 self._read_timeout = None # type: Optional[float]
39 self._read_timeout_handle = None # type: Optional[asyncio.TimerHandle]
40
41 self.closed = self._loop.create_future() # type: asyncio.Future[None]
42
43 @property
44 def upgraded(self) -> bool:
45 return self._upgraded
46
47 @property
48 def should_close(self) -> bool:
49 if (self._payload is not None and
50 not self._payload.is_eof() or self._upgraded):
51 return True
52
53 return (self._should_close or self._upgraded or
54 self.exception() is not None or
55 self._payload_parser is not None or
56 len(self) > 0 or bool(self._tail))
57
58 def force_close(self) -> None:
59 self._should_close = True
60
61 def close(self) -> None:
62 transport = self.transport
63 if transport is not None:
64 transport.close()
65 self.transport = None
66 self._payload = None
67 self._drop_timeout()
68
69 def is_connected(self) -> bool:
70 return self.transport is not None
71
72 def connection_lost(self, exc: Optional[BaseException]) -> None:
73 self._drop_timeout()
74
75 if exc is not None:
76 set_exception(self.closed, exc)
77 else:
78 set_result(self.closed, None)
79
80 if self._payload_parser is not None:
81 with suppress(Exception):
82 self._payload_parser.feed_eof()
83
84 uncompleted = None
85 if self._parser is not None:
86 try:
87 uncompleted = self._parser.feed_eof()
88 except Exception:
89 if self._payload is not None:
90 self._payload.set_exception(
91 ClientPayloadError(
92 'Response payload is not completed'))
93
94 if not self.is_eof():
95 if isinstance(exc, OSError):
96 exc = ClientOSError(*exc.args)
97 if exc is None:
98 exc = ServerDisconnectedError(uncompleted)
99 # assigns self._should_close to True as side effect,
100 # we do it anyway below
101 self.set_exception(exc)
102
103 self._should_close = True
104 self._parser = None
105 self._payload = None
106 self._payload_parser = None
107 self._reading_paused = False
108
109 super().connection_lost(exc)
110
111 def eof_received(self) -> None:
112 # should call parser.feed_eof() most likely
113 self._drop_timeout()
114
115 def pause_reading(self) -> None:
116 super().pause_reading()
117 self._drop_timeout()
118
119 def resume_reading(self) -> None:
120 super().resume_reading()
121 self._reschedule_timeout()
122
123 def set_exception(self, exc: BaseException) -> None:
124 self._should_close = True
125 self._drop_timeout()
126 super().set_exception(exc)
127
128 def set_parser(self, parser: Any, payload: Any) -> None:
129 # TODO: actual types are:
130 # parser: WebSocketReader
131 # payload: FlowControlDataQueue
132 # but they are not generi enough
133 # Need an ABC for both types
134 self._payload = payload
135 self._payload_parser = parser
136
137 self._drop_timeout()
138
139 if self._tail:
140 data, self._tail = self._tail, b''
141 self.data_received(data)
142
143 def set_response_params(self, *, timer: BaseTimerContext=None,
144 skip_payload: bool=False,
145 read_until_eof: bool=False,
146 auto_decompress: bool=True,
147 read_timeout: Optional[float]=None) -> None:
148 self._skip_payload = skip_payload
149
150 self._read_timeout = read_timeout
151 self._reschedule_timeout()
152
153 self._parser = HttpResponseParser(
154 self, self._loop, timer=timer,
155 payload_exception=ClientPayloadError,
156 read_until_eof=read_until_eof,
157 auto_decompress=auto_decompress)
158
159 if self._tail:
160 data, self._tail = self._tail, b''
161 self.data_received(data)
162
163 def _drop_timeout(self) -> None:
164 if self._read_timeout_handle is not None:
165 self._read_timeout_handle.cancel()
166 self._read_timeout_handle = None
167
168 def _reschedule_timeout(self) -> None:
169 timeout = self._read_timeout
170 if self._read_timeout_handle is not None:
171 self._read_timeout_handle.cancel()
172
173 if timeout:
174 self._read_timeout_handle = self._loop.call_later(
175 timeout, self._on_read_timeout)
176 else:
177 self._read_timeout_handle = None
178
179 def _on_read_timeout(self) -> None:
180 exc = ServerTimeoutError("Timeout on reading data from socket")
181 self.set_exception(exc)
182 if self._payload is not None:
183 self._payload.set_exception(exc)
184
185 def data_received(self, data: bytes) -> None:
186 self._reschedule_timeout()
187
188 if not data:
189 return
190
191 # custom payload parser
192 if self._payload_parser is not None:
193 eof, tail = self._payload_parser.feed_data(data)
194 if eof:
195 self._payload = None
196 self._payload_parser = None
197
198 if tail:
199 self.data_received(tail)
200 return
201 else:
202 if self._upgraded or self._parser is None:
203 # i.e. websocket connection, websocket parser is not set yet
204 self._tail += data
205 else:
206 # parse http messages
207 try:
208 messages, upgraded, tail = self._parser.feed_data(data)
209 except BaseException as exc:
210 if self.transport is not None:
211 # connection.release() could be called BEFORE
212 # data_received(), the transport is already
213 # closed in this case
214 self.transport.close()
215 # should_close is True after the call
216 self.set_exception(exc)
217 return
218
219 self._upgraded = upgraded
220
221 payload = None
222 for message, payload in messages:
223 if message.should_close:
224 self._should_close = True
225
226 self._payload = payload
227
228 if self._skip_payload or message.code in (204, 304):
229 self.feed_data((message, EMPTY_PAYLOAD), 0) # type: ignore # noqa
230 else:
231 self.feed_data((message, payload), 0)
232 if payload is not None:
233 # new message(s) was processed
234 # register timeout handler unsubscribing
235 # either on end-of-stream or immediately for
236 # EMPTY_PAYLOAD
237 if payload is not EMPTY_PAYLOAD:
238 payload.on_eof(self._drop_timeout)
239 else:
240 self._drop_timeout()
241
242 if tail:
243 if upgraded:
244 self.data_received(tail)
245 else:
246 self._tail = tail
247
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aiohttp/client_proto.py b/aiohttp/client_proto.py
--- a/aiohttp/client_proto.py
+++ b/aiohttp/client_proto.py
@@ -153,6 +153,7 @@
self._parser = HttpResponseParser(
self, self._loop, timer=timer,
payload_exception=ClientPayloadError,
+ response_with_body=not skip_payload,
read_until_eof=read_until_eof,
auto_decompress=auto_decompress)
| {"golden_diff": "diff --git a/aiohttp/client_proto.py b/aiohttp/client_proto.py\n--- a/aiohttp/client_proto.py\n+++ b/aiohttp/client_proto.py\n@@ -153,6 +153,7 @@\n self._parser = HttpResponseParser(\n self, self._loop, timer=timer,\n payload_exception=ClientPayloadError,\n+ response_with_body=not skip_payload,\n read_until_eof=read_until_eof,\n auto_decompress=auto_decompress)\n", "issue": "Connections are not correctly kept after HEAD requests\nWhen the client makes a HEAD request, it parses the response as if it was a regular message.\r\n\r\nBut the HTTP specs has a nice trick: HEAD requests `SHOULD` respond with the exact same headers than the corresponding GET request. This includes `Content-Length` or `Transfer-Encoding: chunked` headers.\r\n\r\nThis means that when the client receives a HEAD response with a `Content-Length: 5` response, it should not expect any body and `payload.is_eof()` should be True immediately, not after receiving 5\u00a0more bytes.\r\n\r\nCurrently `ResponseHandler.should_close` is True for HEAD requests with a `Content-Length: \u2026` because `payload.is_eof()` is False when it should be True, hence the underlying connection is closed and not re-used when it could be.\r\n\r\n\r\nThis is only inefficient on the client side, not at all a bug, and does not produce any wrong results.\r\n\r\n\r\n(also, I was mistaken a few minutes ago: returning a Content-Length header with an empty body is perfectly valid, and the `allow_head=True` parameter of the `add_get` method works perfectly well, no problem at all on the server side)\r\n\r\n\r\nreproducer:\r\n==========\r\n\r\nhttps://github.com/aio-libs/aiohttp/issues/1769#issuecomment-614775045\n", "before_files": [{"content": "import asyncio\nfrom contextlib import suppress\nfrom typing import Any, Optional, Tuple\n\nfrom .base_protocol import BaseProtocol\nfrom .client_exceptions import (\n ClientOSError,\n ClientPayloadError,\n ServerDisconnectedError,\n ServerTimeoutError,\n)\nfrom .helpers import BaseTimerContext, set_exception, set_result\nfrom .http import HttpResponseParser, RawResponseMessage\nfrom .streams import EMPTY_PAYLOAD, DataQueue, StreamReader\n\n\nclass ResponseHandler(BaseProtocol,\n DataQueue[Tuple[RawResponseMessage, StreamReader]]):\n \"\"\"Helper class to adapt between Protocol and StreamReader.\"\"\"\n\n def __init__(self,\n loop: asyncio.AbstractEventLoop) -> None:\n BaseProtocol.__init__(self, loop=loop)\n DataQueue.__init__(self, loop)\n\n self._should_close = False\n\n self._payload = None\n self._skip_payload = False\n self._payload_parser = None\n\n self._timer = None\n\n self._tail = b''\n self._upgraded = False\n self._parser = None # type: Optional[HttpResponseParser]\n\n self._read_timeout = None # type: Optional[float]\n self._read_timeout_handle = None # type: Optional[asyncio.TimerHandle]\n\n self.closed = self._loop.create_future() # type: asyncio.Future[None]\n\n @property\n def upgraded(self) -> bool:\n return self._upgraded\n\n @property\n def should_close(self) -> bool:\n if (self._payload is not None and\n not self._payload.is_eof() or self._upgraded):\n return True\n\n return (self._should_close or self._upgraded or\n self.exception() is not None or\n self._payload_parser is not None or\n len(self) > 0 or bool(self._tail))\n\n def force_close(self) -> None:\n self._should_close = True\n\n def close(self) -> None:\n transport = self.transport\n if transport is not None:\n transport.close()\n self.transport = None\n self._payload = None\n self._drop_timeout()\n\n def 
is_connected(self) -> bool:\n return self.transport is not None\n\n def connection_lost(self, exc: Optional[BaseException]) -> None:\n self._drop_timeout()\n\n if exc is not None:\n set_exception(self.closed, exc)\n else:\n set_result(self.closed, None)\n\n if self._payload_parser is not None:\n with suppress(Exception):\n self._payload_parser.feed_eof()\n\n uncompleted = None\n if self._parser is not None:\n try:\n uncompleted = self._parser.feed_eof()\n except Exception:\n if self._payload is not None:\n self._payload.set_exception(\n ClientPayloadError(\n 'Response payload is not completed'))\n\n if not self.is_eof():\n if isinstance(exc, OSError):\n exc = ClientOSError(*exc.args)\n if exc is None:\n exc = ServerDisconnectedError(uncompleted)\n # assigns self._should_close to True as side effect,\n # we do it anyway below\n self.set_exception(exc)\n\n self._should_close = True\n self._parser = None\n self._payload = None\n self._payload_parser = None\n self._reading_paused = False\n\n super().connection_lost(exc)\n\n def eof_received(self) -> None:\n # should call parser.feed_eof() most likely\n self._drop_timeout()\n\n def pause_reading(self) -> None:\n super().pause_reading()\n self._drop_timeout()\n\n def resume_reading(self) -> None:\n super().resume_reading()\n self._reschedule_timeout()\n\n def set_exception(self, exc: BaseException) -> None:\n self._should_close = True\n self._drop_timeout()\n super().set_exception(exc)\n\n def set_parser(self, parser: Any, payload: Any) -> None:\n # TODO: actual types are:\n # parser: WebSocketReader\n # payload: FlowControlDataQueue\n # but they are not generi enough\n # Need an ABC for both types\n self._payload = payload\n self._payload_parser = parser\n\n self._drop_timeout()\n\n if self._tail:\n data, self._tail = self._tail, b''\n self.data_received(data)\n\n def set_response_params(self, *, timer: BaseTimerContext=None,\n skip_payload: bool=False,\n read_until_eof: bool=False,\n auto_decompress: bool=True,\n read_timeout: Optional[float]=None) -> None:\n self._skip_payload = skip_payload\n\n self._read_timeout = read_timeout\n self._reschedule_timeout()\n\n self._parser = HttpResponseParser(\n self, self._loop, timer=timer,\n payload_exception=ClientPayloadError,\n read_until_eof=read_until_eof,\n auto_decompress=auto_decompress)\n\n if self._tail:\n data, self._tail = self._tail, b''\n self.data_received(data)\n\n def _drop_timeout(self) -> None:\n if self._read_timeout_handle is not None:\n self._read_timeout_handle.cancel()\n self._read_timeout_handle = None\n\n def _reschedule_timeout(self) -> None:\n timeout = self._read_timeout\n if self._read_timeout_handle is not None:\n self._read_timeout_handle.cancel()\n\n if timeout:\n self._read_timeout_handle = self._loop.call_later(\n timeout, self._on_read_timeout)\n else:\n self._read_timeout_handle = None\n\n def _on_read_timeout(self) -> None:\n exc = ServerTimeoutError(\"Timeout on reading data from socket\")\n self.set_exception(exc)\n if self._payload is not None:\n self._payload.set_exception(exc)\n\n def data_received(self, data: bytes) -> None:\n self._reschedule_timeout()\n\n if not data:\n return\n\n # custom payload parser\n if self._payload_parser is not None:\n eof, tail = self._payload_parser.feed_data(data)\n if eof:\n self._payload = None\n self._payload_parser = None\n\n if tail:\n self.data_received(tail)\n return\n else:\n if self._upgraded or self._parser is None:\n # i.e. 
websocket connection, websocket parser is not set yet\n self._tail += data\n else:\n # parse http messages\n try:\n messages, upgraded, tail = self._parser.feed_data(data)\n except BaseException as exc:\n if self.transport is not None:\n # connection.release() could be called BEFORE\n # data_received(), the transport is already\n # closed in this case\n self.transport.close()\n # should_close is True after the call\n self.set_exception(exc)\n return\n\n self._upgraded = upgraded\n\n payload = None\n for message, payload in messages:\n if message.should_close:\n self._should_close = True\n\n self._payload = payload\n\n if self._skip_payload or message.code in (204, 304):\n self.feed_data((message, EMPTY_PAYLOAD), 0) # type: ignore # noqa\n else:\n self.feed_data((message, payload), 0)\n if payload is not None:\n # new message(s) was processed\n # register timeout handler unsubscribing\n # either on end-of-stream or immediately for\n # EMPTY_PAYLOAD\n if payload is not EMPTY_PAYLOAD:\n payload.on_eof(self._drop_timeout)\n else:\n self._drop_timeout()\n\n if tail:\n if upgraded:\n self.data_received(tail)\n else:\n self._tail = tail\n", "path": "aiohttp/client_proto.py"}], "after_files": [{"content": "import asyncio\nfrom contextlib import suppress\nfrom typing import Any, Optional, Tuple\n\nfrom .base_protocol import BaseProtocol\nfrom .client_exceptions import (\n ClientOSError,\n ClientPayloadError,\n ServerDisconnectedError,\n ServerTimeoutError,\n)\nfrom .helpers import BaseTimerContext, set_exception, set_result\nfrom .http import HttpResponseParser, RawResponseMessage\nfrom .streams import EMPTY_PAYLOAD, DataQueue, StreamReader\n\n\nclass ResponseHandler(BaseProtocol,\n DataQueue[Tuple[RawResponseMessage, StreamReader]]):\n \"\"\"Helper class to adapt between Protocol and StreamReader.\"\"\"\n\n def __init__(self,\n loop: asyncio.AbstractEventLoop) -> None:\n BaseProtocol.__init__(self, loop=loop)\n DataQueue.__init__(self, loop)\n\n self._should_close = False\n\n self._payload = None\n self._skip_payload = False\n self._payload_parser = None\n\n self._timer = None\n\n self._tail = b''\n self._upgraded = False\n self._parser = None # type: Optional[HttpResponseParser]\n\n self._read_timeout = None # type: Optional[float]\n self._read_timeout_handle = None # type: Optional[asyncio.TimerHandle]\n\n self.closed = self._loop.create_future() # type: asyncio.Future[None]\n\n @property\n def upgraded(self) -> bool:\n return self._upgraded\n\n @property\n def should_close(self) -> bool:\n if (self._payload is not None and\n not self._payload.is_eof() or self._upgraded):\n return True\n\n return (self._should_close or self._upgraded or\n self.exception() is not None or\n self._payload_parser is not None or\n len(self) > 0 or bool(self._tail))\n\n def force_close(self) -> None:\n self._should_close = True\n\n def close(self) -> None:\n transport = self.transport\n if transport is not None:\n transport.close()\n self.transport = None\n self._payload = None\n self._drop_timeout()\n\n def is_connected(self) -> bool:\n return self.transport is not None\n\n def connection_lost(self, exc: Optional[BaseException]) -> None:\n self._drop_timeout()\n\n if exc is not None:\n set_exception(self.closed, exc)\n else:\n set_result(self.closed, None)\n\n if self._payload_parser is not None:\n with suppress(Exception):\n self._payload_parser.feed_eof()\n\n uncompleted = None\n if self._parser is not None:\n try:\n uncompleted = self._parser.feed_eof()\n except Exception:\n if self._payload is not None:\n 
self._payload.set_exception(\n ClientPayloadError(\n 'Response payload is not completed'))\n\n if not self.is_eof():\n if isinstance(exc, OSError):\n exc = ClientOSError(*exc.args)\n if exc is None:\n exc = ServerDisconnectedError(uncompleted)\n # assigns self._should_close to True as side effect,\n # we do it anyway below\n self.set_exception(exc)\n\n self._should_close = True\n self._parser = None\n self._payload = None\n self._payload_parser = None\n self._reading_paused = False\n\n super().connection_lost(exc)\n\n def eof_received(self) -> None:\n # should call parser.feed_eof() most likely\n self._drop_timeout()\n\n def pause_reading(self) -> None:\n super().pause_reading()\n self._drop_timeout()\n\n def resume_reading(self) -> None:\n super().resume_reading()\n self._reschedule_timeout()\n\n def set_exception(self, exc: BaseException) -> None:\n self._should_close = True\n self._drop_timeout()\n super().set_exception(exc)\n\n def set_parser(self, parser: Any, payload: Any) -> None:\n # TODO: actual types are:\n # parser: WebSocketReader\n # payload: FlowControlDataQueue\n # but they are not generi enough\n # Need an ABC for both types\n self._payload = payload\n self._payload_parser = parser\n\n self._drop_timeout()\n\n if self._tail:\n data, self._tail = self._tail, b''\n self.data_received(data)\n\n def set_response_params(self, *, timer: BaseTimerContext=None,\n skip_payload: bool=False,\n read_until_eof: bool=False,\n auto_decompress: bool=True,\n read_timeout: Optional[float]=None) -> None:\n self._skip_payload = skip_payload\n\n self._read_timeout = read_timeout\n self._reschedule_timeout()\n\n self._parser = HttpResponseParser(\n self, self._loop, timer=timer,\n payload_exception=ClientPayloadError,\n response_with_body=not skip_payload,\n read_until_eof=read_until_eof,\n auto_decompress=auto_decompress)\n\n if self._tail:\n data, self._tail = self._tail, b''\n self.data_received(data)\n\n def _drop_timeout(self) -> None:\n if self._read_timeout_handle is not None:\n self._read_timeout_handle.cancel()\n self._read_timeout_handle = None\n\n def _reschedule_timeout(self) -> None:\n timeout = self._read_timeout\n if self._read_timeout_handle is not None:\n self._read_timeout_handle.cancel()\n\n if timeout:\n self._read_timeout_handle = self._loop.call_later(\n timeout, self._on_read_timeout)\n else:\n self._read_timeout_handle = None\n\n def _on_read_timeout(self) -> None:\n exc = ServerTimeoutError(\"Timeout on reading data from socket\")\n self.set_exception(exc)\n if self._payload is not None:\n self._payload.set_exception(exc)\n\n def data_received(self, data: bytes) -> None:\n self._reschedule_timeout()\n\n if not data:\n return\n\n # custom payload parser\n if self._payload_parser is not None:\n eof, tail = self._payload_parser.feed_data(data)\n if eof:\n self._payload = None\n self._payload_parser = None\n\n if tail:\n self.data_received(tail)\n return\n else:\n if self._upgraded or self._parser is None:\n # i.e. 
websocket connection, websocket parser is not set yet\n self._tail += data\n else:\n # parse http messages\n try:\n messages, upgraded, tail = self._parser.feed_data(data)\n except BaseException as exc:\n if self.transport is not None:\n # connection.release() could be called BEFORE\n # data_received(), the transport is already\n # closed in this case\n self.transport.close()\n # should_close is True after the call\n self.set_exception(exc)\n return\n\n self._upgraded = upgraded\n\n payload = None\n for message, payload in messages:\n if message.should_close:\n self._should_close = True\n\n self._payload = payload\n\n if self._skip_payload or message.code in (204, 304):\n self.feed_data((message, EMPTY_PAYLOAD), 0) # type: ignore # noqa\n else:\n self.feed_data((message, payload), 0)\n if payload is not None:\n # new message(s) was processed\n # register timeout handler unsubscribing\n # either on end-of-stream or immediately for\n # EMPTY_PAYLOAD\n if payload is not EMPTY_PAYLOAD:\n payload.on_eof(self._drop_timeout)\n else:\n self._drop_timeout()\n\n if tail:\n if upgraded:\n self.data_received(tail)\n else:\n self._tail = tail\n", "path": "aiohttp/client_proto.py"}]} | 2,860 | 104 |
gh_patches_debug_22367 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-tf-515 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Subword tokenisation spacer can mark the beginning of word
Certain sequence noising operations need to retrieve a list of words from the raw list of subword tokens. For example:
* Decoding with word removal/reordering to produce noisy back-translations as in [Scaling BT paper](https://arxiv.org/abs/1808.09381)
* Word omission to support the new contrastive learning feature as in the [contrastive learning paper](https://www.aclweb.org/anthology/P19-1623.pdf)
* Presumably more features relying on word level noise might come up in the future
In these cases the user should specify some details for the sub-tokenisation process:
1. What subword token was used? (`decoding_subword_token`)
2. Was that token a joiner or a spacer? (`decoding_subword_token_is_spacer`)
When the user specifies (explicitly or implicitly) a spacer, the framework assumes that the spacer symbol appears at the beginning of each word, similar to what SentencePiece does. However, this does not have to be the case: the spacer could also appear at the end of each word - for example, [this one does](https://github.com/kovalevfm/SubTokenizer). If that extra sub-tokenisation flexibility is desired, we can add this configuration parameter. A sample implementation could look like [this](https://github.com/steremma/OpenNMT-tf/commit/d109af49911431e424b28def575fb94f07bfec47).
I realise that most users rely on standard tools that are covered by the current implementation. If there is a user base for which the extra flexibility is desired, I can submit a PR that reads this option from the YAML.
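To make the intended behaviour concrete, here is a small, purely illustrative check (it assumes the patched `tokens_to_words` from the diff below; the printed values are string `tf.RaggedTensor`s, and the comments only show the expected grouping):

```python
import tensorflow as tf
from opennmt.data.text import tokens_to_words

# SentencePiece-style input: the spacer marks the beginning of each word.
print(tokens_to_words(tf.constant(["▁He", "llo", "▁World", "!"]), "▁", is_spacer=True))
# expected grouping: (▁He, llo) (▁World, !)

# End-of-word spacer (e.g. SubTokenizer): the spacer marks the end of each word.
print(tokens_to_words(tf.constant(["He", "llo▁", "World", "!▁"]), "▁", is_spacer=True))
# expected grouping: (He, llo▁) (World, !▁)
```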
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opennmt/data/text.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Text manipulation."""
4
5 import tensorflow as tf
6
7
8 def tokens_to_chars(tokens):
9 """Splits tokens into unicode characters.
10
11 Args:
12 tokens: A string ``tf.Tensor`` of shape :math:`[T]`.
13
14 Returns:
15 The characters as a 2D string ``tf.RaggedTensor``.
16 """
17 return tf.strings.unicode_split(tokens, "UTF-8")
18
19 def tokens_to_words(tokens, subword_token="■", is_spacer=None):
20 """Converts a sequence of tokens to a sequence of words.
21
22 For example, if a BPE tokenization produces this sequence:
23
24 ["He@@", "llo", "W@@", "orld", "@@!"]
25
26 this function will return the tensor:
27
28 [["He@@", "llo", ""], ["W@@", "orld", "@@!"]]
29
30 Args:
31 tokens: A 1D string ``tf.Tensor``.
32 subword_token: The special token used by the subword tokenizer.
33 is_spacer: Whether :obj:`subword_token` is used as a spacer (as in
34 SentencePiece) or a joiner (as in BPE). If ``None``, will infer
35 directly from :obj:`subword_token`.
36
37 Returns:
38 The words as a 2D string ``tf.RaggedTensor``.
39 """
40 if is_spacer is None:
41 is_spacer = subword_token == "▁"
42 if is_spacer:
43 subword = tf.strings.regex_full_match(tokens, "[^%s].*" % subword_token)
44 else:
45 right = tf.strings.regex_full_match(tokens, ".*%s" % subword_token)
46 left = tf.strings.regex_full_match(tokens, "%s.*" % subword_token)
47 subword = tf.logical_or(tf.roll(right, shift=1, axis=0), left)
48 start = tf.logical_not(subword)
49 start_indices = tf.squeeze(tf.where(start), -1)
50 return tf.RaggedTensor.from_row_starts(tokens, start_indices)
51
52 def alignment_matrix_from_pharaoh(alignment_line,
53 source_length,
54 target_length,
55 dtype=tf.float32):
56 """Parse Pharaoh alignments into an alignment matrix.
57
58 Args:
59 alignment_line: A string ``tf.Tensor`` in the Pharaoh format.
60 source_length: The length of the source sentence, without special symbols.
61 target_length: The length of the target sentence, without special symbols.
62 dtype: The output matrix dtype. Defaults to ``tf.float32`` for convenience
63 when computing the guided alignment loss.
64
65 Returns:
66 The alignment matrix as a 2-D ``tf.Tensor`` of type :obj:`dtype` and shape
67 ``[target_length, source_length]``, where ``[i, j] = 1`` if the ``i`` th
68 target word is aligned with the ``j`` th source word.
69 """
70 align_pairs_str = tf.strings.split([alignment_line]).values
71 align_pairs_flat_str = tf.strings.split(align_pairs_str, sep="-").values
72 align_pairs_flat = tf.strings.to_number(align_pairs_flat_str, out_type=tf.int64)
73 sparse_indices = tf.reshape(align_pairs_flat, [-1, 2])
74 sparse_values = tf.ones([tf.shape(sparse_indices)[0]], dtype=dtype)
75 source_length = tf.cast(source_length, tf.int64)
76 target_length = tf.cast(target_length, tf.int64)
77 alignment_matrix_sparse = tf.sparse.SparseTensor(
78 sparse_indices, sparse_values, [source_length, target_length])
79 alignment_matrix = tf.sparse.to_dense(alignment_matrix_sparse, validate_indices=False)
80 return tf.transpose(alignment_matrix)
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opennmt/data/text.py b/opennmt/data/text.py
--- a/opennmt/data/text.py
+++ b/opennmt/data/text.py
@@ -40,13 +40,18 @@
if is_spacer is None:
is_spacer = subword_token == "▁"
if is_spacer:
- subword = tf.strings.regex_full_match(tokens, "[^%s].*" % subword_token)
+ # First token implicitly starts with a spacer.
+ left_and_single = tf.logical_or(
+ tf.strings.regex_full_match(tokens, "%s.*" % subword_token),
+ tf.one_hot(0, tf.shape(tokens)[0], on_value=True, off_value=False))
+ right = tf.strings.regex_full_match(tokens, ".+%s" % subword_token)
+ word_start = tf.logical_or(tf.roll(right, shift=1, axis=0), left_and_single)
else:
right = tf.strings.regex_full_match(tokens, ".*%s" % subword_token)
left = tf.strings.regex_full_match(tokens, "%s.*" % subword_token)
subword = tf.logical_or(tf.roll(right, shift=1, axis=0), left)
- start = tf.logical_not(subword)
- start_indices = tf.squeeze(tf.where(start), -1)
+ word_start = tf.logical_not(subword)
+ start_indices = tf.squeeze(tf.where(word_start), -1)
return tf.RaggedTensor.from_row_starts(tokens, start_indices)
def alignment_matrix_from_pharaoh(alignment_line,
| {"golden_diff": "diff --git a/opennmt/data/text.py b/opennmt/data/text.py\n--- a/opennmt/data/text.py\n+++ b/opennmt/data/text.py\n@@ -40,13 +40,18 @@\n if is_spacer is None:\n is_spacer = subword_token == \"\u2581\"\n if is_spacer:\n- subword = tf.strings.regex_full_match(tokens, \"[^%s].*\" % subword_token)\n+ # First token implicitly starts with a spacer.\n+ left_and_single = tf.logical_or(\n+ tf.strings.regex_full_match(tokens, \"%s.*\" % subword_token),\n+ tf.one_hot(0, tf.shape(tokens)[0], on_value=True, off_value=False))\n+ right = tf.strings.regex_full_match(tokens, \".+%s\" % subword_token)\n+ word_start = tf.logical_or(tf.roll(right, shift=1, axis=0), left_and_single)\n else:\n right = tf.strings.regex_full_match(tokens, \".*%s\" % subword_token)\n left = tf.strings.regex_full_match(tokens, \"%s.*\" % subword_token)\n subword = tf.logical_or(tf.roll(right, shift=1, axis=0), left)\n- start = tf.logical_not(subword)\n- start_indices = tf.squeeze(tf.where(start), -1)\n+ word_start = tf.logical_not(subword)\n+ start_indices = tf.squeeze(tf.where(word_start), -1)\n return tf.RaggedTensor.from_row_starts(tokens, start_indices)\n \n def alignment_matrix_from_pharaoh(alignment_line,\n", "issue": "Subword tokenisation spacer can mark the beginning of word\nCertain sequence noising operations need to retrieve a list of words from the raw list of subword tokens. For example:\r\n\r\n* Decoding with word removal/reordering to produce noisy back-translations as in [Scaling BT paper](https://arxiv.org/abs/1808.09381)\r\n\r\n* Word omission to support the new contrastive learning feature as in the [contrastive learning paper](https://www.aclweb.org/anthology/P19-1623.pdf)\r\n\r\n* Presumably more features relying on word level noise might come up in the future\r\n\r\nIn these cases the user should specify some details for the sub-tokenisation process: \r\n1. What subword tokens was used? (`decoding_subword_token`)\r\n2. Was that token a joiner or a spacer? (`decoding_subword_token_is_spacer`)\r\n\r\nWhen the user specifies (explicitly or implicitly) a spacer, the framework assumes that the spacer symbol appears at the beginning of each word, similar to what SentencePiece does. However this does not have to be the case, the spacer could also appear at the end of each word - for example [this one does](https://github.com/kovalevfm/SubTokenizer). If that extra sub-tokenisation flexibility is desired, we can add this configuration parameter. A sample implementation could look like [this](https://github.com/steremma/OpenNMT-tf/commit/d109af49911431e424b28def575fb94f07bfec47).\r\n\r\nI realise that most user's rely on standard tools that are covered by the current implementation. 
If there is a user base for which the extra flexibility is desired, I can submit a PR that reads this option from the YAML.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Text manipulation.\"\"\"\n\nimport tensorflow as tf\n\n\ndef tokens_to_chars(tokens):\n \"\"\"Splits tokens into unicode characters.\n\n Args:\n tokens: A string ``tf.Tensor`` of shape :math:`[T]`.\n\n Returns:\n The characters as a 2D string ``tf.RaggedTensor``.\n \"\"\"\n return tf.strings.unicode_split(tokens, \"UTF-8\")\n\ndef tokens_to_words(tokens, subword_token=\"\uffed\", is_spacer=None):\n \"\"\"Converts a sequence of tokens to a sequence of words.\n\n For example, if a BPE tokenization produces this sequence:\n\n [\"He@@\", \"llo\", \"W@@\", \"orld\", \"@@!\"]\n\n this function will return the tensor:\n\n [[\"He@@\", \"llo\", \"\"], [\"W@@\", \"orld\", \"@@!\"]]\n\n Args:\n tokens: A 1D string ``tf.Tensor``.\n subword_token: The special token used by the subword tokenizer.\n is_spacer: Whether :obj:`subword_token` is used as a spacer (as in\n SentencePiece) or a joiner (as in BPE). If ``None``, will infer\n directly from :obj:`subword_token`.\n\n Returns:\n The words as a 2D string ``tf.RaggedTensor``.\n \"\"\"\n if is_spacer is None:\n is_spacer = subword_token == \"\u2581\"\n if is_spacer:\n subword = tf.strings.regex_full_match(tokens, \"[^%s].*\" % subword_token)\n else:\n right = tf.strings.regex_full_match(tokens, \".*%s\" % subword_token)\n left = tf.strings.regex_full_match(tokens, \"%s.*\" % subword_token)\n subword = tf.logical_or(tf.roll(right, shift=1, axis=0), left)\n start = tf.logical_not(subword)\n start_indices = tf.squeeze(tf.where(start), -1)\n return tf.RaggedTensor.from_row_starts(tokens, start_indices)\n\ndef alignment_matrix_from_pharaoh(alignment_line,\n source_length,\n target_length,\n dtype=tf.float32):\n \"\"\"Parse Pharaoh alignments into an alignment matrix.\n\n Args:\n alignment_line: A string ``tf.Tensor`` in the Pharaoh format.\n source_length: The length of the source sentence, without special symbols.\n target_length: The length of the target sentence, without special symbols.\n dtype: The output matrix dtype. 
Defaults to ``tf.float32`` for convenience\n when computing the guided alignment loss.\n\n Returns:\n The alignment matrix as a 2-D ``tf.Tensor`` of type :obj:`dtype` and shape\n ``[target_length, source_length]``, where ``[i, j] = 1`` if the ``i`` th\n target word is aligned with the ``j`` th source word.\n \"\"\"\n align_pairs_str = tf.strings.split([alignment_line]).values\n align_pairs_flat_str = tf.strings.split(align_pairs_str, sep=\"-\").values\n align_pairs_flat = tf.strings.to_number(align_pairs_flat_str, out_type=tf.int64)\n sparse_indices = tf.reshape(align_pairs_flat, [-1, 2])\n sparse_values = tf.ones([tf.shape(sparse_indices)[0]], dtype=dtype)\n source_length = tf.cast(source_length, tf.int64)\n target_length = tf.cast(target_length, tf.int64)\n alignment_matrix_sparse = tf.sparse.SparseTensor(\n sparse_indices, sparse_values, [source_length, target_length])\n alignment_matrix = tf.sparse.to_dense(alignment_matrix_sparse, validate_indices=False)\n return tf.transpose(alignment_matrix)\n", "path": "opennmt/data/text.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Text manipulation.\"\"\"\n\nimport tensorflow as tf\n\n\ndef tokens_to_chars(tokens):\n \"\"\"Splits tokens into unicode characters.\n\n Args:\n tokens: A string ``tf.Tensor`` of shape :math:`[T]`.\n\n Returns:\n The characters as a 2D string ``tf.RaggedTensor``.\n \"\"\"\n return tf.strings.unicode_split(tokens, \"UTF-8\")\n\ndef tokens_to_words(tokens, subword_token=\"\uffed\", is_spacer=None):\n \"\"\"Converts a sequence of tokens to a sequence of words.\n\n For example, if a BPE tokenization produces this sequence:\n\n [\"He@@\", \"llo\", \"W@@\", \"orld\", \"@@!\"]\n\n this function will return the tensor:\n\n [[\"He@@\", \"llo\", \"\"], [\"W@@\", \"orld\", \"@@!\"]]\n\n Args:\n tokens: A 1D string ``tf.Tensor``.\n subword_token: The special token used by the subword tokenizer.\n is_spacer: Whether :obj:`subword_token` is used as a spacer (as in\n SentencePiece) or a joiner (as in BPE). If ``None``, will infer\n directly from :obj:`subword_token`.\n\n Returns:\n The words as a 2D string ``tf.RaggedTensor``.\n \"\"\"\n if is_spacer is None:\n is_spacer = subword_token == \"\u2581\"\n if is_spacer:\n # First token implicitly starts with a spacer.\n left_and_single = tf.logical_or(\n tf.strings.regex_full_match(tokens, \"%s.*\" % subword_token),\n tf.one_hot(0, tf.shape(tokens)[0], on_value=True, off_value=False))\n right = tf.strings.regex_full_match(tokens, \".+%s\" % subword_token)\n word_start = tf.logical_or(tf.roll(right, shift=1, axis=0), left_and_single)\n else:\n right = tf.strings.regex_full_match(tokens, \".*%s\" % subword_token)\n left = tf.strings.regex_full_match(tokens, \"%s.*\" % subword_token)\n subword = tf.logical_or(tf.roll(right, shift=1, axis=0), left)\n word_start = tf.logical_not(subword)\n start_indices = tf.squeeze(tf.where(word_start), -1)\n return tf.RaggedTensor.from_row_starts(tokens, start_indices)\n\ndef alignment_matrix_from_pharaoh(alignment_line,\n source_length,\n target_length,\n dtype=tf.float32):\n \"\"\"Parse Pharaoh alignments into an alignment matrix.\n\n Args:\n alignment_line: A string ``tf.Tensor`` in the Pharaoh format.\n source_length: The length of the source sentence, without special symbols.\n target_length: The length of the target sentence, without special symbols.\n dtype: The output matrix dtype. 
Defaults to ``tf.float32`` for convenience\n when computing the guided alignment loss.\n\n Returns:\n The alignment matrix as a 2-D ``tf.Tensor`` of type :obj:`dtype` and shape\n ``[target_length, source_length]``, where ``[i, j] = 1`` if the ``i`` th\n target word is aligned with the ``j`` th source word.\n \"\"\"\n align_pairs_str = tf.strings.split([alignment_line]).values\n align_pairs_flat_str = tf.strings.split(align_pairs_str, sep=\"-\").values\n align_pairs_flat = tf.strings.to_number(align_pairs_flat_str, out_type=tf.int64)\n sparse_indices = tf.reshape(align_pairs_flat, [-1, 2])\n sparse_values = tf.ones([tf.shape(sparse_indices)[0]], dtype=dtype)\n source_length = tf.cast(source_length, tf.int64)\n target_length = tf.cast(target_length, tf.int64)\n alignment_matrix_sparse = tf.sparse.SparseTensor(\n sparse_indices, sparse_values, [source_length, target_length])\n alignment_matrix = tf.sparse.to_dense(alignment_matrix_sparse, validate_indices=False)\n return tf.transpose(alignment_matrix)\n", "path": "opennmt/data/text.py"}]} | 1,603 | 343 |
gh_patches_debug_35389 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-206 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mycroft should only repeat the query in wolfram when short to speak
When the estimated time to speak "I am searching for blah blah blah blah" will take longer than the estimated time to get the result from wolfram, only the result should be said rather than the redundant "I am searching for ...".
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mycroft/skills/wolfram_alpha/__init__.py`
Content:
```
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17
18
19 from StringIO import StringIO
20 from os.path import dirname
21
22 import re
23 import requests
24 import wolframalpha
25 from six.moves import urllib
26
27 from mycroft.identity import IdentityManager
28 from mycroft.skills.core import MycroftSkill
29 from mycroft.util import CerberusAccessDenied
30 from mycroft.util.log import getLogger
31 from mycroft.messagebus.message import Message
32
33 __author__ = 'seanfitz'
34
35 logger = getLogger(__name__)
36
37
38 class EnglishQuestionParser(object):
39 """
40 Poor-man's english question parser. Not even close to conclusive, but
41 appears to construct some decent w|a queries and responses.
42 """
43
44 def __init__(self):
45 self.regexes = [
46 re.compile(
47 ".*(?P<QuestionWord>who|what|when|where|why|which) "
48 "(?P<Query1>.*) (?P<QuestionVerb>is|are|was|were) "
49 "(?P<Query2>.*)"),
50 re.compile(
51 ".*(?P<QuestionWord>who|what|when|where|why|which) "
52 "(?P<QuestionVerb>\w+) (?P<Query>.*)")
53 ]
54
55 def _normalize(self, groupdict):
56 if 'Query' in groupdict:
57 return groupdict
58 elif 'Query1' and 'Query2' in groupdict:
59 return {
60 'QuestionWord': groupdict.get('QuestionWord'),
61 'QuestionVerb': groupdict.get('QuestionVerb'),
62 'Query': ' '.join([groupdict.get('Query1'), groupdict.get(
63 'Query2')])
64 }
65
66 def parse(self, utterance):
67 for regex in self.regexes:
68 match = regex.match(utterance)
69 if match:
70 return self._normalize(match.groupdict())
71 return None
72
73
74 class CerberusWolframAlphaClient(object):
75 """
76 Wolfram|Alpha v2.0 client
77 """
78
79 def query(self, query):
80 """
81 Query Wolfram|Alpha with query using the v2.0 API
82 """
83 identity = IdentityManager().get()
84 bearer_token = 'Bearer %s:%s' % (identity.device_id, identity.token)
85 query = urllib.parse.urlencode(dict(input=query))
86 url = 'https://cerberus.mycroft.ai/wolframalpha/v2/query?' + query
87 headers = {'Authorization': bearer_token}
88 response = requests.get(url, headers=headers)
89 if response.status_code == 401:
90 raise CerberusAccessDenied()
91 logger.debug(response.content)
92 return wolframalpha.Result(StringIO(response.content))
93
94
95 class WolframAlphaSkill(MycroftSkill):
96 def __init__(self):
97 MycroftSkill.__init__(self, name="WolframAlphaSkill")
98 self.__init_client()
99 self.question_parser = EnglishQuestionParser()
100
101 def __init_client(self):
102 key = self.config.get('api_key')
103 if key:
104 self.client = wolframalpha.Client(key)
105 else:
106 self.client = CerberusWolframAlphaClient()
107
108 def initialize(self):
109 self.init_dialog(dirname(__file__))
110 self.emitter.on('intent_failure', self.handle_fallback)
111
112 def get_result(self, res):
113 result = None
114 try:
115 result = next(res.results).text
116 return result
117 except:
118 try:
119 result = self.__find_pod_id(res.pods, 'Value')
120 if not result:
121 result = self.__find_pod_id(
122 res.pods, 'NotableFacts:PeopleData')
123 if not result:
124 result = self.__find_pod_id(
125 res.pods, 'BasicInformation:PeopleData')
126 if not result:
127 result = self.__find_pod_id(res.pods, 'Definition')
128 if not result:
129 result = self.__find_pod_id(
130 res.pods, 'DecimalApproximation')
131 if result:
132 result = result[:5]
133 else:
134 result = self.__find_num(
135 res.pods, '200')
136 return result
137 except:
138 return result
139
140 def handle_fallback(self, message):
141 logger.debug(
142 "Could not determine intent, falling back to WolframAlpha Skill!")
143 utterance = message.metadata.get('utterance')
144 parsed_question = self.question_parser.parse(utterance)
145
146 # biding some time
147 if parsed_question:
148 self.speak("I am searching for " + parsed_question.get('Query'))
149 else:
150 self.speak("I am searching for " + utterance)
151 query = utterance
152 if parsed_question:
153 query = "%s %s %s" % (parsed_question.get('QuestionWord'),
154 parsed_question.get('QuestionVerb'),
155 parsed_question.get('Query'))
156
157 try:
158 res = self.client.query(query)
159 result = self.get_result(res)
160 others = self._find_did_you_mean(res)
161 except CerberusAccessDenied as e:
162 self.speak_dialog('not.paired')
163 return
164 except Exception as e:
165 logger.exception(e)
166 self.speak_dialog("not.understood")
167 return
168
169 if result:
170 input_interpretation = self.__find_pod_id(res.pods, 'Input')
171 verb = "is"
172 structured_syntax_regex = re.compile(".*(\||\[|\\\\|\]).*")
173 if parsed_question:
174 if not input_interpretation or structured_syntax_regex.match(
175 input_interpretation):
176 input_interpretation = parsed_question.get('Query')
177 verb = parsed_question.get('QuestionVerb')
178
179 if "|" in result: # Assuming "|" indicates a list of items
180 verb = ":"
181
182 result = self.process_wolfram_string(result)
183 input_interpretation = \
184 self.process_wolfram_string(input_interpretation)
185 response = "%s %s %s" % (input_interpretation, verb, result)
186
187 self.speak(response)
188 else:
189 if len(others) > 0:
190 self.speak_dialog('search.again',
191 data={'utterance': utterance, 'alternative':
192 others[0]})
193 self.handle_fallback(Message('intent_failure',
194 metadata={'utterance':
195 others[0]}))
196 else:
197 self.speak_dialog("not.understood")
198
199 @staticmethod
200 def __find_pod_id(pods, pod_id):
201 for pod in pods:
202 if pod_id in pod.id:
203 return pod.text
204 return None
205
206 @staticmethod
207 def __find_num(pods, pod_num):
208 for pod in pods:
209 if pod.node.attrib['position'] == pod_num:
210 return pod.text
211 return None
212
213 @staticmethod
214 def _find_did_you_mean(res):
215 value = []
216 root = res.tree.find('didyoumeans')
217 if root is not None:
218 for result in root:
219 value.append(result.text)
220 return value
221
222 @staticmethod
223 def process_wolfram_string(text):
224 # Remove extra whitespace
225 text = re.sub(r" \s+", r" ", text)
226
227 # Convert | symbols to commas
228 text = re.sub(r" \| ", r", ", text)
229
230 # Convert newlines to commas
231 text = re.sub(r"\n", r", ", text)
232
233 # Convert !s to factorial
234 text = re.sub(r"!", r",factorial", text)
235 return text
236
237 def stop(self):
238 pass
239
240
241 def create_skill():
242 return WolframAlphaSkill()
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mycroft/skills/wolfram_alpha/__init__.py b/mycroft/skills/wolfram_alpha/__init__.py
--- a/mycroft/skills/wolfram_alpha/__init__.py
+++ b/mycroft/skills/wolfram_alpha/__init__.py
@@ -138,21 +138,22 @@
return result
def handle_fallback(self, message):
+ self.enclosure.mouth_think()
logger.debug(
"Could not determine intent, falling back to WolframAlpha Skill!")
utterance = message.metadata.get('utterance')
parsed_question = self.question_parser.parse(utterance)
- # biding some time
- if parsed_question:
- self.speak("I am searching for " + parsed_question.get('Query'))
- else:
- self.speak("I am searching for " + utterance)
query = utterance
if parsed_question:
- query = "%s %s %s" % (parsed_question.get('QuestionWord'),
- parsed_question.get('QuestionVerb'),
- parsed_question.get('Query'))
+ # Try to store pieces of utterance (None if not parsed_question)
+ utt_word = parsed_question.get('QuestionWord')
+ utt_verb = parsed_question.get('QuestionVerb')
+ utt_query = parsed_question.get('Query')
+ query = "%s %s %s" % (utt_word, utt_verb, utt_query)
+ phrase = "know %s %s %s" % (utt_word, utt_query, utt_verb)
+ else: # TODO: Localization
+ phrase = "understand the phrase " + utterance
try:
res = self.client.query(query)
@@ -163,7 +164,7 @@
return
except Exception as e:
logger.exception(e)
- self.speak_dialog("not.understood")
+ self.speak_dialog("not.understood", data={'phrase': phrase})
return
if result:
@@ -194,7 +195,7 @@
metadata={'utterance':
others[0]}))
else:
- self.speak_dialog("not.understood")
+ self.speak_dialog("not.understood", data={'phrase': phrase})
@staticmethod
def __find_pod_id(pods, pod_id):
| {"golden_diff": "diff --git a/mycroft/skills/wolfram_alpha/__init__.py b/mycroft/skills/wolfram_alpha/__init__.py\n--- a/mycroft/skills/wolfram_alpha/__init__.py\n+++ b/mycroft/skills/wolfram_alpha/__init__.py\n@@ -138,21 +138,22 @@\n return result\n \n def handle_fallback(self, message):\n+ self.enclosure.mouth_think()\n logger.debug(\n \"Could not determine intent, falling back to WolframAlpha Skill!\")\n utterance = message.metadata.get('utterance')\n parsed_question = self.question_parser.parse(utterance)\n \n- # biding some time\n- if parsed_question:\n- self.speak(\"I am searching for \" + parsed_question.get('Query'))\n- else:\n- self.speak(\"I am searching for \" + utterance)\n query = utterance\n if parsed_question:\n- query = \"%s %s %s\" % (parsed_question.get('QuestionWord'),\n- parsed_question.get('QuestionVerb'),\n- parsed_question.get('Query'))\n+ # Try to store pieces of utterance (None if not parsed_question)\n+ utt_word = parsed_question.get('QuestionWord')\n+ utt_verb = parsed_question.get('QuestionVerb')\n+ utt_query = parsed_question.get('Query')\n+ query = \"%s %s %s\" % (utt_word, utt_verb, utt_query)\n+ phrase = \"know %s %s %s\" % (utt_word, utt_query, utt_verb)\n+ else: # TODO: Localization\n+ phrase = \"understand the phrase \" + utterance\n \n try:\n res = self.client.query(query)\n@@ -163,7 +164,7 @@\n return\n except Exception as e:\n logger.exception(e)\n- self.speak_dialog(\"not.understood\")\n+ self.speak_dialog(\"not.understood\", data={'phrase': phrase})\n return\n \n if result:\n@@ -194,7 +195,7 @@\n metadata={'utterance':\n others[0]}))\n else:\n- self.speak_dialog(\"not.understood\")\n+ self.speak_dialog(\"not.understood\", data={'phrase': phrase})\n \n @staticmethod\n def __find_pod_id(pods, pod_id):\n", "issue": "Mycroft should only repeat the query in wolfram when short to speak\nWhen the estimated time to speak \"I am searching for blah blah blah blah\" will take longer than the estimated time to get the result from wolfram, only the result should be said rather than the redundant \"I am searching for ...\".\n\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\n\n\nfrom StringIO import StringIO\nfrom os.path import dirname\n\nimport re\nimport requests\nimport wolframalpha\nfrom six.moves import urllib\n\nfrom mycroft.identity import IdentityManager\nfrom mycroft.skills.core import MycroftSkill\nfrom mycroft.util import CerberusAccessDenied\nfrom mycroft.util.log import getLogger\nfrom mycroft.messagebus.message import Message\n\n__author__ = 'seanfitz'\n\nlogger = getLogger(__name__)\n\n\nclass EnglishQuestionParser(object):\n \"\"\"\n Poor-man's english question parser. 
Not even close to conclusive, but\n appears to construct some decent w|a queries and responses.\n \"\"\"\n\n def __init__(self):\n self.regexes = [\n re.compile(\n \".*(?P<QuestionWord>who|what|when|where|why|which) \"\n \"(?P<Query1>.*) (?P<QuestionVerb>is|are|was|were) \"\n \"(?P<Query2>.*)\"),\n re.compile(\n \".*(?P<QuestionWord>who|what|when|where|why|which) \"\n \"(?P<QuestionVerb>\\w+) (?P<Query>.*)\")\n ]\n\n def _normalize(self, groupdict):\n if 'Query' in groupdict:\n return groupdict\n elif 'Query1' and 'Query2' in groupdict:\n return {\n 'QuestionWord': groupdict.get('QuestionWord'),\n 'QuestionVerb': groupdict.get('QuestionVerb'),\n 'Query': ' '.join([groupdict.get('Query1'), groupdict.get(\n 'Query2')])\n }\n\n def parse(self, utterance):\n for regex in self.regexes:\n match = regex.match(utterance)\n if match:\n return self._normalize(match.groupdict())\n return None\n\n\nclass CerberusWolframAlphaClient(object):\n \"\"\"\n Wolfram|Alpha v2.0 client\n \"\"\"\n\n def query(self, query):\n \"\"\"\n Query Wolfram|Alpha with query using the v2.0 API\n \"\"\"\n identity = IdentityManager().get()\n bearer_token = 'Bearer %s:%s' % (identity.device_id, identity.token)\n query = urllib.parse.urlencode(dict(input=query))\n url = 'https://cerberus.mycroft.ai/wolframalpha/v2/query?' + query\n headers = {'Authorization': bearer_token}\n response = requests.get(url, headers=headers)\n if response.status_code == 401:\n raise CerberusAccessDenied()\n logger.debug(response.content)\n return wolframalpha.Result(StringIO(response.content))\n\n\nclass WolframAlphaSkill(MycroftSkill):\n def __init__(self):\n MycroftSkill.__init__(self, name=\"WolframAlphaSkill\")\n self.__init_client()\n self.question_parser = EnglishQuestionParser()\n\n def __init_client(self):\n key = self.config.get('api_key')\n if key:\n self.client = wolframalpha.Client(key)\n else:\n self.client = CerberusWolframAlphaClient()\n\n def initialize(self):\n self.init_dialog(dirname(__file__))\n self.emitter.on('intent_failure', self.handle_fallback)\n\n def get_result(self, res):\n result = None\n try:\n result = next(res.results).text\n return result\n except:\n try:\n result = self.__find_pod_id(res.pods, 'Value')\n if not result:\n result = self.__find_pod_id(\n res.pods, 'NotableFacts:PeopleData')\n if not result:\n result = self.__find_pod_id(\n res.pods, 'BasicInformation:PeopleData')\n if not result:\n result = self.__find_pod_id(res.pods, 'Definition')\n if not result:\n result = self.__find_pod_id(\n res.pods, 'DecimalApproximation')\n if result:\n result = result[:5]\n else:\n result = self.__find_num(\n res.pods, '200')\n return result\n except:\n return result\n\n def handle_fallback(self, message):\n logger.debug(\n \"Could not determine intent, falling back to WolframAlpha Skill!\")\n utterance = message.metadata.get('utterance')\n parsed_question = self.question_parser.parse(utterance)\n\n # biding some time\n if parsed_question:\n self.speak(\"I am searching for \" + parsed_question.get('Query'))\n else:\n self.speak(\"I am searching for \" + utterance)\n query = utterance\n if parsed_question:\n query = \"%s %s %s\" % (parsed_question.get('QuestionWord'),\n parsed_question.get('QuestionVerb'),\n parsed_question.get('Query'))\n\n try:\n res = self.client.query(query)\n result = self.get_result(res)\n others = self._find_did_you_mean(res)\n except CerberusAccessDenied as e:\n self.speak_dialog('not.paired')\n return\n except Exception as e:\n logger.exception(e)\n self.speak_dialog(\"not.understood\")\n 
return\n\n if result:\n input_interpretation = self.__find_pod_id(res.pods, 'Input')\n verb = \"is\"\n structured_syntax_regex = re.compile(\".*(\\||\\[|\\\\\\\\|\\]).*\")\n if parsed_question:\n if not input_interpretation or structured_syntax_regex.match(\n input_interpretation):\n input_interpretation = parsed_question.get('Query')\n verb = parsed_question.get('QuestionVerb')\n\n if \"|\" in result: # Assuming \"|\" indicates a list of items\n verb = \":\"\n\n result = self.process_wolfram_string(result)\n input_interpretation = \\\n self.process_wolfram_string(input_interpretation)\n response = \"%s %s %s\" % (input_interpretation, verb, result)\n\n self.speak(response)\n else:\n if len(others) > 0:\n self.speak_dialog('search.again',\n data={'utterance': utterance, 'alternative':\n others[0]})\n self.handle_fallback(Message('intent_failure',\n metadata={'utterance':\n others[0]}))\n else:\n self.speak_dialog(\"not.understood\")\n\n @staticmethod\n def __find_pod_id(pods, pod_id):\n for pod in pods:\n if pod_id in pod.id:\n return pod.text\n return None\n\n @staticmethod\n def __find_num(pods, pod_num):\n for pod in pods:\n if pod.node.attrib['position'] == pod_num:\n return pod.text\n return None\n\n @staticmethod\n def _find_did_you_mean(res):\n value = []\n root = res.tree.find('didyoumeans')\n if root is not None:\n for result in root:\n value.append(result.text)\n return value\n\n @staticmethod\n def process_wolfram_string(text):\n # Remove extra whitespace\n text = re.sub(r\" \\s+\", r\" \", text)\n\n # Convert | symbols to commas\n text = re.sub(r\" \\| \", r\", \", text)\n\n # Convert newlines to commas\n text = re.sub(r\"\\n\", r\", \", text)\n\n # Convert !s to factorial\n text = re.sub(r\"!\", r\",factorial\", text)\n return text\n\n def stop(self):\n pass\n\n\ndef create_skill():\n return WolframAlphaSkill()\n", "path": "mycroft/skills/wolfram_alpha/__init__.py"}], "after_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\n\n\nfrom StringIO import StringIO\nfrom os.path import dirname\n\nimport re\nimport requests\nimport wolframalpha\nfrom six.moves import urllib\n\nfrom mycroft.identity import IdentityManager\nfrom mycroft.skills.core import MycroftSkill\nfrom mycroft.util import CerberusAccessDenied\nfrom mycroft.util.log import getLogger\nfrom mycroft.messagebus.message import Message\n\n__author__ = 'seanfitz'\n\nlogger = getLogger(__name__)\n\n\nclass EnglishQuestionParser(object):\n \"\"\"\n Poor-man's english question parser. 
Not even close to conclusive, but\n appears to construct some decent w|a queries and responses.\n \"\"\"\n\n def __init__(self):\n self.regexes = [\n re.compile(\n \".*(?P<QuestionWord>who|what|when|where|why|which) \"\n \"(?P<Query1>.*) (?P<QuestionVerb>is|are|was|were) \"\n \"(?P<Query2>.*)\"),\n re.compile(\n \".*(?P<QuestionWord>who|what|when|where|why|which) \"\n \"(?P<QuestionVerb>\\w+) (?P<Query>.*)\")\n ]\n\n def _normalize(self, groupdict):\n if 'Query' in groupdict:\n return groupdict\n elif 'Query1' and 'Query2' in groupdict:\n return {\n 'QuestionWord': groupdict.get('QuestionWord'),\n 'QuestionVerb': groupdict.get('QuestionVerb'),\n 'Query': ' '.join([groupdict.get('Query1'), groupdict.get(\n 'Query2')])\n }\n\n def parse(self, utterance):\n for regex in self.regexes:\n match = regex.match(utterance)\n if match:\n return self._normalize(match.groupdict())\n return None\n\n\nclass CerberusWolframAlphaClient(object):\n \"\"\"\n Wolfram|Alpha v2.0 client\n \"\"\"\n\n def query(self, query):\n \"\"\"\n Query Wolfram|Alpha with query using the v2.0 API\n \"\"\"\n identity = IdentityManager().get()\n bearer_token = 'Bearer %s:%s' % (identity.device_id, identity.token)\n query = urllib.parse.urlencode(dict(input=query))\n url = 'https://cerberus.mycroft.ai/wolframalpha/v2/query?' + query\n headers = {'Authorization': bearer_token}\n response = requests.get(url, headers=headers)\n if response.status_code == 401:\n raise CerberusAccessDenied()\n logger.debug(response.content)\n return wolframalpha.Result(StringIO(response.content))\n\n\nclass WolframAlphaSkill(MycroftSkill):\n def __init__(self):\n MycroftSkill.__init__(self, name=\"WolframAlphaSkill\")\n self.__init_client()\n self.question_parser = EnglishQuestionParser()\n\n def __init_client(self):\n key = self.config.get('api_key')\n if key:\n self.client = wolframalpha.Client(key)\n else:\n self.client = CerberusWolframAlphaClient()\n\n def initialize(self):\n self.init_dialog(dirname(__file__))\n self.emitter.on('intent_failure', self.handle_fallback)\n\n def get_result(self, res):\n result = None\n try:\n result = next(res.results).text\n return result\n except:\n try:\n result = self.__find_pod_id(res.pods, 'Value')\n if not result:\n result = self.__find_pod_id(\n res.pods, 'NotableFacts:PeopleData')\n if not result:\n result = self.__find_pod_id(\n res.pods, 'BasicInformation:PeopleData')\n if not result:\n result = self.__find_pod_id(res.pods, 'Definition')\n if not result:\n result = self.__find_pod_id(\n res.pods, 'DecimalApproximation')\n if result:\n result = result[:5]\n else:\n result = self.__find_num(\n res.pods, '200')\n return result\n except:\n return result\n\n def handle_fallback(self, message):\n self.enclosure.mouth_think()\n logger.debug(\n \"Could not determine intent, falling back to WolframAlpha Skill!\")\n utterance = message.metadata.get('utterance')\n parsed_question = self.question_parser.parse(utterance)\n\n query = utterance\n if parsed_question:\n # Try to store pieces of utterance (None if not parsed_question)\n utt_word = parsed_question.get('QuestionWord')\n utt_verb = parsed_question.get('QuestionVerb')\n utt_query = parsed_question.get('Query')\n query = \"%s %s %s\" % (utt_word, utt_verb, utt_query)\n phrase = \"know %s %s %s\" % (utt_word, utt_query, utt_verb)\n else: # TODO: Localization\n phrase = \"understand the phrase \" + utterance\n\n try:\n res = self.client.query(query)\n result = self.get_result(res)\n others = self._find_did_you_mean(res)\n except CerberusAccessDenied as e:\n 
self.speak_dialog('not.paired')\n return\n except Exception as e:\n logger.exception(e)\n self.speak_dialog(\"not.understood\", data={'phrase': phrase})\n return\n\n if result:\n input_interpretation = self.__find_pod_id(res.pods, 'Input')\n verb = \"is\"\n structured_syntax_regex = re.compile(\".*(\\||\\[|\\\\\\\\|\\]).*\")\n if parsed_question:\n if not input_interpretation or structured_syntax_regex.match(\n input_interpretation):\n input_interpretation = parsed_question.get('Query')\n verb = parsed_question.get('QuestionVerb')\n\n if \"|\" in result: # Assuming \"|\" indicates a list of items\n verb = \":\"\n\n result = self.process_wolfram_string(result)\n input_interpretation = \\\n self.process_wolfram_string(input_interpretation)\n response = \"%s %s %s\" % (input_interpretation, verb, result)\n\n self.speak(response)\n else:\n if len(others) > 0:\n self.speak_dialog('search.again',\n data={'utterance': utterance, 'alternative':\n others[0]})\n self.handle_fallback(Message('intent_failure',\n metadata={'utterance':\n others[0]}))\n else:\n self.speak_dialog(\"not.understood\", data={'phrase': phrase})\n\n @staticmethod\n def __find_pod_id(pods, pod_id):\n for pod in pods:\n if pod_id in pod.id:\n return pod.text\n return None\n\n @staticmethod\n def __find_num(pods, pod_num):\n for pod in pods:\n if pod.node.attrib['position'] == pod_num:\n return pod.text\n return None\n\n @staticmethod\n def _find_did_you_mean(res):\n value = []\n root = res.tree.find('didyoumeans')\n if root is not None:\n for result in root:\n value.append(result.text)\n return value\n\n @staticmethod\n def process_wolfram_string(text):\n # Remove extra whitespace\n text = re.sub(r\" \\s+\", r\" \", text)\n\n # Convert | symbols to commas\n text = re.sub(r\" \\| \", r\", \", text)\n\n # Convert newlines to commas\n text = re.sub(r\"\\n\", r\", \", text)\n\n # Convert !s to factorial\n text = re.sub(r\"!\", r\",factorial\", text)\n return text\n\n def stop(self):\n pass\n\n\ndef create_skill():\n return WolframAlphaSkill()\n", "path": "mycroft/skills/wolfram_alpha/__init__.py"}]} | 2,747 | 525 |
gh_patches_debug_29109 | rasdani/github-patches | git_diff | saleor__saleor-8874 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
management/commands errors
There are some queries that reference deleted fields in this module (examples below). I was wondering does it matter if this module is updated because it seems like this file hasn't been updated in a while, or are there other reasons that these queries still exist? Thanks.
https://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L45
https://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L51
https://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L52
https://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L53
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/core/management/commands/change_currency.py`
Content:
```
1 from babel.numbers import UnknownCurrencyError, validate_currency
2 from django.core.management.base import BaseCommand, CommandError
3
4 from ....checkout.models import Checkout
5 from ....discount.models import Voucher
6 from ....giftcard.models import GiftCard
7 from ....order.models import Order, OrderLine
8 from ....payment.models import Payment, Transaction
9 from ....product.models import Product, ProductVariant
10 from ....shipping.models import ShippingMethod
11
12
13 class Command(BaseCommand):
14 help = (
15 "Change currency in all models in the database. "
16 "Note, that this command only changes currency code "
17 "without doing any conversion. "
18 "Currency set by this command must match "
19 "with the value set in DEFAULT_CURRENCY environment variable."
20 )
21
22 def add_arguments(self, parser):
23 parser.add_argument("currency", type=str)
24
25 parser.add_argument(
26 "--force",
27 action="store_true",
28 help="Allows running command without validation.",
29 )
30
31 def handle(self, **options):
32 force = options.get("force", False)
33 currency = options["currency"]
34
35 if not force:
36 try:
37 validate_currency(currency)
38 except UnknownCurrencyError:
39 raise CommandError(
40 "Unknown currency. "
41 "Use `--force` flag to force migration currencies."
42 )
43
44 Checkout.objects.update(currency=currency)
45 Voucher.objects.update(currency=currency)
46 GiftCard.objects.update(currency=currency)
47 Order.objects.update(currency=currency)
48 OrderLine.objects.update(currency=currency)
49 Payment.objects.update(currency=currency)
50 Transaction.objects.update(currency=currency)
51 Product.objects.update(currency=currency)
52 ProductVariant.objects.update(currency=currency)
53 ShippingMethod.objects.update(currency=currency)
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/core/management/commands/change_currency.py b/saleor/core/management/commands/change_currency.py
deleted file mode 100644
--- a/saleor/core/management/commands/change_currency.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from babel.numbers import UnknownCurrencyError, validate_currency
-from django.core.management.base import BaseCommand, CommandError
-
-from ....checkout.models import Checkout
-from ....discount.models import Voucher
-from ....giftcard.models import GiftCard
-from ....order.models import Order, OrderLine
-from ....payment.models import Payment, Transaction
-from ....product.models import Product, ProductVariant
-from ....shipping.models import ShippingMethod
-
-
-class Command(BaseCommand):
- help = (
- "Change currency in all models in the database. "
- "Note, that this command only changes currency code "
- "without doing any conversion. "
- "Currency set by this command must match "
- "with the value set in DEFAULT_CURRENCY environment variable."
- )
-
- def add_arguments(self, parser):
- parser.add_argument("currency", type=str)
-
- parser.add_argument(
- "--force",
- action="store_true",
- help="Allows running command without validation.",
- )
-
- def handle(self, **options):
- force = options.get("force", False)
- currency = options["currency"]
-
- if not force:
- try:
- validate_currency(currency)
- except UnknownCurrencyError:
- raise CommandError(
- "Unknown currency. "
- "Use `--force` flag to force migration currencies."
- )
-
- Checkout.objects.update(currency=currency)
- Voucher.objects.update(currency=currency)
- GiftCard.objects.update(currency=currency)
- Order.objects.update(currency=currency)
- OrderLine.objects.update(currency=currency)
- Payment.objects.update(currency=currency)
- Transaction.objects.update(currency=currency)
- Product.objects.update(currency=currency)
- ProductVariant.objects.update(currency=currency)
- ShippingMethod.objects.update(currency=currency)
| {"golden_diff": "diff --git a/saleor/core/management/commands/change_currency.py b/saleor/core/management/commands/change_currency.py\ndeleted file mode 100644\n--- a/saleor/core/management/commands/change_currency.py\n+++ /dev/null\n@@ -1,53 +0,0 @@\n-from babel.numbers import UnknownCurrencyError, validate_currency\n-from django.core.management.base import BaseCommand, CommandError\n-\n-from ....checkout.models import Checkout\n-from ....discount.models import Voucher\n-from ....giftcard.models import GiftCard\n-from ....order.models import Order, OrderLine\n-from ....payment.models import Payment, Transaction\n-from ....product.models import Product, ProductVariant\n-from ....shipping.models import ShippingMethod\n-\n-\n-class Command(BaseCommand):\n- help = (\n- \"Change currency in all models in the database. \"\n- \"Note, that this command only changes currency code \"\n- \"without doing any conversion. \"\n- \"Currency set by this command must match \"\n- \"with the value set in DEFAULT_CURRENCY environment variable.\"\n- )\n-\n- def add_arguments(self, parser):\n- parser.add_argument(\"currency\", type=str)\n-\n- parser.add_argument(\n- \"--force\",\n- action=\"store_true\",\n- help=\"Allows running command without validation.\",\n- )\n-\n- def handle(self, **options):\n- force = options.get(\"force\", False)\n- currency = options[\"currency\"]\n-\n- if not force:\n- try:\n- validate_currency(currency)\n- except UnknownCurrencyError:\n- raise CommandError(\n- \"Unknown currency. \"\n- \"Use `--force` flag to force migration currencies.\"\n- )\n-\n- Checkout.objects.update(currency=currency)\n- Voucher.objects.update(currency=currency)\n- GiftCard.objects.update(currency=currency)\n- Order.objects.update(currency=currency)\n- OrderLine.objects.update(currency=currency)\n- Payment.objects.update(currency=currency)\n- Transaction.objects.update(currency=currency)\n- Product.objects.update(currency=currency)\n- ProductVariant.objects.update(currency=currency)\n- ShippingMethod.objects.update(currency=currency)\n", "issue": "management/commands errors\nThere are some queries that reference deleted fields in this module (examples below). I was wondering does it matter if this module is updated because it seems like this file hasn't been updated in a while, or are there other reasons that these queries still exist? Thanks.\r\n\r\nhttps://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L45\r\nhttps://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L51\r\nhttps://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L52\r\nhttps://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L53\n", "before_files": [{"content": "from babel.numbers import UnknownCurrencyError, validate_currency\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom ....checkout.models import Checkout\nfrom ....discount.models import Voucher\nfrom ....giftcard.models import GiftCard\nfrom ....order.models import Order, OrderLine\nfrom ....payment.models import Payment, Transaction\nfrom ....product.models import Product, ProductVariant\nfrom ....shipping.models import ShippingMethod\n\n\nclass Command(BaseCommand):\n help = (\n \"Change currency in all models in the database. \"\n \"Note, that this command only changes currency code \"\n \"without doing any conversion. 
\"\n \"Currency set by this command must match \"\n \"with the value set in DEFAULT_CURRENCY environment variable.\"\n )\n\n def add_arguments(self, parser):\n parser.add_argument(\"currency\", type=str)\n\n parser.add_argument(\n \"--force\",\n action=\"store_true\",\n help=\"Allows running command without validation.\",\n )\n\n def handle(self, **options):\n force = options.get(\"force\", False)\n currency = options[\"currency\"]\n\n if not force:\n try:\n validate_currency(currency)\n except UnknownCurrencyError:\n raise CommandError(\n \"Unknown currency. \"\n \"Use `--force` flag to force migration currencies.\"\n )\n\n Checkout.objects.update(currency=currency)\n Voucher.objects.update(currency=currency)\n GiftCard.objects.update(currency=currency)\n Order.objects.update(currency=currency)\n OrderLine.objects.update(currency=currency)\n Payment.objects.update(currency=currency)\n Transaction.objects.update(currency=currency)\n Product.objects.update(currency=currency)\n ProductVariant.objects.update(currency=currency)\n ShippingMethod.objects.update(currency=currency)\n", "path": "saleor/core/management/commands/change_currency.py"}], "after_files": [{"content": null, "path": "saleor/core/management/commands/change_currency.py"}]} | 895 | 462 |
gh_patches_debug_11801 | rasdani/github-patches | git_diff | getmoto__moto-399 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 2.6 issues with wheels and dependencies
My Travis tests failed on Python 2.6: piskvorky/smart_open#15 .
After some digging around it appears this is because of `moto`. Moto apparently depends on some `ordereddict` package, but that package is not installed (nor mentioned anywhere in the docs, AFAICS).
Do you think you could make `ordereddict` a dependency for moto, when installing on Python 2.6?
In other words, after I successfully run `pip install moto`, I'd expect moto to work, even on Python 2.6.
And thanks for the great package!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import unicode_literals
3 from setuptools import setup, find_packages
4
5 install_requires = [
6 "Jinja2",
7 "boto>=2.20.0",
8 "flask",
9 "httpretty>=0.6.1",
10 "requests",
11 "xmltodict",
12 "six",
13 "werkzeug",
14 ]
15
16 import sys
17
18 if sys.version_info < (2, 7):
19 # No buildint OrderedDict before 2.7
20 install_requires.append('ordereddict')
21
22 setup(
23 name='moto',
24 version='0.4.10',
25 description='A library that allows your python tests to easily'
26 ' mock out the boto library',
27 author='Steve Pulec',
28 author_email='spulec@gmail',
29 url='https://github.com/spulec/moto',
30 entry_points={
31 'console_scripts': [
32 'moto_server = moto.server:main',
33 ],
34 },
35 packages=find_packages(exclude=("tests", "tests.*")),
36 install_requires=install_requires,
37 license="Apache",
38 test_suite="tests",
39 classifiers=[
40 "Programming Language :: Python :: 2",
41 "Programming Language :: Python :: 2.6",
42 "Programming Language :: Python :: 2.7",
43 "Programming Language :: Python :: 3",
44 "Programming Language :: Python :: 3.3",
45 "License :: OSI Approved :: Apache Software License",
46 "Topic :: Software Development :: Testing",
47 ],
48 )
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -13,11 +13,10 @@
"werkzeug",
]
-import sys
-
-if sys.version_info < (2, 7):
- # No buildint OrderedDict before 2.7
- install_requires.append('ordereddict')
+extras_require = {
+ # No builtin OrderedDict before 2.7
+ ':python_version=="2.6"': ['ordereddict'],
+}
setup(
name='moto',
@@ -34,6 +33,7 @@
},
packages=find_packages(exclude=("tests", "tests.*")),
install_requires=install_requires,
+ extras_require=extras_require,
license="Apache",
test_suite="tests",
classifiers=[
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -13,11 +13,10 @@\n \"werkzeug\",\n ]\n \n-import sys\n-\n-if sys.version_info < (2, 7):\n- # No buildint OrderedDict before 2.7\n- install_requires.append('ordereddict')\n+extras_require = {\n+ # No builtin OrderedDict before 2.7\n+ ':python_version==\"2.6\"': ['ordereddict'],\n+}\n \n setup(\n name='moto',\n@@ -34,6 +33,7 @@\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n+ extras_require=extras_require,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n", "issue": "Python 2.6 issues with wheels and dependencies\nMy Travis tests failed on Python 2.6: piskvorky/smart_open#15 .\n\nAfter some digging around it appears this is because of `moto`. Moto apparently depends on some `ordereddict` package, but that package is not installed (nor mentioned anywhere in the docs, AFAICS).\n\nDo you think you could make `ordereddict` a dependency for moto, when installing on Python 2.6?\n\nIn other words, after I successfully run `pip install moto`, I'd expect moto to work, even on Python 2.6.\n\nAnd thanks for the great package!\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nfrom setuptools import setup, find_packages\n\ninstall_requires = [\n \"Jinja2\",\n \"boto>=2.20.0\",\n \"flask\",\n \"httpretty>=0.6.1\",\n \"requests\",\n \"xmltodict\",\n \"six\",\n \"werkzeug\",\n]\n\nimport sys\n\nif sys.version_info < (2, 7):\n # No buildint OrderedDict before 2.7\n install_requires.append('ordereddict')\n\nsetup(\n name='moto',\n version='0.4.10',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='spulec@gmail',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nfrom setuptools import setup, find_packages\n\ninstall_requires = [\n \"Jinja2\",\n \"boto>=2.20.0\",\n \"flask\",\n \"httpretty>=0.6.1\",\n \"requests\",\n \"xmltodict\",\n \"six\",\n \"werkzeug\",\n]\n\nextras_require = {\n # No builtin OrderedDict before 2.7\n ':python_version==\"2.6\"': ['ordereddict'],\n}\n\nsetup(\n name='moto',\n version='0.4.10',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='spulec@gmail',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 
3\",\n \"Programming Language :: Python :: 3.3\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py"}]} | 807 | 177 |